diff --git a/.circleci/config.yml b/.circleci/config.yml index c459d04c0a..0ec3856e60 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,844 +1,140 @@ -version: 2.1 +version: 2.1 # use CircleCI 2.0 -orbs: - codecov: codecov/codecov@1.0.2 - -params: - sector_builder_tests_param: §or_builder_tests_param - sector_builder_tests: - description: "Run the sector builder integration tests" - type: boolean - default: false - nightly_param: &nightly_param - nightly: - description: "job is being invoked from nightly workflow" - type: boolean - default: false - user_devnet_param: &user_devnet_param - user_devnet: - description: "job is being invoked from user devnet workflow" - type: boolean - default: false - staging_devnet_param: &staging_devnet_param - staging_devnet: - description: "job is being invoked from staging devnet workflow" - type: boolean - default: false - -jobs: - build_macos: - macos: - xcode: "10.0.0" - working_directory: ~/go/src/github.com/filecoin-project/go-filecoin - resource_class: large - steps: - - run: - name: Configure environment variables - command: | - echo 'export PATH="/usr/local/go/bin:${PATH}:${HOME}/go/bin:${HOME}/.bin"' >> $BASH_ENV - echo 'export FIL_PROOFS_PARAMETER_CACHE="${HOME}/filecoin-proof-parameters/"' >> $BASH_ENV - echo 'source $BASH_ENV' >> $HOME/.bashrc - - add_ssh_keys: - fingerprints: - - "1e:73:c5:15:75:e0:e4:98:54:3c:2b:9e:e8:94:14:2e" - - checkout - - update_submodules - - generate_rust_submodules_checksums - - restore_cache: - key: v9-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/go-filecoin/build/main.go" }}-{{ checksum "~/go/src/github.com/filecoin-project/go-filecoin/go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - # The second checkout ensures we have the most recent code since the - # restore_cache step above can override the go-filecoin code with cached code - - git_fetch_all_tags - - checkout - - restore_cache: - key: v26.0-proof-params-{{ arch }}-{{ checksum 
"/tmp/filecoin-ffi-checksum.txt" }} - - run: - name: Install go - command: | - curl -O https://dl.google.com/go/go1.13.4.darwin-amd64.pkg && \ - sudo installer -pkg go1.13.4.darwin-amd64.pkg -target / - - run: - name: Install pkg-config - command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config - - run: go version - - run: - name: Install Rust - command: | - curl https://sh.rustup.rs -sSf | sh -s -- -y - - run: - name: Install jq - command: | - mkdir $HOME/.bin - curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output $HOME/.bin/jq - chmod +x $HOME/.bin/jq - - run: - name: Load submodules - command: git submodule update --init --recursive - - go_build: - cmd: "deps" - no_output_timeout: "60m" - # groth parameters are generated by the paramcache binary, run as part - # of both deps and smartdeps commands - - save_cache: - key: v26.0-proof-params-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - paths: - - "~/filecoin-proof-parameters/" - - save_cache: - key: v9-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/go-filecoin/build/main.go" }}-{{ checksum "~/go/src/github.com/filecoin-project/go-filecoin/go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - paths: - - "~/go/pkg" - - "~/go/src/gx" - - "~/go/src/github.com" - - "~/go/src/golang.org" - - "~/.rustup" - - "~/.cargo" - - go_build: - cmd: "build" - no_output_timeout: "60m" - # place the filecoin binary in PATH for IPTB - - go_build: - cmd: "install" - # rebuild as the previous "install" step will move the binary and some tests - # still expect that it be in the repo dir - - go_build: - cmd: "build" - - mkdir_test_results - - go_test - - run: - name: Create macos bundle - command: ./scripts/build-bundle.sh - - store_artifacts: - path: "~/go/src/github.com/filecoin-project/go-filecoin/bundle/" - destination: bundle - - store_test_results: - path: test-results - - persist_to_workspace: - root: "." 
- paths: - - "bundle/" - - deps_linux: - docker: - - image: circleci/golang:1.13.1-stretch - working_directory: /go/src/github.com/filecoin-project/go-filecoin - resource_class: xlarge +commands: + setup_environment: steps: - - linux_configure - - add_ssh_keys: - fingerprints: - - "1e:73:c5:15:75:e0:e4:98:54:3c:2b:9e:e8:94:14:2e" - checkout - - run: - name: Check if any bash script sets xtrace - command: | - grep -r '^set\ \-\w*[x]\w*' . && exit 1 || exit 0 - - update_submodules - # Save the Git SHA of the rust-fil-proofs submodule so that we can use it when creating a cache key - - generate_rust_submodules_checksums - - restore_cache: - keys: - - v9-go-deps-{{ .Branch }}-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - v9-go-deps-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - # The second checkout ensures we have the most recent code since the - # restore_cache step above can override the go-filecoin code with cached code - - git_fetch_all_tags - - checkout - - restore_cache: - key: v26.0-proof-params-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - run: - name: Install Rust toolchain (for rust-fil-proofs) - command: | - (sudo apt-get update && sudo apt-get install -y clang libssl-dev && which cargo && which rustc) || (curl https://sh.rustup.rs -sSf | sh -s -- -y) - - go_build: - cmd: "deps" - no_output_timeout: "60m" - # groth parameters are generated by the paramcache binary, run as part - # of both deps and smartdeps commands - - save_cache: - key: v26.0-proof-params-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - paths: - - "/home/circleci/filecoin-proof-parameters/" - - save_cache: - key: v9-go-deps-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - paths: - - "/go/pkg" - - "/go/src/gx" - - "/go/src/github.com" - - 
"/go/src/golang.org" - - "/home/circleci/.rustup" - - "/home/circleci/.cargo" - - save_cache: - key: v9-go-deps-{{ .Branch }}-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - paths: - - "/go/pkg" - - "/go/src/gx" - - "/go/src/github.com" - - "/go/src/golang.org" - - "/home/circleci/.rustup" - - "/home/circleci/.cargo" - - build_linux: - docker: - - image: circleci/golang:1.13.1-stretch - working_directory: /go/src/github.com/filecoin-project/go-filecoin - resource_class: xlarge + - run: | + git submodule sync + git submodule update --init --recursive + go mod tidy + sudo apt-get update + sudo apt install python-is-python3 ocl-icd-opencl-dev libhwloc-dev + echo '-->>>' branch:$(git symbolic-ref --short HEAD), commit:$(git describe --always --match=NeVeRmAtCh --dirty) '\<<<--' + make deps + test: + description: | + Run tests with gotestsum. parameters: - <<: *nightly_param + target: + type: string + description: Import paths of packages to be tested. + coverage: + type: string + default: -coverprofile=coverage.txt -coverpkg=./... + description: Coverage flag. Set to the empty string to disable. + go-test-flags: + type: string + default: "-timeout 30m" + description: Flags passed to go test. + suite: + type: string + default: unit + description: Test suite name to report to CircleCI. + gotestsum-format: + type: string + default: standard-verbose + description: gotestsum format. https://github.com/gotestyourself/gotestsum#format + codecov-upload: + type: boolean + default: false + description: | + Upload coverage report to https://codecov.io/. Requires the codecov API token to be + set as an environment variable for private projects. 
+ display-name: + type: string + default: unit test steps: - - linux_configure - - add_ssh_keys: - fingerprints: - - "1e:73:c5:15:75:e0:e4:98:54:3c:2b:9e:e8:94:14:2e" - - checkout - - update_submodules - # Save the Git SHA of the rust-fil-proofs submodule so that we can use it when creating a cache key - - generate_rust_submodules_checksums - - restore_cache: - keys: - - v9-go-deps-{{ .Branch }}-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - v9-go-deps-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - # The second checkout ensures we have the most recent code since the - # restore_cache step above can override the go-filecoin code with cached code - - git_fetch_all_tags - - checkout - - restore_cache: - key: v26.0-proof-params-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - go_build: - cmd: "lint" - - go_build: - cmd: "build" - # place the filecoin binary in PATH for IPTB - - go_build: - cmd: "install" - # rebuild as the previous "install" step will move the binary and some tests - # still expect that it be in the repo dir - - go_build: - cmd: "build" - run: - name: Create linux bundle - command: ./scripts/build-bundle.sh + name: << parameters.display-name >> + command: | + mkdir -p /tmp/test-reports/<< parameters.suite >> + mkdir -p /tmp/test-artifacts + gotestsum \ + --format << parameters.gotestsum-format >> \ + --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ + --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \ + -- \ + << parameters.coverage >> \ + << parameters.go-test-flags >> \ + << parameters.target >> + no_output_timeout: 30m - store_artifacts: - path: "/go/src/github.com/filecoin-project/go-filecoin/bundle/" - destination: bundle + path: /tmp/test-artifacts/<< parameters.suite >>.json - when: - condition: << parameters.nightly >> + condition: << parameters.codecov-upload >> 
steps: - - create_nightly_version - - persist_to_workspace: - root: "." - paths: - - "bundle/" - - "tools/gengen/gengen" - - "fixtures" - - "go-filecoin" - - "tools" - - unit_test_linux: - docker: - - image: circleci/golang:1.13.1-stretch - parallelism: 2 # Check .codecov.yml "after_n_builds" if changing this - working_directory: /go/src/github.com/filecoin-project/go-filecoin - resource_class: xlarge - steps: - - linux_configure - - add_ssh_keys: - fingerprints: - - "1e:73:c5:15:75:e0:e4:98:54:3c:2b:9e:e8:94:14:2e" - - checkout - - update_submodules - # Save the Git SHA of the rust-fil-proofs submodule so that we can use it when creating a cache key - - generate_rust_submodules_checksums - - restore_cache: - keys: - - v9-go-deps-{{ .Branch }}-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - v9-go-deps-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - # The second checkout ensures we have the most recent code since the - # restore_cache step above can override the go-filecoin code with cached code - - git_fetch_all_tags - - checkout - - restore_cache: - key: v26.0-proof-params-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - mkdir_test_results - - go_test: - functional: false - integration: false - - store_test_results: - path: test-results + - run: + shell: /bin/bash -eo pipefail + command: | + bash <(curl -s https://codecov.io/bash) - integration_test_linux: +executors: + golang: docker: - - image: circleci/golang:1.13.1-stretch - parallelism: 2 # Check .codecov.yml "after_n_builds" if changing this - working_directory: /go/src/github.com/filecoin-project/go-filecoin - resource_class: xlarge - steps: - - linux_configure - - add_ssh_keys: - fingerprints: - - "1e:73:c5:15:75:e0:e4:98:54:3c:2b:9e:e8:94:14:2e" - - checkout - - update_submodules - # Save the Git SHA of the rust-fil-proofs submodule so that we can use it 
when creating a cache key - - generate_rust_submodules_checksums - - restore_cache: - keys: - - v9-go-deps-{{ .Branch }}-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - v9-go-deps-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - # The second checkout ensures we have the most recent code since the - # restore_cache step above can override the go-filecoin code with cached code - - git_fetch_all_tags - - checkout - - restore_cache: - key: v26.0-proof-params-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - attach_workspace: - at: "." - - mkdir_test_results - - go_test: - unit: false - integration: true - - store_test_results: - path: test-results + - image: cimg/go:1.18.1 - functional_test_linux: - docker: - - image: circleci/golang:1.13.1-stretch - parallelism: 2 - working_directory: /go/src/github.com/filecoin-project/go-filecoin - resource_class: xlarge - parameters: - <<: *sector_builder_tests_param - steps: - - linux_configure - - add_ssh_keys: - fingerprints: - - "1e:73:c5:15:75:e0:e4:98:54:3c:2b:9e:e8:94:14:2e" - - checkout - - update_submodules - # Save the Git SHA of the rust-fil-proofs submodule so that we can use it when creating a cache key - - generate_rust_submodules_checksums - - restore_cache: - keys: - - v9-go-deps-{{ .Branch }}-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - v9-go-deps-{{ arch }}-{{ checksum "build/main.go" }}-{{ checksum "go.mod" }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - # The second checkout ensures we have the most recent code since the - # restore_cache step above can override the go-filecoin code with cached code - - git_fetch_all_tags - - checkout - - restore_cache: - key: v26.0-proof-params-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - attach_workspace: - at: "." 
- - mkdir_test_results - - go_test: - sector_builder_tests: "<< parameters.sector_builder_tests >>" - unit: false - integration: false - functional: true +jobs: + test_all: + executor: golang + steps: + - setup_environment + - test: + display-name: unit_test_chain + suite: "unit_test_chain" + target: "./pkg/chain/..." + - test: + display-name: unit_test_beacon + suite: "unit_test_beacon" + target: "./pkg/beacon/..." + - test: + display-name: unit_test_chainsync + suite: "unit_test_chainsync" + target: "./pkg/chainsync/..." + - test: + display-name: unit_test_clock + suite: "unit_test_clock" + target: "./pkg/clock/..." + - test: + display-name: unit_test_consensus + suite: "unit_test_consensus" + target: "./pkg/consensus/..." + - test: + display-name: unit_test_crypto + suite: "unit_test_crypto" + target: "./pkg/crypto/..." + - test: + display-name: unit_test_market + suite: "unit_test_market" + target: "./pkg/market/..." + - test: + display-name: unit_test_messagepool + suite: "unit_test_messagepool" + target: "./pkg/messagepool/..." + - test: + display-name: unit_test_net + suite: "unit_test_net" + target: "./pkg/net/..." + - test: + display-name: unit_test_paychmgr + suite: "unit_test_paychmgr" + target: "./pkg/paychmgr/..." + - test: + display-name: unit_test_repo + suite: "unit_test_repo" + target: "./pkg/repo/..." + - test: + display-name: unit_test_state + suite: "unit_test_state" + target: "./pkg/state/..." + - test: + display-name: unit_test_wallet + suite: "unit_test_wallet" + target: "./pkg/wallet/..." - store_test_results: - path: test-results - - publish_release: - docker: - - image: circleci/golang:1.13.1-stretch - resource_class: small - parameters: - <<: *nightly_param - steps: - - add_ssh_keys: - fingerprints: - - "1e:73:c5:15:75:e0:e4:98:54:3c:2b:9e:e8:94:14:2e" - - setup_remote_docker: - docker_layer_caching: true - - checkout - - git_fetch_all_tags - - checkout - - attach_workspace: - at: "." 
- - when: - condition: << parameters.nightly >> - steps: - - get_nightly_version - - run: - name: Publish new release - command: | - ./scripts/publish-release.sh - - build_docker_img: - docker: - - image: circleci/golang:1.13.1-stretch - resource_class: xlarge - parameters: - <<: *nightly_param - <<: *user_devnet_param - <<: *staging_devnet_param - working_directory: "~/docker_build" - steps: - - add_ssh_keys: - fingerprints: - - "1e:73:c5:15:75:e0:e4:98:54:3c:2b:9e:e8:94:14:2e" - - setup_remote_docker: - docker_layer_caching: true - - run: - name: Install AWS CLI - command: | - sudo apt-get update - sudo apt-get install -y python-pip libyaml-dev python-dev jq - sudo pip install awscli - - run: - name: login to ECR - command: | - export AWS_ACCESS_KEY_ID=$AWS_ECR_ACCESS_KEY_ID - export AWS_SECRET_ACCESS_KEY=$AWS_ECR_SECRET_ACCESS_KEY - eval $(aws --region us-east-1 ecr --no-include-email get-login) - - checkout - - update_submodules - - generate_rust_submodules_checksums - - attach_workspace: - at: "." - - restore_cache: - key: v26.0-proof-params-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - run: - name: build a base image - command: | - docker build -t filecoin:all --target=base --file ./docker/Dockerfile.ci.base . - no_output_timeout: 20m - - when: - condition: << parameters.nightly >> - steps: - - get_nightly_version - - when: - condition: << parameters.user_devnet >> - steps: - - get_user_devnet_version - - when: - condition: << parameters.staging_devnet >> - steps: - - get_staging_devnet_version - - run: - name: build & push image - genesis file server - command: | - export SHORT_GIT_SHA=$(echo $CIRCLE_SHA1 | cut -c -6) - docker build -f ./docker/Dockerfile.ci.genesis --label "version=$SHORT_GIT_SHA" -t 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-genesis-file-server:$SHORT_GIT_SHA --cache-from filecoin:all . 
- docker push 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-genesis-file-server:$SHORT_GIT_SHA - docker tag 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-genesis-file-server:$SHORT_GIT_SHA 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-genesis-file-server:latest - docker push 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-genesis-file-server:latest - if [[ ! -z "$FILECOIN_BINARY_VERSION" ]]; then - docker tag 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-genesis-file-server:$SHORT_GIT_SHA 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-genesis-file-server:${FILECOIN_BINARY_VERSION} - docker push 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-genesis-file-server:${FILECOIN_BINARY_VERSION} - fi - - run: - name: build & push image - faucet - command: | - export SHORT_GIT_SHA=$(echo $CIRCLE_SHA1 | cut -c -6) - docker build -f ./docker/Dockerfile.ci.faucet --label "version=$SHORT_GIT_SHA" -t 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-faucet:$SHORT_GIT_SHA --cache-from filecoin:all . - docker push 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-faucet:$SHORT_GIT_SHA - docker tag 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-faucet:$SHORT_GIT_SHA 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-faucet:latest - docker push 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-faucet:latest - if [[ ! 
-z "$FILECOIN_BINARY_VERSION" ]]; then - docker tag 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-faucet:$SHORT_GIT_SHA 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-faucet:${FILECOIN_BINARY_VERSION} - docker push 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin-faucet:${FILECOIN_BINARY_VERSION} - fi - - restore_cache: - key: v26.0-proof-params-large-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - - run: - name: build & push image - filecoin - command: | - export SHORT_GIT_SHA=$(echo $CIRCLE_SHA1 | cut -c -6) - export ARTIFACT_TAG="${CIRCLE_TAG:-$SHORT_GIT_SHA}" - export FIL_PROOFS_PARAMETER_CACHE="./filecoin-proof-parameters" - tar -xf "bundle/filecoin-$ARTIFACT_TAG-Linux.tar.gz" - ./filecoin/paramcache - docker build -f ./docker/Dockerfile.ci.filecoin --label "version=$SHORT_GIT_SHA" -t 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin:$SHORT_GIT_SHA --cache-from filecoin:all . - docker push 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin:$SHORT_GIT_SHA - docker tag 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin:$SHORT_GIT_SHA 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin:latest - docker push 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin:latest - if [[ ! -z "$FILECOIN_BINARY_VERSION" ]]; then - docker tag 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin:$SHORT_GIT_SHA 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin:${FILECOIN_BINARY_VERSION} - docker push 657871693752.dkr.ecr.us-east-1.amazonaws.com/filecoin:${FILECOIN_BINARY_VERSION} - fi - - save_cache: - key: v26.0-proof-params-large-{{ arch }}-{{ checksum "/tmp/filecoin-ffi-checksum.txt" }} - paths: - - "./filecoin-proof-parameters/" - - trigger_nightly_devnet_deploy: - docker: - - image: circleci/golang:1.13.1-stretch - resource_class: small - steps: - - setup_remote_docker - - checkout - - attach_workspace: - at: "." 
- - get_nightly_version - # The -f flag is require to override the local tag (may exist from previous runs or during checkout) - # and the force flag on push is required as we are going to be overrride the tag which is not allowed by default - # We currently create annotated flags to keep track of the timestamp when the tag was created - - run: - name: create & push git tag - command: | - git config user.email dev-helper@filecoin.io - git config user.name filecoin-helper - git tag -f -a ${FILECOIN_BINARY_VERSION} -m "$(date -u '+%Y-%m-%dT%H:%M:%S%z')" - git push -f https://${GITHUB_TOKEN}@github.com/filecoin-project/go-filecoin.git ${FILECOIN_BINARY_VERSION} - - trigger_infra_build: - job: deploy_nightly_devnet - branch: filecoin-nightly - - update_badge: - filename: "nightly-devnet.json" - - run: - name: cleanup nightly releases - command: ./tools/prerelease-tool/prerelease-tool - - - trigger_devnet_deploy: - parameters: - network: - type: string - default: staging - <<: *user_devnet_param - <<: *staging_devnet_param - docker: - - image: circleci/golang:1.13.1-stretch - resource_class: small - steps: - - checkout - - when: - condition: << parameters.user_devnet >> - steps: - - get_user_devnet_version - - when: - condition: << parameters.staging_devnet>> - steps: - - get_staging_devnet_version - - trigger_infra_build: - job: deploy_<< parameters.network >>_devnet - branch: filecoin-<< parameters.network >>net - - update_badge: - filename: "<< parameters.network >>-devnet.json" - -filters: - master_filter: &master_filter - filters: - branches: - only: - - master - staging_devnet_filter: &staging_devnet_filter - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^staging\-devnet\-\d+\.\d+\.\d+$/ - user_devnet_filter: &user_devnet_filter - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^\d+\.\d+\.\d+$/ + path: /tmp/test-reports workflows: - version: 2 - test_all: + ci: jobs: - - deps_linux - - build_linux: - requires: - - deps_linux - - 
unit_test_linux: - requires: - - deps_linux - - integration_test_linux: - requires: - - build_linux - - functional_test_linux: - requires: - - build_linux - - build_nightly_osx: - triggers: - - schedule: - # every day at 6:00 UTC - cron: "0 6 * * *" - <<: *master_filter - jobs: - - build_macos - - run_sector_builder_tests: - triggers: - - schedule: - cron: "0 0,6,12,18 * * *" - <<: *master_filter - jobs: - - deps_linux - - build_linux: - requires: - - deps_linux - - functional_test_linux: - sector_builder_tests: true - requires: - - build_linux - - build_nightly_devnet: - triggers: - - schedule: - # every day at 6:00 UTC - cron: "0 6 * * *" - <<: *master_filter - jobs: - - deps_linux - - build_linux: - nightly: true - requires: - - deps_linux - - unit_test_linux: - requires: - - deps_linux - - integration_test_linux: - requires: - - build_linux - - functional_test_linux: - requires: - - build_linux - - build_macos - - build_docker_img: - nightly: true - requires: - - build_linux - - unit_test_linux - - integration_test_linux - - functional_test_linux - - publish_release: - nightly: true - requires: - - build_linux - - build_macos - - trigger_nightly_devnet_deploy - - trigger_nightly_devnet_deploy: - requires: - - build_docker_img - - build_user_devnet: - jobs: - - build_macos: - <<: *user_devnet_filter - - deps_linux: - <<: *user_devnet_filter - - build_linux: - requires: - - deps_linux - <<: *user_devnet_filter - - unit_test_linux: - requires: - - deps_linux - <<: *user_devnet_filter - - integration_test_linux: - requires: - - build_linux - <<: *user_devnet_filter - - functional_test_linux: - requires: - - build_linux - <<: *user_devnet_filter - - publish_release: - requires: - - build_linux - - unit_test_linux - - integration_test_linux - - functional_test_linux - - build_macos - <<: *user_devnet_filter - - build_docker_img: - user_devnet: true - requires: - - build_linux - - unit_test_linux - - integration_test_linux - - functional_test_linux - <<: 
*user_devnet_filter - - approve_deploy: - type: approval - requires: - - build_docker_img - <<: *user_devnet_filter - - trigger_devnet_deploy: - user_devnet: true - network: "user" - requires: - - approve_deploy - <<: *user_devnet_filter - - build_staging_devnet: - jobs: - - build_macos: - <<: *staging_devnet_filter - - deps_linux: - <<: *staging_devnet_filter - - build_linux: - requires: - - deps_linux - <<: *staging_devnet_filter - - unit_test_linux: - requires: - - deps_linux - <<: *staging_devnet_filter - - integration_test_linux: - requires: - - build_linux - <<: *staging_devnet_filter - - functional_test_linux: - requires: - - build_linux - <<: *staging_devnet_filter - - publish_release: - requires: - - build_linux - - unit_test_linux - - integration_test_linux - - functional_test_linux - - build_macos - <<: *staging_devnet_filter - - build_docker_img: - staging_devnet: true - requires: - - build_linux - - unit_test_linux - - integration_test_linux - - functional_test_linux - <<: *staging_devnet_filter - - trigger_devnet_deploy: - staging_devnet: true - network: "staging" - requires: - - build_docker_img - <<: *staging_devnet_filter - -commands: - create_nightly_version: - steps: - - run: - name: create and export nightly FILECOIN_BINARY_VERSION - command: | - echo "nightly-${CIRCLE_BUILD_NUM}-$(echo $CIRCLE_SHA1 | cut -c -6)" > release-version-nightly.txt - - persist_to_workspace: - root: "." 
- paths: - - "release-version-nightly.txt" - get_nightly_version: - steps: - - run: - name: read and export nightly FILECOIN_BINARY_VERSION - command: | - echo "export FILECOIN_BINARY_VERSION="$(cat release-version-nightly.txt)"" >> $BASH_ENV - get_staging_devnet_version: - steps: - - run: - name: read and export staging devnet FILECOIN_BINARY_VERSION - command: | - echo "export FILECOIN_BINARY_VERSION="${CIRCLE_TAG}"" >> $BASH_ENV - get_user_devnet_version: - steps: - - run: - name: read and export user devnet FILECOIN_BINARY_VERSION - command: | - echo "export FILECOIN_BINARY_VERSION="${CIRCLE_TAG}"" >> $BASH_ENV - git_fetch_all_tags: - steps: - - run: - name: fetch all tags - command: | - git fetch --all - go_build: - parameters: - cmd: - type: string - no_output_timeout: - type: string - default: "10m" - steps: - - run: - command: go run ./build/*.go << parameters.cmd >> - no_output_timeout: << parameters.no_output_timeout >> - go_test: - parameters: - <<: *sector_builder_tests_param - unit: - type: boolean - default: true - integration: - type: boolean - default: true - functional: - type: boolean - default: false - steps: - - run: - name: Test - no_output_timeout: 30m - command: | - trap "go run github.com/jstemmer/go-junit-report < test-results/go-test-suite/go-test.out > test-results/go-test-suite/go-test-report.xml" EXIT - export TEST_PACKAGES="$(go list ./... | circleci tests split)" - # Parallelism and timeout set to support medium-class containers, for builds on forked repos. 
- go run ./build test -cover -coverprofile coverage.out -covermode=atomic -timeout=30m -parallel=4 -functional=<< parameters.functional >> -integration=<< parameters.integration >> -sectorbuilder=<< parameters.sector_builder_tests >> -unit=<< parameters.unit >> -v 2>&1 | tee test-results/go-test-suite/go-test.out - mkdir -p /tmp/artifacts - mv coverage.out /tmp/artifacts/coverage.out - - codecov/upload: - file: /tmp/artifacts/coverage.out - linux_configure: - steps: - - run: - name: Install OpenCL, a build-time requirement of libfilecoin - command: | - sudo apt-get update - sudo apt-get install ocl-icd-opencl-dev - - run: - name: Configure environment variables - command: | - echo 'export FIL_PROOFS_PARAMETER_CACHE="${HOME}/filecoin-proof-parameters/"' >> $BASH_ENV - echo 'source $BASH_ENV' >> $HOME/.bashrc - mkdir_test_results: - steps: - - run: - name: Create directories for test results - command: mkdir -p test-results/go-test-suite - generate_rust_submodules_checksums: - steps: - - run: - name: generate filecoin-ffi checksum (used as key for Groth parameters) - command: git rev-parse @:./vendors/filecoin-ffi > /tmp/filecoin-ffi-checksum.txt - trigger_infra_build: - parameters: - branch: - type: string - job: - type: string - steps: - - run: - name: Trigger a build in go-filecoin-infra - command: | - sudo apt-get install -y curl - # FILECOIN_BINARY_VERSION must be set in BASH_ENV of job calling this command - curl -d build_parameters[FILECOIN_BINARY_VERSION]=${FILECOIN_BINARY_VERSION} -d build_parameters[CIRCLE_JOB]=<< parameters.job >> https://circleci.com/api/v1.1/project/github/filecoin-project/go-filecoin-infra/tree/<< parameters.branch >>?circle-token=$CIRCLE_API_TOKEN - update_badge: - parameters: - filename: - description: "filename of badge to update" - type: string - steps: - - run: - name: install jq and git - command: | - sudo apt-get update - sudo apt-get install -y jq git - - run: - name: Update badge << parameters.filename >> - command: | - 
./scripts/update-badge.sh << parameters.filename >> - update_submodules: - steps: - - run: - name: Update submodules - command: git submodule update --init --recursive + - test_all diff --git a/.codecov.yml b/.codecov.yml index a772f7e6a9..b2f855a0c7 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -26,7 +26,3 @@ codecov: # yes: will delay sending notifications until all ci is finished # no: will send notifications without checking ci status and wait till "after_n_builds" are uploaded require_ci_to_pass: false - # number of expected builds to receive before sending notifications - # we can set this to prevent notifications of partial results due to CI parallelism - # set this with respect to the sum of test parallelism in Circle CI configuration. - after_n_builds: 6 diff --git a/.docker/Dockerfile b/.docker/Dockerfile deleted file mode 100644 index 90547b73ef..0000000000 --- a/.docker/Dockerfile +++ /dev/null @@ -1,87 +0,0 @@ -FROM golang:1.12.1-stretch AS builder -MAINTAINER Filecoin Dev Team - -RUN apt-get update && apt-get install -y ca-certificates file sudo clang jq -RUN curl -sSf https://sh.rustup.rs | sh -s -- -y - -# This docker file is a modified version of -# https://github.com/ipfs/go-ipfs/blob/master/Dockerfile -# Thanks Lars :) - -ENV SRC_DIR /go/src/github.com/filecoin-project/go-filecoin -ENV GO111MODULE on -ARG FIL_PROOFS_PARAMETER_CACHE="./tmp/filecoin-proof-parameters" -ARG FILECOIN_USE_PRECOMPILED_RUST_PROOFS=yes -ARG FILECOIN_USE_PRECOMPILED_BLS_SIGNATURES=yes - -COPY . $SRC_DIR - -# Build the thing. -RUN cd $SRC_DIR \ - && . 
$HOME/.cargo/env \ - && git submodule update --init --recursive \ - && go run ./build/*go deps \ - && go run ./build/*go build - -# Get su-exec, a very minimal tool for dropping privileges, -# and tini, a very minimal init daemon for containers -ENV SUEXEC_VERSION v0.2 -ENV TINI_VERSION v0.16.1 -RUN set -x \ - && cd /tmp \ - && git clone https://github.com/ncopa/su-exec.git \ - && cd su-exec \ - && git checkout -q $SUEXEC_VERSION \ - && make \ - && cd /tmp \ - && wget -q -O tini https://github.com/krallin/tini/releases/download/$TINI_VERSION/tini \ - && chmod +x tini - - -# Now comes the actual target image, which aims to be as small as possible. -FROM busybox:1.30.1-glibc AS filecoin -MAINTAINER Filecoin Dev Team - -# Get the filecoin binary, entrypoint script, and TLS CAs from the build container. -ENV SRC_DIR /go/src/github.com/filecoin-project/go-filecoin -COPY --from=builder $SRC_DIR/tmp/filecoin-proof-parameters/* /tmp/filecoin-proof-parameters/ -COPY --from=builder $SRC_DIR/go-filecoin /usr/local/bin/go-filecoin -COPY --from=builder $SRC_DIR/bin/container_daemon /usr/local/bin/start_filecoin -COPY --from=builder $SRC_DIR/bin/devnet_start /usr/local/bin/devnet_start -COPY --from=builder $SRC_DIR/bin/node_restart /usr/local/bin/node_restart -COPY --from=builder $SRC_DIR/fixtures/ /data/ -COPY --from=builder $SRC_DIR/tools/gengen/gengen /usr/local/bin/gengen -COPY --from=builder /tmp/su-exec/su-exec /sbin/su-exec -COPY --from=builder /tmp/tini /sbin/tini -COPY --from=builder /etc/ssl/certs /etc/ssl/certs - -# This shared lib (part of glibc) doesn't seem to be included with busybox. 
-COPY --from=builder /lib/x86_64-linux-gnu/libdl-2.24.so /lib/libdl.so.2 -COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/librt.so.1 -COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/libgcc_s.so.1 -COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/libutil.so.1 - -# need jq for parsing genesis output -RUN wget -q -O /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 \ - && chmod +x /usr/local/bin/jq - -# Ports for Swarm and CmdAPI -EXPOSE 6000 -EXPOSE 3453 - -# Create the fs-repo directory and switch to a non-privileged user. -ENV FILECOIN_PATH /data/filecoin -RUN mkdir -p $FILECOIN_PATH \ - && adduser -D -h $FILECOIN_PATH -u 1000 -G users filecoin \ - && chown filecoin:users $FILECOIN_PATH - -# Expose the fs-repo as a volume. -# start_filecoin initializes an fs-repo if none is mounted. -# Important this happens after the USER directive so permission are correct. -VOLUME $FILECOIN_PATH - -# There's an fs-repo, and initializes one if there isn't. 
-ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/start_filecoin"] - -# Execute the daemon subcommand by default -CMD ["daemon"] diff --git a/.docker/Dockerfile.ci.base b/.docker/Dockerfile.ci.base deleted file mode 100644 index ee7c2f167a..0000000000 --- a/.docker/Dockerfile.ci.base +++ /dev/null @@ -1,22 +0,0 @@ -FROM debian:stretch-20190204-slim AS base -MAINTAINER Filecoin Dev Team - -RUN apt-get update && apt-get install -y ca-certificates file sudo git build-essential wget - -# This docker file is a modified version of -# https://github.com/ipfs/go-ipfs/blob/master/.docker/Dockerfile -# Thanks Lars :) - -# Get su-exec, a very minimal tool for dropping privileges, -# and tini, a very minimal init daemon for containers -ENV SUEXEC_VERSION v0.2 -ENV TINI_VERSION v0.16.1 -RUN set -x \ -&& cd /tmp \ -&& git clone https://github.com/ncopa/su-exec.git \ -&& cd su-exec \ -&& git checkout -q $SUEXEC_VERSION \ -&& make \ -&& cd /tmp \ -&& wget -q -O tini https://github.com/krallin/tini/releases/download/$TINI_VERSION/tini \ -&& chmod +x tini diff --git a/.docker/Dockerfile.ci.faucet b/.docker/Dockerfile.ci.faucet deleted file mode 100644 index 6883912c90..0000000000 --- a/.docker/Dockerfile.ci.faucet +++ /dev/null @@ -1,19 +0,0 @@ -FROM busybox:1.30.1-glibc -MAINTAINER Filecoin Dev Team - -# Get the binary, entrypoint script, and TLS CAs from the build container. 
-COPY tools/faucet/faucet /usr/local/bin/faucet -COPY --from=filecoin:all /tmp/su-exec/su-exec /sbin/su-exec -COPY --from=filecoin:all /tmp/tini /sbin/tini -COPY --from=filecoin:all /etc/ssl/certs /etc/ssl/certs -COPY --from=filecoin:all /lib/x86_64-linux-gnu/libutil.so.1 /lib/libutil.so.1 -COPY --from=filecoin:all /lib/x86_64-linux-gnu/libdl-2.24.so /lib/libdl.so.2 -COPY --from=filecoin:all /lib/x86_64-linux-gnu/librt.so.1 /lib/librt.so.1 -COPY --from=filecoin:all /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/libgcc_s.so.1 - -RUN chmod +x /usr/local/bin/faucet - -EXPOSE 9797 - -# There's an fs-repo, and initializes one if there isn't. -ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/faucet"] diff --git a/.docker/Dockerfile.ci.filecoin b/.docker/Dockerfile.ci.filecoin deleted file mode 100644 index 195c84fe93..0000000000 --- a/.docker/Dockerfile.ci.filecoin +++ /dev/null @@ -1,46 +0,0 @@ -FROM busybox:1.30.1-glibc AS filecoin -MAINTAINER Filecoin Dev Team - -# Get the filecoin binary, entrypoint script, and TLS CAs from the build container. -ENV SRC_DIR /go/src/github.com/filecoin-project/go-filecoin -COPY filecoin-proof-parameters /var/tmp/filecoin-proof-parameters -COPY filecoin/go-filecoin /usr/local/bin/go-filecoin -COPY bin/container_daemon /usr/local/bin/start_filecoin -COPY bin/devnet_start /usr/local/bin/devnet_start -COPY bin/node_restart /usr/local/bin/node_restart -COPY fixtures/ /data/ -COPY tools/gengen/gengen /usr/local/bin/gengen -COPY --from=filecoin:all /tmp/su-exec/su-exec /sbin/su-exec -COPY --from=filecoin:all /tmp/tini /sbin/tini -COPY --from=filecoin:all /etc/ssl/certs /etc/ssl/certs - -# This shared lib (part of glibc) doesn't seem to be included with busybox. 
-COPY --from=filecoin:all /lib/x86_64-linux-gnu/libdl-2.24.so /lib/libdl.so.2 -COPY --from=filecoin:all /lib/x86_64-linux-gnu/librt.so.1 /lib/librt.so.1 -COPY --from=filecoin:all /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/libgcc_s.so.1 -COPY --from=filecoin:all /lib/x86_64-linux-gnu/libutil.so.1 /lib/libutil.so.1 - -# need jq for parsing genesis output -RUN wget -q -O /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 \ - && chmod +x /usr/local/bin/jq - -# Ports for Swarm and CmdAPI -EXPOSE 6000 -EXPOSE 3453 - -# Create the fs-repo directory and switch to a non-privileged user. -ENV FILECOIN_PATH /data/filecoin -RUN mkdir -p $FILECOIN_PATH \ -&& adduser -D -h $FILECOIN_PATH -u 1000 -G users filecoin \ -&& chown filecoin:users $FILECOIN_PATH - -# Expose the fs-repo as a volume. -# start_filecoin initializes an fs-repo if none is mounted. -# Important this happens after the USER directive so permission are correct. -VOLUME $FILECOIN_PATH - -# There's an fs-repo, and initializes one if there isn't. -ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/start_filecoin"] - -# Execute the daemon subcommand by default -CMD ["daemon"] diff --git a/.docker/Dockerfile.ci.genesis b/.docker/Dockerfile.ci.genesis deleted file mode 100644 index 0a42de74d8..0000000000 --- a/.docker/Dockerfile.ci.genesis +++ /dev/null @@ -1,16 +0,0 @@ -FROM busybox:1.30.1-glibc -MAINTAINER Filecoin Dev Team - -# Get the binary, entrypoint script, and TLS CAs from the build container. -COPY tools/genesis-file-server/genesis-file-server /usr/local/bin/genesis-file-server -COPY fixtures/* /data/ -COPY --from=filecoin:all /tmp/su-exec/su-exec /sbin/su-exec -COPY --from=filecoin:all /tmp/tini /sbin/tini -COPY --from=filecoin:all /etc/ssl/certs /etc/ssl/certs - -RUN chmod +x /usr/local/bin/genesis-file-server - -EXPOSE 8080 - -# There's an fs-repo, and initializes one if there isn't. 
-ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/genesis-file-server", "-port=8080", "-genesis-file-path=/opt/genesis/genesis.car"] diff --git a/.docker/Dockerfile.faucet b/.docker/Dockerfile.faucet deleted file mode 100644 index 82254b6e8f..0000000000 --- a/.docker/Dockerfile.faucet +++ /dev/null @@ -1,59 +0,0 @@ -FROM golang:1.12.1-stretch -MAINTAINER Filecoin Dev Team - -RUN apt-get update && apt-get install -y ca-certificates file sudo clang jq -RUN curl -sSf https://sh.rustup.rs | sh -s -- -y - -# This docker file is a modified version of -# https://github.com/ipfs/go-ipfs/blob/master/.docker/Dockerfile -# Thanks Lars :) - -ENV SRC_DIR /go/src/github.com/filecoin-project/go-filecoin -ENV GO111MODULE on -ENV FILECOIN_USE_PRECOMPILED_RUST_PROOFS true -ENV FILECOIN_USE_PRECOMPILED_BLS_SIGNATURES true - -COPY . $SRC_DIR - -# Build faucet -RUN cd $SRC_DIR \ -&& . $HOME/.cargo/env \ -&& go run ./build/*go deps \ -&& go build -o ./faucet ./tools/faucet/main.go - -RUN cd - -# Get su-exec, a very minimal tool for dropping privileges, -# and tini, a very minimal init daemon for containers -ENV SUEXEC_VERSION v0.2 -ENV TINI_VERSION v0.16.1 -RUN set -x \ - && cd /tmp \ - && git clone https://github.com/ncopa/su-exec.git \ - && cd su-exec \ - && git checkout -q $SUEXEC_VERSION \ - && make \ - && cd /tmp \ - && wget -q -O tini https://github.com/krallin/tini/releases/download/$TINI_VERSION/tini \ - && chmod +x tini - - -# Now comes the actual target image, which aims to be as small as possible. -FROM busybox:1.30.1-glibc -MAINTAINER Filecoin Dev Team - -# Get the binary, entrypoint script, and TLS CAs from the build container. 
-ENV SRC_DIR /go/src/github.com/filecoin-project/go-filecoin -COPY --from=0 $SRC_DIR/faucet /usr/local/bin/faucet -COPY --from=0 /tmp/su-exec/su-exec /sbin/su-exec -COPY --from=0 /tmp/tini /sbin/tini -COPY --from=0 /etc/ssl/certs /etc/ssl/certs -COPY --from=0 /lib/x86_64-linux-gnu/libutil.so.1 /lib/libutil.so.1 -COPY --from=0 /lib/x86_64-linux-gnu/libdl-2.24.so /lib/libdl.so.2 -COPY --from=0 /lib/x86_64-linux-gnu/librt.so.1 /lib/librt.so.1 -COPY --from=0 /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/libgcc_s.so.1 - -EXPOSE 9797 - -# There's an fs-repo, and initializes one if there isn't. -ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/faucet"] diff --git a/.docker/Dockerfile.genesis b/.docker/Dockerfile.genesis deleted file mode 100644 index 072dd96cbb..0000000000 --- a/.docker/Dockerfile.genesis +++ /dev/null @@ -1,52 +0,0 @@ -FROM golang:1.12.1-stretch -MAINTAINER Filecoin Dev Team - -RUN apt-get update && apt-get install -y ca-certificates file sudo clang -RUN curl -sSf https://sh.rustup.rs | sh -s -- -y - -# This docker file is a modified version of -# https://github.com/ipfs/go-ipfs/blob/master/.docker/Dockerfile -# Thanks Lars :) - -ENV SRC_DIR /go/src/github.com/filecoin-project/go-filecoin -ENV GO111MODULE on - -COPY . $SRC_DIR - -# Build genesis server -RUN cd $SRC_DIR \ -&& . $HOME/.cargo/env \ -&& go build -o ./genesis-file-server ./tools/genesis-file-server/main.go - -RUN cd - -# Get su-exec, a very minimal tool for dropping privileges, -# and tini, a very minimal init daemon for containers -ENV SUEXEC_VERSION v0.2 -ENV TINI_VERSION v0.16.1 -RUN set -x \ - && cd /tmp \ - && git clone https://github.com/ncopa/su-exec.git \ - && cd su-exec \ - && git checkout -q $SUEXEC_VERSION \ - && make \ - && cd /tmp \ - && wget -q -O tini https://github.com/krallin/tini/releases/download/$TINI_VERSION/tini \ - && chmod +x tini - - -# Now comes the actual target image, which aims to be as small as possible. 
-FROM busybox:1.30.1-glibc -MAINTAINER Filecoin Dev Team - -# Get the binary, entrypoint script, and TLS CAs from the build container. -ENV SRC_DIR /go/src/github.com/filecoin-project/go-filecoin -COPY --from=0 $SRC_DIR/genesis-file-server /usr/local/bin/genesis-file-server -COPY --from=0 /tmp/su-exec/su-exec /sbin/su-exec -COPY --from=0 /tmp/tini /sbin/tini -COPY --from=0 /etc/ssl/certs /etc/ssl/certs - -EXPOSE 8080 - -# There's an fs-repo, and initializes one if there isn't. -ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/genesis-file-server", "-port=8080", "-genesis-file-path=/opt/genesis/genesis.car"] diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 5d55ce3b40..0000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -labels: C-bug, candidate -assignees: '' - ---- - - -**Describe the bug**: - -**Expected behavior**: - -**Diagnostic information:** - - -- Filecoin Version: -- Filecoin Inspect Output: -- Initialization Command: diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000000..e31e2c457e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,95 @@ +name: "Bug Report" +description: "报告 bug / File a bug report to help us improve" +labels: [C-bug] +body: +- type: checkboxes + attributes: + label: 链服务模块 / Chain Service Components + description: | + 选择涉及模块。 + Please select the related components. + options: + - label: venus + required: false + - label: venus-auth + required: false + - label: venus-gateway + required: false + - label: venus-messager + required: false + - label: venus-miner + required: false + - label: 文档 / docs + required: false +- type: checkboxes + attributes: + label: 订单服务模块 / Deal Service Components + description: | + 选择涉及模块。 + Please select the related components. 
+ options: + - label: venus-market + required: false + - label: 文档 / docs + required: false +- type: checkboxes + attributes: + label: 算力服务模块 / Storage Power Service Components + description: | + 选择涉及模块。 + Please select the related components. + options: + - label: venus-sector-manager + required: false + - label: venus-worker + required: false + - label: 文档 / docs + required: false +- type: textarea + id: version + attributes: + label: 版本 / Version + render: text + description: | + 填写组件的版本。 + Enter version of the component if applicable. + placeholder: | + e.g. + { "Version": "1.6.0+git.3652863fa.dirty" } + venus message version v1.6.0--58feea4 + validations: + required: true +- type: textarea + id: description + attributes: + label: 描述 / Describe the Bug + description: | + 填写你所遇到的崩溃、异常信息或你认为与预期结果不符的行为。 + This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information: + validations: + required: true +- type: textarea + id: logging + attributes: + label: 日志 / Logging Information + render: text + description: | + 填写可以帮助定位问题的日志信息。 + Please provide debug logs of the problem. + If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before furthur diagnosing the problem. + validations: + required: true +- type: textarea + id: reproduce + attributes: + label: 重现步骤 / Repo Steps + description: | + 能够重现问题的步骤。 + Steps to reproduce the behavior. + placeholder: | + 1. Run '...' + 2. Do '...' + 3. See error '...' + ... 
+ validations: + required: false \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/enhancement.yml b/.github/ISSUE_TEMPLATE/enhancement.yml new file mode 100644 index 0000000000..43afec7097 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement.yml @@ -0,0 +1,56 @@ +name: 功能特性 / Enhancement +description: "提议新功能,或改善已有功能 / New feature request or enhancement suggestion" +labels: [C-enhancement] +body: +- type: checkboxes + attributes: + label: 链服务模块 / Chain Service Components + description: | + 选择涉及模块。 + Please select the related components. + options: + - label: venus + required: false + - label: venus-auth + required: false + - label: venus-gateway + required: false + - label: venus-messager + required: false + - label: venus-miner + required: false + - label: 文档 / docs + required: false +- type: checkboxes + attributes: + label: 订单服务模块 / Deal Service Components + description: | + 选择涉及模块。 + Please select the related components. + options: + - label: venus-market + required: false + - label: 文档 / docs + required: false +- type: checkboxes + attributes: + label: 算力服务模块 / Storage Power Service Components + description: | + 选择涉及模块。 + Please select the related components. + options: + - label: venus-sector-manager + required: false + - label: venus-worker + required: false + - label: 文档 / docs + required: false +- type: textarea + id: description + attributes: + label: 描述 / Description + placeholder: | + 是否可以考虑... + I suggest ... + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/new_story.md b/.github/ISSUE_TEMPLATE/new_story.md deleted file mode 100644 index 974e9d305c..0000000000 --- a/.github/ISSUE_TEMPLATE/new_story.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -name: New story -about: Create a new story. Typically created after design intent is written down and - subjected to constructive feedback (see CONTRIBUTING.md). 
-title: '' -labels: candidate -assignees: '' - ---- -### Description -Please first see README for how to get help before filing a new issue. - -### Acceptance criteria - -### Risks + pitfalls - -### Where to begin diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 6e5819703f..8bad546177 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,9 +1,24 @@ -### Motivation +## 关联的Issues (Related Issues) + + +close -### Proposed changes +## 改动 (Proposed Changes) + + -Closes # - +## 附注 (Additional Info) + + +## 自查清单 (Checklist) + +在你认为本 PR 满足被审阅的标准之前,需要确保 / Before you mark the PR ready for review, please make sure that: +- [ ] 符合Venus项目管理规范中关于PR的[相关标准](https://github.com/ipfs-force-community/dev-guidances/blob/master/%E9%A1%B9%E7%9B%AE%E7%AE%A1%E7%90%86/Venus/PR%E5%91%BD%E5%90%8D%E8%A7%84%E8%8C%83.md) / The PR follows the PR standards set out in the Venus project management guidelines +- [ ] 具有清晰明确的[commit message](https://github.com/ipfs-force-community/dev-guidances/blob/master/%E8%B4%A8%E9%87%8F%E7%AE%A1%E7%90%86/%E4%BB%A3%E7%A0%81/git%E4%BD%BF%E7%94%A8/commit-message%E9%A3%8E%E6%A0%BC%E8%A7%84%E8%8C%83.md) / All commits have a clear commit message. +- [ ] 包含相关的的[测试用例](https://github.com/ipfs-force-community/dev-guidances/blob/master/%E8%B4%A8%E9%87%8F%E7%AE%A1%E7%90%86/%E4%BB%A3%E7%A0%81/%E4%BB%A3%E7%A0%81%E5%BA%93/%E6%A3%80%E6%9F%A5%E9%A1%B9/%E5%8D%95%E5%85%83%E6%B5%8B%E8%AF%95.md)或者不需要新增测试用例 / This PR has tests for new functionality or change in behaviour or not need to add new tests. +- [ ] 存在兼容性问题(接口, 配置,数据,灰度),如果存在需要进行文档说明 / This PR has compatibility issues (API, Configuration, Data, GrayRelease), if so, need to be documented. 
+- [ ] 包含相关的的指南以及[文档](https://github.com/ipfs-force-community/dev-guidances/tree/master/%E8%B4%A8%E9%87%8F%E7%AE%A1%E7%90%86/%E6%96%87%E6%A1%A3)或者不需要新增文档 / This PR has updated usage guidelines and documentation or not need +- [ ] 通过必要的检查项 / All checks are green diff --git a/.github/workflows/baisc_checks.yml b/.github/workflows/baisc_checks.yml new file mode 100644 index 0000000000..3895adfbba --- /dev/null +++ b/.github/workflows/baisc_checks.yml @@ -0,0 +1,51 @@ +name: basic-check + +on: + push: + branches: + - master + pull_request: + branches: + - '**' + +jobs: + + check: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + + - name: install deps + run: | + sudo apt-get update + sudo apt-get -o Acquire::Retries=3 install make ncftp mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y && sudo apt upgrade -y + + - name: Build + env: + GOPROXY: "https://proxy.golang.org,direct" + GO111MODULE: "on" + run: | + make + + - name: Lint + run: | + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.50.0 + golangci-lint run --timeout 10m + + - name: Compatible all + run: | + make compatible-all + + - name: Gen all + run: | + make gen-all + + - name: Detect changes + run: | + git status --porcelain + test -z "$(git status --porcelain)" \ No newline at end of file diff --git a/.github/workflows/build_upload.yml b/.github/workflows/build_upload.yml new file mode 100644 index 0000000000..d077c5aa01 --- /dev/null +++ b/.github/workflows/build_upload.yml @@ -0,0 +1,23 @@ +name: build and upload + +on: + push: + branches: ['**'] + tags: ['**'] + +jobs: + build_upload: + uses: ./.github/workflows/common_build_upload.yml + with: + bin_name: 'venus' + has_ffi: true + secrets: + OSS_KEY_ID: ${{secrets.OSS_KEY_ID}} + OSS_KEY_SECRET: ${{secrets.OSS_KEY_SECRET}} + 
OSS_ENDPOINT: ${{secrets.OSS_ENDPOINT}} + OSS_BUCKET: ${{secrets.OSS_BUCKET}} + FTP_HOST: ${{secrets.FTP_HOST}} + FTP_USER: ${{secrets.FTP_USER}} + FTP_PWD: ${{secrets.FTP_PWD}} + GODEYE_URL: ${{secrets.GODEYE_URL}} + token: ${{secrets.FFI_API_TOKEN}} diff --git a/.github/workflows/common_build_upload.yml b/.github/workflows/common_build_upload.yml new file mode 100644 index 0000000000..f5cebb23cb --- /dev/null +++ b/.github/workflows/common_build_upload.yml @@ -0,0 +1,297 @@ +name: reuse build and upload + +on: + + workflow_call: + inputs: + bin_name: + type: string + has_ffi: + type: boolean + secrets: + OSS_KEY_ID: + OSS_KEY_SECRET: + OSS_ENDPOINT: + OSS_BUCKET: + FTP_HOST: + required: true + FTP_USER: + required: true + FTP_PWD: + required: true + GODEYE_URL: + required: true + token: + required: true + +jobs: + + build-ubuntu: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + with: + submodules: 'true' + fetch-depth: '0' + + - name: vars + id: vars + run: | + export commit=$(git rev-parse HEAD) + export short=$(git rev-parse --short HEAD) + export github_tag=${{github.ref_name}} + export tag=$github_tag + export branch=$github_tag + export git_message=$(git rev-list --format=%s --max-count=1 HEAD | tail +2) + export repo_name=${GITHUB_REPOSITORY##*/} + export artifact_name=${repo_name}_$(git rev-parse --short HEAD).tar.gz + export pub_method=pushTest + export job_url=$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID + export oss_exists=0 + export ftp_exists=0 + export is_tag_create=false + + export rx_tag='^refs\/tags\/.*' + export rx_version_tag='^v([0-9]+\.){0,2}(\*|[0-9]+)(-rc[0-9]*){0,1}$' + if [[ "${{github.ref}}" =~ $rx_tag ]]; then + export is_tag_create=true + if [[ "${{github.ref_name}}" =~ $rx_version_tag ]]; then + export pub_method=pushRelease + fi + fi + + if [[ "${{secrets.OSS_KEY_ID}}" != "" && \ + "${{secrets.OSS_KEY_SECRET}}" != "" && \ + "${{secrets.OSS_ENDPOINT}}" != "" && \ + "${{secrets.OSS_BUCKET}}" != "" 
]]; then + export oss_exists=1 + fi + if [[ "${{secrets.FTP_HOST}}" != "" ]]; then + export ftp_exists=1 + fi + + echo "::set-output name=commit::$commit" + echo "::set-output name=short::$short" + echo "::set-output name=github_tag::$github_tag" + echo "::set-output name=git_message::$git_message" + echo "::set-output name=repo_name::$repo_name" + echo "::set-output name=branch::$branch" + echo "::set-output name=tag::$tag" + echo "::set-output name=artifact_name::$artifact_name" + echo "::set-output name=job_url::$job_url" + echo "::set-output name=pub_method::$pub_method" + echo "::set-output name=is_tag_create::$is_tag_create" + echo "::set-output name=oss_exists::$oss_exists" + echo "::set-output name=ftp_exists::$ftp_exists" + + - name: show environment + run: | + echo bin_name = ${{inputs.bin_name}} + echo has_ffi = ${{inputs.has_ffi}} + echo event = ${{github.event_name}} + echo github_repository: $GITHUB_REPOSITORY + echo vars.commit = ${{steps.vars.outputs.commit}} + echo vars.short_commit = ${{steps.vars.outputs.short}} + echo vars.github_tag = ${{steps.vars.outputs.github_tag}} + echo vars.git_message = "${{steps.vars.outputs.git_message}}" + echo vars.repo_name = ${{steps.vars.outputs.repo_name}} + echo vars.branch = ${{steps.vars.outputs.branch}} + echo vars.tag = ${{steps.vars.outputs.tag}} + echo vars.artifact_name = ${{steps.vars.outputs.artifact_name}} + echo vars.pub_method = ${{steps.vars.outputs.pub_method}} + echo secrets.godeye_url = ${{ secrets.GODEYE_URL }} + echo vars.oss_exists = ${{steps.vars.outputs.oss_exists}} + echo vars.ftp_exists = ${{steps.vars.outputs.ftp_exists}} + echo vars.is_tag_create = ${{steps.vars.outputs.is_tag_create}} + echo github.ref = ${{github.ref}} + echo github.ref_name = ${{github.ref_name}} + echo vars.job_url = ${{steps.vars.outputs.job_url}} + echo ftp_url = ftp://${{secrets.FTP_HOST}}/${{steps.vars.outputs.repo_name}}/${{steps.vars.outputs.artifact_name}} + + - name: Set up Go + uses: actions/setup-go@v2 + 
with: + go-version: 1.18 + + - name: install deps + if: ${{ !inputs.has_ffi }} + run: | + sudo apt-get update + sudo apt-get install ncftp + + - name: install more deps + if: ${{ inputs.has_ffi }} + run: | + sudo apt-get update + sudo apt-get -o Acquire::Retries=3 install make ncftp mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y && sudo apt upgrade -y + + - name: Build + run: | + go clean --modcache && make + mkdir ./release + if [[ "${{steps.vars.outputs.repo_name}}" == "venus-market" ]]; then + mv ./market-client ./venus-market ./release + fi + if [[ "${{steps.vars.outputs.repo_name}}" != "venus-market" ]]; then + mv ./${{inputs.bin_name}} ./release + fi + + - name: Zip Release + uses: TheDoctor0/zip-release@0.6.0 + with: + filename: ${{steps.vars.outputs.artifact_name}} + path: ./release + type: tar + + - name: Rename bin file + if: startsWith(github.ref, 'refs/tags/') + run: | + cp ./${{steps.vars.outputs.artifact_name}} ./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_ubuntu.tar.gz + + - name: shasum + if: startsWith(github.ref, 'refs/tags/') + run: shasum -a 256 ./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_ubuntu.tar.gz > ./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_ubuntu.sha256 + shell: bash + + - name: Release + uses: softprops/action-gh-release@v1 + if: startsWith(github.ref, 'refs/tags/') + continue-on-error: true + with: + files: | + ./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_ubuntu.tar.gz + ./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_ubuntu.sha256 + + - name: upload artifacts + uses: actions/upload-artifact@v2 + if: ${{ steps.vars.outputs.pub_method == 'pushRelease' }} + with: + name: ${{steps.vars.outputs.artifact_name}} + path: ./${{steps.vars.outputs.artifact_name}} + if-no-files-found: error + + - name: upload ftp + id: uploadftp + if: ${{ steps.vars.outputs.ftp_exists == '1' }} + 
continue-on-error: true + run: | + ncftpput -m -R -v -u ${{secrets.FTP_USER}} -p ${{secrets.FTP_PWD}} ${{secrets.FTP_HOST}} ./${{steps.vars.outputs.repo_name}} ./${{steps.vars.outputs.artifact_name}} + echo "upload file: ${{steps.vars.outputs.artifact_name}} successfully!" + + - name: setup oss + id: setuposs + if: ${{ steps.vars.outputs.oss_exists == '1' && steps.uploadftp.outcome != 'success' && steps.vars.outputs.pub_method == 'pushTest' }} + uses: manyuanrong/setup-ossutil@master + with: + endpoint: ${{secrets.OSS_ENDPOINT}} + access-key-id: ${{ secrets.OSS_KEY_ID }} + access-key-secret: ${{ secrets.OSS_KEY_SECRET }} + + - name: cp files to aliyun + id: cposs + if: ${{ steps.setuposs.outcome == 'success' }} + run: | + ossutil cp ./${{steps.vars.outputs.artifact_name}} ${{secrets.OSS_BUCKET}} + export signed_url=`ossutil sign ${{secrets.OSS_BUCKET}}/${{steps.vars.outputs.artifact_name}} --timeout 31104000 | sed -n 1p` + echo '::set-output name=oss_signed_url::$(signed_url)' + + - name: push god-eye + run: | + export link=${{steps.vars.outputs.job_url}} + if [[ "${{ steps.release.outcome }}" == "success" ]]; then + export link=$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/releases/download/${{steps.vars.outputs.github_tag}}/${{steps.vars.outputs.artifact_name}} + elif [[ "${{ steps.uploadftp.outcome }}" == "success" ]]; then + export link=ftp://${{secrets.FTP_HOST}}/${{steps.vars.outputs.repo_name}}/${{steps.vars.outputs.artifact_name}} + elif [[ "${{ steps.cposs.outcome }}" == "success" ]]; then + export link=${{steps.cposs.outputs.oss_signed_url}} + fi + echo download target file : $link + set +e + curl --max-time 20 -X PUT ${{secrets.GODEYE_URL}}/${{steps.vars.outputs.pub_method}} \ + --data-urlencode "type=1" \ + --data-urlencode "commitId=${{steps.vars.outputs.commit}}" \ + --data-urlencode "branch=${{steps.vars.outputs.branch}}" \ + --data-urlencode "programName=${{steps.vars.outputs.repo_name}}" \ + --data-urlencode "link=$link" \ + --data-urlencode 
"description=message:${{steps.vars.outputs.git_message}}, branch:${{steps.vars.outputs.branch}}, commit:${{steps.vars.outputs.short}}, tag:${{steps.vars.outputs.github_tag}}" \ + --data-urlencode "version=${{steps.vars.outputs.short}}" + set -e + + build-macos: + runs-on: macos-11 + steps: + - uses: actions/checkout@v2 + with: + submodules: 'true' + fetch-depth: '0' + + - name: vars + id: vars + run: | + export tag=${{github.ref_name}} + export repo_name=${GITHUB_REPOSITORY##*/} + export artifact_name=${repo_name}_$(git rev-parse --short HEAD).tar.gz + + echo "::set-output name=repo_name::$repo_name" + echo "::set-output name=tag::$tag" + echo "::set-output name=artifact_name::$artifact_name" + + - name: show environment + run: | + echo bin_name = ${{inputs.bin_name}} + echo has_ffi = ${{inputs.has_ffi}} + echo vars.repo_name = ${{steps.vars.outputs.repo_name}} + echo vars.tag = ${{steps.vars.outputs.tag}} + echo vars.artifact_name = ${{steps.vars.outputs.artifact_name}} + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + + - name: install more deps + if: ${{ inputs.has_ffi }} + run: | + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + brew install jq pkg-config hwloc + cargo install cargo-lipo + + - name: Build + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + make + mkdir ./release + if [[ "${{steps.vars.outputs.repo_name}}" == "venus-market" ]]; then + mv ./market-client ./venus-market ./release + fi + if [[ "${{steps.vars.outputs.repo_name}}" != "venus-market" ]]; then + mv ./${{inputs.bin_name}} ./release + fi + + - name: Zip Release + if: startsWith(github.ref, 'refs/tags/') + uses: TheDoctor0/zip-release@0.6.0 + with: + filename: ${{steps.vars.outputs.artifact_name}} + path: ./release + type: tar + + - name: Rename bin file + if: startsWith(github.ref, 'refs/tags/') + run: | + cp ./${{steps.vars.outputs.artifact_name}} 
./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_macos.tar.gz + + - name: shasum + if: startsWith(github.ref, 'refs/tags/') + run: shasum -a 256 ./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_macos.tar.gz > ./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_macos.sha256 + shell: bash + + - name: Release + uses: softprops/action-gh-release@v1 + if: startsWith(github.ref, 'refs/tags/') + continue-on-error: true + with: + files: | + ./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_macos.tar.gz + ./${{steps.vars.outputs.repo_name}}_${{steps.vars.outputs.tag}}_macos.sha256 diff --git a/.github/workflows/common_docker_image.yml b/.github/workflows/common_docker_image.yml new file mode 100644 index 0000000000..3c869fda71 --- /dev/null +++ b/.github/workflows/common_docker_image.yml @@ -0,0 +1,53 @@ +name: Reuse Docker Image CI + +on: + + workflow_call: + secrets: + DOCKER_PASSWORD: + required: true + +jobs: + + build_docker_image: + + runs-on: ubuntu-20.04 + + steps: + - uses: actions/checkout@v3 + - name: Create vars + id: vars + run: | + export github_tag=${{ github.ref_name }} + export repo_name=${GITHUB_REPOSITORY##*/} + export is_tag_create=false + export docker_user_name='filvenus' + + export rx_tag='^refs\/tags\/.*' + export rx_version_tag='^v([0-9]+\.){0,2}(\*|[0-9]+)(-rc[0-9]*){0,1}$' + if [[ "${{github.ref}}" =~ $rx_tag ]]; then + export is_tag_create=true + fi + + echo "::set-output name=github_tag::$github_tag" + echo "::set-output name=repo_name::$repo_name" + echo "::set-output name=is_tag_create::$is_tag_create" + echo "::set-output name=docker_user_name::$docker_user_name" + + - name: Show environment + run: | + echo is_tag_create = ${{ steps.vars.outputs.is_tag_create }} + echo github_tag = ${{ steps.vars.outputs.github_tag }} + echo repo_name = ${{ steps.vars.outputs.repo_name }} + echo docker_user_name = ${{steps.vars.outputs.docker_user_name}} + echo docker_password = 
${{secrets.DOCKER_PASSWORD}} + + - name: Build the Docker image + if: ${{ steps.vars.outputs.is_tag_create == 'true' }} + run: | + curl -O https://raw.githubusercontent.com/filecoin-project/venus-docs/master/script/dockerfile + make docker TAG=latest + docker tag ${{steps.vars.outputs.docker_user_name}}/${{steps.vars.outputs.repo_name}}:latest ${{steps.vars.outputs.docker_user_name}}/${{steps.vars.outputs.repo_name}}:${{ steps.vars.outputs.github_tag }} + docker login --username=${{steps.vars.outputs.docker_user_name}} --password ${{ secrets.DOCKER_PASSWORD }} + docker push ${{steps.vars.outputs.docker_user_name}}/${{steps.vars.outputs.repo_name}}:${{ steps.vars.outputs.github_tag }} + docker push ${{steps.vars.outputs.docker_user_name}}/${{steps.vars.outputs.repo_name}}:latest diff --git a/.github/workflows/common_go.yml b/.github/workflows/common_go.yml new file mode 100644 index 0000000000..68879dd75d --- /dev/null +++ b/.github/workflows/common_go.yml @@ -0,0 +1,73 @@ +name: build and golangci-lint and test + +on: + + workflow_call: + inputs: + test_timeout: + description: 'specifies the amount of minutes for test timeout' + required: false + default: 10 + type: number + has_ffi: + type: boolean + +jobs: + + check: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + + - name: vars + id: vars + run: | + export repo_name=${GITHUB_REPOSITORY##*/} + echo "::set-output name=repo_name::$repo_name" + + - name: show vars + run: | + echo vars.repo_name = ${{steps.vars.outputs.repo_name}} + + - name: install default deps + run: sudo apt-get -o Acquire::Retries=3 update && sudo apt-get -o Acquire::Retries=3 install make ftp git bzr curl wget -y && sudo apt upgrade -y + + - name: install deps + if: ${{ inputs.has_ffi }} + run: sudo apt-get -o Acquire::Retries=3 update && sudo apt-get -o Acquire::Retries=3 install hwloc libhwloc-dev mesa-opencl-icd ocl-icd-opencl-dev -y + + - name: 
Build + env: + GOPROXY: "https://proxy.golang.org,direct" + GO111MODULE: "on" + run: | + make + + - name: Lint + run: | + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.50.0 + golangci-lint run --timeout 10m + + - name: Detect changes + run: | + go mod tidy + git status --porcelain + test -z "$(git status --porcelain)" + + - name: Run coverage + run: go test -coverpkg=./... -race -coverprofile=coverage.txt -covermode=atomic ./... -v --timeout ${{ inputs.test_timeout }}m + + - name: Upload + uses: codecov/codecov-action@v2 + with: + token: + files: ./coverage.txt + flags: unittests + name: ${{steps.vars.outputs.repo_name}} + fail_ci_if_error: true + verbose: true diff --git a/.github/workflows/docker_image.yml b/.github/workflows/docker_image.yml new file mode 100644 index 0000000000..fb01aeec65 --- /dev/null +++ b/.github/workflows/docker_image.yml @@ -0,0 +1,13 @@ +name: Docker Image CI + +on: + push: + branches: ['prep/**', 'release/**', 'test/**', master] + tags: ['**'] + +jobs: + + build_docker_image: + uses: filecoin-project/venus/.github/workflows/common_docker_image.yml@master + secrets: + DOCKER_PASSWORD: ${{secrets.DOCKER_PASSWORD}} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000000..cce44dd1a7 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,69 @@ +name: Test + +on: + push: + branches: ['**'] + pull_request: + branches: ['**'] + +jobs: + + test: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + + - name: install deps + run: | + sudo apt-get update + sudo apt-get -o Acquire::Retries=3 install make ncftp mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y && sudo apt upgrade -y + + - name: install statediff globally + run: | + ## statediff is optional; we succeed 
even if compilation fails. + mkdir -p /tmp/statediff + git clone https://github.com/filecoin-project/statediff.git /tmp/statediff + cd /tmp/statediff + go install ./cmd/statediff || exit 0 + + - name: Build + env: + GOPROXY: "https://proxy.golang.org,direct" + GO111MODULE: "on" + run: | + make + + - name: Download vk file + run: | + ./venus fetch --size=0 + + - name: GenTool + run: | + go build -o genesis-file-server ./tools/genesis-file-server + go build -o gengen ./tools/gengen + ./gengen --keypath ./fixtures/live --out-car ./fixtures/live/genesis.car --out-json ./fixtures/live/gen.json --config ./fixtures/setup.json + ./gengen --keypath ./fixtures/test --out-car ./fixtures/test/genesis.car --out-json ./fixtures/test/gen.json --config ./fixtures/setup.json + + - name: Venus Shared Test + run: go test -coverpkg=./... -coverprofile=coverage_venus_shared.txt -covermode=atomic -timeout=30m -parallel=4 -v ./venus-shared/... + + - name: Unit Test + run: go test -coverpkg=./... -coverprofile=coverage_unit.txt -covermode=atomic -timeout=30m -parallel=4 -v $(go list ./... | grep -v /venus-shared/) -integration=false -unit=true + + - name: Integration Test + run: go test -coverpkg=./... -coverprofile=coverage_integration.txt -covermode=atomic -timeout=30m -parallel=4 -v $(go list ./... 
| grep -v /venus-shared/) -integration=true -unit=false + + - name: Upload + uses: codecov/codecov-action@v3 + with: + # Upload combined coverage reports to Codecov + token: ${{ secrets.CODECOV_TOKEN }} + files: ./coverage_unit.txt,./coverage_integration.txt,./coverage_venus_shared.txt + name: venus + fail_ci_if_error: true + verbose: true diff --git a/.gitignore b/.gitignore index 6a8a5e2215..4fa4654fa0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,32 +1,42 @@ go-filecoin -!cmd/go-filecoin -!internal/app/go-filecoin - +venus +!cmd/venus +!app/venus +devgen.car +localnet.json +*.log .task .idea *.coverprofile +coverprofile.html *.out *.so +actor-bundles-v8.* + fixtures/test !fixtures/test/.gitkeep fixtures/live !fixtures/live/.gitkeep -// Tools +// Tools tools/gengen/gengen tools/gengen/gensetup tools/fast/bin/localnet tools/faucet/faucet tools/aggregator/aggregator tools/genesis-file-server/genesis-file-server -tools/migration/go-filecoin-migrate tools/prerelease-tool/prerelease-tool tools/chain-util/chain-util **/paramcache **/*.h **/*.a **/*.pc +.DS_Store +**/.DS_Store + +// venus-dev-tool +venus-dev-tool // IDE (vscode) .vscode @@ -34,3 +44,11 @@ debug.test // HomeBrew Brewfile.lock.json +build-dep +coverage.* +receipt.json +genesis-file-server +gengen +dockerfile +coverage_unit.txt +coverage_venus_shared.txt diff --git a/.gitmodules b/.gitmodules index 2f7faf0cf4..d4c59e5d37 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ -[submodule "vendors/filecoin-ffi"] - path = vendors/filecoin-ffi +[submodule "extern/filecoin-ffi"] + path = extern/filecoin-ffi url = https://github.com/filecoin-project/filecoin-ffi.git +[submodule "extern/test-vectors"] + path = extern/test-vectors + url = https://github.com/filecoin-project/test-vectors.git diff --git a/.golangci.yml b/.golangci.yml index d40bbb3a1f..f9354ac36c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,21 +1,53 @@ linters: disable-all: true enable: - - vet - gofmt + - govet - misspell - goconst - - golint + - revive - 
errcheck - unconvert - staticcheck - - varcheck - - structcheck - - deadcode - + - unused + - stylecheck + - gosimple + - goimports issues: + exclude: + - "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this" + - "Potential file inclusion via variable" + - "should have( a package)? comment" + - "Error return value of `logging.SetLogLevel` is not checked" + - "(func|method) \\w+ should be \\w+" + - "(type|var|struct field|(method|func) parameter) `\\w+` should be `\\w+`" + - "(G306|G301|G307|G108|G302|G204|G104)" + - "don't use ALL_CAPS in Go names" + - "string .* has .* occurrences, make it a constant" + - "a blank import should be only in a main or test package, or have a comment justifying it" + - "package comment should be of the form" + - "should be of the form" + + exclude-rules: + - path: pkg/constants + linters: + - errcheck + - path: pkg/specactors + linters: + - errcheck exclude-use-default: false linters-settings: goconst: min-occurrences: 6 + +run: + skip-dirs: + - pkg/constants$ + - pkg/util/test$ + - venus-shared/actors/adt$ + - venus-shared/actors/aerrors$ + - venus-shared/actors/builtin$ + - venus-shared/actors/policy$ + skip-files: + - ".*_gen\\.go$" # skip auto generated go files diff --git a/CHANGELOG.md b/CHANGELOG.md index 524723eaff..090a4a0b73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,750 +1 @@ -# go-filecoin changelog - -## go-filecoin 0.5.6 - -We're happy to announce go-filecoin 0.5.6. Highlights include an updated Proof-of-Spacetime implementation and an upgrade-capable network. - -### Features - -#### 🌳 Network upgrade capability - -Two changes have been made to enable software releases without restarting the network. First, a network name is now embedded in the genesis state, permitting multiple networks to follow different upgrade schedules. 
In addition, the Git SHA compatibility check has been removed from the Hello protocol, enabling nodes with different, but compatible, code to interoperate. Going forward, the user devnet will no longer be restarted with every software release; it will still be restarted as-needed. - -#### 🚀 Updated Proof-of-Spacetime (PoSt) - -A new proof construction, [Rational PoSt](https://github.com/filecoin-project/specs/blob/master/proof-of-spacetime.md), has been [implemented](https://github.com/filecoin-project/rust-fil-proofs/pull/763) and [integrated](https://github.com/filecoin-project/go-filecoin/pull/3318). This construction is the same shape as our candidate for testnet and resolves outstanding limitations on proving over many sectors. - -#### 🎟️ Block and consensus changes - -Block headers are now signed by miners, and election tickets form an array in each header. The election process is now split into two phases, ticket generation / validation and election winner generation / validation. Election tickets form an array in each header and mining a null block appends a ticket to this array. Block headers are now signed by miners. - -#### 🔗 Chain status command - -`go-filecoin chain status` is a new command that provides insight into chain sync progress. - -### Performance and Reliability - -#### ⚡ Chain syncing performance - -Previously in go-filecoin 0.4, we aimed to speed up chain syncing by focusing on the first phase: chain fetching. We have identified the worst of the fetching contention issues that caused forking and unreliable message processing in 0.4. Some of those fixes are now complete, while others such as [#3460](https://github.com/filecoin-project/go-filecoin/pull/3460) are in progress. There may still be some issues that could cause forking that we will continue to work on and update the coming weeks. Please let us know your feedback. - -go-filecoin 0.5 also continues with improvements to the second phase: chain validation. 
By switching from HAMT bitwidth 8 to HAMT bitwidth 5, we see a general average improvement in benchmarks of about 4-to-1, across memory usage, speed of operations, and bytes written to disk. Users are encouraged to measure and share their own benchmarks. In addition, optimizations to encoding and decoding of HAMT data structures may result in additional performance improvements. - -### Looking Ahead - -#### ✏️ New API design (WIP) - -Developers are invited to read and comment on the new [HTTP API design](https://github.com/filecoin-project/filecoin-http-api) (work in progress). This design will be implemented initially in go-filecoin and serve as a standard for interacting with Filecoin nodes across implementations. It will support most of operations offered by the current API and provide a framework for future API growth. - -### User Notes - -- The proving period is now configured to 300 rounds (2.5 hrs), down from 1000 rounds (10 hours). We’ve made this temporary change for more frequent node interaction and faster experimentation, and we expect to increase the proving period again in the future. -- Groth parameters are no longer fetched from the network, but instead locally generated when needed. This can take many minutes (but is more reliable than network). -- [Block header structure](https://github.com/filecoin-project/go-filecoin/blob/release-0.5.0/types/block.go) has changed, so tools which parse chain data will need updating. -- The default storage miner waits 15 rounds _after the start of the proving window_ before beginning a PoSt computation, but is not robust to a re-org of _more than 15 blocks_ that changes its challenge seed. -- If you are seeing panics or write failures during sealing, it may be related to disk space requirements. Currently the sector builder uses ~11GiB of free disk space, and assumes it is available on the `/tmp` partition. 
An proposal to make that directory configurable is in [#3497](https://github.com/filecoin-project/go-filecoin/issues/3497) - -### CLI diff - -| go-filecoin command | change | -| ------------------- | ------ | -| chain status | added | -| mining add-piece | added | -| mining seal-now | behavior changed[1] | - -[1] `mining seal-now` no longer stages a piece into a sector. It now has the same behavior as `--auto-seal-interval-seconds`. - -### Changelog - -A full list of all [67 PRs](https://github.com/filecoin-project/go-filecoin/pulls?utf8=✓&q=is%3Apr+is%3Amerged+merged%3A2019-09-03..2019-09-23+) in this release, including many bugfixes not listed here, can be found on Github. - -### Contributors - -❤️ Huge thank you to everyone that made this release possible! - -### 🙌🏽 Want to contribute? - -Would you like to contribute to the Filecoin project and don’t know how? Here are a few places you can get started: - -- Check out the [Contributing Guidelines](https://github.com/filecoin-project/go-filecoin/blob/master/CONTRIBUTING.md) -- Look for issues with the `good-first-issue` label in [go-filecoin](https://docs.google.com/document/d/1dfTVASs9cQMo4NPqJmXjEEX-Ju_M9Vw-4AelN1aHOV8/edit#) and [rust-fil-proofs](https://github.com/filecoin-project/rust-fil-proofs/issues?q=is%3Aissue+is%3Aopen+label%3A"good+first+issue") -- Join the [community chat on Matrix/Slack](https://github.com/filecoin-project/community#chat), introduce yourself in #_fil-lobby, and let us know where you would like to contribute -- Join the [user devnet](https://github.com/filecoin-project/go-filecoin/wiki/Getting-Started) - -### ⁉️ Do you have questions? - -The best place to ask your questions about go-filecoin, how it works, and what you can do with it is at [discuss.filecoin.io](https://discuss.filecoin.io). We are also available at the [community chat on Matrix/Slack](https://github.com/filecoin-project/community#chat). 
- ---- - -## go-filecoin 0.4.6 - -This release includes the first steps towards increasing the performance for new nodes joining the network through the graphsync protocol. -It also includes the new command `mining status` which allows users to understand the current state of their miner by providing the current proving period, as well as the current proving sector set. -We've also shipped piece inclusion proofs which allow for storage clients to verify the inclusion of their data in the sealed sector reported back by the storage miner. - -### Features - -#### Chain sync - -Chain downloads have switched to [go-graphsync](https://github.com/ipfs/go-graphsync) for more efficient downloads, and we’ve also reduced block header size by moving messages and receipts out of the header. -We’re expecting this to significantly reduce the memory requirements for running a node, and to greatly speed up the chain downloading & setup time. - -#### Piece inclusion proofs - -[Piece inclusion proofs](https://github.com/filecoin-project/specs/blob/840aa9a9777d955fdcd61017444741aabc96dbea/proofs.md#piece-inclusion-proof) are now calculated during deal proposal. -We expect small increases in (1) time to create a deal and (2) CPU consumption during sealing, but both should be minor. - -#### Miners validate piece commitments - -Miners now validate that a client has supplied the correct piece commitment (commP). -This is a computationally expensive operation that will be apparent for high-throughput miner operators. - -#### Slashing - -Storage slashing is now implemented as outlined in [Mining Spec](https://github.com/filecoin-project/specs/blob/master/mining.md#on-being-slashed-wip-needs-discussion) and [Storage Market Spec](https://github.com/filecoin-project/specs/blob/master/storage-market.md) on the actors. -Miners automatically monitor for storage faults by other miners and include `SlashStorageFault` messages in their own blocks (these messages will later carry a rewards). 
- -#### Free deals - -Previously when accepting storage deals with zero costs a payment channel would be created regardless. -This added additional costs to the deal that were not needed. -Miners now accept deals with zero price and will not require a payment channel to exist before accepting the deal. -This can simplify operations for miners automating deals among their own nodes. - -#### Sealing sectors without a deal - -Previously the only way to stage pieces into sectors was through the Storage Market. -This process required interacting with an additional node on the network and added unnecessary overhead for miners to gain power on the network. -Miners now can use the `mining seal-now` command to seal "empty" sectors directly to increase their power on the network for block mining. - -### CLI diff - -| go-filecoin command | change | -| --------------------------- | ------------ | -| mining status | added | -| mining seal-now | added | -| miner proving-period | added | -| show header | added | -| show messages | added | -| show receipts | added | -| client verify-deal-proposal | added | - - -### Important changes - -- The mining start command will fail if Groth parameters for the sector size which the miner is configured do not yet exists in the parameter cache. - Previously Groth parameters would be generated on demand if they were missing. -- The Groth parameters cache location has changed from `/tmp/filecoin-proof-parameters` to `/var/tmp/filecoin-proof-parameters`. -- Parameters are no longer downloaded by default. Nodes intending to mine must fetch parameters explicitly. See the [wiki](https://github.com/filecoin-project/go-filecoin/wiki/Mining-Filecoin#start-mining) for more information. -- The paramfetch binary now uses ipget to download Groth parameters and keys instead of hitting the IPFS (HTTP) Gateway. - This will make paramfetch slow, but more reliable. 
-- Proof logs will no longer be displayed in log output by default and must be enabled by setting `RUST_LOG=info` before starting the daemon. -- When building go-filecoin, `git submodules init --recursive` is required to be ran before `go run ./build deps`. -- Sector size for PoSt construction has been increased from 2 to 4. This has resulted in a slight increase of memory usage, but supports proof calculation over more storage within a single proving period. - ---- - -## go-filecoin 0.3.2 - -We're happy to announce go-filecoin 0.3.2. This release is a big step towards completing the filecoin storage protocol. It includes many changes to the miner actor builtin smart contract that will allow the network to securely account for verifiable storage power once fault handling is in place. Many less visible but high impact code and testing improvements ship with this release. 0.3.2 also includes a big UX improvement with the new and improved `go-filecoin deals` command for user friendly management of storage deals. Getting paid as a storage miner is now as simple as a single CLI call. - -### Features - -#### 🏇 Storage protocol nearing completeness - -Our number one goal is a network securely powered by verifiable storage. In order for this to work we need to penalize cheating miners who do not prove their storage on time. This release includes most of the groundwork needed, including fundamental data structures and encoding work for tracking sets of sectors, improved power tracking in the miner actor built-in smart contract, and charging fees for late storage proof (PoSt) submissions. Expect these changes to blossom into the [complete fault reporting mechanism](https://github.com/filecoin-project/specs/blob/master/faults.md#market-faults) in the next release. 
- -#### 👪 Multiple sector sizes - -In order for the network to scale gracefully, different miners may choose from a variety of different sector sizes to put data in and prove over: smaller sectors for faster and more nimble storage; larger sectors for slower but efficient storage. This release includes all of the software updates we need to support multiple sector sizes in a single network; however, we plan to properly vet network conditions with much bigger sectors before enabling multiple sectors sizes in the user devnet. Expect 1 GiB sectors on the user devnet in the next release. - -#### 🤝 Deal management and payments - -Both clients and miners can now easily inspect the fine details of all storage deals they have entered into using `go-filecoin deals list` and `go-filecoin deals show`. Miners can get paid for honoring a deal by running `go-filecoin deals redeem`. Additionally this release ships some improvements in payment channel safety for correct arbitration of deal disputes we want down the road. - -### Performance and Reliability - -#### 🌳 Upgrade in place - -This release drives home previous work on repo migrations. The `go-filecoin-migrate` tool (included in the go-filecoin source repo) is now complete. This release includes a proof of concept migration: upgrading on-disk chain metadata from JSON to the more compact CBOR. Landing this means we are confident that this major technical challenge is behind us, putting us one step closer to a reliable, persistent testnet. - -### Refactors and Endeavors - -#### 📈Major testing improvements - -Testing is the silent champion of reliability and development speed. 
This release includes [tons of ](https://github.com/filecoin-project/go-filecoin/pull/2972)[behind](https://github.com/filecoin-project/go-filecoin/pull/2700) [the scenes](https://github.com/filecoin-project/go-filecoin/pull/2990) [work](https://github.com/filecoin-project/go-filecoin/pull/2919) improving the quality of existing unit and integration tests as well as adding new tests to existing code. Continued improvements to the [FAST](https://github.com/filecoin-project/go-filecoin/tree/master/tools/fast) framework promise to further accelerate integration testing and devnet deployments. - -#### 💳 Tech debt paydown - -This release is not playing around when it comes to paying off technical debt. Fundamental chain refactors include an [improved immutable tipset type](https://github.com/filecoin-project/go-filecoin/pull/2837) and tipset cache sharing are at the top of the list. A major refactor of the [message](https://github.com/filecoin-project/go-filecoin/pull/2798) [handling](https://github.com/filecoin-project/go-filecoin/pull/2796) [system](https://github.com/filecoin-project/go-filecoin/pull/2795) into inbox and outbox queues is also a notable improvement. Don’t forget about a consistent internal attoFIL token type, a sleek new miner deal acceptance codepath, sector builder reliability fixes... the list goes on. We are excited to be shipping higher quality software with each release so that we can move faster towards a robust mainnet. - -### Changelog - -A full list of [all 207 PRs in this release](https://github.com/search?p=2&q=is%3Apr+merged%3A2019-05-09..2019-07-05+repo%3Afilecoin-project%2Fgo-filecoin+repo%3Afilecoin-project%2Frust-fil-proofs+repo%3Afilecoin-project%2Fspecs&type=Issues), including many bugfixes not listed here, can be found on Github. 
- -### CLI diff - -| go-filecoin command | change | -| ------------------- | ------------ | -| deals list | added | -| deals redeem | added | -| deals show | added | -| miner pledge | removed | -| mining status | added | -| show block | args changed | - -### Contributors - -❤️ Huge thank you to everyone that made this release possible! By alphabetical order, here are all the humans who contributed to this release: - -* [@a8159236](https://github.com/a8159236) (3 issues, 2 comments) -* [@Aboatlai](https://github.com/Aboatlai) (1 issue) -* [@acruikshank](https://github.com/acruikshank) (6 commits, 8 PRs, 21 issues, 23 comments) -* [@AkshitV](https://github.com/AkshitV) (1 issue, 2 comments) -* [@alanshaw](https://github.com/alanshaw) (1 comment) -* [@AndyChen1984](https://github.com/AndyChen1984) (1 issue, 5 comments) -* [@anorth](https://github.com/anorth) (22 commits, 24 PRs, 40 issues, 163 comments) -* [@arielgabizon](https://github.com/arielgabizon) (1 issue, 2 comments) -* [@benrogmans](https://github.com/benrogmans) (1 issue) -* [@bvohaska](https://github.com/bvohaska) (1 PR, 1 comment) -* [@callmez](https://github.com/callmez) (1 comment) -* [@carsonfly](https://github.com/carsonfly) (2 issues, 7 comments) -* [@chengzhigen](https://github.com/chengzhigen) (2 issues, 2 comments) -* [@chenhonghe](https://github.com/chenhonghe) (1 issue, 5 comments) -* [@chenxiaolin0105](https://github.com/chenxiaolin0105) (1 issue) -* [@chenzhi201901](https://github.com/chenzhi201901) (2 issues, 1 comment) -* [@codecov-io](https://github.com/codecov-io) (57 comments) -* [@Cryptovideos](https://github.com/Cryptovideos) (1 issue) -* [@dannyhchan](https://github.com/dannyhchan) (4 comments) -* [@dayu26](https://github.com/dayu26) (1 issue) -* [@decentralion](https://github.com/decentralion) (3 commits, 1 PR, 6 comments) -* [@deltazxm](https://github.com/deltazxm) (2 comments) -* [@dignifiedquire](https://github.com/dignifiedquire) (76 commits, 25 PRs, 14 issues, 139 comments) -* 
[@DrPeterVanNostrand](https://github.com/DrPeterVanNostrand) (1 commit, 1 PR, 2 comments) -* [@eshon](https://github.com/eshon) (1 issue, 8 comments) -* [@frrist](https://github.com/frrist) (14 commits, 18 PRs, 10 issues, 46 comments) -* [@gnunicorn](https://github.com/gnunicorn) (23 commits, 3 PRs, 1 issue, 17 comments) -* [@grandhelmsman](https://github.com/grandhelmsman) (3 issues, 2 comments) -* [@idotial](https://github.com/idotial) (1 issue) -* [@imrehg](https://github.com/imrehg) (1 PR, 1 comment) -* [@ingar](https://github.com/ingar) (5 commits, 6 PRs, 7 comments) -* [@ipfsmainofficial](https://github.com/ipfsmainofficial) (1 issue) -* [@jscode017](https://github.com/jscode017) (1 comment) -* [@Kentix](https://github.com/Kentix) (1 issue, 2 comments) -* [@kishansagathiya](https://github.com/kishansagathiya) (1 PR, 2 comments) -* [@Kubuxu](https://github.com/Kubuxu) (1 commit, 1 PR, 1 comment) -* [@laser](https://github.com/laser) (45 commits, 41 PRs, 24 issues, 97 comments) -* [@maybeuright](https://github.com/maybeuright) (1 comment) -* [@meiqimichelle](https://github.com/meiqimichelle) (1 comment) -* [@merced](https://github.com/merced) (1 issue, 3 comments) -* [@michellebrous](https://github.com/michellebrous) (1 comment) -* [@mishmosh](https://github.com/mishmosh) (3 commits, 2 PRs, 2 issues, 20 comments) -* [@mslipper](https://github.com/mslipper) (5 commits, 1 PR, 8 comments) -* [@nicola](https://github.com/nicola) (2 commits, 1 PR, 4 issues, 11 comments) -* [@nijynot](https://github.com/nijynot) (1 commit, 1 comment) -* [@no1lcy](https://github.com/no1lcy) (1 issue, 1 comment) -* [@ognots](https://github.com/ognots) (6 commits, 5 PRs, 1 issue, 11 comments) -* [@Peachooo](https://github.com/Peachooo) (2 issues, 1 comment) -* [@pooja](https://github.com/pooja) (12 commits, 5 PRs, 9 issues, 45 comments) -* [@porcuquine](https://github.com/porcuquine) (8 commits, 4 PRs, 7 issues, 42 comments) -* [@R-Niagra](https://github.com/R-Niagra) (1 issue, 1 
comment) -* [@ridewindx](https://github.com/ridewindx) (1 commit, 1 PR) -* [@RobQuistNL](https://github.com/RobQuistNL) (2 comments) -* [@rogerlzp](https://github.com/rogerlzp) (1 comment) -* [@rosalinekarr](https://github.com/rosalinekarr) (15 commits, 15 PRs, 3 issues, 36 comments) -* [@schomatis](https://github.com/schomatis) (22 commits, 11 PRs, 3 issues, 28 comments) -* [@shannonwells](https://github.com/shannonwells) (8 commits, 8 PRs, 5 issues, 11 comments) -* [@sidke](https://github.com/sidke) (13 commits, 1 comment) -* [@Stebalien](https://github.com/Stebalien) (1 commit, 1 PR, 1 comment) -* [@sternhenri](https://github.com/sternhenri) (4 PRs, 1 issue, 24 comments) -* [@steven004](https://github.com/steven004) (1 commit, 1 PR, 3 issues, 7 comments) -* [@taoshengshi](https://github.com/taoshengshi) (2 issues, 6 comments) -* [@taylorshuang](https://github.com/taylorshuang) (2 issues, 6 comments) -* [@titilami](https://github.com/titilami) (3 issues, 2 comments) -* [@travisperson](https://github.com/travisperson) (3 commits, 3 PRs, 6 issues, 25 comments) -* [@urugang](https://github.com/urugang) (1 issue) -* [@vhosakot](https://github.com/vhosakot) (1 comment) -* [@vmx](https://github.com/vmx) (3 commits, 4 PRs, 14 comments) -* [@vyzo](https://github.com/vyzo) (1 comment) -* [@warpfork](https://github.com/warpfork) (3 comments) -* [@waynewyang](https://github.com/waynewyang) (3 commits, 4 PRs, 1 issue, 3 comments) -* [@whyrusleeping](https://github.com/whyrusleeping) (72 commits, 15 PRs, 11 issues, 73 comments) -* [@windemut](https://github.com/windemut) (1 issue, 5 comments) -* [@yangjian102621](https://github.com/yangjian102621) (2 issues, 5 comments) -* [@yaohcn](https://github.com/yaohcn) (1 commit, 1 PR, 1 comment) -* [@yusefnapora](https://github.com/yusefnapora) (1 comment) -* [@ZenGround0](https://github.com/ZenGround0) (9 commits, 9 PRs, 23 issues, 37 comments) -* [@zhengboowen](https://github.com/zhengboowen) (3 issues) -* 
[@zixuanzh](https://github.com/zixuanzh) (1 PR) - -### 🙌🏽 Want to contribute? - -Would you like to contribute to the Filecoin project and don’t know how? Here are a few places you can get started: - -- Check out the [Contributing Guidelines](https://github.com/filecoin-project/go-filecoin/blob/master/CONTRIBUTING.md) - -- Look for issues with the `good-first-issue` label in [go-filecoin](https://docs.google.com/document/d/1dfTVASs9cQMo4NPqJmXjEEX-Ju_M9Vw-4AelN1aHOV8/edit#) and [rust-fil-proofs](https://github.com/filecoin-project/rust-fil-proofs/issues?q=is%3Aissue+is%3Aopen+label%3A"good+first+issue") - -- Join the [community chat on Matrix/Slack](https://github.com/filecoin-project/community#chat), introduce yourself in #_fil-lobby, and let us know where you would like to contribute - -- Join the [user devnet](https://github.com/filecoin-project/go-filecoin/wiki/Getting-Started) - -### ⁉️ Do you have questions? - -The best place to ask your questions about go-filecoin, how it works, and what you can do with it is at [discuss.filecoin.io](https://discuss.filecoin.io). We are also available at the [community chat on Matrix/Slack](https://github.com/filecoin-project/community#chat). - ---- - -## go-filecoin 0.2.4 - -We're happy to announce go-filecoin 0.2.4. This is a patch release with block validation improvements. As a placeholder before full implementation of block validation, block time was hardcoded to 30 seconds. It was also possible to manually configure a shorter block time via the CLI — miners who did this gained an unfair block mining advantage. Over the past few weeks, a handful of enterprising devnet participants¹ 😉 increasingly used this undocumented option to the point of severely degrading the devnet for everyone else. To get the devnet running smoothly again, we are releasing partial [block validation](https://github.com/filecoin-project/specs/pull/289). 
- -#### 🌳 Features - -- Timestamp block | [go-filecoin #2897](https://github.com/filecoin-project/go-filecoin/pull/2897) -- Partial Block validation | [go-filecoin #2899](https://github.com/filecoin-project/go-filecoin/pull/2899), [go-filecoin #2882](https://github.com/filecoin-project/go-filecoin/pull/2882), [go-filecoin #2914](https://github.com/filecoin-project/go-filecoin/pull/2914) - -#### ☝🏽 Upgrade notice - -As a reminder, only the latest version of go-filecoin will connect to the user devnet until protocol upgrade work is complete. Users will need to upgrade to 0.2.4 to connect to the user devnet. - -[1] If that was you, we’d love to collaborate to see if you can find other ways to break our implementation! Please email us at [mining@filecoin.io](mailto:mining@filecoin.io). - ---- - -## go-filecoin 0.2.2 - -We're happy to announce go-filecoin 0.2.2. This is a maintenance release with bug fixes and debugging improvements. After the 0.2.1 release, we found a bug in the dht ([#2753](https://github.com/filecoin-project/go-filecoin/issues/2753)) that caused some nodes to panic. This was fixed in [#2754](https://github.com/filecoin-project/go-filecoin/pull/2754) by bumping the [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht) version from 0.0.4 to 0.0.8. 
- -#### 🐞 Bug fixes - -- Update to go-libp2p-kad-dht@v0.0.8 | [go-filecoin #2754](https://github.com/filecoin-project/go-filecoin/pull/2754) -- Fix output for set price | [go-filecoin #2727](https://github.com/filecoin-project/go-filecoin/pull/2727) - -#### 🌳 Features - -- Add an approval step to user devnet deploy | [go-filecoin #2765](https://github.com/filecoin-project/go-filecoin/pull/2765) -- Log messages proper printing | [go-filecoin #2728](https://github.com/filecoin-project/go-filecoin/pull/2728) -- Add filecoin version command to inspect output | [go-filecoin #2725](https://github.com/filecoin-project/go-filecoin/pull/2725) - -#### ☝🏽 Upgrade notice - -As a reminder, only the latest version of go-filecoin will connect to the user devnet until model for change work is complete. Users will need to upgrade to 0.2.2 to connect to the user devnet. - ---- - -## go-filecoin 0.2.1 - -We're happy to announce go-filecoin 0.2.1. This release is heavy on behind-the-scenes upgrades, including support for filesystem repo migrations and storage disputes, a better message pool, proofs improvements, and a bump to libp2p version for more reliable relays. User-facing improvements such as new commands and options, better status messages, and lots of bugfixes are also included. Get pumped! 🎁 - -### Install and Setup - -#### ⌛ Chain syncing status - -When a filecoin node is first created, it must download and verify the chain. We call this “chain syncing”. While initial commands (such as tapping the faucet or dashboard streaming) can be run immediately, any other commands (such as mining commands) will return errors until chain syncing is complete. Currently, this can take several hours. - -To clarify, we’ve added [wiki updates](https://github.com/filecoin-project/go-filecoin/wiki/Getting-Started#wait-for-chain-sync), better status messages, and cleaner console output for chain syncing. In future releases, we’ll also address the underlying problem of slow chain syncing. 
- -#### 💠 Sector storage configuration - -Where would you like the filecoin node to store client data? You can now choose! There are two ways to specify the location of the sector storage directory: the `sectorbase.rootdir` config entry, or the `--sectordir` option to `go-filecoin init`. - -If you don’t specify a location, data is stored in `$HOME/.filecoin_sectors` by default. - -### Features - -#### 🍄 Upgradeable repo - -In addition to sealed client data, Filecoin nodes also store other data on-disk such as configuration data, blockchain blocks, deal state, and encryption keys. As development progresses, we need a way to safely change the type and schema of this data. In this release, we include an accepted [design](https://docs.google.com/document/d/1THzh1mrNCKYbdk1zP72xV8pfr1yQBe2n3ptrSAYyVI8/) for filesystem repo migrations, and an initial layout for the migration tool. This paves the way for filecoin nodes to seamlessly update when running in production. - -For more information, check out the help text: - -``` -tools/migration/go-filecoin-migrate --help -``` - -#### 💎 Storage payments - -This release includes work towards storage protocol dispute resolution. Payment channels can now contain conditions that will query another actor before a voucher is redeemed. Payment channels can also be canceled by the payer. This will trigger an early close if the target of the channel does not redeem a payment. These features can be used together with piece inclusion proofs (coming soon) to enforce proof of storage when storage clients pay storage miners. - -#### 🐛 New debugging commands - -Three new commands (`inspect`, `protocol`, and `bitswap`) are now available for your debugging and exploring adventures: - -\* `go-filecoin inspect all` prints all the necessary information for opening a bug report on GitHub. This includes operating system details, your current go-filecoin config, and a few other commonly needed stats. 
- -\* `go-filecoin protocol` prints details regarding parameters for a node’s protocol, such as autoseal interval and sector size. These are helpful for debugging some of the internals of Filecoin’s proofs and protocol systems. - -\* `go-filecoin bitswap` prints details about a node’s libp2p bitswap system, such as blocks, data, and messages received and sent. These are commonly used in network debugging. - -For more details, run any command followed by the `--help` flag. - -### Performance and Reliability - -#### 🙌 Upgrade libp2p to 0.0.16 - -libp2p recently landed a bunch of improvements to relay functionality, addressing heavy resource usage in some production relay nodes. We’ve upgraded to [go-libp2p](http://github.com/libp2p/go-libp2p) 0.0.16 to enjoy the same fixes in filecoin. - -#### 📬 Better message validation - -We’ve taken several steps to harden the message pool. The pool now rejects messages that will obviously fail processing due to problems like invalid signature, insufficient funds, no gas, or non-existent actor. It also tracks nonces to ensure that messages are correctly sequenced, and that no account has too many messages in the pool. Finally, the pool now limits the total messages it will accept. - -#### 🔗 Proofs integration - -Behind the scenes, much groundwork has been laid for more flexible and powerful storage proofs. This release includes more efficient memory utilization when writing large pieces to a sector. It also includes initial support for piece inclusion proofs, [multiple sector sizes](https://github.com/filecoin-project/go-filecoin/issues/2530), and [variable proof lengths](https://github.com/filecoin-project/go-filecoin/pull/2607). - -#### 🔮 Proofs performance - -Over in `rust-fil-proofs`, progress is accelerating on more complete and efficient implementations. 
This includes switching to [mmap for more efficient merkle trees](https://github.com/filecoin-project/rust-fil-proofs/pull/529), [abstractions over the hasher](https://github.com/filecoin-project/rust-fil-proofs/pull/543), [limiting parallelism when generating groth proofs](https://github.com/filecoin-project/rust-fil-proofs/pull/582), and [calculating](https://github.com/filecoin-project/rust-fil-proofs/pull/621) and [aggregating](https://github.com/filecoin-project/rust-fil-proofs/pull/605) challenges across partitions. - -### Refactors and Endeavors - -#### 🏁 FAST (Filecoin Automation & System Toolkit) - -We have significantly improved the FAST testing system for Filecoin since the last release. FAST now automatically includes relevant log data and messages from testing nodes in the event of a test failure. FAST also has an all-new localnet tool to quickly and easily set up local Filecoin node clusters for testing and experimentation. See [the localnet readme](https://github.com/filecoin-project/go-filecoin/blob/master/tools/fast/bin/localnet/README.md) for details. - -#### 👾 Go modules - -With Go 1.11’s preliminary support for versioned modules, we have switched to [Go modules](https://github.com/golang/go/wiki/Modules) for dependency management. This allows for easier dependency management and faster updates when dealing with updates from upstream dependencies. - -#### 😍 Design documents - -We regularly write [design docs](https://github.com/filecoin-project/designdocs/blob/master/designdocs.md) before coding begins on important features or components. These short documents are useful in capturing knowledge, formalizing our thinking, and sharing design intent. Going forward, you can find new design docs in the [designdocs](https://github.com/filecoin-project/designdocs/) repo. 
- -### Changelog - -A full list of [all 177 PRs in this release](https://github.com/search?q=is%3Apr+merged%3A2019-03-26..2019-05-10+repo%3Afilecoin-project%2Fgo-filecoin+repo%3Afilecoin-project%2Frust-fil-proofs+repo%3Afilecoin-project%2Fspecs&type=Issues), including many bugfixes not listed here, can be found on Github. - -### Contributors - -❤️ Huge thank you to everyone that made this release possible! By alphabetical order, here are all the humans who contributed to this release via the `go-filecoin`, `rust-fil-proofs`, and `specs` repos: - -* [@814556001](https://github.com/814556001) (1 comment) -* [@a8159236](https://github.com/a8159236) (3 issues, 9 comments) -* [@aaronhenshaw](https://github.com/aaronhenshaw) (1 issue, 1 comment) -* [@AbelLaker](https://github.com/AbelLaker) (2 issues, 2 comments) -* [@acruikshank](https://github.com/acruikshank) (47 commits, 24 PRs, 42 issues, 81 comments) -* [@aioloszcy](https://github.com/aioloszcy) (2 issues) -* [@alanshaw](https://github.com/alanshaw) (1 commit, 1 PR, 4 comments) -* [@anacrolix](https://github.com/anacrolix) (2 commits, 2 PRs, 17 comments) -* [@andrewxhill](https://github.com/andrewxhill) (1 issue) -* [@AndyChen1984](https://github.com/AndyChen1984) (5 issues, 9 comments) -* [@anorth](https://github.com/anorth) (61 commits, 65 PRs, 46 issues, 340 comments) -* [@arcalinea](https://github.com/arcalinea) (1 issue, 4 comments) -* [@arielgabizon](https://github.com/arielgabizon) (1 issue) -* [@arsstone](https://github.com/arsstone) (1 PR, 1 issue, 6 comments) -* [@aschmahmann](https://github.com/aschmahmann) (4 comments) -* [@bigs](https://github.com/bigs) (1 comment) -* [@block2020](https://github.com/block2020) (5 issues, 1 comment) -* [@btcioner](https://github.com/btcioner) (2 comments) -* [@bvohaska](https://github.com/bvohaska) (1 commit, 1 PR, 6 issues, 26 comments) -* [@Byte-Doctor](https://github.com/Byte-Doctor) (1 issue) -* [@cgwyx](https://github.com/cgwyx) (2 comments) -* 
[@chenminjian](https://github.com/chenminjian) (1 issue, 3 comments) -* [@comradekingu](https://github.com/comradekingu) (1 commit, 1 PR) -* [@contrun](https://github.com/contrun) (4 commits, 5 PRs, 1 issue, 7 comments) -* [@craigbranscom](https://github.com/craigbranscom) (1 issue) -* [@creationix](https://github.com/creationix) (1 comment) -* [@Cyanglacier](https://github.com/Cyanglacier) (1 issue) -* [@Daniel-Wang](https://github.com/Daniel-Wang) (1 commit, 1 PR, 1 comment) -* [@danigrant](https://github.com/danigrant) (2 commits, 2 PRs) -* [@dayou5168](https://github.com/dayou5168) (6 issues, 17 comments) -* [@dayu26](https://github.com/dayu26) (1 comment) -* [@deaswang](https://github.com/deaswang) (1 comment) -* [@decentralion](https://github.com/decentralion) (1 issue, 12 comments) -* [@deltazxm](https://github.com/deltazxm) (1 issue, 5 comments) -* [@dignifiedquire](https://github.com/dignifiedquire) (49 commits, 32 PRs, 16 issues, 151 comments) -* [@diwufeiwen](https://github.com/diwufeiwen) (3 issues, 3 comments) -* [@djdv](https://github.com/djdv) (2 comments) -* [@DonaldTsang](https://github.com/DonaldTsang) (1 issue) -* [@EbonyBelle](https://github.com/EbonyBelle) (1 comment) -* [@ebuchman](https://github.com/ebuchman) (1 issue) -* [@eefahy](https://github.com/eefahy) (1 comment) -* [@ElecRoastChicken](https://github.com/ElecRoastChicken) (2 comments) -* [@evildido](https://github.com/evildido) (1 issue, 3 comments) -* [@fengchenggang1](https://github.com/fengchenggang1) (1 issue) -* [@firmianavan](https://github.com/firmianavan) (1 commit, 2 PRs, 3 comments) -* [@fjl](https://github.com/fjl) (4 comments) -* [@frrist](https://github.com/frrist) (100 commits, 51 PRs, 44 issues, 111 comments) -* [@gfc-test](https://github.com/gfc-test) (1 PR) -* [@gmas](https://github.com/gmas) (12 commits) -* [@gmasgras](https://github.com/gmasgras) (22 commits, 19 PRs, 14 issues, 35 comments) -* [@gnunicorn](https://github.com/gnunicorn) (1 comment) -* 
[@haadcode](https://github.com/haadcode) (1 issue) -* [@hango-hango](https://github.com/hango-hango) (1 comment) -* [@haoglehaogle](https://github.com/haoglehaogle) (1 issue) -* [@hsanjuan](https://github.com/hsanjuan) (3 commits, 2 PRs, 7 comments) -* [@ianjdarrow](https://github.com/ianjdarrow) (5 comments) -* [@imrehg](https://github.com/imrehg) (7 issues, 4 comments) -* [@ipfsmainofficial](https://github.com/ipfsmainofficial) (1 issue, 1 comment) -* [@irocnX](https://github.com/irocnX) (1 issue, 1 comment) -* [@jamiew](https://github.com/jamiew) (1 comment) -* [@jaybutera](https://github.com/jaybutera) (1 issue) -* [@jbenet](https://github.com/jbenet) (1 commit, 4 issues, 8 comments) -* [@jcchua](https://github.com/jcchua) (1 issue, 1 comment) -* [@jesseclay](https://github.com/jesseclay) (1 issue, 1 comment) -* [@jhiesey](https://github.com/jhiesey) (1 issue) -* [@jimpick](https://github.com/jimpick) (1 issue, 3 comments) -* [@joshgarde](https://github.com/joshgarde) (4 comments) -* [@jscode017](https://github.com/jscode017) (2 commits, 2 PRs, 4 issues, 17 comments) -* [@karalabe](https://github.com/karalabe) (1 issue, 4 comments) -* [@kishansagathiya](https://github.com/kishansagathiya) (1 issue, 4 comments) -* [@Kostadin](https://github.com/Kostadin) (1 commit, 1 PR) -* [@Kubuxu](https://github.com/Kubuxu) (13 commits, 9 PRs, 8 comments) -* [@lanzafame](https://github.com/lanzafame) (2 commits, 1 PR, 1 issue, 4 comments) -* [@laser](https://github.com/laser) (73 commits, 64 PRs, 77 issues, 178 comments) -* [@leinue](https://github.com/leinue) (1 issue, 1 comment) -* [@lidel](https://github.com/lidel) (3 comments) -* [@life-i](https://github.com/life-i) (1 issue, 3 comments) -* [@lin6461](https://github.com/lin6461) (2 issues, 5 comments) -* [@linsheng9731](https://github.com/linsheng9731) (1 issue) -* [@loulancn](https://github.com/loulancn) (1 issue, 1 comment) -* [@Luca8991](https://github.com/Luca8991) (1 issue) -* [@madper](https://github.com/madper) (1 
commit, 1 PR) -* [@magik6k](https://github.com/magik6k) (4 commits, 4 PRs, 9 comments) -* [@MariusVanDerWijden](https://github.com/MariusVanDerWijden) (2 comments) -* [@markwylde](https://github.com/markwylde) (2 issues, 5 comments) -* [@mburns](https://github.com/mburns) (1 PR) -* [@mgoelzer](https://github.com/mgoelzer) (2 issues, 7 comments) -* [@mhammersley](https://github.com/mhammersley) (3 issues, 15 comments) -* [@mikeal](https://github.com/mikeal) (1 PR, 1 issue, 2 comments) -* [@mishmosh](https://github.com/mishmosh) (21 commits, 8 PRs, 35 issues, 159 comments) -* [@mkky-lisheng](https://github.com/mkky-lisheng) (1 issue, 1 comment) -* [@moyid](https://github.com/moyid) (4 comments) -* [@mslipper](https://github.com/mslipper) (9 commits, 11 PRs, 7 issues, 51 comments) -* [@muronglaowang](https://github.com/muronglaowang) (8 issues, 7 comments) -* [@Nanofortress](https://github.com/Nanofortress) (1 issue, 4 comments) -* [@NatoBoram](https://github.com/NatoBoram) (3 issues, 9 comments) -* [@nicola](https://github.com/nicola) (17 commits, 5 PRs, 7 issues, 25 comments) -* [@nijynot](https://github.com/nijynot) (1 PR) -* [@ognots](https://github.com/ognots) (56 commits, 37 PRs, 19 issues, 86 comments) -* [@olizilla](https://github.com/olizilla) (1 commit, 1 PR) -* [@Pacius](https://github.com/Pacius) (1 issue) -* [@ParadiseTaboo](https://github.com/ParadiseTaboo) (1 comment) -* [@pengxiankaikai](https://github.com/pengxiankaikai) (7 issues, 15 comments) -* [@phritz](https://github.com/phritz) (13 commits, 11 PRs, 50 issues, 366 comments) -* [@pkrasam](https://github.com/pkrasam) (1 issue, 1 comment) -* [@pooja](https://github.com/pooja) (5 commits, 1 PR, 11 issues, 95 comments) -* [@porcuquine](https://github.com/porcuquine) (62 commits, 25 PRs, 31 issues, 246 comments) -* [@protocolin](https://github.com/protocolin) (1 issue) -* [@pxrxingrui520](https://github.com/pxrxingrui520) (1 issue) -* [@rafael81](https://github.com/rafael81) (2 commits, 2 PRs, 1 issue, 
3 comments) -* [@raulk](https://github.com/raulk) (4 commits, 5 PRs, 22 comments) -* [@redransil](https://github.com/redransil) (1 issue) -* [@RichardLitt](https://github.com/RichardLitt) (1 commit, 1 PR) -* [@ridewindx](https://github.com/ridewindx) (2 commits, 2 PRs) -* [@rjan90](https://github.com/rjan90) (1 comment) -* [@rkowalick](https://github.com/rkowalick) (52 commits, 46 PRs, 17 issues, 106 comments) -* [@RobQuistNL](https://github.com/RobQuistNL) (1 issue, 7 comments) -* [@rosalinekarr](https://github.com/rosalinekarr) (38 commits, 39 PRs, 48 issues, 157 comments) -* [@sanchopansa](https://github.com/sanchopansa) (2 comments) -* [@sandjj](https://github.com/sandjj) (5 issues, 8 comments) -* [@SaveTheAles](https://github.com/SaveTheAles) (1 issue, 3 comments) -* [@schomatis](https://github.com/schomatis) (47 commits, 22 PRs, 12 issues, 173 comments) -* [@scout](https://github.com/scout) (3 comments) -* [@SCUTVincent](https://github.com/SCUTVincent) (1 issue, 2 comments) -* [@shannonwells](https://github.com/shannonwells) (23 commits, 24 PRs, 43 issues, 68 comments) -* [@sidke](https://github.com/sidke) (79 commits, 22 PRs, 18 issues, 12 comments) -* [@SmartMeshFoundation](https://github.com/SmartMeshFoundation) (1 issue) -* [@songjiayang](https://github.com/songjiayang) (1 comment) -* [@Stebalien](https://github.com/Stebalien) (4 commits, 6 PRs, 18 comments) -* [@sternhenri](https://github.com/sternhenri) (38 commits, 10 PRs, 5 issues, 50 comments) -* [@steven004](https://github.com/steven004) (3 commits, 7 PRs, 4 issues, 11 comments) -* [@sywyn219](https://github.com/sywyn219) (3 issues, 13 comments) -* [@Tbaut](https://github.com/Tbaut) (1 issue) -* [@terichadbourne](https://github.com/terichadbourne) (1 issue, 12 comments) -* [@thomas92911](https://github.com/thomas92911) (1 issue, 1 comment) -* [@travisperson](https://github.com/travisperson) (98 commits, 53 PRs, 40 issues, 190 comments) -* [@tycholiu](https://github.com/tycholiu) (1 comment) -* 
[@urugang](https://github.com/urugang) (1 PR, 1 issue, 1 comment) -* [@vmx](https://github.com/vmx) (8 commits, 5 PRs, 2 issues, 19 comments) -* [@vyzo](https://github.com/vyzo) (8 comments) -* [@warpfork](https://github.com/warpfork) (6 comments) -* [@waynewyang](https://github.com/waynewyang) (3 commits, 5 PRs, 2 issues, 8 comments) -* [@whyrusleeping](https://github.com/whyrusleeping) (157 commits, 42 PRs, 55 issues, 296 comments) -* [@windstore](https://github.com/windstore) (1 issue, 2 comments) -* [@woshihanhaoniao](https://github.com/woshihanhaoniao) (5 issues, 6 comments) -* [@wyblyf](https://github.com/wyblyf) (1 issue, 6 comments) -* [@xcshuan](https://github.com/xcshuan) (1 issue, 1 comment) -* [@yangjian102621](https://github.com/yangjian102621) (1 PR, 4 issues, 16 comments) -* [@yaohcn](https://github.com/yaohcn) (1 PR, 1 issue, 3 comments) -* [@yph152](https://github.com/yph152) (1 issue) -* [@ytQiao](https://github.com/ytQiao) (1 issue, 2 comments) -* [@yusefnapora](https://github.com/yusefnapora) (1 comment) -* [@yyh1102](https://github.com/yyh1102) (2 comments) -* [@zebul](https://github.com/zebul) (1 issue) -* [@ZenGround0](https://github.com/ZenGround0) (35 commits, 29 PRs, 85 issues, 128 comments) -* [@zhangkuicheng](https://github.com/zhangkuicheng) (2 issues, 4 comments) -* [@zixuanzh](https://github.com/zixuanzh) (4 comments) -* [@zjoooooo](https://github.com/zjoooooo) (1 issue, 1 comment) - -### 🙌🏽 Want to contribute? - -Would you like to contribute to the Filecoin project and don’t know how? 
Here are a few places you can get started: - -- Check out the [Contributing Guidelines](https://github.com/filecoin-project/go-filecoin/blob/master/CONTRIBUTING.md) -- Look for issues with the `good-first-issue` label in [go-filecoin](https://github.com/filecoin-project/go-filecoin/issues?q=is%3Aissue+is%3Aopen+label%3A"good+first+issue") and [rust-fil-proofs](https://github.com/filecoin-project/rust-fil-proofs/issues?q=is%3Aissue+is%3Aopen+label%3A"good+first+issue") -- Join the [community chat on Matrix/Slack](https://github.com/filecoin-project/community#chat), introduce yourself in #_fil-lobby, and let us know where you would like to contribute - -### ⁉️ Do you have questions? - -The best place to ask your questions about go-filecoin, how it works, and what you can do with it is at [discuss.filecoin.io](https://discuss.filecoin.io). We are also available at the [community chat on Matrix/Slack](https://github.com/filecoin-project/community#chat). - ---- - -## go-filecoin 0.1.4 - -We're happy to announce go-filecoin 0.1.4. This release contains a better install experience, initial Proof-of-Spacetime integration, more reliable message sending and networking, and many other improvements. Get pumped! 🎁 - -### Install and Setup - -#### 💝 Binary releases - -Linux and MacOS binaries for go-filecoin are now available! See [Installing from binary](https://github.com/filecoin-project/go-filecoin/wiki/Getting-Started#installing-from-binary) for instructions. - -#### 🍱 Precompiled proofs parameters - -Running secure proofs requires parameter files that are several GB in size. Previously, these files were generated at install, an extremely memory-intensive process causing slow or impossible builds for many users. Now, you can download pre-generated files during install by running `paramfetch`. This step is now included in the [Installing from binary](https://github.com/filecoin-project/go-filecoin/wiki/Getting-Started#installing-from-binary) instructions. 
- -#### 🦖 Version checking - -go-filecoin now checks that it is running the same version (at the same commit) while connecting to a devnet. This is a temporary fix until a model for change is implemented, allowing different versions to interoperate. - -### Features - -#### 💎 Proof-of-Spacetime Integration - -Miners now call `rust-fil-proofs` to periodically generate proofs of continued storage. With this major integration in place, you can expect some follow-up (for example, storage mining faults do not yet appear on-chain) and continued optimizations to the underlying Proof-of-Spacetime construction and implementation. - -### Performance and Reliability - -#### 🤝 Networking - -We’ve upgraded to [go-libp2p](http://github.com/libp2p/go-libp2p) 6.0.35 which has fixed autorelay reliability issues. We’ve also added a `go-filecoin dht` command for interacting with and debugging our dht. - -#### 🎈 Better message sending - -In the past, if messages failed, they failed silently. go-filecoin would continue to select nonces higher than the sent message, effectively deadlocking message sending. We have now implemented several improvements to message sending: incoming and outgoing queues, better nonce selection logic, and a message timeout after a certain number of blocks. See [message status](https://github.com/filecoin-project/go-filecoin/blob/6a34245644cd62436239b885cd7ba1f0f29d0ca5/commands/message.go) and mpool ls/show/rm commands for more. - -#### 🔗 Chain syncing is faster - -Chain is now faster due to use of bitswap sessions. Woohoo! - -#### ⌛ Context deadline errors fixed - -In the past, the context deadline was set artificially low for file transfer. This caused some large file transfers to time out, preventing storage deals from being completed. Thank you to @markwylde, @muronglaowang, @pengxiankaikai, @sandjj, and others for bug reports. 
- -### Refactors and Endeavors - -#### 🦊 FAST (Filecoin Automation & System Toolkit) - -FAST is a common library of go-filecoin code that can be used in daemon testing, devnet initialization, and other applications like network randomization that involve managing nodes, running commands against them, and observing their state. - -Using FAST, we’ve developed [localnet](https://github.com/filecoin-project/go-filecoin/tree/master/tools/fast/bin/localnet), a new tool to quickly and easily set up a local network for testing, debugging, development, and more. Want to give it a whirl? Check out the [localnet README](https://github.com/filecoin-project/go-filecoin/tree/master/tools/fast/bin/localnet). - -#### 👾 Porcelain/Plumbing refactor for node object - -Previously, the node object contained both interfaces and internals for much of the core protocol. It was difficult to unit test due to many dependencies and complicated setup. Following the [porcelain and plumbing pattern from Git](https://git-scm.com/book/en/v2/Git-Internals-Plumbing-and-Porcelain), we have now decoupled the node object from many of its dependencies. We have also created a separate API for block, storage, and retrieval mining. - -### Changelog - -A full list of [all 200 PRs in this release](https://github.com/filecoin-project/go-filecoin/pulls?utf8=%E2%9C%93&q=is%3Apr+merged%3A2019-02-14..2019-03-26) can be found on Github. - -### Contributors - -❤️ Huge thank you to everyone that made this release possible! 
By alphabetical order, here are all the humans who contributed issues and commits in `go-filecoin` and `rust-fil-proofs` to date: - -- [@aaronhenshaw](http://github.com/aaronhenshaw) -- [@aboodman](http://github.com/aboodman) -- [@AbelLaker](http://github.com/AbelLaker) -- [@alanshaw](http://github.com/alanshaw) -- [@acruikshank](http://github.com/acruikshank) -- [@anacrolix](http://github.com/anacrolix) -- [@andychen1984](http://github.com/andychen1984) -- [@anorth](http://github.com/anorth) -- [@Byte-Doctor](http://github.com/Byte-Doctor) -- [@chenminjuan](http://github.com/chenminjuan) -- [@coderlane](http://github.com/coderlane) -- [@comeradekingu](http://github.com/comeradekingu) -- [@danigrant](http://github.com/danigrant) -- [@dayou5168](http://github.com/dayou5168) -- [@dignifiedquire](http://github.com/dignifiedquire) -- [@diwufeiwen](http://github.com/diwufeiwen) -- [@ebuchman](http://github.com/ebuchman) -- [@eefahy](http://github.com/eefahy) -- [@firmianavan](http://github.com/firmianavan) -- [@frrist](http://github.com/frrist) -- [@gmasgras](http://github.com/gmasgras) -- [@haoglehaogle](http://github.com/haoglehaogle) -- [@hsanjuan](http://github.com/hsanjuan) -- [@imrehg](http://github.com/imrehg) -- [@jaybutera](http://github.com/jaybutera) -- [@jbenet](http://github.com/jbenet) -- [@jimpick](http://github.com/jimpick) -- [@karalabe](http://github.com/karalabe) -- [@kubuxu](http://github.com/kubuxu) -- [@lanzafame](http://github.com/lanzafame) -- [@laser](http://github.com/laser) -- [@leinue](http://github.com/leinue) -- [@life-i](http://github.com/life-i) -- [@luca8991](http://github.com/luca8991) -- [@madper](http://github.com/madper) -- [@magik6k](http://github.com/magik6k) -- [@markwylde](http://github.com/markwylde) -- [@mburns](http://github.com/mburns) -- [@michellebrous](http://github.com/michellebrous) -- [@mikael](http://github.com/mikael) -- [@mishmosh](http://github.com/mishmosh) -- [@mslipper](http://github.com/mslipper) -- 
[@muronglaowang](http://github.com/muronglaowang) -- [@nanofortress](http://github.com/nanofortress) -- [@natoboram](http://github.com/natoboram) -- [@nicola](http://github.com/nicola) -- [@ognots](http://github.com/ognots) -- [@olizilla](http://github.com/olizilla) -- [@pacius](http://github.com/pacius) -- [@pengxiankaikai](http://github.com/pengxiankaikai) -- [@pooja](http://github.com/pooja) -- [@porcuquine](http://github.com/porcuquine) -- [@phritz](http://github.com/phritz) -- [@pkrasam](http://github.com/pkrasam) -- [@pxrxingrui520](http://github.com/pxrxingrui520) -- [@raulk](http://github.com/raulk) -- [@rafael81](http://github.com/rafael81) -- [@richardlitt](http://github.com/richardlitt) -- [@rkowalick](http://github.com/rkowalick) -- [@rosalinekarr](http://github.com/rosalinekarr) -- [@sandjj](http://github.com/sandjj) -- [@schomatis](http://github.com/schomatis) -- [@shannonwells](http://github.com/shannonwells) -- [@sidka](http://github.com/sidka) -- [@stebalien](http://github.com/stebalien) -- [@steven004](http://github.com/steven004) -- [@sywyn219](http://github.com/sywyn219) -- [@tbaut](http://github.com/tbaut) -- [@thomas92911](http://github.com/thomas92911) -- [@travisperson](http://github.com/travisperson) -- [@vmx](http://github.com/vmx) -- [@waynewyang](http://github.com/waynewyang) -- [@whyrusleeping](http://github.com/whyrusleeping) -- [@windstore](http://github.com/windstore) -- [@woshihanhaoniao](http://github.com/woshihanhaoniao) -- [@xcshuan](http://github.com/xcshuan) -- [@yangjian102621](http://github.com/yangjian102621) -- [@yph152](http://github.com/yph152) -- [@zenground0](http://github.com/zenground0) -- [@zhangkuicheng](http://github.com/zhangkuicheng) -- [@zjoooooo](http://github.com/zjoooooo) - -### 🙌🏽 Want to contribute? - -Would you like to contribute to the Filecoin project and don’t know how? 
Here are a few places you can get started: - -- Check out the [Contributing Guidelines](https://github.com/filecoin-project/go-filecoin/blob/master/CONTRIBUTING.md) -- Look for issues with the `good-first-issue` label in [go-filecoin](https://github.com/filecoin-project/go-filecoin/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22e-good-first-issue%22+) and [rust-fil-proofs](https://github.com/filecoin-project/rust-fil-proofs/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) -- Join the [community chat on Matrix/Slack](https://github.com/filecoin-project/community#chat), introduce yourself in #_fil-lobby, and let us know where you would like to contribute - -### ⁉️ Do you have questions? - -The best place to ask your questions about go-filecoin, how it works, and what you can do with it is at [discuss.filecoin.io](https://discuss.filecoin.io). We are also available at the [community chat on Matrix/Slack](https://github.com/filecoin-project/community#chat). +# venus changelog diff --git a/CODEWALK.md b/CODEWALK.md index 44808ba930..71f862a404 100644 --- a/CODEWALK.md +++ b/CODEWALK.md @@ -2,11 +2,11 @@ This document is formatted one-sentence-per-line, breaking very long sentences at phrase boundaries. This format makes diffs clean and review comments easy to target. --> -# Go-filecoin code overview +# Venus code overview -This document provides a high level tour of the go-filecoin implementation of the Filecoin protocols in Go. +This document provides a high level tour of the venus implementation of the Venus protocols in Go. -This document assumes a reasonable level of knowledge about the Filecoin system and protocols, which are not re-explained here. +This document assumes a reasonable level of knowledge about the Venus system and protocols, which are not re-explained here. It is complemented by specs (link forthcoming) that describe the key concepts implemented here. 
**Table of contents** @@ -25,18 +25,12 @@ It is complemented by specs (link forthcoming) that describe the key concepts im - [A tour of the code](#a-tour-of-the-code) - [History–the Node object](#historythe-node-object) - [Core services](#core-services) - - [Plumbing & porcelain](#plumbing--porcelain) - [Commands](#commands) - - [Protocols](#protocols) - - [Protocol Mining APIs](#protocol-mining-apis) - [Actors](#actors) - [The state tree](#the-state-tree) - [Messages and state transitions](#messages-and-state-transitions) - [Consensus](#consensus) - - [Storage protocol](#storage-protocol) - - [Retrieval](#retrieval) - [Entry point](#entry-point) -- [Sector builder & proofs](#sector-builder--proofs) - [Building and distribution.](#building-and-distribution) - [Groth parameters](#groth-parameters) - [Proof mode configuration](#proof-mode-configuration) @@ -50,11 +44,8 @@ It is complemented by specs (link forthcoming) that describe the key concepts im - [Unit Tests (`-unit`)](#unit-tests--unit) - [Integration Tests (`-integration`)](#integration-tests--integration) - [Functional Tests (`-functional`)](#functional-tests--functional) - - [Sector Builder Tests (`-sectorbuilder`)](#sector-builder-tests--sectorbuilder) - [Dependencies](#dependencies) - [Patterns](#patterns) - - [Plumbing and porcelain](#plumbing-and-porcelain) - - [Consumer-defined interfaces](#consumer-defined-interfaces) - [Observability](#observability) - [Metrics](#metrics) - [Tracing](#tracing) @@ -63,26 +54,25 @@ It is complemented by specs (link forthcoming) that describe the key concepts im ## Background -The go-filecoin implementations is the result of combined research and development effort. +The venus implementations is the result of combined research and development effort. The protocol spec and architecture evolved from a prototype, and is the result of iterating towards our goals. -Go-filecoin is a work in progress. 
We are still working on clarifying the architecture and propagating good patterns throughout the code. Please bear with us, and we’d love your help. -Filecoin borrows a lot from the [IPFS](https://ipfs.io/) project, including some patterns, tooling, and packages. +Venus borrows a lot from the [IPFS](https://ipfs.io/) project, including some patterns, tooling, and packages. Some benefits of this include: - the projects encode data in the same way ([IPLD](https://ipld.io/), [CIDs](https://github.com/multiformats/cid)), easing interoperability; -- the go-filecoin project can build on solid libraries like the IPFS commands. +- the venus project can build on solid libraries like the IPFS commands. Other patterns, we've evolving for our needs: - go-ipfs relies heavily on shell-based integration testing; we aim to rely heavily on unit testing and Go-based integration tests. - The go-ipfs package structure involves a deep hierarchy of dependent implementations; we're moving towards a more Go-idiomatic approach with narrow interfaces defined in consuming packages (see [Patterns](#patterns). -- The term "block" is heavily overloaded: a blockchain block ([`types/block.go`](https://github.com/filecoin-project/go-filecoin/tree/master/types/block.go)), -but also content-id-addressed blocks in the block service. +- The term "block" is heavily overloaded: a blockchain block ([`types/block.go`](https://github.com/filecoin-project/venus/tree/master/venus-shared/types/block_header.go)), +but also content-id-addressed blocks in the block service. Blockchain blocks are stored in block service blocks, but are not the same thing. 
## Architecture overview @@ -100,7 +90,7 @@ Blockchain blocks are stored in block service blocks, but are not the same thing │ │ ┌───────────────┼───────────────┐ ┌──────────────────────┤ │ │ │ │ │ - ▼ ▼ ▼ ▼ │ + ▼ ▼ ▼ ▼ │ ┌──────────────┬─────────────────┬─────────────┐ │ ┌───────▶│ Storage API │ Retrieval API │ Block API │ │ │ ├──────────────┼─────────────────┼─────────────┤ │ @@ -112,10 +102,10 @@ Internal │ │ Protocol │ Protocol │ Protocol │ │ │ │ │ │ │ │ ▼ ▼ ▼ │ ▼ │ ┌───────────────────────────────────────────┐ │ ┌─────────────┬──────────────┐ - │ │ │ │ │ │ │ - │ │ Core API │ │ │ Porcelain │ Plumbing │ - └─────────▶│ │ └─▶├─────────────┘ │ - │ │ │ │ + │ │ │ │ │ + │ │ Core API │ │ │ Client API + └─────────▶│ │ └─▶ + │ │ │ └───────────────────────────────────────────┘ └────────────────────────────┘ │ │ ┌─────────────────┬────┴──────────────┬────────────────┬───────────┴─────┐ @@ -132,23 +122,13 @@ Internal │ │ Protocol │ Protocol │ Protocol │ ### History–the Node object -The `Node` ([`node/`](https://github.com/filecoin-project/go-filecoin/tree/master/node)) object is the "server". +The `Node` ([`node/`](https://github.com/filecoin-project/venus/tree/master/app/node)) object is the "server". It contains much of the core protocol implementation and plumbing. -As an accident of history it has become something of a god-object, which we are working to resolve. -The `Node` object is difficult to unit test due to its many dependencies and complicated set-up. -We are [moving away from this pattern](https://github.com/filecoin-project/go-filecoin/issues/1469#issuecomment-451619821), -and expect the Node object to be reduced to a glorified constructor over time. -The [`api`](https://github.com/filecoin-project/go-filecoin/tree/master/api) package contains the API of all the +The [`api`](https://github.com/filecoin-project/venus/tree/master/app/client) package contains the API of all the core building blocks upon which the protocols are implemented. 
The implementation of this API is the `Node`. -We are migrating away from this `api` package to the plumbing package, see below. -The [`protocol`](https://github.com/filecoin-project/go-filecoin/tree/master/protocol) package contains much of the application-level protocol code. -The protocols are implemented in terms of the plumbing & porcelain APIs (see below). -Currently the hello, retrieval and storage protocols are implemented here. -Block mining should move here (from the [`mining`](https://github.com/filecoin-project/go-filecoin/tree/master/mining) top-level package and `Node` internals). -Chain syncing may move here too. ### Core services @@ -170,32 +150,16 @@ Services include (not exhaustive): - Block service: content-addressed key value store that stores IPLD data, including blockchain blocks as well as the state tree (it’s poorly named). - Wallet: manages keys. -### Plumbing & porcelain - -The [`plumbing`](https://github.com/filecoin-project/go-filecoin/tree/master/plumbing) & -[`porcelain`](https://github.com/filecoin-project/go-filecoin/tree/master/porcelain) packages are -the API for most non-protocol commands. - -__Plumbing__ is the set of public apis required to implement all user-, tool-, and some protocol-level features. -Plumbing implementations depend on the core services they need, but not on the `Node`. -Plumbing is intended to be fairly thin, routing requests and data between core components. -Plumbing implementations are often tested with real implementations of the core services they use, but can also be tested with fakes and mocks. - -__Porcelain__ implementations are convenience compositions of plumbing. -They depend only on the plumbing API, and can coordinate a sequence of actions. -Porcelain is ephemeral; the lifecycle is the duration of a single porcelain call: something calls into it, it does its thing, and then returns. 
-Porcelain implementations are ideally tested with fakes of the plumbing they use, but can also use full implementations. - ### Commands -The `go-filecoin` binary can run in two different modes, either as a long-running daemon exposing a JSON/HTTP RPC API, +The `venus` binary can run in two different modes, either as a long-running daemon exposing a JSON/HTTP RPC API, or as a command-line interface which interprets and routes requests as RPCs to a daemon. -In typical usage, you start the daemon with `go-filecoin daemon` then use the same binary to issue commands like `go-filecoin wallet addrs`, +In typical usage, you start the daemon with `venus daemon` then use the same binary to issue commands like `venus wallet ls`, which are transmitted to the daemon over the HTTP API. The commands package uses the [go-ipfs command library](https://github.com/ipfs/go-ipfs-cmds) and defines commands as both CLI and JSON entry points. -[Commands](https://github.com/filecoin-project/go-filecoin/tree/master/commands) implement user- and tool-facing functionality. +[Commands](https://github.com/filecoin-project/venus/tree/master/cmd) implement user- and tool-facing functionality. Command implementations should be very, very small. With no logic of their own, they should call just into a single plumbing or porcelain method (never into core APIs directly). The go-ipfs command library introduces some boilerplate which we can reduce with some effort in the future. @@ -204,34 +168,6 @@ Right now, some of the command implementations call into the node; this should c Tests for commands are generally end-to-end "daemon tests" that exercise CLI. They start some nodes and interact with them through shell commands. -### Protocols - -[Protocols](https://github.com/filecoin-project/go-filecoin/tree/master/protocol) embody -"application-level" functionality. They are persistent; they keep running without active user/tool activity. -Protocols interact with the network. 
-Protocols depend on `plumbing` and `porcelain` for their implementation, as well some "private" core APIs (at present, many still depend on the `Node` object). - -Protocols drive changes in, but do not own, core state. -For example, the chain sync protocol drives updates to the chain store (a core service), but the sync protocol does not own the chain data. -However, protocols may maintain their own non-core, protocol-specific datastores (e.g. unconfirmed deals). - -Application-level protocol implementations include: - -- Storage protocol: the mechanism by which clients make deals with miners, transfer data for storage, and then miners prove storage. -- Block mining protocol: the protocol for block mining and consensus. -Miners who are storing data participate in creating new blocks. -Miners win elections in proportion to storage committed. -This block mining is spread through a few places in the code. -Much in mining package, but also a bunch in the node implementation. -- Chain protocol: protocol for exchange of mined blocks - -##### Protocol Mining APIs -The [`storage`](https://github.com/filecoin-project/go-filecoin/tree/master/protocol/storage/), -[`retrieval`](https://github.com/filecoin-project/go-filecoin/tree/master/protocol/retrieval/) -and [`block`](https://github.com/filecoin-project/go-filecoin/tree/master/protocol/mining/) packages now house their own APIs. These are the new interfaces for all mining commands, but not miner creation. These Protocol APIs provide a the new interface for the Network layer of go-filecoin. Protocol APIs also consume Plumbing and Porcelain APIs. They are ephemeral, like the Porcelain API. Note also that the MiningOnce command uses `BlockMiningAPI` to create its own block mining worker, which lasts only for the time it takes to mine and post a new block. - - - More detail on the individual protocols is coming soon. 
### Actors @@ -242,7 +178,7 @@ It is expected that other implementations will match the behaviour of the Go act An ABI describes how inputs and outputs to the VM are encoded. Future work will replace this implementation with a "real" VM. -The [Actor](https://github.com/filecoin-project/go-filecoin/blob/master/actor/actor.go) struct is the base implementation of actors, with fields common to all of them. +The [Actor](https://github.com/filecoin-project/venus/blob/master/venus-shared/internal/actor.go) struct is the base implementation of actors, with fields common to all of them. - `Code` is a CID identifying the actor code, but since these actors are implemented in Go, is actually some fixed bytes acting as an identifier. This identifier selects the kind of actor implementation when a message is sent to its address. @@ -256,38 +192,24 @@ A storage miner actor exists for each miner in the Filesystem network. Their structs share the same code CID so they have the same behavior, but have distinct head state CIDs and balance. Each actor instance exists at an address in the state tree. An address is the hash of the actor’s public key. -The [account](https://github.com/filecoin-project/go-filecoin/blob/master/actor/builtin/account) actor doesn’t have any special behavior or state other than a balance. -Everyone who wants to send messages (transactions) has an account actor, and it is from this actor’s address that they send messages. - -Every storage miner has an instance of a [miner](https://github.com/filecoin-project/go-filecoin/blob/master/actor/builtin/miner) actor. -The miner actor plays a role in the storage protocol, for example it pledges space and collateral for storage, posts proofs of storage, etc. -A miner actor’s state is located in the state tree at its address; the value found there is an Actor structure. -The head CID in the actor structure points to that miner’s state instance (encoded). 
- -Other built-in actors include the [payment broker](https://github.com/filecoin-project/go-filecoin/blob/master/actor/builtin/paymentbroken), -which provides a mechanism for off-chain payments via payment channels, -and the [storage market](https://github.com/filecoin-project/go-filecoin/blob/master/actor/storagemarket), -which starts miners and tracks total storage (aka "power"). -These are both singletons. - Actors declare a list of exported methods with ABI types. Method implementations typically load the state tree, perform some query or mutation, then return a value or an error. ### The state tree -Blockchain state is represented in the [state tree](https://github.com/filecoin-project/go-filecoin/blob/master/state/tree.go), +Blockchain state is represented in the [state tree](https://github.com/filecoin-project/venus/blob/master/pkg/state/tree/state.go), which contains the state of all actors. The state tree is a map of address to (encoded) actor structs. The state tree interface exposes getting and setting actors at addresses, and iterating actors. The underlying data structure is a [Hash array-mapped trie](https://en.wikipedia.org/wiki/Hash_array_mapped_trie). A HAMT is also often used to store actor state, eg when the actor wants to store a large map. -The canonical binary encoding used by Filecoin is [CBOR](http://cbor.io/). In Go, structs are CBOR-encoded by reflection. +The canonical binary encoding used by Venus is [CBOR](http://cbor.io/). In Go, structs are CBOR-encoded by reflection. The ABI uses a separate inner encoding, which is manual. ### Messages and state transitions -Filecoin state transitions are driven by messages sent to actors; these are our "transactions". +Venus state transitions are driven by messages sent to actors; these are our "transactions". A message is a method invocation on an actor. A message has sender and recipient addresses, and optional parameters such as an amount of filecoin to transfer, a method name, and parameters. 
@@ -300,7 +222,7 @@ One invokes a method on an actor by sending it a message. To send a message the message is created, signed, added to your local node’s message pool broadcast on the network to other nodes, which will add it to their message pool too. Some node will then mine a block and possibly include your message. -In Filecoin, it is essential to remember that sending the message does not mean it has gone on chain or that its outcome has been reflected in the state tree. +In Venus, it is essential to remember that sending the message does not mean it has gone on chain or that its outcome has been reflected in the state tree. Sending means the message is available to be mined into a block. You must wait for the message to be included in a block to see its effect. @@ -309,19 +231,19 @@ These messages are executed locally against a read only version of the state tre They never leave the node, they are not broadcast. The plumbing API exposes `MessageSend` and `MessageQuery` for these two cases. -The [processor](https://github.com/filecoin-project/go-filecoin/blob/master/consensus/processor.go) is the +The [processor](https://github.com/filecoin-project/venus/blob/master/pkg/consensus/processor.go) is the entry point for making and validating state transitions represented by the messages. It is modelled Ethereum’s message processing system. The processor manages the application of messages to the state tree from the prior block/s. It loads the actor from which a message came, check signatures, then loads the actor and state to which a message is addressed and passes the message to the VM for execution. -The [vm](https://github.com/filecoin-project/go-filecoin/blob/master/vm) package has the low level detail of calling actor methods. 
-A [VM context](https://github.com/filecoin-project/go-filecoin/blob/master/vm/context.go) defines the world visible from an actor implementation while executing, +The [vm](https://github.com/filecoin-project/venus/blob/master/pkg/vm) package has the low level detail of calling actor methods. +A [VM context](https://github.com/filecoin-project/venus/blob/master/pkg/vm/context.go) defines the world visible from an actor implementation while executing. ### Consensus -Filecoin uses a consensus algorithm called [expected consensus](https://github.com/filecoin-project/go-filecoin/blob/master/consensus/expected.go). +Venus uses a consensus algorithm called [expected consensus](https://github.com/filecoin-project/venus/blob/master/pkg/consensus/expected.go). Unlike proof-of-work schemes, expected-consensus is a proof-of-stake model, where probability of mining a block in each round (30 seconds) is proportional to amount of storage a miner has committed to the network. Each round, miners are elected through a probabilistic but private mechanism akin to rolling independent, private, but verifiable dice. @@ -330,59 +252,13 @@ If a miner is elected, they have the right to mine a block in that round. Given the probabilistic nature of mining new blocks, more than one block may be mined in any given round. Hence, a new block might have more than one parent block. -The parents form a set, which we call a [tipset](https://github.com/filecoin-project/go-filecoin/blob/master/consensus/tipset.go). +The parents form a set, which we call a [tipset](https://github.com/filecoin-project/venus/blob/master/venus-shared/types/tipset.go). All the blocks in a tipset are at the same height and share the same parents. Tipsets contain one or more blocks. A null block count indicates the absence of any blocks mined in a previous round. 
Subsequent blocks are built upon *all* of the tipset; there is a canonical ordering of the messages in a tipset defining a new consensus state, not directly referenced from any of the tipset’s blocks. -### Storage protocol -The storage protocol is mechanism by which clients make deals directly with storage miners to store their data, implemented in [`protocol/storage`](https://github.com/filecoin-project/go-filecoin/blob/master/protocol/storage). - -A storage miner ([protocol/storage/miner.go](https://github.com/filecoin-project/go-filecoin/blob/master/protocol/storage/miner.go)) advertises storage with an on-chain ask, -which specifies an asking price and storage capacity at that price. -Clients discover asks by iterating miner actors’ on-chain state. -A client wishing to take an ask creates a deal proposal. -A proposal references a specific unit of data, termed a piece, which has a CID (hash of the bytes). -A piece must fit inside a single sector (see below) as defined by network parameters. - -A storage client ([protocol/storage/client.go](https://github.com/filecoin-project/go-filecoin/blob/master/protocol/storage/client.go)) connects directly to a miner to propose a deal, -using a libp2p peer id embedded in the on-chain storage miner actor data. -An off-chain lookup service maps peer ids to concrete addresses, in the form of multiaddr, using a [libp2p distributed hash table](https://github.com/filecoin-project/go-filecoin/blob/master/networking.md) (DHT). -A client also creates a payment channel so the miner can be paid over time for storing the piece. -The miner responds with acceptance or otherwise. - -When proposing a deal, a client loads the piece data into its [IPFS block service](https://github.com/ipfs/go-blockservice). -This advertises the availability of the data to the network, making it available to miners. 
-A miner accepting a deal pulls the data from the client (or any other host) using the IPFS block service [bitswap protocol](https://github.com/ipfs/specs/tree/master/bitswap). - -A miner packs pieces from many clients into a sector, which is then sealed with a proof of replication (aka commitment). -Sealing is a computationally intensive process that can take tens of minutes. -A client polls the miner directly for deal status to monitor the piece being received, staged in a sector, the sector sealed, and the proof posted to the blockchain. -Once the sector commitment is on chain, the client can observe it directly. -A miner periodically computes proofs for all sealed sectors and posts on chain. -There is no on-chain mapping of pieces to sectors; a client must keep track of its own pieces. - -Note that the mechanisms for communication of deals and state are not specified in the protocol, except the format of messages and the eventual on-chain commitment. -Other mechanisms may be used. - -The storage [client commands](https://github.com/filecoin-project/go-filecoin/blob/master/commands/client.go) interface to a `go-filecoin` daemon in the same way as other node [commands](#commands). -Right now, a client must be running a full node, but that’s not in-principle necessary. -Future reorganisation will allow the extraction of a thin client binary. - -Data preparation is entirely the responsibility of the client, including breaking it up into appropriate chunks (<= sector size), compressing, -encrypting, adding error-correcting codes, and replicating widely enough to achieve redundancy goals. -In the future, we will build a client library which handles many of these tasks for a user. -We also plan support for "repair miners", to whom responsibility can be delegated for monitoring and repairing faults. - -### Retrieval -Retrieval mining is not necessarily linked to storage mining, although in practise we expect all storage miners to also run retrieval miners. 
-Retrieval miners serve requests to fetch content, and are not much more than a big cache and some logic to find missing pieces. - -The retrieval protocol and implementation are not yet fully developed. -At present (early 2019), retrieval is not charged for, and always causes unsealing of a sector. - ### Entry point There’s no centrally dispatched event loop. @@ -391,50 +267,11 @@ Protocols (goroutines) communicate through custom channels. This architecture needs more thought, but we are considering moving more inter-module communication to use iterators (c.f. those in Java). An event bus might also be a good pattern for some cases, though. -## Sector builder & proofs -A storage mining node commits to storage by cryptographically proving that it has stored a sector, a process known as sealing. -Proof of replication, or PoRep, is an operation which generates a unique copy (sealed sector) of a sector's original data, a SNARK proof, -and a set of commitments identifying the sealed sector and linking it to the corresponding unsealed sector. -The commitSector message, posted to the blockchain, includes these commitments (CommR, CommRStar, CommD) and the SNARK proof. - -A storage miner continually computes proofs over their sealed sectors and periodically posts a summary of their proofs on chain. -When a miner commits their first sector to the network (with `commitSector` message included in some block), a "proving period" begins. -A proving period is a window of time (a fixed number of blocks) in which the miner must generate a "proof of space-time", or PoSt, -in order to demonstrate to the network that they have not lost the sector which they have committed. -If the miner does not get a `submitPoSt` message included in a block during a proving period, it may be penalised ("slashed"). - -Storage and proofs are administered by the [sector builder](https://github.com/filecoin-project/go-filecoin/tree/master/proofs/sectorbuilder). 
-Most of the sector builder is implemented in Rust and invoked via a [FFI](https://github.com/filecoin-project/go-filecoin/blob/master/proofs/interface.go). -This code includes functionality to: -- write (and "preprocess") user piece-bytes to disk, -- schedule seal (proof-of-replication) jobs, -- schedule proof-of-spacetime generation jobs, -- schedule unseal (retrieval) jobs, -- verify proof-of-spacetime and proof-of-replication-verification, -- map between replica commitment and sealed sector-bytes, -- map between piece key (CID of user piece) and sealed sector-bytes. - -The Go `SectorBuilder` interface corresponds closely to the rust `SectorBuilder` struct. -Rust code is invoked directly (in-process) via Cgo. The sector builder’s lifecycle (in Rust) is controlled by go-filecoin. -Cgo functions like `C.CBytes` are used to move bytes across Cgo from Go to Rust; -Go allocates in the Rust heap through Cgo and then provides Rust with pointers from which it can reconstitute arrays, structs, and so forth. - -Sectors and sealed sectors (aka replicas) are just flat files on disk. -Sealing a sector is a destructive operation on the sector. -The process of sealing yields metadata such as the proof/commitment, which is stored is a separate metadata store, not within the replica file. - -We intend the sector builder interface to represent a service, abstracting away both policy (e.g. sector packing, scheduling of PoSt calculation) and implementation details. -In the future, we would like to able to interface to it via IPC/RPC as well as FFI. - -The sector builder and proofs code is written in Rust partly to ease use of the [Bellman zk-SNARK library](https://github.com/zkcrypto/bellman). -The PoRep and PoSt code is under active development. -PoRep is integrated with go-filecoin, while the PoST implementation and integration is still in progress (March 2019). - ### Building and distribution. 
The Rust code responsible for sectors and proofs is in the [rust-fil-proofs](https://github.com/filecoin-project/rust-fil-proofs) repo. -This repo is included in go-filecoin as a Git submodule. +This repo is included in venus as a Git submodule. The submodule refers to a specific repository SHA hash. -The `install-rust-proofs.sh` script, invoked by the `deps` build step of go-filecoin, builds the Rust proofs code and copies binary assets to locations hardcoded in Go interface code. +The `install-rust-proofs.sh` script, invoked by the `deps` build step of venus, builds the Rust proofs code and copies binary assets to locations hardcoded in Go interface code. As an alternative to compiling Rust code locally, the continuous integration server publishes an archive of precompiled binary objects with every successful build of the `rust-fil-proofs/master` branch. These releases are identified by the Git submodule SHA. This archive is pushed to the GitHub releases service. @@ -451,7 +288,7 @@ The build or tarball contains: ### Groth parameters The proving algorithms rely on a large binary parameter file known as the Groth parameters. -This file is stored in a cache directory, typically `/tmp/filecoin-proof-parameters`. +This file is stored in a cache directory, typically `/var/tmp/filecoin-proof-parameters`. When proofs code changes, the params may need to change. The `paramcache` program populates the Groth parameter directory by generating the parameters, a slow process (10s of minutes). @@ -461,28 +298,28 @@ The CIDs of the parameter files thus published must be pinned (made continuously The `paramfetch` program fetches params to local cache directory from IPFS gateway. The `install-rust-proofs.sh` script fetches or generates these Groth parameters as necessary when building `deps`. -Groth parameters in `/tmp/filecoin-proof-parameters` are accessed at go-filecoin runtime. +Groth parameters in `/var/tmp/filecoin-proof-parameters` are accessed at venus runtime. 
The parameters are identified by the `parameters.json` file from fil-rust-proofs, which includes a checksum. ### Proof mode configuration -For ease of development, go-filecoin can be configured to use a test proofs mode, which will cause storage miners to use sectors into which only 1016 bytes of user data can be written. +For ease of development, venus can be configured to use a test proofs mode, which will cause storage miners to use sectors into which only 1016 bytes of user data can be written. This lowers the computational burden of sealing and generating PoSts. The `genesis.car` in `fixtures/test/` is configured to use test proofs mode. ## Networking -Filecoin relies on [libp2p](https://libp2p.io/) for its networking needs. +Venus relies on [libp2p](https://libp2p.io/) for its networking needs. libp2p is a modular networking stack for the peer-to-peer era. It offers building blocks to tackle requirements such as -peer discovery, transport switching, multiplexing, content routing, NAT traversal, pubsub, circuit relay, etc., most of which Filecoin uses. +peer discovery, transport switching, multiplexing, content routing, NAT traversal, pubsub, circuit relay, etc., most of which Venus uses. Developers can compose these blocks easily to build the networking layer behind their P2P system. -A detailed overview of how Filecoin uses libp2p can be found in the [Networking doc](networking.md). +A detailed overview of how Venus uses libp2p can be found in the [Networking doc](networking.md). ## Filesystem storage -The *repo*, aka `fsrepo`, is a directory stored on disk containing all necessary information to run a `go-filecoin daemon`, typically at `$HOME/.filecoin`. +The *repo*, aka `fsrepo`, is a directory stored on disk containing all necessary information to run a `venus daemon`, typically at `$HOME/.venus`. The repo does not include client data stored by storage miners, which is held instead in the sector base. 
The repo does include a JSON config file with preferences on how the daemon should operate, several key value datastores holding data important to the internal services, @@ -490,75 +327,57 @@ and the keystore which holds private key data for encryption. ### JSON Config -The JSON config file is stored at `$HOME/.filecoin/config.json`, and can be easily edited using the `go-filecoin config` command. +The JSON config file is stored at `$HOME/.venus/config.json`, and can be easily edited using the `venus config` command. Users can also edit the file directly at their own peril. ### Datastores -The key-value datastores in the repo include persisted data from a variety of systems within Filecoin. +The key-value datastores in the repo include persisted data from a variety of systems within venus. Most of them hold CBOR encoded data keyed on CID, however this varies. -The key value stores include the badger, chain, deals, and wallet directories under `$HOME/.filecoin`. +The key value stores include the badger, chain, deals, and wallet directories under `$HOME/.venus`. The purpose of these directories is: - _Badger_ is a general purpose datastore currently only holding the genesis key, but in the future, almost all our datastores should be merged into this one. - _Chain_ is where the local copy of the blockchain is stored. -- _Deals_ is where the miner and client store persisted information on open deals for data storage, -essentially who is storing what data, for what fee and which sectors have been sealed. -- _Wallet_ is where the user’s Filecoin wallet information is stored. +- _Wallet_ is where the user’s Venus wallet information is stored. ### Keystore The keystore contains the binary encoded peer key for interacting securely over the network. -This data lives in a file at `$HOME/.filecoin/keystore/self`. +This data lives in a file at `$HOME/.venus/keystore/self`. 
## Testing -The `go-filecoin` codebase has a few different testing mechanisms: +The `venus` codebase has a few different testing mechanisms: unit tests, in-process integration tests, "daemon" integration tests, and a couple of high level functional tests. Many parts of code have good unit tests. We’d like all parts to have unit tests, but in some places it hasn’t been possible where prototype code omitted testability features. -Functionality on the `Node` object is a prime example, which we are [moving away from](#plumbing-and-porcelain). Historically there has been a prevalence of integration-type testing. Relying only on integration tests can make it hard to verify small changes to internals. We’re driving towards both wide unit test coverage, with integration tests to verifying end-to-end behaviour. -There are two patterns for unit tests. -In plumbing and low level components, many tests use real dependencies (or at least in-memory versions of them). -For higher level components like porcelain or protocols, where dependencies are more complex to set up, -we often use fake implementations of just those parts of the plumbing that are required. -It is a goal to have both unit tests (with fakes or real deps), as well as higher level integration-style tests. - Code generally uses simple manual dependency injection. A component that takes a large number of deps at construction can have them factored into a struct. -A module should often (re-)declare a narrow subset of the interfaces it depends on (see [Consumer-defined interfaces](#consumer-defined-interfaces))), in canonical Go style. -Some [node integration tests](https://github.com/filecoin-project/go-filecoin/blob/master/node/node_test.go) start one or more full nodes in-process. +Some [node integration tests](https://github.com/filecoin-project/venus/blob/master/app/node/test/node.go) start one or more full nodes in-process. This is useful for fine-grained control over the node being tested. 
Setup for these tests is a bit difficult and we aim to make it much easier to instantiate and test full nodes in-process. -Daemon tests are end-to-end integration tests that exercise the command interface of the `go-filecoin` binary. -These execute separate `go-filecoin` processes and drive them via the command line. -These tests are mostly under the [`commands`](https://github.com/filecoin-project/go-filecoin/blob/master/commands) package, -and use [TestDaemon](https://github.com/filecoin-project/go-filecoin/blob/master/testhelpers/commands.go). -Because the test and the daemon being tested are in separate processes, getting access to the daemon process’s output streams or attaching a debugger is tricky; -see comments in [createNewProcess][(https://github.com/filecoin-project/go-filecoin/blob/726e6705860ddfc8ca4e55bc3610ad2230a95c0c/testhelpers/commands.go#L849) - -In daemon tests it is important to remember that messages do not have any effect on chain state until they are mined into a block. -Preparing an actor in order to receive messages and mutate state requires some delicate network set-up, mining messages into a block to create the actor before it can receive messages. -See `MineOnce` in [`mining/scheduler`](https://github.com/filecoin-project/go-filecoin/blob/master/mining/scheduler.go) which synchronously performs a round of block mining and then stops, pushing the test state forward. +Daemon tests are end-to-end integration tests that exercise the command interface of the `venus` binary. +These execute separate `venus` processes and drive them via the command line. +These tests are mostly under the [`commands`](https://github.com/filecoin-project/venus/blob/master/cmd) package. The `functional-tests` directory contains some Go and Bash scripts which perform complicated multi-node tests on our continuous build. These are not daemon tests, but run separately. 
Some packages have a `testing.go` file with helpers for setting up tests involving that package’s types. -The [`types/testing.go`](https://github.com/filecoin-project/go-filecoin/blob/master/types/testing.go) file has some more generally useful constructors. -There is also a top-level [`testhelpers`](https://github.com/filecoin-project/go-filecoin/blob/master/testhelpers) package with higher level helpers, often used by daemon tests. +There is also a top-level [`testhelpers`](https://github.com/filecoin-project/venus/blob/master/pkg/testhelpers) package with higher level helpers, often used by daemon tests. -We’re in process of creating the Filecoin Automation and Systems Toolkit (FAST) [library](https://github.com/filecoin-project/go-filecoin/tree/master/tools/fast). -The goal of this is to unify duplicated code paths which bootstrap and drive `go-filecoin` daemons for daemon tests, functional tests, +We’re in the process of creating the Venus Automation and Systems Toolkit (FAST) [library](https://github.com/filecoin-project/venus/tree/master/tools/fast). +The goal of this is to unify duplicated code paths which bootstrap and drive `venus` daemons for daemon tests, functional tests, and network deployment verification, providing a common library for filecoin automation in Go. Tests are typically run with `go run ./build/*.go test`. @@ -584,76 +403,29 @@ By default functional tests are disabled when issuing the `go test` command. To enable pass `-functional`. A functional test is an extensive multi-node orchestration or resource-intensive test that may take minutes to run. -##### Sector Builder Tests (`-sectorbuilder`) -By default sectorbuilder tests are disabled when issuing the `go test` command. -To enable pass `-sectorbuilder`. -A sectorbuilder test is a resource-intensive test that may take minutes to run. - ## Dependencies -Dependencies in go-filecoin are managed as go modules, go's new dependency system. 
+Dependencies in venus are managed as go modules, go's new dependency system. -If you've cloned go-filecoin into your GOPATH, you may need to set the `GO111MODULES` environment variable to `on`. The build system automatically sets this but your favorite editor or IDE may not work without it. +If you've cloned venus into your GOPATH, you may need to set the `GO111MODULES` environment variable to `on`. The build system automatically sets this but your favorite editor or IDE may not work without it. ## Patterns The project makes heavy use of or is moving towards a few key design patterns, explained here. -### Plumbing and porcelain - -The _plumbing and porcelain_ pattern is [borrowed from Git](https://git-scm.com/book/en/v2/Git-Internals-Plumbing-and-Porcelain). -Plumbing and porcelain form the API to the internal [core services](#core-services), and will replace the `api` package. - -*Plumbing* is the small set of public building blocks of queries and operations that protocols, clients and humans want to use with a Filecoin node. -These are things like `MessageSend`, `GetHead`, `GetBlockTime`, etc. -By fundamental, we mean that it doesn't make sense to expose anything lower level. -The bar for adding new plumbing is high. -It is very important, for testing and sanity, that plumbing methods be implemented in terms of their narrowest actual dependencies on core services, -and that they not depend on Node or another god object. - -The plumbing API is defined by its implementation in [plumbing/api.go](https://github.com/filecoin-project/go-filecoin/blob/master/plumbing/api.go). -Consumers of plumbing (re-)define the subset of plumbing on which they depend, which is an idiomatic Go pattern (see below). -Implementations of plumbing live in their own concisely named packages under [plumbing](https://github.com/filecoin-project/go-filecoin/tree/master/plumbing). - -*Porcelain* are calls on top of the plumbing API. 
-A porcelain call is a useful composition of plumbing calls and is implemented in terms of calls to plumbing. -An example of a porcelain call is `CreateMiner == MessageSend + MessageWait + ConfigUpdate`. -The bar is low for creation of porcelain calls. -Porcelain calls should define the subset of the plumbing interface on which they depend for ease of testing. - -Porcelain lives in a single [porcelain](https://github.com/filecoin-project/go-filecoin/blob/master/porcelain/) package. -Porcelain calls are free functions that take the plumbing interface as an argument. -The call defines the subset of the plumbing interface that it needs, which can be easily faked in testing. - -We are in the [process of refactoring](https://github.com/filecoin-project/go-filecoin/issues/1469) all protocols to depend only on porcelain, -plumbing and other core APIs, instead of on the Node. - -### Consumer-defined interfaces - -Go interfaces generally belong in the package that *uses* values of the interface type, not the package that implements those values. -This embraces [Postel's law](https://en.wikipedia.org/wiki/Robustness_principle), -reducing direct dependencies between packages and making them easier to test. -It isolates small changes to small parts of the code. - -Note that this is quite different to the more common pattern in object-oriented languages, where interfaces are defined near their implementations. -Our implementation of [plumbing and porcelain](#plumbing-and-porcelain) embraces this pattern, and we are adopting it more broadly. - -This idiom is unfortunately hidden away in a [wiki page about code review](https://github.com/golang/go/wiki/CodeReviewComments#interfaces). -See also Dave Cheney on [SOLID Go Design](https://dave.cheney.net/2016/08/20/solid-go-design). - ### Observability -go-filecoin uses [Opencensus-go](https://github.com/census-instrumentation/opencensus-go) for stats collection and distributed tracing instrumentation. 
-Stats are exported for consumption via [Prometheus](https://prometheus.io/) and traces are exported for consumption via [Jaeger](https://www.jaegertracing.io/docs/1.11/). +venus uses [Opencensus-go](https://github.com/census-instrumentation/opencensus-go) for stats collection and distributed tracing instrumentation. +Stats are exported for consumption via [Prometheus](https://prometheus.io/). #### Metrics -go-filecoin can be configured to collect and export metrics to Prometheus via the `MetricsConfig`. -The details of this can be found inside the [`config/`](https://godoc.org/github.com/filecoin-project/go-filecoin/internal/pkg/config#ObservabilityConfig) package. +venus can be configured to collect and export metrics to Prometheus via the `MetricsConfig`. +The details of this can be found inside the [`config/`](https://pkg.go.dev/github.com/filecoin-project/venus/pkg/config#ObservabilityConfig) package. To view metrics from your filecoin node using the default configuration options set the `prometheusEnabled` value to `true`, start the filecoin daemon, then visit `localhost:9400/metrics` in your web-browser. #### Tracing -go-filecoin can be configured to collect and export traces to Jaeger via the `TraceConfig`. -The details of this can be found inside the [`config/`](https://godoc.org/github.com/filecoin-project/go-filecoin/internal/pkg/config#ObservabilityConfig) package. -To collect traces from your filecoin node using the default configuration options set the `jaegerTracingEnabled` value to `true`, start the filecoin daemon, then follow the [Jaeger Getting](https://www.jaegertracing.io/docs/1.11/getting-started/#all-in-one) started guide. +venus can be configured to collect and export traces to Jaeger via the `TraceConfig`. +The details of this can be found inside the [`config/`](https://pkg.go.dev/github.com/filecoin-project/venus/pkg/config#ObservabilityConfig) package. 
+To collect traces from your venus node using the default configuration options set the `jaegerTracingEnabled` value to `true`, start the venus daemon, then follow the [Jaeger Getting](https://www.jaegertracing.io/docs/1.11/getting-started/#all-in-one) started guide. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4a09642216..eccca44ca8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ The following is a set of guidelines for contributing to the Filecoin Project. Feel free to propose changes, as this is a living document. -Filecoin, including go-filecoin and all related modules, follows the +Filecoin, including venus and all related modules, follows the [Filecoin Code of Conduct](CODE_OF_CONDUCT.md). **Table Of Contents** @@ -48,9 +48,9 @@ Filecoin, including go-filecoin and all related modules, follows the ## How can I contribute? -Here at `go-filecoin`, there’s always a lot of work to do. There are many ways you can support the project, from progamming, writing, organizing, and more. Consider these as starting points: +Here at `venus`, there’s always a lot of work to do. There are many ways you can support the project, from progamming, writing, organizing, and more. Consider these as starting points: -- **Submit bugs**: Perform a cursory [search](https://github.com/filecoin-project/go-filecoin/issues) to see if the problem has already been reported. If it does exist, add a 👍 to the issue to indicate this is also an issue for you, and add a comment if there is extra information you can contribute. If it does not exist, [create a new issue](https://github.com/filecoin-project/go-filecoin/issues/new/choose) (using the Bug report template). +- **Submit bugs**: Perform a cursory [search](https://github.com/filecoin-project/venus/issues) to see if the problem has already been reported. If it does exist, add a 👍 to the issue to indicate this is also an issue for you, and add a comment if there is extra information you can contribute. 
If it does not exist, [create a new issue](https://github.com/filecoin-project/venus/issues/new/choose) (using the Bug report template). - **Write code:** Once you've read this contributing guide, check out [Good First Issues](#good-first-issues) for well-prepared starter issues. @@ -62,7 +62,7 @@ Here at `go-filecoin`, there’s always a lot of work to do. There are many ways ## What should I know before getting started? -Check out the [Go-Filecoin code overview](CODEWALK.md) for a brief tour of the code. +Check out the [venus code overview](CODEWALK.md) for a brief tour of the code. ### Design Before Code - Write down design intent before writing code, and subject it to constructive feedback. @@ -126,7 +126,7 @@ ZenHub adds some useful project management overlay data to GitHub issues. ### Good First Issues -Ready to begin? Here are well-prepared starter issues ([E-good-first-issue](https://github.com/filecoin-project/go-filecoin/issues?q=is%3Aopen+is%3Aissue+label%3AE-good-first-issue)) for your coding pleasure. They have clear problem statements, pointers to the right areas of the code base, and clear acceptance criteria. +Ready to begin? Here are well-prepared starter issues ([E-good-first-issue](https://github.com/filecoin-project/venus/issues?q=is%3Aopen+is%3Aissue+label%3AE-good-first-issue)) for your coding pleasure. They have clear problem statements, pointers to the right areas of the code base, and clear acceptance criteria. To pick up an issue: @@ -135,7 +135,7 @@ To pick up an issue: 3. For issues labeled `PROTOCOL BREAKING` see [the spec section](#the-spec) for additional instructions. 4. **Create a PR** with your changes, following the [Pull Request and Code Review guidelines](). -For continued adventures, search for issues with the label [E-help-wanted](https://github.com/filecoin-project/go-filecoin/issues?q=is%3Aopen+is%3Aissue+label%3AE-help-wanted). These are slightly thornier problems that are also reasonably well-prepared. 
+For continued adventures, search for issues with the label [E-help-wanted](https://github.com/filecoin-project/venus/issues?q=is%3Aopen+is%3Aissue+label%3AE-help-wanted). These are slightly thornier problems that are also reasonably well-prepared. ### Pipelines (ZenHub) We use ZenHub pipelines to track the flow of work on open issues. @@ -161,7 +161,7 @@ Labels are used inclusively, to aid discovery. An issue may have multiple labels; not all issues are expected to have labels. A label is never expected to be "done" (stable state of no open issues). -Labels mark [dimensions including](https://github.com/filecoin-project/go-filecoin/issues/labels): +Labels mark [dimensions including](https://github.com/filecoin-project/venus/issues/labels): - *Area* (name prefixed with `A-`): an area of code functionality - *Category* (name prefixed with `C-`): type of issue, e.g. bug, tech debt, ux - *Engagement* (name prefixed with `E-`): issues suitable for broader community involvement @@ -176,7 +176,7 @@ Epics support focus and forecasting through exclusion or inclusion of (ir-)relev We will typically scope an epic to a deliverable targeting a particular release, so most epics should live less than six weeks. ### Releases (ZenHub) -Release tags identify issues targeted to or blocking a particular go-filecoin software release. +Release tags identify issues targeted to or blocking a particular venus software release. Release tags are forward-looking and support forecasting and focus though inclusion or exclusion of issues. At present (April 2019) we aim for a time-based release roughly every six weeks. ZenHub release tags can span multiple repositories. @@ -188,7 +188,7 @@ Milestones support short time-based cycles such as sprints. Milestones are repo- ## Roles -There are four main roles for people participating in `go-filecoin`. Each has a specific set of abilities and responsibilities: Contributors, Collaborators, Committers, and Maintainers. 
+There are four main roles for people participating in `venus`. Each has a specific set of abilities and responsibilities: Contributors, Collaborators, Committers, and Maintainers. ### Contributors @@ -253,7 +253,7 @@ Abilities: * Manage issues * Merge PRs -**Becoming a committer or maintainer:** Anyone can nominate someone for committership or maintainership by filing an [issue](https://github.com/filecoin-project/go-filecoin/issues) pointing to evidence that the candidate (1) meets the definition and (2) is already performing the responsibilities described in Roles. Existing maintainers must unanimously approve the new candidate. Removing a committer or maintainer requires either self-nomination, or confirmation by at least 66% of existing maintainers. +**Becoming a committer or maintainer:** Anyone can nominate someone for committership or maintainership by filing an [issue](https://github.com/filecoin-project/venus/issues) pointing to evidence that the candidate (1) meets the definition and (2) is already performing the responsibilities described in Roles. Existing maintainers must unanimously approve the new candidate. Removing a committer or maintainer requires either self-nomination, or confirmation by at least 66% of existing maintainers. 
## Additional Developer Notes @@ -292,7 +292,7 @@ import ( [external packages] - [go-filecoin packages] + [venus packages] ) ``` @@ -310,8 +310,8 @@ import ( ipld "github.com/ipfs/go-ipld-format" "github.com/stretchr/testify/assert" - "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" + "github.com/filecoin-project/venus/internal/pkg/testhelpers" + "github.com/filecoin-project/venus/internal/pkg/types" ) ``` diff --git a/LICENSE-MIT b/LICENSE-MIT index b2616875ef..eeeac64472 100644 --- a/LICENSE-MIT +++ b/LICENSE-MIT @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2018 Filecoin Project +Copyright (c) 2021 Filecoin Project Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Makefile b/Makefile index 382443f9ca..ddbd33c7d0 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,130 @@ -all: - go run ./build/*.go build +export CGO_CFLAGS_ALLOW=-D__BLST_PORTABLE__ +export CGO_CFLAGS=-D__BLST_PORTABLE__ -deps: - go run ./build/*.go smartdeps +all: build +.PHONY: all -# WARNING THIS BUILDS A GO PLUGIN AND PLUGINS *DO NOT* WORK ON WINDOWS SYSTEMS -iptb: - make -C tools/iptb-plugins all +## variables + +# git modules that need to be loaded +MODULES:= + +ldflags=-X=github.com/filecoin-project/venus/pkg/constants.CurrentCommit=+git.$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)) +ifneq ($(strip $(LDFLAGS)),) + ldflags+=-extldflags=$(LDFLAGS) +endif + +GOFLAGS+=-ldflags="$(ldflags)" + +## FFI + +FFI_PATH:=extern/filecoin-ffi/ +FFI_DEPS:=.install-filcrypto +FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS)) + +$(FFI_DEPS): build-dep/.filecoin-install ; + +build-dep/.filecoin-install: $(FFI_PATH) + $(MAKE) -C $(FFI_PATH) $(FFI_DEPS:$(FFI_PATH)%=%) + @touch $@ + +MODULES+=$(FFI_PATH) 
+BUILD_DEPS+=build-dep/.filecoin-install +CLEAN+=build-dep/.filecoin-install + +## modules +build-dep: + mkdir $@ + +$(MODULES): build-dep/.update-modules; +# dummy file that marks the last time modules were updated +build-dep/.update-modules: build-dep; + git submodule update --init --recursive + touch $@ + +gen-all: cborgen gogen inline-gen api-gen bundle-gen state-type-gen + +### devtool ### +cborgen: + cd venus-devtool && go run ./cborgen/*.go + +gogen: + cd venus-shared && go generate ./... + +inline-gen: + cd venus-devtool && go run ./inline-gen/main.go ../ ./inline-gen/inlinegen-data.json + +test-venus-shared: + cd venus-shared && go test -covermode=set ./... + +bundle-gen: + cd venus-devtool && go run ./bundle-gen/*.go --dst ./../venus-shared/actors/builtin_actors_gen.go + +state-type-gen: + cd venus-devtool && go run ./state-type-gen/*.go --dst ./../venus-shared/types/state_types_gen.go + +api-gen: + find ./venus-shared/api/ -name 'client_gen.go' -delete + find ./venus-shared/api/ -name 'proxy_gen.go' -delete + cd ./venus-devtool/ && go run ./api-gen/ proxy + cd ./venus-devtool/ && go run ./api-gen/ client + cd ./venus-devtool/ && go run ./api-gen/ doc + cd ./venus-devtool/ && go run ./api-gen/ mock + +compatible-all: compatible-api compatible-actor + +compatible-api: api-checksum api-diff api-perm + +api-checksum: + cd venus-devtool && go run ./compatible/apis/*.go checksum > ../venus-shared/compatible-checks/api-checksum.txt + +api-diff: + cd venus-devtool && go run ./compatible/apis/*.go diff > ../venus-shared/compatible-checks/api-diff.txt + +api-perm: + cd venus-devtool && go run ./compatible/apis/*.go perm > ../venus-shared/compatible-checks/api-perm.txt + +compatible-actor: actor-templates actor-sources actor-render + +actor-templates: + cd venus-devtool && go run ./compatible/actors/*.go templates --dst ../venus-shared/actors/ > ../venus-shared/compatible-checks/actor-templates.txt + +actor-sources: + cd venus-devtool && go run 
./compatible/actors/*.go sources > ../venus-shared/compatible-checks/actor-sources.txt + +actor-render: + cd venus-devtool && go run ./compatible/actors/*.go render ../venus-shared/actors/ + +actor-replica: + cd venus-devtool && go run ./compatible/actors/*.go replica --dst ../venus-shared/actors/ + +test:test-venus-shared + go build -o genesis-file-server ./tools/genesis-file-server + go build -o gengen ./tools/gengen + ./gengen --keypath ./fixtures/live --out-car ./fixtures/live/genesis.car --out-json ./fixtures/live/gen.json --config ./fixtures/setup.json + ./gengen --keypath ./fixtures/test --out-car ./fixtures/test/genesis.car --out-json ./fixtures/test/gen.json --config ./fixtures/setup.json + go test $$(go list ./... | grep -v /venus-shared/) -timeout=30m -v -integration=true -unit=false + go test $$(go list ./... | grep -v /venus-shared/) -timeout=30m -v -integration=false -unit=true + +lint: $(BUILD_DEPS) + golangci-lint run + +deps: $(BUILD_DEPS) + +dist-clean: + git clean -xdff + git submodule deinit --all -f + +build: $(BUILD_DEPS) + rm -f venus + go build -o ./venus $(GOFLAGS) . + + +.PHONY: docker + + +TAG:=test +docker: $(BUILD_DEPS) + curl -O https://raw.githubusercontent.com/filecoin-project/venus-docs/master/script/docker/dockerfile + docker build --build-arg https_proxy=$(BUILD_DOCKER_PROXY) --build-arg BUILD_TARGET=venus -t venus . + docker tag venus:latest filvenus/venus:$(TAG) diff --git a/README.md b/README.md index 4cd1a66c70..8d0002a88f 100644 --- a/README.md +++ b/README.md @@ -1,213 +1,55 @@ -# Filecoin (go-filecoin) +

+ + Project Venus Logo + +

-[![CircleCI](https://circleci.com/gh/filecoin-project/go-filecoin.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-filecoin) -[![User Devnet Release](https://img.shields.io/endpoint.svg?color=brightgreen&style=flat&logo=GitHub&url=https://raw.githubusercontent.com/filecoin-project/go-filecoin-badges/master/user-devnet.json)](https://github.com/filecoin-project/go-filecoin/releases/latest) -[![Nightly Devnet Release](https://img.shields.io/endpoint.svg?color=blue&style=flat&logo=GitHub&url=https://raw.githubusercontent.com/filecoin-project/go-filecoin-badges/master/nightly-devnet.json)](https://github.com/filecoin-project/go-filecoin/releases) -[![Staging Devnet Release](https://img.shields.io/endpoint.svg?color=brightgreen&style=flat&logo=GitHub&url=https://raw.githubusercontent.com/filecoin-project/go-filecoin-badges/master/staging-devnet.json)](https://github.com/filecoin-project/go-filecoin/releases) -Go-filecoin is an implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, checkout the [Filecoin Spec](https://spec.filecoin.io). -Go-filecoin was the first Filecoin implementation originially initiated and developed by Protocol Labs, and now is maintained by the Filecoin community. See [maintenance](#maintenance) for more information. +

Project Venus - 启明星

-__Questions or problems with go-filecoin? [Ask the community first](#community)__. Your problem may already be solved. +

+ + + + +
+

-## Table of Contents - - - +Venus is an implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://spec.filecoin.io). -- [What is Filecoin?](#what-is-filecoin) -- [Install](#install) - - [System Requirements](#system-requirements) - - [Install from Source](#install-from-source) - - [Install Go](#install-go) - - [Install Dependencies](#install-dependencies) - - [Build and run tests](#build-and-run-tests) -- [Usage](#usage) - - [Advanced usage](#advanced-usage) - - [Setting up a localnet](#setting-up-a-localnet) -- [Contributing](#contributing) -- [Community](#community) -- [License](#license) +## Building & Documentation - +For instructions on how to build, install and join a venus storage pool, please visit [here](https://venus.filecoin.io/intro/). -## What is Filecoin? -Filecoin is a decentralized storage network that turns the world’s unused storage into an algorithmic market, creating a permanent, decentralized future for the web. -**Miners** earn the native protocol token (also called “Filecoin”) by providing data storage and/or retrieval. -**Clients** pay miners to store or distribute data and to retrieve it. -Check out the [Filecoin website](https://filecoin.io/) and [Filecoin Documentation](https://docs.filecoin.io/) for more. +## Venus architecture -## Install +With key features like security, ease of use and distributed storage pool, the deployment of a node using Venus is quite different from the one using [Lotus](https://github.com/filecoin-project/lotus). Details of mining architecture can be found [here](https://venus.filecoin.io/intro/#how-venus-works). -👋 Welcome to Go-Filecoin! +## Related modules -This README outlines the basics for building and running Go-filecoin. -**For more background, configuration, and troubleshooting information check out the [Go-filecoin Docs](https://go.filecoin.io/)**. 
+Venus loosely describes a collection of modules that work together to realize a fully featured Filecoin implementation. List of stand-alone venus modules repos can be found [here](https://venus.filecoin.io/intro/#how-venus-works), each assuming different roles in the functioning of Filecoin. -### System Requirements +## Contribute -Go-filecoin can build and run on most Linux and MacOS systems. -Windows is not yet supported. +Venus is a universally open project and welcomes contributions of all kinds: code, docs, and more. However, before making a contribution, we ask you to heed these recommendations: -A validating node can run on most systems with at least 8GB of RAM. -A mining node requires significant RAM and GPU resources, depending on the sector configuration to be used. +1. If the proposal entails a protocol change, please first submit a [Filecoin Improvement Proposal](https://github.com/filecoin-project/FIPs). +2. If the change is complex and requires prior discussion, [open an issue](https://github.com/filecoin-project/venus/issues) or a [discussion](https://github.com/filecoin-project/venus/discussions) to request feedback before you start working on a pull request. This is to avoid disappointment and sunk costs, in case the change is not actually needed or accepted. +3. Please refrain from submitting PRs to adapt existing code to subjective preferences. The changeset should contain functional or technical improvements/enhancements, bug fixes, new features, or some other clear material contribution. Simple stylistic changes are likely to be rejected in order to reduce code churn. 
-### Install from Source on MacOS -```sh -curl -fsSL https://raw.githubusercontent.com/filecoin-project/go-filecoin/master/scripts/build/mac-build.sh | bash -``` -Note: macOS users may need to update their git config with `git config --global core.autocrlf input` +When implementing a change: -### Install from Source - -Clone this git repository to your machine: - -```sh -mkdir -p /path/to/filecoin-project -git clone https://github.com/filecoin-project/go-filecoin.git /path/to/filecoin-project/go-filecoin -``` - -#### Install Go - -The build process for go-filecoin requires [Go](https://golang.org/doc/install) >= v1.13 - -> Installing Go for the first time? We recommend [this tutorial](https://www.ardanlabs.com/blog/2016/05/installing-go-and-your-workspace.html) which includes environment setup. - -Due to our use of `cgo`, you'll need a C compiler to build go-filecoin whether you're using a prebuilt library or building it yourself from source. -If you want to use `gcc` (e.g. `export CC=gcc`) when building go-filecoin, you will need to use v7.4.0 or higher. - -The build process will download a static library containing the [Filecoin proofs implementation](https://github.com/filecoin-project/rust-fil-proofs) (which is written in Rust). - -> If instead you wish to build proofs from source, you'll need (1) Rust development environment and (2) to set the environment variable `FFI_BUILD_FROM_SOURCE=1`. -More info at [filecoin-ffi](https://github.com/filecoin-project/filecoin-ffi). - -#### Install Dependencies - -First, load all the Git submodules. - -```sh -git submodule update --init --recursive -``` - -Initialize build dependencies. - -```sh -make deps -``` - -Note: The first time you run `deps` can be **slow** as very large parameter files are either downloaded or generated locally in `/var/tmp/filecoin-proof-parameters`. -Have patience; future runs will be faster. - -#### Build and run tests - -```sh -# First, build the binary -make - -# Then, run the unit tests. 
-go run ./build test - -# Build and test can be combined! -go run ./build best -``` - -Other handy build commands include: - -```sh -# Check the code for style and correctness issues -go run ./build lint - -# Run different categories of tests by toggling their flags -go run ./build test -unit=false -integration=true -functional=true - -# Test with a coverage report -go run ./build test -cover - -# Test with Go's race-condition instrumentation and warnings (see https://blog.golang.org/race-detector) -go run ./build test -race - -# Deps, Lint, Build, Test (any args will be passed to `test`) -go run ./build all -``` - -Note: Any flag passed to `go run ./build test` (e.g. `-cover`) will be passed on to `go test`. - -If you have **problems with the build**, please consult the [Troubleshooting](https://go.filecoin.io/go-filecoin-tutorial/Troubleshooting-&-FAQ.html) section of the [Go-filecoin Documentation](https://go.filecoin.io/). - -## Usage - -For a complete step-by-step tutorial, see [Getting Started](https://go.filecoin.io/go-filecoin-tutorial/Getting-Started.html). - -#### Quick start: - -```sh -# Remove any existing symlink to a repo directory -rm ~/.filecoin - -# Initialize a new repository, downloading a genesis file and setting network parameters (in this case, for the Testnet network) -./go-filecoin init --genesisfile=https://ipfs.io/ipfs/QmXZQeezX1x8uRQX9EUaYxnyivUpTfJqQTvszk3c8SnFPN/testnet.car --network=testnet - -# Start the daemon. It will block until it connects to at least one bootstrap peer. -./go-filecoin daemon -``` - -Your node should now be connected to some peers, and begin downloading and validating the blockchain. - -Open a new terminal to interact with your node: - -```sh -# Print the node's connection information -./go-filecoin id - -# Show chain sync status -./go-filecoin chain status -``` - -To see a full list of commands, run `./go-filecoin --help`. 
- -### Advanced usage - -#### Setting up a localnet - -The localnet FAST binary tool allows users to quickly and easily setup a local network on the users computer. -Please refer to the [localnet README](https://github.com/filecoin-project/go-filecoin/tree/master/tools/fast/bin/localnet#localnet) for more information. -The localnet tool is only compatible when built from the same git ref as the targeted `go-filecoin` binary. - -## Contributing - -We ❤️ all our contributors; this project wouldn’t be what it is without you! If you want to help out, please see [CONTRIBUTING.md](CONTRIBUTING.md). - -Check out the [Go-Filecoin code overview](CODEWALK.md) for a brief tour of the code. - -## Community - -Here are a few places to get help and connect with the Filecoin community: -- [Go-filecoin Documentation](http://go.filecoin.io/) — for tutorials, troubleshooting, and FAQs -- The `#fil-go-filecoin` channel on [Filecoin Project Slack](https://filecoinproject.slack.com/messages/CEHHJNJS3/) or [Matrix/Riot](https://riot.im/app/#/room/#fil-dev:matrix.org) - for live help and some dev discussions -- [Filecoin Community Forum](https://discuss.filecoin.io) - for talking about design decisions, use cases, implementation advice, and longer-running conversations -- [GitHub issues](https://github.com/filecoin-project/go-filecoin/issues) - to report bugs, and view or contribute to ongoing development. -- [Filecoin Specification](https://github.com/filecoin-project/specs) - how Filecoin is supposed to work - -Looking for even more? See the full rundown at [filecoin-project/community](https://github.com/filecoin-project/community). - -## Maintenance - -Venus (previously called `go-filecoin`) is now maintained by [IPFS-Force Community](https://github.com/ipfs-force-community) - -Maintainers: @steven004, @diwufeiwen, @hunjixin, @felix00000 - -This repo is open for anyone to submit issues and PRs. +1. Adhere to the standard Go formatting guidelines, e.g. 
[Effective Go](https://golang.org/doc/effective_go.html). Run `go fmt`. +2. Stick to the idioms and patterns used in the codebase. Familiar-looking code has a higher chance of being accepted than eerie code. Pay attention to commonly used variable and parameter names, avoidance of naked returns, error handling patterns, etc. +3. Comments: follow the advice on the [Commentary](https://golang.org/doc/effective_go.html#commentary) section of Effective Go. +4. Minimize code churn. Modify only what is strictly necessary. Well-encapsulated changesets will get a quicker response from maintainers. +5. Lint your code with [`golangci-lint`](https://golangci-lint.run) (CI will reject your PR if unlinted). +6. Add tests. +7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description. +8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message. ## License -The Filecoin Project is dual-licensed under Apache 2.0 and MIT terms: - -- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/filecoin-project/go-filecoin/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](https://github.com/filecoin-project/go-filecoin/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT) +This project is dual-licensed under [Apache 2.0](https://github.com/filecoin-project/venus/blob/master/LICENSE-APACHE) and [MIT](https://github.com/filecoin-project/venus/blob/master/LICENSE-MIT). 
diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..f288f71d37 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +forked from lotus + +## Reporting a Vulnerability + +For reporting security vulnerabilities/bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md. Security vulnerabilities should be reported via our [Vulnerability Reporting channels](https://github.com/filecoin-project/community/blob/master/SECURITY.md#vulnerability-reporting) and will be eligible for a [Bug Bounty](https://security.filecoin.io/bug-bounty/). + +Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report. + +Here are some examples of bugs we would consider to be security vulnerabilities: + +* If you can spend from a `multisig` wallet you do not control the keys for. +* If you can cause a miner to be slashed without them actually misbehaving. +* If you can maintain power without submitting windowed posts regularly. +* If you can craft a message that causes lotus nodes to panic. +* If you can cause your miner to win significantly more blocks than it should. +* If you can craft a message that causes a persistent fork in the network. +* If you can cause the total amount of Filecoin in the network to no longer be 2 billion. + +This is not an exhaustive list, but should provide some idea of what we consider a security vulnerability. + +## Reporting a non-security bug + +For non-security bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/venus/issues/new?template=bug_report.md). 
diff --git a/app/node/builder.go b/app/node/builder.go new file mode 100644 index 0000000000..e662ac2660 --- /dev/null +++ b/app/node/builder.go @@ -0,0 +1,222 @@ +package node + +import ( + "context" + "fmt" + "time" + + "github.com/filecoin-project/venus-auth/jwtclient" + "github.com/filecoin-project/venus/app/submodule/dagservice" + "github.com/filecoin-project/venus/app/submodule/network" + + logging "github.com/ipfs/go-log" + "github.com/libp2p/go-libp2p" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/app/submodule/blockstore" + "github.com/filecoin-project/venus/app/submodule/chain" + "github.com/filecoin-project/venus/app/submodule/common" + config2 "github.com/filecoin-project/venus/app/submodule/config" + "github.com/filecoin-project/venus/app/submodule/market" + "github.com/filecoin-project/venus/app/submodule/mining" + "github.com/filecoin-project/venus/app/submodule/mpool" + "github.com/filecoin-project/venus/app/submodule/multisig" + "github.com/filecoin-project/venus/app/submodule/paych" + "github.com/filecoin-project/venus/app/submodule/storagenetworking" + "github.com/filecoin-project/venus/app/submodule/syncer" + "github.com/filecoin-project/venus/app/submodule/wallet" + chain2 "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/clock" + "github.com/filecoin-project/venus/pkg/journal" + "github.com/filecoin-project/venus/pkg/paychmgr" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/impl" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs-force-community/metrics/ratelimit" +) + +// Builder is a helper to aid in the construction of a filecoin node. 
+type Builder struct { + blockTime time.Duration + libp2pOpts []libp2p.Option + offlineMode bool + verifier ffiwrapper.Verifier + propDelay time.Duration + repo repo.Repo + journal journal.Journal + isRelay bool + chainClock clock.ChainEpochClock + genBlk types.BlockHeader + walletPassword []byte + authURL string +} + +// New creates a new node. +func New(ctx context.Context, opts ...BuilderOpt) (*Node, error) { + // initialize builder and set base values + n := &Builder{ + offlineMode: false, + blockTime: clock.DefaultEpochDuration, + verifier: impl.ProofVerifier, + } + // apply builder options + for _, o := range opts { + if err := o(n); err != nil { + return nil, err + } + } + + // build the node + return n.build(ctx) +} + +func (b *Builder) build(ctx context.Context) (*Node, error) { + // + // Set default values on un-initialized fields + // + if b.repo == nil { + b.repo = repo.NewInMemoryRepo() + } + + var err error + if b.journal == nil { + b.journal = journal.NewNoopJournal() + } + + // fetch genesis block id + b.genBlk, err = readGenesisCid(ctx, b.repo.ChainDatastore(), b.repo.Datastore()) + if err != nil { + return nil, err + } + + if b.chainClock == nil { + // get the genesis block time from the chainsubmodule + b.chainClock = clock.NewChainClock(b.genBlk.Timestamp, b.blockTime) + } + + // create the node + nd := &Node{ + offlineMode: b.offlineMode, + repo: b.repo, + chainClock: b.chainClock, + } + + // modules + nd.circulatiingSupplyCalculator = chain2.NewCirculatingSupplyCalculator(b.repo.Datastore(), b.genBlk.ParentStateRoot, b.repo.Config().NetworkParams.ForkUpgradeParam) + + // services + nd.configModule = config2.NewConfigModule(b.repo) + + nd.blockstore, err = blockstore.NewBlockstoreSubmodule(ctx, (*builder)(b)) + if err != nil { + return nil, errors.Wrap(err, "failed to build node.blockstore") + } + + nd.chain, err = chain.NewChainSubmodule(ctx, (*builder)(b), nd.circulatiingSupplyCalculator) + if err != nil { + return nil, errors.Wrap(err, 
"failed to build node.Chain") + } + + nd.network, err = network.NewNetworkSubmodule(ctx, nd.chain.ChainReader, nd.chain.MessageStore, (*builder)(b)) + if err != nil { + return nil, errors.Wrap(err, "failed to build node.Network") + } + + nd.blockservice, err = dagservice.NewDagserviceSubmodule(ctx, (*builder)(b), nd.network) + if err != nil { + return nil, errors.Wrap(err, "failed to build node.dagservice") + } + + nd.syncer, err = syncer.NewSyncerSubmodule(ctx, (*builder)(b), nd.blockstore, nd.network, nd.chain, nd.circulatiingSupplyCalculator) + if err != nil { + return nil, errors.Wrap(err, "failed to build node.Syncer") + } + + nd.wallet, err = wallet.NewWalletSubmodule(ctx, b.repo, nd.configModule, nd.chain, b.walletPassword) + if err != nil { + return nil, errors.Wrap(err, "failed to build node.wallet") + } + + nd.mpool, err = mpool.NewMpoolSubmodule(ctx, (*builder)(b), nd.network, nd.chain, nd.wallet) + if err != nil { + return nil, errors.Wrap(err, "failed to build node.mpool") + } + + nd.storageNetworking, err = storagenetworking.NewStorgeNetworkingSubmodule(ctx, nd.network) + if err != nil { + return nil, errors.Wrap(err, "failed to build node.storageNetworking") + } + nd.mining = mining.NewMiningModule(nd.syncer.Stmgr, (*builder)(b), nd.chain, nd.blockstore, nd.network, nd.syncer, *nd.wallet) + + nd.multiSig = multisig.NewMultiSigSubmodule(nd.chain.API(), nd.mpool.API(), nd.chain.ChainReader) + + mgrps := &paychmgr.ManagerParams{ + MPoolAPI: nd.mpool.API(), + ChainInfoAPI: nd.chain.API(), + SM: nd.syncer.Stmgr, + WalletAPI: nd.wallet.API(), + } + if nd.paychan, err = paych.NewPaychSubmodule(ctx, b.repo.PaychDatastore(), mgrps); err != nil { + return nil, err + } + nd.market = market.NewMarketModule(nd.chain.API(), nd.syncer.Stmgr) + + blockDelay := b.repo.Config().NetworkParams.BlockDelay + nd.common = common.NewCommonModule(nd.chain, nd.network, blockDelay) + + apiBuilder := NewBuilder() + apiBuilder.NameSpace("Filecoin") + + err = 
apiBuilder.AddServices(nd.configModule, + nd.blockstore, + nd.network, + nd.blockservice, + nd.chain, + nd.syncer, + nd.wallet, + nd.storageNetworking, + nd.mining, + nd.mpool, + nd.paychan, + nd.market, + nd.common, + ) + + if err != nil { + return nil, errors.Wrap(err, "add service failed ") + } + + var client *jwtclient.AuthClient + cfg := nd.repo.Config() + if len(cfg.API.VenusAuthURL) > 0 { + client, err = jwtclient.NewAuthClient(cfg.API.VenusAuthURL) + if err != nil { + return nil, fmt.Errorf("failed to create remote jwt auth client: %w", err) + } + nd.remoteAuth = jwtclient.WarpIJwtAuthClient(client) + } + + var ratelimiter *ratelimit.RateLimiter + if client != nil && cfg.RateLimitCfg.Enable { + if ratelimiter, err = ratelimit.NewRateLimitHandler(cfg.RateLimitCfg.Endpoint, + nil, &ValueFromCtx{}, jwtclient.WarpLimitFinder(client), logging.Logger("rate-limit")); err != nil { + return nil, fmt.Errorf("request rate-limit is enabled, but create rate-limit handler failed:%w", err) + } + _ = logging.SetLogLevel("rate-limit", "warn") + } + + nd.jsonRPCServiceV1 = apiBuilder.Build("v1", ratelimiter) + nd.jsonRPCService = apiBuilder.Build("v0", ratelimiter) + return nd, nil +} + +type ValueFromCtx struct{} + +func (vfc *ValueFromCtx) AccFromCtx(ctx context.Context) (string, bool) { + return jwtclient.CtxGetName(ctx) +} + +func (vfc *ValueFromCtx) HostFromCtx(ctx context.Context) (string, bool) { + return jwtclient.CtxGetTokenLocation(ctx) +} diff --git a/app/node/builder_getter.go b/app/node/builder_getter.go new file mode 100644 index 0000000000..9d11acbb07 --- /dev/null +++ b/app/node/builder_getter.go @@ -0,0 +1,72 @@ +package node + +import ( + "time" + + "github.com/filecoin-project/venus/pkg/clock" + "github.com/filecoin-project/venus/pkg/journal" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p" +) + +// Builder private method accessors for impl's 
+ +type builder Builder + +// repo returns the repo. +func (b Builder) Repo() repo.Repo { + return b.repo +} + +// GenesisCid read genesis block cid +func (b builder) GenesisCid() cid.Cid { + return b.genBlk.Cid() +} + +// GenesisRoot read genesis block root +func (b builder) GenesisRoot() cid.Cid { + return b.genBlk.ParentStateRoot +} + +// todo remove block time +// BlockTime get chain block time +func (b builder) BlockTime() time.Duration { + return b.blockTime +} + +// Repo get home data repo +func (b builder) Repo() repo.Repo { + return b.repo +} + +// IsRelay get whether the p2p network support replay +func (b builder) IsRelay() bool { + return b.isRelay +} + +// ChainClock get chain clock +func (b builder) ChainClock() clock.ChainEpochClock { + return b.chainClock +} + +// Journal get journal to record event +func (b builder) Journal() journal.Journal { + return b.journal +} + +// Libp2pOpts get libp2p option +func (b builder) Libp2pOpts() []libp2p.Option { + return b.libp2pOpts +} + +// OfflineMode get the p2p network mode +func (b builder) OfflineMode() bool { + return b.offlineMode +} + +// Verify export ffi verify +func (b builder) Verifier() ffiwrapper.Verifier { + return b.verifier +} diff --git a/app/node/builder_opts.go b/app/node/builder_opts.go new file mode 100644 index 0000000000..fa89bf2c92 --- /dev/null +++ b/app/node/builder_opts.go @@ -0,0 +1,143 @@ +package node + +import ( + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/pkg/clock" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/journal" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + "github.com/filecoin-project/venus/venus-shared/actors/policy" + "github.com/libp2p/go-libp2p" +) + +// BuilderOpt is an option for building a filecoin node. 
+type BuilderOpt func(*Builder) error + +// offlineMode enables or disables offline mode. +func OfflineMode(offlineMode bool) BuilderOpt { + return func(c *Builder) error { + c.offlineMode = offlineMode + return nil + } +} + +// IsRelay configures node to act as a libp2p relay. +func IsRelay() BuilderOpt { + return func(c *Builder) error { + c.isRelay = true + return nil + } +} + +// BlockTime sets the blockTime. +func BlockTime(blockTime time.Duration) BuilderOpt { + return func(c *Builder) error { + c.blockTime = blockTime + return nil + } +} + +// PropagationDelay sets the time the node needs to wait for blocks to arrive before mining. +func PropagationDelay(propDelay time.Duration) BuilderOpt { + return func(c *Builder) error { + c.propDelay = propDelay + return nil + } +} + +// SetWalletPassword set wallet password +func SetWalletPassword(password []byte) BuilderOpt { + return func(c *Builder) error { + c.walletPassword = password + return nil + } +} + +// SetAuthURL set venus auth service URL +func SetAuthURL(url string) BuilderOpt { + return func(c *Builder) error { + c.authURL = url + return nil + } +} + +// Libp2pOptions returns a builder option that sets up the libp2p node +func Libp2pOptions(opts ...libp2p.Option) BuilderOpt { + return func(b *Builder) error { + // Quietly having your options overridden leads to hair loss + if len(b.libp2pOpts) > 0 { + panic("Libp2pOptions can only be called once") + } + b.libp2pOpts = opts + return nil + } +} + +// VerifierConfigOption returns a function that sets the verifier to use in the node consensus +func VerifierConfigOption(verifier ffiwrapper.Verifier) BuilderOpt { + return func(c *Builder) error { + c.verifier = verifier + return nil + } +} + +// ChainClockConfigOption returns a function that sets the chainClock to use in the node. 
+func ChainClockConfigOption(clk clock.ChainEpochClock) BuilderOpt { + return func(c *Builder) error { + c.chainClock = clk + return nil + } +} + +// JournalConfigOption returns a function that sets the journal to use in the node. +func JournalConfigOption(jrl journal.Journal) BuilderOpt { + return func(c *Builder) error { + c.journal = jrl + return nil + } +} + +// MonkeyPatchNetworkParamsOption returns a function that sets global vars in the +// binary's specs actor dependency to change network parameters that live there +func MonkeyPatchNetworkParamsOption(params *config.NetworkParamsConfig) BuilderOpt { + return func(c *Builder) error { + SetNetParams(params) + return nil + } +} + +func SetNetParams(params *config.NetworkParamsConfig) { + if params.ConsensusMinerMinPower > 0 { + policy.SetConsensusMinerMinPower(big.NewIntUnsigned(params.ConsensusMinerMinPower)) + } + if len(params.ReplaceProofTypes) > 0 { + newSupportedTypes := make([]abi.RegisteredSealProof, len(params.ReplaceProofTypes)) + copy(newSupportedTypes, params.ReplaceProofTypes) + // Switch reference rather than mutate in place to avoid concurrent map mutation (in tests). + policy.SetSupportedProofTypes(newSupportedTypes...) + } + + if params.MinVerifiedDealSize > 0 { + policy.SetMinVerifiedDealSize(abi.NewStoragePower(params.MinVerifiedDealSize)) + } + + if params.PreCommitChallengeDelay > 0 { + policy.SetPreCommitChallengeDelay(params.PreCommitChallengeDelay) + } + + constants.SetAddressNetwork(params.AddressNetwork) +} + +// MonkeyPatchSetProofTypeOption returns a function that sets package variable +// SuppurtedProofTypes to be only the given registered proof type +func MonkeyPatchSetProofTypeOption(proofType abi.RegisteredSealProof) BuilderOpt { + return func(c *Builder) error { + // Switch reference rather than mutate in place to avoid concurrent map mutation (in tests). 
+ policy.SetSupportedProofTypes(proofType) + return nil + } +} diff --git a/app/node/config.go b/app/node/config.go new file mode 100644 index 0000000000..fa3022314e --- /dev/null +++ b/app/node/config.go @@ -0,0 +1,69 @@ +package node + +import ( + libp2p "github.com/libp2p/go-libp2p" + ci "github.com/libp2p/go-libp2p/core/crypto" + errors "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/repo" +) + +// OptionsFromRepo takes a repo and returns options that configure a node +// to use the given repo. +func OptionsFromRepo(r repo.Repo) ([]BuilderOpt, error) { + sk, err := loadPrivKeyFromKeystore(r) + if err != nil { + return nil, err + } + + cfg := r.Config() + cfgopts := []BuilderOpt{ + // Libp2pOptions can only be called once, so add all options here. + Libp2pOptions( + libp2p.ListenAddrStrings(cfg.Swarm.Address), + libp2p.Identity(sk), + ), + } + + dsopt := func(c *Builder) error { + c.repo = r + return nil + } + + return append(cfgopts, dsopt), nil +} + +func loadPrivKeyFromKeystore(r repo.Repo) (ci.PrivKey, error) { + data, err := r.Keystore().Get("self") + if err != nil { + if err.Error() == "no key by the given name was found" { + return createPeerKey(r.Keystore()) + } + return nil, errors.Wrap(err, "failed to get key from keystore") + } + sk, err := ci.UnmarshalPrivateKey(data) + if err != nil { + return nil, errors.Wrap(err, "unmarshal private key failed") + } + return sk, nil +} + +/*func initPeerKey(store fskeystore.Keystore, pk acrypto.PrivKey) error { + var err error + if pk == nil { + pk, _, err = acrypto.GenerateKeyPair(acrypto.RSA, defaultPeerKeyBits) + if err != nil { + return errors.Wrap(err, "failed to create peer key") + } + } + + kbytes, err := acrypto.MarshalPrivateKey(pk) + if err != nil { + return err + } + + if err := store.Put("self", kbytes); err != nil { + return errors.Wrap(err, "failed to store private key") + } + return nil +}*/ diff --git a/app/node/env.go b/app/node/env.go new file mode 100644 index 
0000000000..926ced8cc5 --- /dev/null +++ b/app/node/env.go @@ -0,0 +1,43 @@ +package node + +import ( + "context" + + cmds "github.com/ipfs/go-ipfs-cmds" + + "github.com/filecoin-project/venus/app/submodule/storagenetworking" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" +) + +// Env is the environment for command API handlers. +type Env struct { + ctx context.Context + InspectorAPI IInspector + BlockStoreAPI v1api.IBlockStore + ChainAPI v1api.IChain + NetworkAPI v1api.INetwork + StorageNetworkingAPI storagenetworking.IStorageNetworking + SyncerAPI v1api.ISyncer + WalletAPI v1api.IWallet + MingingAPI v1api.IMining + MessagePoolAPI v1api.IMessagePool + + MultiSigAPI v0api.IMultiSig + MarketAPI v1api.IMarket + PaychAPI v1api.IPaychan + CommonAPI v1api.ICommon +} + +var _ cmds.Environment = (*Env)(nil) + +// NewClientEnv returns a new environment for command API clients. +// This environment lacks direct access to any internal APIs. +func NewClientEnv(ctx context.Context) *Env { + return &Env{ctx: ctx} +} + +// Context returns the context of the environment. +func (ce *Env) Context() context.Context { + return ce.ctx +} diff --git a/app/node/helpers.go b/app/node/helpers.go new file mode 100644 index 0000000000..fc0bff1895 --- /dev/null +++ b/app/node/helpers.go @@ -0,0 +1,42 @@ +package node + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/chain" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// readGenesisCid is a helper function that queries the provided datastore for +// an entry with the genesisKey cid, returning if found. 
+func readGenesisCid(ctx context.Context, chainDs datastore.Datastore, bs blockstoreutil.Blockstore) (types.BlockHeader, error) { + bb, err := chainDs.Get(ctx, chain.GenesisKey) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to read genesisKey") + } + + var c cid.Cid + err = json.Unmarshal(bb, &c) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to cast genesisCid") + } + + blkRawData, err := bs.Get(ctx, c) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to read genesis block") + } + + var blk types.BlockHeader + err = blk.UnmarshalCBOR(bytes.NewReader(blkRawData.RawData())) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to unmarshal genesis block") + } + return blk, nil +} diff --git a/app/node/init.go b/app/node/init.go new file mode 100644 index 0000000000..b9a32fec6c --- /dev/null +++ b/app/node/init.go @@ -0,0 +1,78 @@ +package node + +import ( + "context" + + "github.com/filecoin-project/venus/pkg/repo/fskeystore" + + cbor "github.com/ipfs/go-ipld-cbor" + acrypto "github.com/libp2p/go-libp2p/core/crypto" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/genesis" + "github.com/filecoin-project/venus/pkg/repo" +) + +const defaultPeerKeyBits = 2048 + +// initCfg contains configuration for initializing a node's repo. +type initCfg struct { + initImports []*crypto.KeyInfo +} + +// InitOpt is an option for initialization of a node's repo. +type InitOpt func(*initCfg) + +// ImportKeyOpt imports the provided key during initialization. +func ImportKeyOpt(ki *crypto.KeyInfo) InitOpt { + return func(opts *initCfg) { + opts.initImports = append(opts.initImports, ki) + } +} + +// Init initializes a Filecoin repo with genesis state and keys. 
+// This will always set the configuration for wallet default address (to the specified default +// key or a newly generated one), but otherwise leave the repo's config object intact. +// Make further configuration changes after initialization. +func Init(ctx context.Context, r repo.Repo, gen genesis.InitFunc, opts ...InitOpt) error { + cfg := new(initCfg) + for _, o := range opts { + o(cfg) + } + + bs := r.Datastore() + cst := cbor.NewCborStore(bs) + _, err := genesis.Init(ctx, r, bs, cst, gen) + if err != nil { + return errors.Wrap(err, "Could not Init Node") + } + + _, err = createPeerKey(r.Keystore()) + if err != nil { + return errors.Wrap(err, "Could not Create P2p key") + } + + if err = r.ReplaceConfig(r.Config()); err != nil { + return errors.Wrap(err, "failed to write config") + } + return nil +} + +func createPeerKey(store fskeystore.Keystore) (acrypto.PrivKey, error) { + var err error + pk, _, err := acrypto.GenerateKeyPair(acrypto.RSA, defaultPeerKeyBits) + if err != nil { + return nil, errors.Wrap(err, "failed to create peer key") + } + + kbytes, err := acrypto.MarshalPrivateKey(pk) + if err != nil { + return nil, err + } + + if err := store.Put("self", kbytes); err != nil { + return nil, errors.Wrap(err, "failed to store private key") + } + return pk, nil +} diff --git a/app/node/inspector_api.go b/app/node/inspector_api.go new file mode 100644 index 0000000000..5f023b8da7 --- /dev/null +++ b/app/node/inspector_api.go @@ -0,0 +1,150 @@ +package node + +import ( + "os" + "runtime" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/repo" + sysi "github.com/whyrusleeping/go-sysinfo" +) + +type IInspector interface { + Runtime() *RuntimeInfo + Memory() (*MemoryInfo, error) + Config() *config.Config + Disk() (*DiskInfo, error) + FilecoinVersion() string + Environment() *EnvironmentInfo +} + +var _ IInspector = &inspector{} + +// NewInspectorAPI returns a `Inspector` 
used to inspect the venus node. +func NewInspectorAPI(r repo.Repo) IInspector { + return &inspector{ + repo: r, + } +} + +// Inspector contains information used to inspect the venus node. +type inspector struct { + repo repo.Repo +} + +// AllInspectorInfo contains all information the inspector can gather. +type AllInspectorInfo struct { + Config *config.Config + Runtime *RuntimeInfo + Environment *EnvironmentInfo + Disk *DiskInfo + Memory *MemoryInfo + FilecoinVersion string +} + +// RuntimeInfo contains information about the golang runtime. +type RuntimeInfo struct { + OS string + Arch string + Version string + Compiler string + NumProc int + GoMaxProcs int + NumGoRoutines int + NumCGoCalls int64 +} + +// EnvironmentInfo contains information about the environment filecoin is running in. +type EnvironmentInfo struct { + VENUSAPI string `json:"VENUS_API"` + VENUSPath string `json:"VENUS_PATH"` + GoPath string `json:"GOPATH"` +} + +// DiskInfo contains information about disk usage and type. +type DiskInfo struct { + Free uint64 + Total uint64 + FSType string +} + +// MemoryInfo contains information about memory usage. +type MemoryInfo struct { + Swap uint64 + Virtual uint64 +} + +// Runtime returns infrormation about the golang runtime. +func (g *inspector) Runtime() *RuntimeInfo { + return &RuntimeInfo{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + Version: runtime.Version(), + Compiler: runtime.Compiler, + NumProc: runtime.NumCPU(), + GoMaxProcs: runtime.GOMAXPROCS(0), + NumGoRoutines: runtime.NumGoroutine(), + NumCGoCalls: runtime.NumCgoCall(), + } +} + +// Environment returns information about the environment filecoin is running in. +func (g *inspector) Environment() *EnvironmentInfo { + return &EnvironmentInfo{ + VENUSAPI: os.Getenv("VENUS_API"), + VENUSPath: os.Getenv("VENUS_PATH"), + GoPath: os.Getenv("GOPATH"), + } +} + +// Disk return information about filesystem the filecoin nodes repo is on. 
+func (g *inspector) Disk() (*DiskInfo, error) { + fsr, ok := g.repo.(*repo.FSRepo) + if !ok { + // we are using a in memory repo + return &DiskInfo{ + Free: 0, + Total: 0, + FSType: "0", + }, nil + } + + p, err := fsr.Path() + if err != nil { + return nil, err + } + + dinfo, err := sysi.DiskUsage(p) + if err != nil { + return nil, err + } + + return &DiskInfo{ + Free: dinfo.Free, + Total: dinfo.Total, + FSType: dinfo.FsType, + }, nil +} + +// Memory return information about system meory usage. +func (g *inspector) Memory() (*MemoryInfo, error) { + meminfo, err := sysi.MemoryInfo() + if err != nil { + return nil, err + } + return &MemoryInfo{ + Swap: meminfo.Swap, + Virtual: meminfo.Used, + }, nil +} + +// configModule return the current config values of the filecoin node. +func (g *inspector) Config() *config.Config { + return g.repo.Config() +} + +// FilecoinVersion returns the version of venus. +func (g *inspector) FilecoinVersion() string { + return constants.UserVersion() +} diff --git a/app/node/inspector_api_test.go b/app/node/inspector_api_test.go new file mode 100644 index 0000000000..3712a8cea2 --- /dev/null +++ b/app/node/inspector_api_test.go @@ -0,0 +1,65 @@ +package node_test + +import ( + "runtime" + "testing" + + "github.com/filecoin-project/venus/app/node" + + "github.com/stretchr/testify/assert" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/repo" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestRuntime(t *testing.T) { + tf.UnitTest(t) + + mr := repo.NewInMemoryRepo() + g := node.NewInspectorAPI(mr) + rt := g.Runtime() + + assert.Equal(t, runtime.GOOS, rt.OS) + assert.Equal(t, runtime.GOARCH, rt.Arch) + assert.Equal(t, runtime.Version(), rt.Version) + assert.Equal(t, runtime.Compiler, rt.Compiler) + assert.Equal(t, runtime.NumCPU(), rt.NumProc) + assert.Equal(t, runtime.GOMAXPROCS(0), rt.GoMaxProcs) + assert.Equal(t, runtime.NumCgoCall(), rt.NumCGoCalls) +} + +func 
TestDisk(t *testing.T) { + tf.UnitTest(t) + + mr := repo.NewInMemoryRepo() + g := node.NewInspectorAPI(mr) + d, err := g.Disk() + + assert.NoError(t, err) + assert.Equal(t, uint64(0), d.Free) + assert.Equal(t, uint64(0), d.Total) + assert.Equal(t, "0", d.FSType) +} + +func TestMemory(t *testing.T) { + tf.UnitTest(t) + + mr := repo.NewInMemoryRepo() + g := node.NewInspectorAPI(mr) + + _, err := g.Memory() + assert.NoError(t, err) +} + +func TestConfig(t *testing.T) { + tf.UnitTest(t) + + mr := repo.NewInMemoryRepo() + g := node.NewInspectorAPI(mr) + c := g.Config() + + defCfg := config.NewDefaultConfig() + defCfg.Wallet.PassphraseConfig = config.TestPassphraseConfig() + assert.Equal(t, defCfg, c) +} diff --git a/app/node/node.go b/app/node/node.go new file mode 100644 index 0000000000..4857a53384 --- /dev/null +++ b/app/node/node.go @@ -0,0 +1,364 @@ +package node + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "syscall" + + "contrib.go.opencensus.io/exporter/jaeger" + "github.com/awnumar/memguard" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/venus-auth/jwtclient" + "github.com/filecoin-project/venus/app/submodule/blockstore" + chain2 "github.com/filecoin-project/venus/app/submodule/chain" + "github.com/filecoin-project/venus/app/submodule/common" + configModule "github.com/filecoin-project/venus/app/submodule/config" + "github.com/filecoin-project/venus/app/submodule/dagservice" + "github.com/filecoin-project/venus/app/submodule/market" + "github.com/filecoin-project/venus/app/submodule/mining" + "github.com/filecoin-project/venus/app/submodule/mpool" + "github.com/filecoin-project/venus/app/submodule/multisig" + apiwrapper "github.com/filecoin-project/venus/app/submodule/multisig/v0api" + network2 "github.com/filecoin-project/venus/app/submodule/network" + "github.com/filecoin-project/venus/app/submodule/paych" + "github.com/filecoin-project/venus/app/submodule/storagenetworking" + syncer2 
"github.com/filecoin-project/venus/app/submodule/syncer" + "github.com/filecoin-project/venus/app/submodule/wallet" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/clock" + "github.com/filecoin-project/venus/pkg/config" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" // enable bls signatures + _ "github.com/filecoin-project/venus/pkg/crypto/secp" // enable secp signatures + "github.com/filecoin-project/venus/pkg/metrics" + "github.com/filecoin-project/venus/pkg/repo" + cmds "github.com/ipfs/go-ipfs-cmds" + cmdhttp "github.com/ipfs/go-ipfs-cmds/http" + logging "github.com/ipfs/go-log/v2" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/pkg/errors" + "go.opencensus.io/tag" +) + +var log = logging.Logger("node") // nolint: deadcode + +// ConfigOpt mutates a node config post initialization +type ConfigOpt func(*config.Config) + +// APIPrefix is the prefix for the http version of the api. +const APIPrefix = "/api" + +// Node represents a full Filecoin node. +type Node struct { + // offlineMode, when true, disables libp2p. + offlineMode bool + + // chainClock is a chainClock used by the node for chain epoch. + chainClock clock.ChainEpochClock + + // repo is the repo this node was created with. + // + // It contains all persistent artifacts of the filecoin node. 
+ repo repo.Repo + + // moduls + circulatiingSupplyCalculator chain.ICirculatingSupplyCalcualtor + // + // Core services + // + configModule *configModule.ConfigModule + blockstore *blockstore.BlockstoreSubmodule + blockservice *dagservice.DagServiceSubmodule + network *network2.NetworkSubmodule + + // + // Subsystems + // + chain *chain2.ChainSubmodule + syncer *syncer2.SyncerSubmodule + mining *mining.MiningModule + + // + // Supporting services + // + wallet *wallet.WalletSubmodule + multiSig *multisig.MultiSigSubmodule + mpool *mpool.MessagePoolSubmodule + storageNetworking *storagenetworking.StorageNetworkingSubmodule + + // paychannel and market + market *market.MarketSubmodule + paychan *paych.PaychSubmodule + + common *common.CommonModule + + // + // Jsonrpc + // + jsonRPCService, jsonRPCServiceV1 *jsonrpc.RPCServer + + jaegerExporter *jaeger.Exporter + remoteAuth jwtclient.IJwtAuthClient +} + +func (node *Node) Chain() *chain2.ChainSubmodule { + return node.chain +} + +func (node *Node) StorageNetworking() *storagenetworking.StorageNetworkingSubmodule { + return node.storageNetworking +} + +func (node *Node) Mpool() *mpool.MessagePoolSubmodule { + return node.mpool +} + +func (node *Node) Wallet() *wallet.WalletSubmodule { + return node.wallet +} + +func (node *Node) MultiSig() *multisig.MultiSigSubmodule { + return node.multiSig +} + +func (node *Node) Network() *network2.NetworkSubmodule { + return node.network +} + +func (node *Node) Blockservice() *dagservice.DagServiceSubmodule { + return node.blockservice +} + +func (node *Node) Blockstore() *blockstore.BlockstoreSubmodule { + return node.blockstore +} + +func (node *Node) ConfigModule() *configModule.ConfigModule { + return node.configModule +} + +func (node *Node) Repo() repo.Repo { + return node.repo +} + +func (node *Node) ChainClock() clock.ChainEpochClock { + return node.chainClock +} + +func (node *Node) OfflineMode() bool { + return node.offlineMode +} + +// Start boots up the node. 
+func (node *Node) Start(ctx context.Context) error { + var err error + if err = metrics.RegisterPrometheusEndpoint(node.repo.Config().Observability.Metrics); err != nil { + return errors.Wrap(err, "failed to setup metrics") + } + + if node.jaegerExporter, err = metrics.RegisterJaeger(node.network.Host.ID().Pretty(), + node.repo.Config().Observability.Tracing); err != nil { + return errors.Wrap(err, "failed to setup tracing") + } + + var syncCtx context.Context + syncCtx, node.syncer.CancelChainSync = context.WithCancel(context.Background()) + + // start syncer module to receive new blocks and start sync to latest height + err = node.syncer.Start(syncCtx) + if err != nil { + return err + } + + // Start mpool module to receive new message + err = node.mpool.Start(syncCtx) + if err != nil { + return err + } + + err = node.paychan.Start(ctx) + if err != nil { + return err + } + + // network should start late, + err = node.network.Start(syncCtx) + if err != nil { + return err + } + + return nil +} + +// Stop initiates the shutdown of the node. 
+func (node *Node) Stop(ctx context.Context) { + // stop mpool submodule + log.Infof("shutting down mpool...") + node.mpool.Stop(ctx) + + // stop syncer submodule + log.Infof("shutting down chain syncer...") + node.syncer.Stop(ctx) + + // Stop network submodule + log.Infof("shutting down network...") + node.network.Stop(ctx) + + // Stop chain submodule + log.Infof("shutting down chain...") + node.chain.Stop(ctx) + + // Stop paychannel submodule + log.Infof("shutting down pay channel...") + node.paychan.Stop() + + log.Infof("closing repository...") + if err := node.repo.Close(); err != nil { + log.Warnf("error closing repo: %s", err) + } + + log.Infof("flushing system logs...") + sysNames := logging.GetSubsystems() + for _, name := range sysNames { + _ = logging.Logger(name).Sync() + } + + if node.jaegerExporter != nil { + node.jaegerExporter.Flush() + } +} + +// RunRPCAndWait start rpc server and listen to signal to exit +func (node *Node) RunRPCAndWait(ctx context.Context, rootCmdDaemon *cmds.Command, ready chan interface{}) error { + // Signal that the sever has started and then wait for a signal to stop. + cfg := node.repo.Config() + mAddr, err := ma.NewMultiaddr(cfg.API.APIAddress) + if err != nil { + return err + } + + // Listen on the configured address in order to bind the port number in case it has + // been configured as zero (i.e. 
OS-provided) + apiListener, err := manet.Listen(mAddr) // nolint + if err != nil { + return err + } + + netListener := manet.NetListener(apiListener) // nolint + mux := http.NewServeMux() + err = node.runRestfulAPI(ctx, mux, rootCmdDaemon) // nolint + if err != nil { + return err + } + + err = node.runJsonrpcAPI(ctx, mux) + if err != nil { + return err + } + + localVerifer, token, err := jwtclient.NewLocalAuthClient() + if err != nil { + return fmt.Errorf("failed to generate local auth client: %s", err) + } + err = node.repo.SetAPIToken(token) + if err != nil { + return fmt.Errorf("set token fail: %w", err) + } + + authMux := jwtclient.NewAuthMux(localVerifer, node.remoteAuth, mux) + authMux.TrustHandle("/debug/pprof/", http.DefaultServeMux) + + // todo: + apikey, _ := tag.NewKey("api") + + apiserv := &http.Server{ + Handler: authMux, + BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), + tag.Upsert(apikey, "venus")) + return ctx + }, + } + + go func() { + err := apiserv.Serve(netListener) // nolint + if err != nil && err != http.ErrServerClosed { + return + } + }() + + // Write the resolved API address to the repo + cfg.API.APIAddress = apiListener.Multiaddr().String() + if err := node.repo.SetAPIAddr(cfg.API.APIAddress); err != nil { + log.Error("Could not save API address to repo") + return err + } + + terminate := make(chan error, 1) + + // todo: design an genterfull + memguard.CatchSignal(func(signal os.Signal) { + log.Infof("received signal(%s), venus will shutdown...", signal.String()) + log.Infof("shutting down server...") + if err := apiserv.Shutdown(ctx); err != nil { + log.Warnf("failed to shutdown server: %v", err) + } + node.Stop(ctx) + memguard.Purge() + log.Infof("venus shutdown gracefully ...") + terminate <- nil + }, syscall.SIGTERM, os.Interrupt) + + close(ready) + return <-terminate +} + +// RunAPIAndWait starts an API server and waits for it to finish. 
+// The `ready` channel is closed when the server is running and its API address has been +// saved to the node's repo. +// A message sent to or closure of the `terminate` channel signals the server to stop. +func (node *Node) runRestfulAPI(ctx context.Context, handler *http.ServeMux, rootCmdDaemon *cmds.Command) error { + servenv := node.createServerEnv(ctx) + + apiConfig := node.repo.Config().API + cfg := cmdhttp.NewServerConfig() + cfg.APIPath = APIPrefix + cfg.SetAllowedOrigins(apiConfig.AccessControlAllowOrigin...) + cfg.SetAllowedMethods(apiConfig.AccessControlAllowMethods...) + cfg.SetAllowCredentials(apiConfig.AccessControlAllowCredentials) + cfg.AddAllowedHeaders("Authorization") + + handler.Handle(APIPrefix+"/", cmdhttp.NewHandler(servenv, rootCmdDaemon, cfg)) + return nil +} + +func (node *Node) runJsonrpcAPI(ctx context.Context, handler *http.ServeMux) error { // nolint + handler.Handle("/rpc/v0", node.jsonRPCService) + handler.Handle("/rpc/v1", node.jsonRPCServiceV1) + return nil +} + +// createServerEnv create server for cmd server env +func (node *Node) createServerEnv(ctx context.Context) *Env { + env := Env{ + ctx: ctx, + InspectorAPI: NewInspectorAPI(node.repo), + BlockStoreAPI: node.blockstore.API(), + ChainAPI: node.chain.API(), + NetworkAPI: node.network.API(), + StorageNetworkingAPI: node.storageNetworking.API(), + SyncerAPI: node.syncer.API(), + WalletAPI: node.wallet.API(), + MingingAPI: node.mining.API(), + MessagePoolAPI: node.mpool.API(), + PaychAPI: node.paychan.API(), + MarketAPI: node.market.API(), + MultiSigAPI: &apiwrapper.WrapperV1IMultiSig{IMultiSig: node.multiSig.API(), IMessagePool: node.mpool.API()}, + CommonAPI: node.common, + } + + return &env +} diff --git a/app/node/rpc.go b/app/node/rpc.go new file mode 100644 index 0000000000..9456bb2480 --- /dev/null +++ b/app/node/rpc.go @@ -0,0 +1,136 @@ +package node + +import ( + "errors" + "reflect" + + "github.com/filecoin-project/go-jsonrpc" + v0api 
"github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/api/permission" + "github.com/ipfs-force-community/metrics/ratelimit" +) + +type RPCService interface{} + +type RPCBuilder struct { + namespace []string + v0APIStruct []interface{} + v1APIStruct []interface{} +} + +func NewBuilder() *RPCBuilder { + return &RPCBuilder{} +} + +func (builder *RPCBuilder) NameSpace(nameSpaece string) *RPCBuilder { + builder.namespace = append(builder.namespace, nameSpaece) + return builder +} + +func (builder *RPCBuilder) AddServices(services ...RPCService) error { + for _, service := range services { + err := builder.AddService(service) + if err != nil { + return err + } + } + return nil +} + +func (builder *RPCBuilder) AddService(service RPCService) error { + methodName := "V0API" + + serviceV := reflect.ValueOf(service) + apiMethod := serviceV.MethodByName(methodName) + if !apiMethod.IsValid() { + return errors.New("expect API function") + } + + apiImpls := apiMethod.Call([]reflect.Value{}) + + for _, apiImpl := range apiImpls { + rt := reflect.TypeOf(apiImpl) + rv := reflect.ValueOf(apiImpl) + if rt.Kind() == reflect.Array { + apiLen := rv.Len() + for i := 0; i < apiLen; i++ { + ele := rv.Index(i) + if ele.IsValid() { + builder.v0APIStruct = append(builder.v0APIStruct, apiImpl.Interface()) + } + } + } else { + builder.v0APIStruct = append(builder.v0APIStruct, apiImpl.Interface()) + } + } + + methodName = "API" + serviceV = reflect.ValueOf(service) + apiMethod = serviceV.MethodByName(methodName) + if !apiMethod.IsValid() { + return errors.New("expect API function") + } + + apiImpls = apiMethod.Call([]reflect.Value{}) + + for _, apiImpl := range apiImpls { + rt := reflect.TypeOf(apiImpl) + rv := reflect.ValueOf(apiImpl) + if rt.Kind() == reflect.Array { + apiLen := rv.Len() + for i := 0; i < apiLen; i++ { + ele := rv.Index(i) + if ele.IsValid() { + 
builder.v1APIStruct = append(builder.v1APIStruct, apiImpl.Interface()) + } + } + } else { + builder.v1APIStruct = append(builder.v1APIStruct, apiImpl.Interface()) + } + } + return nil +} + +func (builder *RPCBuilder) Build(version string, limiter *ratelimit.RateLimiter) *jsonrpc.RPCServer { + serverOptions := make([]jsonrpc.ServerOption, 0) + serverOptions = append(serverOptions, jsonrpc.WithProxyBind(jsonrpc.PBMethod)) + + server := jsonrpc.NewServer(serverOptions...) + switch version { + case "v0": + var fullNodeV0 v0api.FullNodeStruct + for _, apiStruct := range builder.v0APIStruct { + permission.PermissionProxy(apiStruct, &fullNodeV0) + } + + if limiter != nil { + var rateLimitAPI v0api.FullNodeStruct + limiter.WraperLimiter(fullNodeV0, &rateLimitAPI) + fullNodeV0 = rateLimitAPI + } + + for _, nameSpace := range builder.namespace { + server.Register(nameSpace, &fullNodeV0) + } + case "v1": + var fullNode v1api.FullNodeStruct + for _, apiStruct := range builder.v1APIStruct { + permission.PermissionProxy(apiStruct, &fullNode) + } + + if limiter != nil { + var rateLimitAPI v1api.FullNodeStruct + limiter.WraperLimiter(fullNode, &rateLimitAPI) + fullNode = rateLimitAPI + } + + for _, nameSpace := range builder.namespace { + server.Register(nameSpace, &fullNode) + } + default: + panic("invalid version: " + version) + } + + return server +} diff --git a/app/node/rpc_test.go b/app/node/rpc_test.go new file mode 100644 index 0000000000..8e08543db7 --- /dev/null +++ b/app/node/rpc_test.go @@ -0,0 +1,169 @@ +package node + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/filecoin-project/go-jsonrpc" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/api/permission" + "github.com/stretchr/testify/require" + "gotest.tools/assert" +) + +func TestWsBuilder(t *testing.T) { + tf.UnitTest(t) + + nameSpace := "Test" + builder := 
NewBuilder().NameSpace(nameSpace) + err := builder.AddServices(&tmodule1{}, &tmodule2{}) + require.NoError(t, err) + + server := mockBuild(builder) + testServ := httptest.NewServer(server) + defer testServ.Close() + var client FullAdapter + closer, err := jsonrpc.NewClient( + context.Background(), + "ws://"+testServ.Listener.Addr().String(), + nameSpace, + &client, + nil) + require.NoError(t, err) + defer closer() + + result, err := client.Test1(context.Background()) + require.NoError(t, err) + assert.Equal(t, result, "test") +} + +func TestJsonrpc(t *testing.T) { + tf.UnitTest(t) + + nameSpace := "Test" + builder := NewBuilder().NameSpace(nameSpace) + err := builder.AddService(&tmodule1{}) + require.NoError(t, err) + + server := mockBuild(builder) + testServ := httptest.NewServer(server) + defer testServ.Close() + + http.Handle("/rpc/v1", server) + + req := struct { + Jsonrpc string `json:"jsonrpc"` + ID int64 `json:"id,omitempty"` + Method string `json:"method"` + Meta map[string]string `json:"meta,omitempty"` + }{ + Jsonrpc: "2.0", + ID: 1, + Method: "Test.Test1", + } + reqBytes, err := json.Marshal(req) + require.NoError(t, err) + httpRes, err := http.Post("http://"+testServ.Listener.Addr().String()+"/rpc/v1", "", bytes.NewReader(reqBytes)) + require.NoError(t, err) + assert.Equal(t, httpRes.Status, "200 OK") + result, err := io.ReadAll(httpRes.Body) + require.NoError(t, err) + res := struct { + Result string `json:"result"` + }{} + err = json.Unmarshal(result, &res) + require.NoError(t, err) + assert.Equal(t, res.Result, "test") +} + +type tmodule1 struct{} + +func (m *tmodule1) V0API() MockAPI1 { //nolint + return &mockAPI1{} +} + +func (m *tmodule1) API() MockAPI1 { //nolint + return &mockAPI1{} +} + +type tmodule2 struct{} + +func (m *tmodule2) V0API() MockAPI2 { //nolint + return &mockAPI2{} +} + +func (m *tmodule2) API() MockAPI2 { //nolint + return &mockAPI2{} +} + +var _ MockAPI1 = &mockAPI1{} + +type MockAPI1 interface { + Test1(ctx context.Context) 
(string, error) +} + +type MockAPI2 interface { + Test2(ctx context.Context) error +} +type mockAPI1 struct{} + +func (m *mockAPI1) Test1(ctx context.Context) (string, error) { + return "test", nil +} + +var _ MockAPI2 = &mockAPI2{} + +type mockAPI2 struct{} + +func (m *mockAPI2) Test2(ctx context.Context) error { + return nil +} + +type FullAdapter struct { + CommonAdapter + Adapter2 +} + +func (f *FullAdapter) Test1(ctx context.Context) (string, error) { + return f.CommonAdapter.Internal.Test1(ctx) +} + +type CommonAdapter struct { + Adapter1 +} +type Adapter1 struct { + Internal struct { + Test1 func(ctx context.Context) (string, error) `perm:"read"` + } +} + +func (adp *Adapter1) Test1(ctx context.Context) (string, error) { + return adp.Internal.Test1(ctx) +} + +type Adapter2 struct { + Internal struct { + Test2 func(ctx context.Context) (string, error) `perm:"read"` + } +} + +func (adp *Adapter2) Test2(ctx context.Context) (string, error) { + return adp.Internal.Test2(ctx) +} + +func mockBuild(builder *RPCBuilder) *jsonrpc.RPCServer { + server := jsonrpc.NewServer(jsonrpc.WithProxyBind(jsonrpc.PBField)) + var fullNode FullAdapter + for _, apiStruct := range builder.v1APIStruct { + permission.PermissionProxy(apiStruct, &fullNode) + } + for _, nameSpace := range builder.namespace { + server.Register(nameSpace, &fullNode) + } + return server +} diff --git a/app/node/test/api.go b/app/node/test/api.go new file mode 100644 index 0000000000..d9cd5ebe63 --- /dev/null +++ b/app/node/test/api.go @@ -0,0 +1,156 @@ +package test + +import ( + "context" + "encoding/json" + "fmt" + "os" + "strings" + "testing" + + "github.com/filecoin-project/venus/cmd" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/app/node" + th "github.com/filecoin-project/venus/pkg/testhelpers" +) + +// NodeAPI wraps an in-process Node to provide a command API server and client for testing. 
+type NodeAPI struct { + node *node.Node + tb testing.TB +} + +// NewNodeAPI creates a wrangler for a node. +func NewNodeAPI(node *node.Node, tb testing.TB) *NodeAPI { + return &NodeAPI{node, tb} +} + +// RunNodeAPI creates a new API server and `Run()`s it. +func RunNodeAPI(ctx context.Context, node *node.Node, tb testing.TB) (client *Client, stop func()) { + api := NewNodeAPI(node, tb) + return api.Run(ctx) +} + +// Node returns the node backing the API. +func (a *NodeAPI) Node() *node.Node { + return a.node +} + +// Run start s a command API server for the node. +// Returns a client proxy and a function to terminate the NodeAPI server. +func (a *NodeAPI) Run(ctx context.Context) (client *Client, stop func()) { + ready := make(chan interface{}) + ctx, cancel := context.WithCancel(ctx) + go func() { + err := a.node.RunRPCAndWait(ctx, cmd.RootCmdDaemon, ready) + require.NoError(a.tb, err) + }() + <-ready + + addr, err := a.node.Repo().APIAddr() + require.NoError(a.tb, err) + require.NotEmpty(a.tb, addr, "empty API address") + + token, err := a.node.Repo().APIToken() + require.NoError(a.tb, err) + require.NotEmpty(a.tb, token, "empty token") + return &Client{addr, token, a.tb}, func() { + cancel() + } +} + +// Client is an in-process client to a command API. +type Client struct { + address string + token string + tb testing.TB +} + +// Address returns the address string to which the client sends command RPCs. +func (c *Client) Address() string { + return c.address +} + +func (c *Client) run(ctx context.Context, command ...string) (*th.CmdOutput, int, error) { + c.tb.Helper() + args := []string{ + "venus", // A dummy first arg is required, simulating shell invocation. + fmt.Sprintf("--cmdapiaddr=%s", c.address), + fmt.Sprintf("--token=%s", c.token), + } + args = append(args, command...) + + // Create pipes for the client to write stdout and stderr. 
+ readStdOut, writeStdOut, err := os.Pipe() + require.NoError(c.tb, err) + readStdErr, writeStdErr, err := os.Pipe() + require.NoError(c.tb, err) + var readStdin *os.File // no stdin needed + + exitCode, err := cmd.Run(ctx, args, readStdin, writeStdOut, writeStdErr) + // Close the output side of the pipes so that ReadAll() on the read ends can complete. + require.NoError(c.tb, writeStdOut.Close()) + require.NoError(c.tb, writeStdErr.Close()) + + out := th.ReadOutput(c.tb, command, readStdOut, readStdErr) + + return out, exitCode, err +} + +// Run runs a CLI command and returns its output. +func (c *Client) Run(ctx context.Context, command ...string) *th.CmdOutput { + out, exitCode, err := c.run(ctx, command...) + if err != nil { + out.SetInvocationError(err) + } else { + out.SetStatus(exitCode) + } + require.NoError(c.tb, err, "client execution error") + + return out +} + +// RunSuccess runs a command and asserts that it succeeds (status of zero and logs no errors). +func (c *Client) RunSuccess(ctx context.Context, command ...string) *th.CmdOutput { + output := c.Run(ctx, command...) + output.AssertSuccess() + return output +} + +// RunFail runs a command and asserts that it fails with a specified message on stderr. +func (c *Client) RunFail(ctx context.Context, err string, command ...string) *th.CmdOutput { + output, exitCode, _ := c.run(ctx, command...) + output.SetStatus(exitCode) + output.AssertFail(err) + return output +} + +// RunJSON runs a command, asserts success, and parses the response as JSON. +func (c *Client) RunJSON(ctx context.Context, command ...string) map[string]interface{} { + out := c.RunSuccess(ctx, command...) + var parsed map[string]interface{} + require.NoError(c.tb, json.Unmarshal([]byte(out.ReadStdout()), &parsed)) + return parsed +} + +// RunMarshaledJSON runs a command, asserts success, and marshals the JSON response. 
+func (c *Client) RunMarshaledJSON(ctx context.Context, result interface{}, command ...string) { + out := c.RunSuccess(ctx, command...) + require.NoError(c.tb, json.Unmarshal([]byte(out.ReadStdout()), &result)) +} + +// RunSuccessFirstLine executes the given command, asserts success and returns +// the first line of stdout. +func (c *Client) RunSuccessFirstLine(ctx context.Context, args ...string) string { + return c.RunSuccessLines(ctx, args...)[0] +} + +// RunSuccessLines executes the given command, asserts success and returns +// an array of lines of the stdout. +func (c *Client) RunSuccessLines(ctx context.Context, args ...string) []string { + output := c.RunSuccess(ctx, args...) + result := output.ReadStdoutTrimNewlines() + return strings.Split(result, "\n") +} diff --git a/app/node/test/builder.go b/app/node/test/builder.go new file mode 100644 index 0000000000..9e94083965 --- /dev/null +++ b/app/node/test/builder.go @@ -0,0 +1,139 @@ +package test + +import ( + "context" + "testing" + + "github.com/filecoin-project/venus/pkg/wallet" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/genesis" + "github.com/filecoin-project/venus/pkg/repo" + gengen "github.com/filecoin-project/venus/tools/gengen/util" +) + +// NodeBuilder creates and configures Filecoin nodes for in-process testing. +// This is intended to replace use of GenNode and the various other node construction entry points +// that end up there. +// Note that (August 2019) there are two things called "config": the configuration read in from +// file to the config.configModule structure, and node.configModule which is really just some dependency +// injection. This builder avoids exposing the latter directly. +type NodeBuilder struct { + // Initialisation function for the genesis block and state. + gif genesis.InitFunc + // Options to the repo initialisation. 
+ initOpts []node.InitOpt + // Mutations to be applied to node config after initialisation. + configMutations []node.ConfigOpt + // Mutations to be applied to the node builder config before building. + builderOpts []node.BuilderOpt + + tb testing.TB +} + +// NewNodeBuilder creates a new node builder. +func NewNodeBuilder(tb testing.TB) *NodeBuilder { + return &NodeBuilder{ + gif: gengen.MakeGenesisFunc(gengen.NetworkName("gfctest")), + initOpts: []node.InitOpt{}, + configMutations: []node.ConfigOpt{ + // Default configurations that make sense for integration tests. + // The can be overridden by subsequent `withConfigChanges`. + node.ConfigOpt(func(c *config.Config) { + // Bind only locally, defer port selection until binding. + c.API.APIAddress = "/ip4/127.0.0.1/tcp/0" + c.Swarm.Address = "/ip4/0.0.0.0/tcp/0" + }), + node.ConfigOpt(func(c *config.Config) { + c.Bootstrap.MinPeerThreshold = 0 + }), + }, + builderOpts: []node.BuilderOpt{ + node.SetWalletPassword(wallet.TestPassword), + }, + tb: tb, + } +} + +// WithGenesisInit sets the built nodes' genesis function. +func (b *NodeBuilder) WithGenesisInit(gif genesis.InitFunc) *NodeBuilder { + b.gif = gif + return b +} + +// WithInitOpt adds one or more options to repo initialisation. +func (b *NodeBuilder) WithInitOpt(opts ...node.InitOpt) *NodeBuilder { + b.initOpts = append(b.initOpts, opts...) + return b +} + +// WithBuilderOpt adds one or more node building options to node creation. +func (b *NodeBuilder) WithBuilderOpt(opts ...node.BuilderOpt) *NodeBuilder { + b.builderOpts = append(b.builderOpts, opts...) + return b +} + +// WithConfig adds a configuration mutation function to be invoked after repo initialisation. +func (b *NodeBuilder) WithConfig(cm node.ConfigOpt) *NodeBuilder { + b.configMutations = append(b.configMutations, cm) + return b +} + +// Build creates a node as specified by this builder. +// This many be invoked multiple times to create many nodes. 
+func (b *NodeBuilder) Build(ctx context.Context) *node.Node { + // Initialise repo. + repo := repo.NewInMemoryRepo() + + for _, opt := range b.configMutations { + opt(repo.Config()) + } + b.requireNoError(node.Init(ctx, repo, b.gif, b.initOpts...)) + + // Initialize the node. + repoConfigOpts, err := node.OptionsFromRepo(repo) + b.requireNoError(err) + + nd, err := node.New(ctx, append(repoConfigOpts, b.builderOpts...)...) + b.requireNoError(err) + return nd +} + +// BuildAndStart build a node and starts it. +func (b *NodeBuilder) BuildAndStart(ctx context.Context) *node.Node { + n := b.Build(ctx) + err := n.Start(ctx) + b.requireNoError(err) + return n +} + +// BuildAndStartAPI is a convenience function composing BuildAndStart with +// RunNodeAPI +func (b *NodeBuilder) BuildAndStartAPI(ctx context.Context) (*node.Node, *Client, func()) { + n := b.BuildAndStart(ctx) + c, apiDone := RunNodeAPI(ctx, n, b.tb) + done := func() { + apiDone() + n.Stop(ctx) + } + return n, c, done +} + +func (b *NodeBuilder) requireNoError(err error) { + b.tb.Helper() + require.NoError(b.tb, err) +} + +// BuildMany builds numNodes nodes with the builder's configuration. 
+func (b *NodeBuilder) BuildMany(ctx context.Context, numNodes int) []*node.Node { + var out []*node.Node + for i := 0; i < numNodes; i++ { + nd := b.Build(ctx) + out = append(out, nd) + } + + return out +} diff --git a/app/node/test/node.go b/app/node/test/node.go new file mode 100644 index 0000000000..f30603b712 --- /dev/null +++ b/app/node/test/node.go @@ -0,0 +1,219 @@ +package test + +import ( + "context" + "math/rand" + "testing" + + ds "github.com/ipfs/go-datastore" + blockstore "github.com/ipfs/go-ipfs-blockstore" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/fixtures/fortest" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/impl" + "github.com/filecoin-project/venus/pkg/wallet" + gengen "github.com/filecoin-project/venus/tools/gengen/util" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// ChainSeed is a generalized struct for configuring node +type ChainSeed struct { + info *gengen.RenderedGenInfo + bstore blockstoreutil.Blockstore +} + +// MakeChainSeed creates a chain seed struct (see above) from a given +// genesis config +func MakeChainSeed(t *testing.T, cfg *gengen.GenesisCfg) *ChainSeed { + t.Helper() + + mds := ds.NewMapDatastore() + bstore := blockstoreutil.Adapt(blockstore.NewBlockstore(mds)) + info, err := gengen.GenGen(context.TODO(), cfg, bstore) + require.NoError(t, err) + return &ChainSeed{ + info: info, + bstore: bstore, + } +} + +// GenesisInitFunc is a th.GenesisInitFunc using the chain seed +func (cs *ChainSeed) GenesisInitFunc(cst 
cbor.IpldStore, bs blockstoreutil.Blockstore) (*types.BlockHeader, error) { + err := blockstoreutil.CopyBlockstore(context.TODO(), cs.bstore, bs) + if err != nil { + return nil, err + } + + var blk types.BlockHeader + if err := cst.Get(context.TODO(), cs.info.GenesisCid, &blk); err != nil { + return nil, err + } + + return &blk, nil +} + +// GiveKey gives the given key to the given node +func (cs *ChainSeed) GiveKey(ctx context.Context, t *testing.T, nd *node.Node, key int) address.Address { + t.Helper() + bcks := nd.Wallet().Wallet.Backends(wallet.DSBackendType) + require.Len(t, bcks, 1, "expected to get exactly one datastore backend") + + dsb := bcks[0].(*wallet.DSBackend) + _ = dsb.SetPassword(ctx, wallet.TestPassword) + kinfo := cs.info.Keys[key] + require.NoError(t, dsb.ImportKey(ctx, kinfo)) + + addr, err := kinfo.Address() + require.NoError(t, err) + + return addr +} + +// GiveMiner gives the specified miner to the node. Returns the address and the owner addresss +func (cs *ChainSeed) GiveMiner(t *testing.T, nd *node.Node, which int) (address.Address, address.Address) { + t.Helper() + cfg := nd.Repo().Config() + m := cs.info.Miners[which] + + require.NoError(t, nd.Repo().ReplaceConfig(cfg)) + + ownerAddr, err := cs.info.Keys[m.Owner].Address() + require.NoError(t, err) + + return m.Address, ownerAddr +} + +// Addr returns the address for the given key +func (cs *ChainSeed) Addr(t *testing.T, key int) address.Address { + t.Helper() + k := cs.info.Keys[key] + + a, err := k.Address() + if err != nil { + t.Fatal(err) + } + + return a +} + +// KeyInitOpt is a node init option that imports one of the chain seed's +// keys to a node's wallet +func (cs *ChainSeed) KeyInitOpt(which int) node.InitOpt { + kinfo := cs.info.Keys[which] + return node.ImportKeyOpt(kinfo) +} + +// FixtureChainSeed returns the genesis function that +func FixtureChainSeed(t *testing.T) *ChainSeed { + return MakeChainSeed(t, &fortest.TestGenGenConfig) +} + +// DefaultAddressConfigOpt is a node 
config option setting the default address +func DefaultAddressConfigOpt(addr address.Address) node.ConfigOpt { + return func(cfg *config.Config) { + cfg.Wallet.DefaultAddress = addr + } +} + +// ConnectNodes connects two nodes together +func ConnectNodes(t *testing.T, a, b *node.Node) { + t.Helper() + pi := peer.AddrInfo{ + ID: b.Network().Host.ID(), + Addrs: b.Network().Host.Addrs(), + } + + err := a.Network().Host.Connect(context.TODO(), pi) + if err != nil { + t.Fatal(err) + } +} + +// FakeProofVerifierBuilderOpts returns default configuration for testing +func FakeProofVerifierBuilderOpts() []node.BuilderOpt { + return []node.BuilderOpt{ + node.VerifierConfigOption(&impl.FakeVerifier{}), + } +} + +// StartNodes starts some nodes, failing on any error. +func StartNodes(t *testing.T, nds []*node.Node) { + t.Helper() + for _, nd := range nds { + if err := nd.Start(context.Background()); err != nil { + t.Fatal(err) + } + } +} + +// StopNodes initiates shutdown of some nodes. +func StopNodes(nds []*node.Node) { + for _, nd := range nds { + nd.Stop(context.Background()) + } +} + +// MustCreateStorageMinerResult contains the result of a CreateStorageMiner command +type MustCreateStorageMinerResult struct { + MinerAddress *address.Address + Err error +} + +// PeerKeys are a list of keys for peers that can be used in testing. +var PeerKeys = []crypto.PrivKey{ + mustGenKey(101), + mustGenKey(102), +} + +// MakeTestGenCfg returns a genesis configuration used for tests. 
+// This config has one miner with numSectors sectors and two accounts, +// the first is the miner's owner/worker and the accounts both have 10000 FIL +func MakeTestGenCfg(t *testing.T, numSectors int) *gengen.GenesisCfg { + commCfgs, err := gengen.MakeCommitCfgs(numSectors) + require.NoError(t, err) + return &gengen.GenesisCfg{ + KeysToGen: 2, + Miners: []*gengen.CreateStorageMinerConfig{ + { + Owner: 0, + PeerID: mustPeerID(PeerKeys[0]).Pretty(), + CommittedSectors: commCfgs, + SealProofType: constants.DevSealProofType, + MarketBalance: abi.NewTokenAmount(0), + }, + }, + Network: "gfctest", + PreallocatedFunds: []string{ + "10000", + "10000", + }, + } +} + +func mustGenKey(seed int64) crypto.PrivKey { + r := rand.New(rand.NewSource(seed)) + priv, _, err := crypto.GenerateEd25519Key(r) + if err != nil { + panic(err) + } + + return priv +} + +func mustPeerID(k crypto.PrivKey) peer.ID { + pid, err := peer.IDFromPrivateKey(k) + if err != nil { + panic(err) + } + return pid +} diff --git a/app/node/test/setup.go b/app/node/test/setup.go new file mode 100644 index 0000000000..34d3aa0404 --- /dev/null +++ b/app/node/test/setup.go @@ -0,0 +1,82 @@ +package test + +import ( + "context" + "encoding/json" + "os" + "testing" + "time" + + th "github.com/filecoin-project/venus/pkg/testhelpers" + + "github.com/filecoin-project/go-address" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/clock" + "github.com/filecoin-project/venus/pkg/constants" + gengen "github.com/filecoin-project/venus/tools/gengen/util" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" +) + +const blockTime = builtin.EpochDurationSeconds * time.Second + +func CreateBootstrapSetup(t *testing.T) (*ChainSeed, *gengen.GenesisCfg, clock.ChainEpochClock) { + // set up paths and fake clock. + genTime := int64(1000000000) + fakeClock := clock.NewFake(time.Unix(genTime, 0)) + + // Load genesis config fixture. 
+ genCfgPath := th.Root("fixtures/setup.json") + genCfg := loadGenesisConfig(t, genCfgPath) + genCfg.Miners = append(genCfg.Miners, &gengen.CreateStorageMinerConfig{ + Owner: 5, + SealProofType: constants.DevSealProofType, + }) + seed := MakeChainSeed(t, genCfg) + chainClock := clock.NewChainClockFromClock(uint64(genTime), blockTime, fakeClock) + + return seed, genCfg, chainClock +} + +func CreateBootstrapMiner(ctx context.Context, t *testing.T, seed *ChainSeed, chainClock clock.ChainEpochClock, genCfg *gengen.GenesisCfg) *node.Node { + // create bootstrap miner + bootstrapMiner := NewNodeBuilder(t). + WithGenesisInit(seed.GenesisInitFunc). + WithBuilderOpt(FakeProofVerifierBuilderOpts()...). + WithBuilderOpt(node.ChainClockConfigOption(chainClock)). + WithBuilderOpt(node.MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)). + Build(ctx) + + addr := seed.GiveKey(ctx, t, bootstrapMiner, 0) + err := bootstrapMiner.ConfigModule().API().ConfigSet(ctx, "walletModule.defaultAddress", addr.String()) + require.NoError(t, err) + + _, _, err = initNodeGenesisMiner(ctx, t, bootstrapMiner, seed, genCfg.Miners[0].Owner) + require.NoError(t, err) + err = bootstrapMiner.Start(ctx) + require.NoError(t, err) + + return bootstrapMiner +} + +func initNodeGenesisMiner(ctx context.Context, t *testing.T, nd *node.Node, seed *ChainSeed, minerIdx int) (address.Address, address.Address, error) { + seed.GiveKey(ctx, t, nd, minerIdx) + miner, owner := seed.GiveMiner(t, nd, 0) + + return miner, owner, nil +} + +func loadGenesisConfig(t *testing.T, path string) *gengen.GenesisCfg { + configFile, err := os.Open(path) + if err != nil { + t.Errorf("failed to open config file %s: %s", path, err) + } + defer func() { _ = configFile.Close() }() + + var cfg gengen.GenesisCfg + if err := json.NewDecoder(configFile).Decode(&cfg); err != nil { + t.Errorf("failed to parse config: %s", err) + } + return &cfg +} diff --git a/app/paths/paths.go b/app/paths/paths.go new file mode 100644 index 
0000000000..e09448ef47 --- /dev/null +++ b/app/paths/paths.go @@ -0,0 +1,29 @@ +package paths + +import ( + "os" + + "github.com/mitchellh/go-homedir" +) + +// node repo path defaults +const ( + filPathVar = "VENUS_PATH" + defaultRepoDir = "~/.venus" +) + +// GetRepoPath returns the path of the venus repo from a potential override +// string, the VENUS_PATH environment variable and a default of ~/.venus/repo. +func GetRepoPath(override string) (string, error) { + // override is first precedence + if override != "" { + return homedir.Expand(override) + } + // Environment variable is second precedence + envRepoDir := os.Getenv(filPathVar) + if envRepoDir != "" { + return homedir.Expand(envRepoDir) + } + // Default is third precedence + return homedir.Expand(defaultRepoDir) +} diff --git a/app/paths/paths_test.go b/app/paths/paths_test.go new file mode 100644 index 0000000000..67f0561ba2 --- /dev/null +++ b/app/paths/paths_test.go @@ -0,0 +1,18 @@ +package paths + +import ( + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/stretchr/testify/require" +) + +func TestRepoPathGet(t *testing.T) { + tf.UnitTest(t) + + t.Run("get default repo path", func(t *testing.T) { + _, err := GetRepoPath("") + + require.NoError(t, err) + }) +} diff --git a/app/submodule/blockstore/blockstore_api.go b/app/submodule/blockstore/blockstore_api.go new file mode 100644 index 0000000000..5982c91162 --- /dev/null +++ b/app/submodule/blockstore/blockstore_api.go @@ -0,0 +1,97 @@ +package blockstore + +import ( + "context" + "fmt" + "sync" + + "github.com/filecoin-project/venus/venus-shared/types" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + offline "github.com/ipfs/go-ipfs-exchange-offline" + ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" +) + +var _ v1api.IBlockStore = &blockstoreAPI{} + 
+type blockstoreAPI struct { //nolint + blockstore *BlockstoreSubmodule +} + +func (blockstoreAPI *blockstoreAPI) ChainReadObj(ctx context.Context, ocid cid.Cid) ([]byte, error) { + blk, err := blockstoreAPI.blockstore.Blockstore.Get(ctx, ocid) + if err != nil { + return nil, fmt.Errorf("blockstore get: %w", err) + } + + return blk.RawData(), nil +} + +func (blockstoreAPI *blockstoreAPI) ChainDeleteObj(ctx context.Context, obj cid.Cid) error { + return blockstoreAPI.blockstore.Blockstore.DeleteBlock(ctx, obj) +} + +func (blockstoreAPI *blockstoreAPI) ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) { + return blockstoreAPI.blockstore.Blockstore.Has(ctx, obj) +} + +func (blockstoreAPI *blockstoreAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (types.ObjStat, error) { + bs := blockstoreAPI.blockstore.Blockstore + bsvc := blockservice.New(bs, offline.Exchange(bs)) + + dag := merkledag.NewDAGService(bsvc) + + seen := cid.NewSet() + + var statslk sync.Mutex + var stats types.ObjStat + collect := true + + walker := func(ctx context.Context, c cid.Cid) ([]*ipld.Link, error) { + if c.Prefix().Codec == cid.FilCommitmentSealed || c.Prefix().Codec == cid.FilCommitmentUnsealed { + return []*ipld.Link{}, nil + } + + nd, err := dag.Get(ctx, c) + if err != nil { + return nil, err + } + + if collect { + s := uint64(len(nd.RawData())) + statslk.Lock() + stats.Size = stats.Size + s + stats.Links = stats.Links + 1 + statslk.Unlock() + } + + return nd.Links(), nil + } + + if base != cid.Undef { + collect = false + if err := merkledag.Walk(ctx, walker, base, seen.Visit, merkledag.Concurrent()); err != nil { + return types.ObjStat{}, err + } + collect = true + } + + if err := merkledag.Walk(ctx, walker, obj, seen.Visit, merkledag.Concurrent()); err != nil { + return types.ObjStat{}, err + } + + return stats, nil +} + +func (blockstoreAPI *blockstoreAPI) ChainPutObj(ctx context.Context, blk blocks.Block) error { + return 
blockstoreAPI.blockstore.Blockstore.Put(ctx, blk) +} + +func (blockstoreAPI *blockstoreAPI) PutMany(ctx context.Context, blocks []blocks.Block) error { + return blockstoreAPI.blockstore.Blockstore.PutMany(ctx, blocks) +} diff --git a/app/submodule/blockstore/blockstore_submodule.go b/app/submodule/blockstore/blockstore_submodule.go new file mode 100644 index 0000000000..058db49f27 --- /dev/null +++ b/app/submodule/blockstore/blockstore_submodule.go @@ -0,0 +1,39 @@ +package blockstore + +import ( + "context" + + "github.com/filecoin-project/venus/pkg/repo" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" +) + +// BlockstoreSubmodule enhances the `Node` with local key/value storing capabilities. +// Note: at present: +// - `blockstore` is shared by chain/graphsync and piece/bitswap data +// - `cborStore` is used for chain state and shared with piece data exchange for deals at the moment. +type BlockstoreSubmodule struct { //nolint + // blockstore is the un-networked blocks interface + Blockstore blockstoreutil.Blockstore +} + +type blockstoreRepo interface { + Repo() repo.Repo +} + +// NewBlockstoreSubmodule creates a new block store submodule. 
+func NewBlockstoreSubmodule(ctx context.Context, repo blockstoreRepo) (*BlockstoreSubmodule, error) { + // set up block store + bs := repo.Repo().Datastore() + return &BlockstoreSubmodule{ + Blockstore: bs, + }, nil +} + +func (bsm *BlockstoreSubmodule) API() v0api.IBlockStore { + return &blockstoreAPI{blockstore: bsm} +} + +func (bsm *BlockstoreSubmodule) V0API() v0api.IBlockStore { + return &blockstoreAPI{blockstore: bsm} +} diff --git a/app/submodule/chain/account_api.go b/app/submodule/chain/account_api.go new file mode 100644 index 0000000000..0b9f1408d7 --- /dev/null +++ b/app/submodule/chain/account_api.go @@ -0,0 +1,31 @@ +package chain + +import ( + "context" + "fmt" + + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" +) + +var _ v1api.IAccount = &accountAPI{} + +type accountAPI struct { + chain *ChainSubmodule +} + +// NewAccountAPI create a new account api +func NewAccountAPI(chain *ChainSubmodule) v1api.IAccount { + return &accountAPI{chain: chain} +} + +// StateAccountKey returns the public key address of the given ID address +func (accountAPI *accountAPI) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { + ts, err := accountAPI.chain.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return address.Undef, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + return accountAPI.chain.Stmgr.ResolveToKeyAddress(ctx, addr, ts) +} diff --git a/app/submodule/chain/actor_api.go b/app/submodule/chain/actor_api.go new file mode 100644 index 0000000000..6aa7b68a72 --- /dev/null +++ b/app/submodule/chain/actor_api.go @@ -0,0 +1,30 @@ +package chain + +import ( + "context" + + "github.com/filecoin-project/go-address" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var _ v1api.IActor = &actorAPI{} + +type 
actorAPI struct { + chain *ChainSubmodule +} + +// NewActorAPI new actor api +func NewActorAPI(chain *ChainSubmodule) v1api.IActor { + return &actorAPI{chain: chain} +} + +// StateGetActor returns the indicated actor's nonce and balance. +func (actorAPI *actorAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + return actorAPI.chain.Stmgr.GetActorAtTsk(ctx, actor, tsk) +} + +// ActorLs returns a channel with actors from the latest state on the chain +func (actorAPI *actorAPI) ListActor(ctx context.Context) (map[address.Address]*types.Actor, error) { + return actorAPI.chain.ChainReader.LsActors(ctx) +} diff --git a/app/submodule/chain/chain_api.go b/app/submodule/chain/chain_api.go new file mode 100644 index 0000000000..61aadda010 --- /dev/null +++ b/app/submodule/chain/chain_api.go @@ -0,0 +1,14 @@ +package chain + +import ( + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" +) + +type chainAPI struct { // nolint: golint + v1api.IAccount + v1api.IActor + v1api.IMinerState + v1api.IChainInfo +} + +var _ v1api.IChain = &chainAPI{} diff --git a/app/submodule/chain/chain_submodule.go b/app/submodule/chain/chain_submodule.go new file mode 100644 index 0000000000..8e570f768f --- /dev/null +++ b/app/submodule/chain/chain_submodule.go @@ -0,0 +1,121 @@ +package chain + +import ( + "context" + "time" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + + apiwrapper "github.com/filecoin-project/venus/app/submodule/chain/v0api" + "github.com/filecoin-project/venus/pkg/beacon" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/consensus" + "github.com/filecoin-project/venus/pkg/consensusfault" + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/statemanger" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + "github.com/filecoin-project/venus/pkg/vm" + 
"github.com/filecoin-project/venus/pkg/vmsupport" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// ChainSubmodule enhances the `Node` with chain capabilities. +type ChainSubmodule struct { //nolint + ChainReader *chain.Store + MessageStore *chain.MessageStore + Processor *consensus.DefaultProcessor + Fork fork.IFork + SystemCall vm.SyscallsImpl + + CheckPoint types.TipSetKey + Drand beacon.Schedule + + config chainConfig + + Stmgr *statemanger.Stmgr + // Wait for confirm message + Waiter *chain.Waiter +} + +type chainConfig interface { + GenesisCid() cid.Cid + BlockTime() time.Duration + Repo() repo.Repo + Verifier() ffiwrapper.Verifier +} + +// NewChainSubmodule creates a new chain submodule. +func NewChainSubmodule(ctx context.Context, + config chainConfig, + circulatiingSupplyCalculator chain.ICirculatingSupplyCalcualtor, +) (*ChainSubmodule, error) { + repo := config.Repo() + // initialize chain store + chainStore := chain.NewStore(repo.ChainDatastore(), repo.Datastore(), config.GenesisCid(), circulatiingSupplyCalculator) + // drand + genBlk, err := chainStore.GetGenesisBlock(context.TODO()) + if err != nil { + return nil, err + } + + drand, err := beacon.DrandConfigSchedule(genBlk.Timestamp, repo.Config().NetworkParams.BlockDelay, repo.Config().NetworkParams.DrandSchedule) + if err != nil { + return nil, err + } + + messageStore := chain.NewMessageStore(config.Repo().Datastore(), repo.Config().NetworkParams.ForkUpgradeParam) + fork, err := fork.NewChainFork(ctx, chainStore, cbor.NewCborStore(config.Repo().Datastore()), config.Repo().Datastore(), repo.Config().NetworkParams) + if err != nil { + return nil, err + } + faultChecker := consensusfault.NewFaultChecker(chainStore, fork) + syscalls := vmsupport.NewSyscalls(faultChecker, config.Verifier()) + processor := consensus.NewDefaultProcessor(syscalls, 
circulatiingSupplyCalculator) + + waiter := chain.NewWaiter(chainStore, messageStore, config.Repo().Datastore(), cbor.NewCborStore(config.Repo().Datastore())) + + store := &ChainSubmodule{ + ChainReader: chainStore, + MessageStore: messageStore, + Processor: processor, + SystemCall: syscalls, + Fork: fork, + Drand: drand, + config: config, + Waiter: waiter, + CheckPoint: chainStore.GetCheckPoint(), + } + err = store.ChainReader.Load(context.TODO()) + if err != nil { + return nil, err + } + return store, nil +} + +// Start loads the chain from disk. +func (chain *ChainSubmodule) Start(ctx context.Context) error { + return chain.Fork.Start(ctx) +} + +// Stop stop the chain head event +func (chain *ChainSubmodule) Stop(ctx context.Context) { + chain.ChainReader.Stop() +} + +// API chain module api implement +func (chain *ChainSubmodule) API() v1api.IChain { + return &chainAPI{ + IAccount: NewAccountAPI(chain), + IActor: NewActorAPI(chain), + IChainInfo: NewChainInfoAPI(chain), + IMinerState: NewMinerStateAPI(chain), + } +} + +func (chain *ChainSubmodule) V0API() v0api.IChain { + return &apiwrapper.WrapperV1IChain{IChain: chain.API()} +} diff --git a/app/submodule/chain/chaininfo_api.go b/app/submodule/chain/chaininfo_api.go new file mode 100644 index 0000000000..a73648bb93 --- /dev/null +++ b/app/submodule/chain/chaininfo_api.go @@ -0,0 +1,749 @@ +package chain + +import ( + "bufio" + "context" + "fmt" + "io" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + acrypto "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/venus-shared/actors" + v1api 
"github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var _ v1api.IChainInfo = &chainInfoAPI{} + +type chainInfoAPI struct { //nolint + chain *ChainSubmodule +} + +var log = logging.Logger("chain") + +// NewChainInfoAPI new chain info api +func NewChainInfoAPI(chain *ChainSubmodule) v1api.IChainInfo { + return &chainInfoAPI{chain: chain} +} + +// todo think which module should this api belong +// BlockTime returns the block time used by the consensus protocol. +// BlockTime returns the block time +func (cia *chainInfoAPI) BlockTime(ctx context.Context) time.Duration { + return cia.chain.config.BlockTime() +} + +// ChainLs returns an iterator of tipsets from specified head by tsKey to genesis +func (cia *chainInfoAPI) ChainList(ctx context.Context, tsKey types.TipSetKey, count int) ([]types.TipSetKey, error) { + fromTS, err := cia.chain.ChainReader.GetTipSet(ctx, tsKey) + if err != nil { + return nil, fmt.Errorf("could not retrieve network name %w", err) + } + tipset, err := cia.chain.ChainReader.Ls(ctx, fromTS, count) + if err != nil { + return nil, err + } + tipsetKey := make([]types.TipSetKey, len(tipset)) + for i, ts := range tipset { + tipsetKey[i] = ts.Key() + } + return tipsetKey, nil +} + +// ProtocolParameters return chain parameters +func (cia *chainInfoAPI) ProtocolParameters(ctx context.Context) (*types.ProtocolParams, error) { + networkName, err := cia.getNetworkName(ctx) + if err != nil { + return nil, fmt.Errorf("could not retrieve network name %w", err) + } + + var supportedSectors []types.SectorInfo + for proof := range miner0.SupportedProofTypes { + size, err := proof.SectorSize() + if err != nil { + return nil, fmt.Errorf("could not retrieve network name %w", err) + } + maxUserBytes := abi.PaddedPieceSize(size).Unpadded() + supportedSectors = append(supportedSectors, types.SectorInfo{Size: size, MaxPieceSize: maxUserBytes}) + } + + return &types.ProtocolParams{ + Network: 
networkName, + BlockTime: cia.chain.config.BlockTime(), + SupportedSectors: supportedSectors, + }, nil +} + +func (cia *chainInfoAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { + return cia.chain.ChainReader.GetHead(), nil +} + +// ChainSetHead sets `key` as the new head of this chain iff it exists in the nodes chain store. +func (cia *chainInfoAPI) ChainSetHead(ctx context.Context, key types.TipSetKey) error { + ts, err := cia.chain.ChainReader.GetTipSet(ctx, key) + if err != nil { + return err + } + return cia.chain.ChainReader.SetHead(ctx, ts) +} + +// ChainTipSet returns the tipset at the given key +func (cia *chainInfoAPI) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + return cia.chain.ChainReader.GetTipSet(ctx, key) +} + +// ChainGetTipSetByHeight looks back for a tipset at the specified epoch. +// If there are no blocks at the specified epoch, a tipset at an earlier epoch +// will be returned. +func (cia *chainInfoAPI) ChainGetTipSetByHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { + ts, err := cia.chain.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("fail to load tipset %v", err) + } + return cia.chain.ChainReader.GetTipSetByHeight(ctx, ts, height, true) +} + +// ChainGetTipSetAfterHeight looks back for a tipset at the specified epoch. +// If there are no blocks at the specified epoch, the first non-nil tipset at a later epoch +// will be returned. 
+func (cia *chainInfoAPI) ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { + ts, err := cia.chain.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + return cia.chain.ChainReader.GetTipSetByHeight(ctx, ts, h, false) +} + +// GetParentStateRootActor get the ts ParentStateRoot actor +func (cia *chainInfoAPI) GetActor(ctx context.Context, addr address.Address) (*types.Actor, error) { + return cia.chain.Stmgr.GetActorAtTsk(ctx, addr, types.EmptyTSK) +} + +// GetParentStateRootActor get the ts ParentStateRoot actor +func (cia *chainInfoAPI) GetParentStateRootActor(ctx context.Context, ts *types.TipSet, addr address.Address) (*types.Actor, error) { + _, v, err := cia.chain.Stmgr.ParentStateView(ctx, ts) + if err != nil { + return nil, err + } + act, err := v.LoadActor(ctx, addr) + if err != nil { + return nil, err + } + return act, nil +} + +// ChainGetBlock gets a block by CID +func (cia *chainInfoAPI) ChainGetBlock(ctx context.Context, id cid.Cid) (*types.BlockHeader, error) { + return cia.chain.ChainReader.GetBlock(ctx, id) +} + +// ChainGetMessage reads a message referenced by the specified CID from the +// chain blockstore. 
+func (cia *chainInfoAPI) ChainGetMessage(ctx context.Context, msgID cid.Cid) (*types.Message, error) { + msg, err := cia.chain.MessageStore.LoadMessage(ctx, msgID) + if err != nil { + return nil, err + } + return msg.VMMessage(), nil +} + +// ChainGetMessages gets a message collection by CID +func (cia *chainInfoAPI) ChainGetBlockMessages(ctx context.Context, bid cid.Cid) (*types.BlockMessages, error) { + b, err := cia.chain.ChainReader.GetBlock(ctx, bid) + if err != nil { + return nil, err + } + + smsgs, bmsgs, err := cia.chain.MessageStore.LoadMetaMessages(ctx, b.Messages) + if err != nil { + return nil, err + } + + cids := make([]cid.Cid, len(bmsgs)+len(smsgs)) + + for i, m := range bmsgs { + cids[i] = m.Cid() + } + + for i, m := range smsgs { + cids[i+len(bmsgs)] = m.Cid() + } + + return &types.BlockMessages{ + BlsMessages: bmsgs, + SecpkMessages: smsgs, + Cids: cids, + }, nil +} + +// ChainGetReceipts gets a receipt collection by CID +func (cia *chainInfoAPI) ChainGetReceipts(ctx context.Context, id cid.Cid) ([]types.MessageReceipt, error) { + return cia.chain.MessageStore.LoadReceipts(ctx, id) +} + +// ChainGetFullBlock gets full block(include message) by cid +func (cia *chainInfoAPI) GetFullBlock(ctx context.Context, id cid.Cid) (*types.FullBlock, error) { + var out types.FullBlock + var err error + + out.Header, err = cia.chain.ChainReader.GetBlock(ctx, id) + if err != nil { + return nil, err + } + out.SECPMessages, out.BLSMessages, err = cia.chain.MessageStore.LoadMetaMessages(ctx, out.Header.Messages) + if err != nil { + return nil, err + } + + return &out, nil +} + +// ChainGetMessagesInTipset returns message stores in current tipset +func (cia *chainInfoAPI) ChainGetMessagesInTipset(ctx context.Context, key types.TipSetKey) ([]types.MessageCID, error) { + ts, err := cia.chain.ChainReader.GetTipSet(ctx, key) + if err != nil { + return nil, err + } + if ts.Height() == 0 { + return nil, nil + } + + cm, err := cia.chain.MessageStore.MessagesForTipset(ts) + 
if err != nil { + return nil, err + } + + var out []types.MessageCID + for _, m := range cm { + out = append(out, types.MessageCID{ + Cid: m.Cid(), + Message: m.VMMessage(), + }) + } + + return out, nil +} + +// ChainGetParentMessages returns messages stored in parent tipset of the +// specified block. +func (cia *chainInfoAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([]types.MessageCID, error) { + b, err := cia.ChainGetBlock(ctx, bcid) + if err != nil { + return nil, err + } + + // genesis block has no parent messages... + if b.Height == 0 { + return nil, nil + } + + // TODO: need to get the number of messages better than this + pts, err := cia.chain.ChainReader.GetTipSet(ctx, types.NewTipSetKey(b.Parents...)) + if err != nil { + return nil, err + } + + cm, err := cia.chain.MessageStore.MessagesForTipset(pts) + if err != nil { + return nil, err + } + + var out []types.MessageCID + for _, m := range cm { + out = append(out, types.MessageCID{ + Cid: m.Cid(), + Message: m.VMMessage(), + }) + } + + return out, nil +} + +// ChainGetParentReceipts returns receipts for messages in parent tipset of +// the specified block. 
+func (cia *chainInfoAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([]*types.MessageReceipt, error) { + b, err := cia.ChainGetBlock(ctx, bcid) + if err != nil { + return nil, err + } + + if b.Height == 0 { + return nil, nil + } + + // TODO: need to get the number of messages better than this + pts, err := cia.chain.ChainReader.GetTipSet(ctx, types.NewTipSetKey(b.Parents...)) + if err != nil { + return nil, err + } + + cm, err := cia.chain.MessageStore.MessagesForTipset(pts) + if err != nil { + return nil, err + } + + var out []*types.MessageReceipt + for i := 0; i < len(cm); i++ { + r, err := cia.chain.ChainReader.GetParentReceipt(b, i) + if err != nil { + return nil, err + } + + out = append(out, r) + } + + return out, nil +} + +// ResolveToKeyAddr resolve user address to t0 address +func (cia *chainInfoAPI) ResolveToKeyAddr(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + if ts == nil { + ts = cia.chain.ChainReader.GetHead() + } + return cia.chain.Stmgr.ResolveToKeyAddress(ctx, addr, ts) +} + +// ************Drand****************// +// ChainNotify subscribe to chain head change event +func (cia *chainInfoAPI) ChainNotify(ctx context.Context) (<-chan []*types.HeadChange, error) { + return cia.chain.ChainReader.SubHeadChanges(ctx), nil +} + +//************Drand****************// + +// GetEntry retrieves an entry from the drand server +func (cia *chainInfoAPI) GetEntry(ctx context.Context, height abi.ChainEpoch, round uint64) (*types.BeaconEntry, error) { + rch := cia.chain.Drand.BeaconForEpoch(height).Entry(ctx, round) + select { + case resp := <-rch: + if resp.Err != nil { + return nil, fmt.Errorf("beacon entry request returned error: %s", resp.Err) + } + return &resp.Entry, nil + case <-ctx.Done(): + return nil, fmt.Errorf("context timed out waiting on beacon entry to come back for round %d: %s", round, ctx.Err()) + } +} + +// VerifyEntry verifies that child is a valid entry if its parent is. 
+func (cia *chainInfoAPI) VerifyEntry(parent, child *types.BeaconEntry, height abi.ChainEpoch) bool { + return cia.chain.Drand.BeaconForEpoch(height).VerifyEntry(*parent, *child) != nil +} + +// StateGetBeaconEntry returns the beacon entry for the given filecoin epoch. If +// the entry has not yet been produced, the call will block until the entry +// becomes available +func (cia *chainInfoAPI) StateGetBeaconEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) { + b := cia.chain.Drand.BeaconForEpoch(epoch) + nv := cia.chain.Fork.GetNetworkVersion(ctx, epoch) + rr := b.MaxBeaconRoundForEpoch(nv, epoch) + e := b.Entry(ctx, rr) + + select { + case be, ok := <-e: + if !ok { + return nil, fmt.Errorf("beacon get returned no value") + } + if be.Err != nil { + return nil, be.Err + } + return &be.Entry, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +// StateNetworkName returns the name of the network the node is synced to +func (cia *chainInfoAPI) StateNetworkName(ctx context.Context) (types.NetworkName, error) { + networkName, err := cia.getNetworkName(ctx) + + return types.NetworkName(networkName), err +} + +func (cia *chainInfoAPI) getNetworkName(ctx context.Context) (string, error) { + _, view, err := cia.chain.Stmgr.ParentStateView(ctx, cia.chain.ChainReader.GetHead()) + if err != nil { + return "", err + } + + return view.InitNetworkName(ctx) +} + +// StateGetRandomnessFromTickets is used to sample the chain for randomness. 
+func (cia *chainInfoAPI) StateGetRandomnessFromTickets(ctx context.Context, personalization acrypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) { + ts, err := cia.ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + + r := chain.NewChainRandomnessSource(cia.chain.ChainReader, ts.Key(), cia.chain.Drand, cia.chain.Fork.GetNetworkVersion) + rnv := cia.chain.Fork.GetNetworkVersion(ctx, randEpoch) + + if rnv >= network.Version13 { + return r.GetChainRandomnessV2(ctx, personalization, randEpoch, entropy) + } + + return r.GetChainRandomnessV1(ctx, personalization, randEpoch, entropy) +} + +// StateGetRandomnessFromBeacon is used to sample the beacon for randomness. +func (cia *chainInfoAPI) StateGetRandomnessFromBeacon(ctx context.Context, personalization acrypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) { + ts, err := cia.ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + r := chain.NewChainRandomnessSource(cia.chain.ChainReader, ts.Key(), cia.chain.Drand, cia.chain.Fork.GetNetworkVersion) + rnv := cia.chain.Fork.GetNetworkVersion(ctx, randEpoch) + + if rnv >= network.Version14 { + return r.GetBeaconRandomnessV3(ctx, personalization, randEpoch, entropy) + } else if rnv == network.Version13 { + return r.GetBeaconRandomnessV2(ctx, personalization, randEpoch, entropy) + } + + return r.GetBeaconRandomnessV1(ctx, personalization, randEpoch, entropy) +} + +// StateNetworkVersion returns the network version at the given tipset +func (cia *chainInfoAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { + ts, err := cia.chain.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return network.VersionMax, fmt.Errorf("loading tipset %s: %v", tsk, err) + } + return cia.chain.Fork.GetNetworkVersion(ctx, 
ts.Height()), nil +} + +func (cia *chainInfoAPI) StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) { + ts, err := cia.chain.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return address.Undef, fmt.Errorf("loading tipset %s: %v", tsk, err) + } + _, view, err := cia.chain.Stmgr.ParentStateView(ctx, ts) + if err != nil { + return address.Undef, fmt.Errorf("filed to load parent state view:%v", err) + } + + vrs, err := view.LoadVerifregActor(ctx) + if err != nil { + return address.Undef, fmt.Errorf("failed to load verified registry state: %w", err) + } + + return vrs.RootKey() +} + +func (cia *chainInfoAPI) StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { + ts, err := cia.chain.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %v", tsk, err) + } + _, view, err := cia.chain.Stmgr.ParentStateView(ctx, ts) + if err != nil { + return nil, err + } + + aid, err := view.LookupID(ctx, addr) + if err != nil { + log.Warnf("lookup failure %v", err) + return nil, err + } + + vrs, err := view.LoadVerifregActor(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load verified registry state: %w", err) + } + + verified, dcap, err := vrs.VerifierDataCap(aid) + if err != nil { + return nil, fmt.Errorf("looking up verifier: %w", err) + } + if !verified { + return nil, nil + } + + return &dcap, nil +} + +// MessageWait invokes the callback when a message with the given cid appears on chain. +// It will find the message in both the case that it is already on chain and +// the case that it appears in a newly mined block. An error is returned if one is +// encountered or if the context is canceled. Otherwise, it waits forever for the message +// to appear on chain. 
+func (cia *chainInfoAPI) MessageWait(ctx context.Context, msgCid cid.Cid, confidence, lookback abi.ChainEpoch) (*types.ChainMessage, error) { + chainMsg, err := cia.chain.MessageStore.LoadMessage(ctx, msgCid) + if err != nil { + return nil, err + } + return cia.chain.Waiter.Wait(ctx, chainMsg, uint64(confidence), lookback, true) +} + +// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed +func (cia *chainInfoAPI) StateSearchMsg(ctx context.Context, from types.TipSetKey, mCid cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) { + chainMsg, err := cia.chain.MessageStore.LoadMessage(ctx, mCid) + if err != nil { + return nil, err + } + // todo add a api for head tipset directly + head, err := cia.chain.ChainReader.GetTipSet(ctx, from) + if err != nil { + return nil, err + } + msgResult, found, err := cia.chain.Waiter.Find(ctx, chainMsg, lookbackLimit, head, allowReplaced) + if err != nil { + return nil, err + } + + if found { + return &types.MsgLookup{ + Message: mCid, + Receipt: *msgResult.Receipt, + TipSet: msgResult.TS.Key(), + Height: msgResult.TS.Height(), + }, nil + } + return nil, nil +} + +// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the +// message arrives on chain, and gets to the indicated confidence depth. 
+func (cia *chainInfoAPI) StateWaitMsg(ctx context.Context, mCid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) { + chainMsg, err := cia.chain.MessageStore.LoadMessage(ctx, mCid) + if err != nil { + return nil, err + } + msgResult, err := cia.chain.Waiter.Wait(ctx, chainMsg, confidence, lookbackLimit, allowReplaced) + if err != nil { + return nil, err + } + if msgResult != nil { + return &types.MsgLookup{ + Message: mCid, + Receipt: *msgResult.Receipt, + TipSet: msgResult.TS.Key(), + Height: msgResult.TS.Height(), + }, nil + } + return nil, nil +} + +func (cia *chainInfoAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, skipoldmsgs bool, tsk types.TipSetKey) (<-chan []byte, error) { + ts, err := cia.chain.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %v", tsk, err) + } + r, w := io.Pipe() + out := make(chan []byte) + go func() { + bw := bufio.NewWriterSize(w, 1<<20) + + err := cia.chain.ChainReader.Export(ctx, ts, nroots, skipoldmsgs, bw) + bw.Flush() //nolint:errcheck // it is a write to a pipe + w.CloseWithError(err) //nolint:errcheck // it is a pipe + }() + + go func() { + defer close(out) + for { + buf := make([]byte, 1<<20) + n, err := r.Read(buf) + if err != nil && err != io.EOF { + log.Errorf("chain export pipe read failed: %s", err) + return + } + if n > 0 { + select { + case out <- buf[:n]: + case <-ctx.Done(): + log.Warnf("export writer failed: %s", ctx.Err()) + return + } + } + if err == io.EOF { + // send empty slice to indicate correct eof + select { + case out <- []byte{}: + case <-ctx.Done(): + log.Warnf("export writer failed: %s", ctx.Err()) + return + } + + return + } + } + }() + + return out, nil +} + +// ChainGetPath returns a set of revert/apply operations needed to get from +// one tipset to another, for example: +// ``` +// +// to +// ^ +// +// from tAA +// +// ^ ^ +// +// tBA tAB +// +// ^---*--^ +// ^ +// tRR +// +// ``` +// 
Would return `[revert(tBA), apply(tAB), apply(tAA)]` +func (cia *chainInfoAPI) ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*types.HeadChange, error) { + fts, err := cia.chain.ChainReader.GetTipSet(ctx, from) + if err != nil { + return nil, fmt.Errorf("loading from tipset %s: %w", from, err) + } + tts, err := cia.chain.ChainReader.GetTipSet(ctx, to) + if err != nil { + return nil, fmt.Errorf("loading to tipset %s: %w", to, err) + } + + revert, apply, err := chain.ReorgOps(cia.chain.ChainReader.GetTipSet, fts, tts) + if err != nil { + return nil, fmt.Errorf("error getting tipset branches: %w", err) + } + + path := make([]*types.HeadChange, len(revert)+len(apply)) + for i, r := range revert { + path[i] = &types.HeadChange{Type: types.HCRevert, Val: r} + } + for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 { + path[j+len(revert)] = &types.HeadChange{Type: types.HCApply, Val: apply[i]} + } + return path, nil +} + +// StateGetNetworkParams returns current network params +func (cia *chainInfoAPI) StateGetNetworkParams(ctx context.Context) (*types.NetworkParams, error) { + networkName, err := cia.getNetworkName(ctx) + if err != nil { + return nil, err + } + cfg := cia.chain.config.Repo().Config() + params := &types.NetworkParams{ + NetworkName: types.NetworkName(networkName), + BlockDelaySecs: cfg.NetworkParams.BlockDelay, + ConsensusMinerMinPower: abi.NewStoragePower(int64(cfg.NetworkParams.ConsensusMinerMinPower)), + SupportedProofTypes: cfg.NetworkParams.ReplaceProofTypes, + PreCommitChallengeDelay: cfg.NetworkParams.PreCommitChallengeDelay, + ForkUpgradeParams: types.ForkUpgradeParams{ + UpgradeSmokeHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeSmokeHeight, + UpgradeBreezeHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeBreezeHeight, + UpgradeIgnitionHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeIgnitionHeight, + UpgradeLiftoffHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeLiftoffHeight, + UpgradeAssemblyHeight: 
cfg.NetworkParams.ForkUpgradeParam.UpgradeAssemblyHeight, + UpgradeRefuelHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeRefuelHeight, + UpgradeTapeHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeTapeHeight, + UpgradeKumquatHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeKumquatHeight, + BreezeGasTampingDuration: cfg.NetworkParams.ForkUpgradeParam.BreezeGasTampingDuration, + UpgradeCalicoHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeCalicoHeight, + UpgradePersianHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradePersianHeight, + UpgradeOrangeHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeOrangeHeight, + UpgradeClausHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeClausHeight, + UpgradeTrustHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeTrustHeight, + UpgradeNorwegianHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeNorwegianHeight, + UpgradeTurboHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeTurboHeight, + UpgradeHyperdriveHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeHyperdriveHeight, + UpgradeChocolateHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight, + UpgradeOhSnapHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeOhSnapHeight, + UpgradeSkyrHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeSkyrHeight, + UpgradeSharkHeight: cfg.NetworkParams.ForkUpgradeParam.UpgradeSharkHeight, + }, + } + + return params, nil +} + +// StateActorCodeCIDs returns the CIDs of all the builtin actors for the given network version +func (cia *chainInfoAPI) StateActorCodeCIDs(ctx context.Context, nv network.Version) (map[string]cid.Cid, error) { + actorVersion, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return nil, fmt.Errorf("invalid network version") + } + + cids := make(map[string]cid.Cid) + + manifestCid, ok := actors.GetManifest(actorVersion) + if !ok { + return nil, fmt.Errorf("cannot get manifest CID") + } + + cids["_manifest"] = manifestCid + + actorKeys := actors.GetBuiltinActorsKeys(actorVersion) + for _, name := range 
actorKeys { + actorCID, ok := actors.GetActorCodeID(actorVersion, name) + if !ok { + return nil, fmt.Errorf("didn't find actor %v code id for actor version %d", name, + actorVersion) + } + cids[name] = actorCID + } + return cids, nil +} + +// ChainGetGenesis returns the genesis tipset. +func (cia *chainInfoAPI) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { + genb, err := cia.chain.ChainReader.GetGenesisBlock(ctx) + if err != nil { + return nil, err + } + + return types.NewTipSet([]*types.BlockHeader{genb}) +} + +// StateActorManifestCID returns the CID of the builtin actors manifest for the given network version +func (cia *chainInfoAPI) StateActorManifestCID(ctx context.Context, nv network.Version) (cid.Cid, error) { + actorVersion, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return cid.Undef, fmt.Errorf("invalid network version") + } + + c, ok := actors.GetManifest(actorVersion) + if !ok { + return cid.Undef, fmt.Errorf("could not find manifest cid for network version %d, actors version %d", nv, actorVersion) + } + + return c, nil +} + +// StateCall runs the given message and returns its result without any persisted changes. +// +// StateCall applies the message to the tipset's parent state. The +// message is not applied on-top-of the messages in the passed-in +// tipset. 
+func (cia *chainInfoAPI) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*types.InvocResult, error) { + start := time.Now() + ts, err := cia.chain.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %v", tsk, err) + } + ret, err := cia.chain.Stmgr.Call(ctx, msg, ts) + if err != nil { + return nil, err + } + duration := time.Since(start) + + mcid := msg.Cid() + return &types.InvocResult{ + MsgCid: mcid, + Msg: msg, + MsgRct: &ret.Receipt, + ExecutionTrace: types.ExecutionTrace{}, + Duration: duration, + }, nil +} diff --git a/app/submodule/chain/miner_api.go b/app/submodule/chain/miner_api.go new file mode 100644 index 0000000000..51ab7103e0 --- /dev/null +++ b/app/submodule/chain/miner_api.go @@ -0,0 +1,1271 @@ +package chain + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + cbornode "github.com/ipfs/go-ipld-cbor" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm/register" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + _init "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + 
"github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + lminer "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/power" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/reward" + "github.com/filecoin-project/venus/venus-shared/actors/policy" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/utils" +) + +var _ v1api.IMinerState = &minerStateAPI{} + +type minerStateAPI struct { + *ChainSubmodule +} + +// NewMinerStateAPI create miner state api +func NewMinerStateAPI(chain *ChainSubmodule) v1api.IMinerState { + return &minerStateAPI{ChainSubmodule: chain} +} + +// StateMinerSectorAllocated checks if a sector is allocated +func (msa *minerStateAPI) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return false, fmt.Errorf("load Stmgr.ParentStateViewTsk(%s): %v", tsk, err) + } + mas, err := view.LoadMinerState(ctx, maddr) + if err != nil { + return false, fmt.Errorf("failed to load miner actor state: %v", err) + } + return mas.IsAllocated(s) +} + +// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector +func (msa *minerStateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*types.SectorPreCommitOnChainInfo, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset:%s parent state view: %v", tsk, err) + } + + return view.SectorPreCommitInfo(ctx, maddr, n) +} + +// StateSectorGetInfo returns the on-chain info for the specified miner's sector. 
Returns null in case the sector info isn't found +// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate +// expiration epoch +func (msa *minerStateAPI) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*types.SectorOnChainInfo, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %v", tsk, err) + } + + return view.MinerSectorInfo(ctx, maddr, n) +} + +// StateSectorPartition finds deadline/partition with the specified sector +func (msa *minerStateAPI) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorLocation, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loadParentStateViewTsk(%s) failed:%v", tsk.String(), err) + } + + return view.StateSectorPartition(ctx, maddr, sectorNumber) +} + +// StateMinerSectorSize get miner sector size +func (msa *minerStateAPI) StateMinerSectorSize(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (abi.SectorSize, error) { + // TODO: update storage-fsm to just StateMinerSectorAllocated + mi, err := msa.StateMinerInfo(ctx, maddr, tsk) + if err != nil { + return 0, err + } + return mi.SectorSize, nil +} + +// StateMinerInfo returns info about the indicated miner +func (msa *minerStateAPI) StateMinerInfo(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.MinerInfo, error) { + ts, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return types.MinerInfo{}, fmt.Errorf("loading view %s: %v", tsk, err) + } + + nv := msa.Fork.GetNetworkVersion(ctx, ts.Height()) + minfo, err := view.MinerInfo(ctx, maddr, nv) + if err != nil { + return types.MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(minfo.PeerId); err == nil { + pid = &peerID + 
} + + ret := types.MinerInfo{ + Owner: minfo.Owner, + Worker: minfo.Worker, + ControlAddresses: minfo.ControlAddresses, + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + PeerId: pid, + Multiaddrs: minfo.Multiaddrs, + WindowPoStProofType: minfo.WindowPoStProofType, + SectorSize: minfo.SectorSize, + WindowPoStPartitionSectors: minfo.WindowPoStPartitionSectors, + ConsensusFaultElapsed: minfo.ConsensusFaultElapsed, + Beneficiary: minfo.Beneficiary, + BeneficiaryTerm: &minfo.BeneficiaryTerm, + PendingBeneficiaryTerm: minfo.PendingBeneficiaryTerm, + } + + if minfo.PendingWorkerKey != nil { + ret.NewWorker = minfo.PendingWorkerKey.NewWorker + ret.WorkerChangeEpoch = minfo.PendingWorkerKey.EffectiveAt + } + + return ret, nil +} + +// StateMinerWorkerAddress get miner worker address +func (msa *minerStateAPI) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (address.Address, error) { + // TODO: update storage-fsm to just StateMinerInfo + mi, err := msa.StateMinerInfo(ctx, maddr, tsk) + if err != nil { + return address.Undef, err + } + return mi.Worker, nil +} + +// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner +func (msa *minerStateAPI) StateMinerRecoveries(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return bitfield.BitField{}, fmt.Errorf("loading view %s: %v", tsk, err) + } + + mas, err := view.LoadMinerState(ctx, maddr) + if err != nil { + return bitfield.BitField{}, fmt.Errorf("failed to load miner actor state: %v", err) + } + + return lminer.AllPartSectors(mas, lminer.Partition.RecoveringSectors) +} + +// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner +func (msa *minerStateAPI) StateMinerFaults(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + _, view, err := 
msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return bitfield.BitField{}, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + mas, err := view.LoadMinerState(ctx, maddr) + if err != nil { + return bitfield.BitField{}, fmt.Errorf("failed to load miner actor state: %v", err) + } + + return lminer.AllPartSectors(mas, lminer.Partition.FaultySectors) +} + +func (msa *minerStateAPI) StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, endTsk types.TipSetKey) ([]*types.Fault, error) { + return nil, fmt.Errorf("fixme") +} + +// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period +// and returns the deadline-related calculations. +func (msa *minerStateAPI) StateMinerProvingDeadline(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (*dline.Info, error) { + ts, err := msa.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("GetTipset failed:%v", err) + } + + _, view, err := msa.Stmgr.ParentStateView(ctx, ts) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + mas, err := view.LoadMinerState(ctx, maddr) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + di, err := mas.DeadlineInfo(ts.Height()) + if err != nil { + return nil, fmt.Errorf("failed to get deadline info: %v", err) + } + + return di.NextNotElapsed(), nil +} + +// StateMinerPartitions returns all partitions in the specified deadline +func (msa *minerStateAPI) StateMinerPartitions(ctx context.Context, maddr address.Address, dlIdx uint64, tsk types.TipSetKey) ([]types.Partition, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + mas, err := view.LoadMinerState(ctx, maddr) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + dl, err := mas.LoadDeadline(dlIdx) + if err != nil { + 
return nil, fmt.Errorf("failed to load the deadline: %v", err) + } + + var out []types.Partition + err = dl.ForEachPartition(func(_ uint64, part lminer.Partition) error { + allSectors, err := part.AllSectors() + if err != nil { + return fmt.Errorf("getting AllSectors: %v", err) + } + + faultySectors, err := part.FaultySectors() + if err != nil { + return fmt.Errorf("getting FaultySectors: %v", err) + } + + recoveringSectors, err := part.RecoveringSectors() + if err != nil { + return fmt.Errorf("getting RecoveringSectors: %v", err) + } + + liveSectors, err := part.LiveSectors() + if err != nil { + return fmt.Errorf("getting LiveSectors: %v", err) + } + + activeSectors, err := part.ActiveSectors() + if err != nil { + return fmt.Errorf("getting ActiveSectors: %v", err) + } + + out = append(out, types.Partition{ + AllSectors: allSectors, + FaultySectors: faultySectors, + RecoveringSectors: recoveringSectors, + LiveSectors: liveSectors, + ActiveSectors: activeSectors, + }) + return nil + }) + + return out, err +} + +// StateMinerDeadlines returns all the proving deadlines for the given miner +func (msa *minerStateAPI) StateMinerDeadlines(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]types.Deadline, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + mas, err := view.LoadMinerState(ctx, maddr) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + deadlines, err := mas.NumDeadlines() + if err != nil { + return nil, fmt.Errorf("getting deadline count: %v", err) + } + + out := make([]types.Deadline, deadlines) + if err := mas.ForEachDeadline(func(i uint64, dl lminer.Deadline) error { + ps, err := dl.PartitionsPoSted() + if err != nil { + return err + } + + l, err := dl.DisputableProofCount() + if err != nil { + return err + } + + out[i] = types.Deadline{ + PostSubmissions: ps, + DisputableProofCount: l, + 
} + return nil + }); err != nil { + return nil, err + } + return out, nil +} + +// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included. +func (msa *minerStateAPI) StateMinerSectors(ctx context.Context, maddr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + mas, err := view.LoadMinerState(ctx, maddr) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + return mas.LoadSectors(sectorNos) +} + +// StateMarketStorageDeal returns information about the indicated deal +func (msa *minerStateAPI) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*types.MarketDeal, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + mas, err := view.LoadMarketState(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + proposals, err := mas.Proposals() + if err != nil { + return nil, err + } + + proposal, found, err := proposals.Get(dealID) + + if err != nil { + return nil, err + } else if !found { + return nil, fmt.Errorf("deal %d not found", dealID) + } + + states, err := mas.States() + if err != nil { + return nil, err + } + + st, found, err := states.Get(dealID) + if err != nil { + return nil, err + } + + if !found { + st = market.EmptyDealState() + } + + return &types.MarketDeal{ + Proposal: *proposal, + State: *st, + }, nil +} + +// StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if +// pending allocation is not found. 
+func (msa *minerStateAPI) StateGetAllocationForPendingDeal(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*types.Allocation, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + st, err := view.LoadMarketState(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + allocationID, err := st.GetAllocationIdForPendingDeal(dealID) + if err != nil { + return nil, err + } + + if allocationID == types.NoAllocationID { + return nil, nil + } + + dealState, err := msa.StateMarketStorageDeal(ctx, dealID, tsk) + if err != nil { + return nil, err + } + + return msa.StateGetAllocation(ctx, dealState.Proposal.Client, allocationID, tsk) +} + +// StateGetAllocation returns the allocation for a given address and allocation ID. +func (msa *minerStateAPI) StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationID types.AllocationId, tsk types.TipSetKey) (*types.Allocation, error) { + idAddr, err := msa.ChainSubmodule.API().StateLookupID(ctx, clientAddr, tsk) + if err != nil { + return nil, err + } + + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + st, err := view.LoadVerifregActor(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + allocation, found, err := st.GetAllocation(idAddr, allocationID) + if err != nil { + return nil, fmt.Errorf("getting allocation: %w", err) + } + if !found { + return nil, nil + } + + return allocation, nil +} + +// StateGetAllocations returns the all the allocations for a given client. 
+func (msa *minerStateAPI) StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[types.AllocationId]types.Allocation, error) { + idAddr, err := msa.ChainSubmodule.API().StateLookupID(ctx, clientAddr, tsk) + if err != nil { + return nil, err + } + + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + st, err := view.LoadVerifregActor(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + allocations, err := st.GetAllocations(idAddr) + if err != nil { + return nil, fmt.Errorf("getting allocations: %w", err) + } + + return allocations, nil +} + +// StateGetClaim returns the claim for a given address and claim ID. +func (msa *minerStateAPI) StateGetClaim(ctx context.Context, providerAddr address.Address, claimID types.ClaimId, tsk types.TipSetKey) (*types.Claim, error) { + idAddr, err := msa.ChainSubmodule.API().StateLookupID(ctx, providerAddr, tsk) + if err != nil { + return nil, err + } + + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + st, err := view.LoadVerifregActor(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + claim, found, err := st.GetClaim(idAddr, claimID) + if err != nil { + return nil, fmt.Errorf("getting claim: %w", err) + } + if !found { + return nil, nil + } + + return claim, nil +} + +// StateGetClaims returns the all the claims for a given provider. 
+func (msa *minerStateAPI) StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[types.ClaimId]types.Claim, error) { + idAddr, err := msa.ChainSubmodule.API().StateLookupID(ctx, providerAddr, tsk) + if err != nil { + return nil, err + } + + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + st, err := view.LoadVerifregActor(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + claims, err := st.GetClaims(idAddr) + if err != nil { + return nil, fmt.Errorf("getting claims: %w", err) + } + + return claims, nil +} + +// StateComputeDataCID computes DataCID from a set of on-chain deals +func (msa *minerStateAPI) StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) { + nv, err := msa.API().StateNetworkVersion(ctx, tsk) + if err != nil { + return cid.Cid{}, err + } + + var ccparams []byte + if nv < network.Version13 { + ccparams, err = actors.SerializeParams(&market2.ComputeDataCommitmentParams{ + DealIDs: deals, + SectorType: sectorType, + }) + } else { + ccparams, err = actors.SerializeParams(&market5.ComputeDataCommitmentParams{ + Inputs: []*market5.SectorDataSpec{ + { + DealIDs: deals, + SectorType: sectorType, + }, + }, + }) + } + + if err != nil { + return cid.Undef, fmt.Errorf("computing params for ComputeDataCommitment: %w", err) + } + + ccmt := &types.Message{ + To: market.Address, + From: maddr, + Value: types.NewInt(0), + Method: market.Methods.ComputeDataCommitment, + Params: ccparams, + } + r, err := msa.API().StateCall(ctx, ccmt, tsk) + if err != nil { + return cid.Undef, fmt.Errorf("calling ComputeDataCommitment: %w", err) + } + if r.MsgRct.ExitCode != 0 { + return cid.Undef, fmt.Errorf("receipt for ComputeDataCommitment had exit code %d", r.MsgRct.ExitCode) + } + + if nv < 
network.Version13 { + var c cbg.CborCid + if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { + return cid.Undef, fmt.Errorf("failed to unmarshal CBOR to CborCid: %w", err) + } + + return cid.Cid(c), nil + } + + var cr market5.ComputeDataCommitmentReturn + if err := cr.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { + return cid.Undef, fmt.Errorf("failed to unmarshal CBOR to CborCid: %w", err) + } + + if len(cr.CommDs) != 1 { + return cid.Undef, fmt.Errorf("CommD output must have 1 entry") + } + + return cid.Cid(cr.CommDs[0]), nil +} + +var ( + initialPledgeNum = big.NewInt(110) + initialPledgeDen = big.NewInt(100) +) + +// StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector +func (msa *minerStateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) { + ts, err := msa.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return big.Int{}, err + } + + var sTree *tree.State + _, sTree, err = msa.Stmgr.ParentState(ctx, ts) + if err != nil { + return big.Int{}, fmt.Errorf("ParentState failed:%v", err) + } + + ssize, err := pci.SealProof.SectorSize() + if err != nil { + return big.Int{}, fmt.Errorf("failed to get resolve size: %v", err) + } + + store := msa.ChainReader.Store(ctx) + var sectorWeight abi.StoragePower + if msa.Fork.GetNetworkVersion(ctx, ts.Height()) <= network.Version16 { + if act, found, err := sTree.GetActor(ctx, market.Address); err != nil || !found { + return big.Int{}, fmt.Errorf("loading market actor %s: %v", maddr, err) + } else if s, err := market.Load(store, act); err != nil { + return big.Int{}, fmt.Errorf("loading market actor state %s: %v", maddr, err) + } else if w, vw, err := s.VerifyDealsForActivation(maddr, pci.DealIDs, ts.Height(), pci.Expiration); err != nil { + return big.Int{}, fmt.Errorf("verifying deals for activation: %v", err) + } else { + // NB: not 
exactly accurate, but should always lead us to *over* estimate, not under + duration := pci.Expiration - ts.Height() + sectorWeight = builtin.QAPowerForWeight(ssize, duration, w, vw) + } + } else { + sectorWeight = types.QAPowerMax(ssize) + } + + var powerSmoothed builtin.FilterEstimate + if act, found, err := sTree.GetActor(ctx, power.Address); err != nil || !found { + return big.Int{}, fmt.Errorf("loading power actor: %v", err) + } else if s, err := power.Load(store, act); err != nil { + return big.Int{}, fmt.Errorf("loading power actor state: %v", err) + } else if p, err := s.TotalPowerSmoothed(); err != nil { + return big.Int{}, fmt.Errorf("failed to determine total power: %v", err) + } else { + powerSmoothed = p + } + + rewardActor, found, err := sTree.GetActor(ctx, reward.Address) + if err != nil || !found { + return big.Int{}, fmt.Errorf("loading miner actor: %v", err) + } + + rewardState, err := reward.Load(store, rewardActor) + if err != nil { + return big.Int{}, fmt.Errorf("loading reward actor state: %v", err) + } + + deposit, err := rewardState.PreCommitDepositForPower(powerSmoothed, sectorWeight) + if err != nil { + return big.Zero(), fmt.Errorf("calculating precommit deposit: %v", err) + } + + return big.Div(big.Mul(deposit, initialPledgeNum), initialPledgeDen), nil +} + +// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector +func (msa *minerStateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) { + ts, err := msa.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return big.Int{}, fmt.Errorf("loading tipset %s: %v", tsk, err) + } + + _, state, err := msa.Stmgr.ParentState(ctx, ts) + if err != nil { + return big.Int{}, fmt.Errorf("loading tipset(%s) parent state failed: %v", tsk, err) + } + + ssize, err := pci.SealProof.SectorSize() + if err != nil { + return big.Int{}, fmt.Errorf("failed to get 
resolve size: %v", err) + } + + store := msa.ChainReader.Store(ctx) + var sectorWeight abi.StoragePower + if act, found, err := state.GetActor(ctx, market.Address); err != nil || !found { + return big.Int{}, fmt.Errorf("loading miner actor %s: %v", maddr, err) + } else if s, err := market.Load(store, act); err != nil { + return big.Int{}, fmt.Errorf("loading market actor state %s: %v", maddr, err) + } else if w, vw, err := s.VerifyDealsForActivation(maddr, pci.DealIDs, ts.Height(), pci.Expiration); err != nil { + return big.Int{}, fmt.Errorf("verifying deals for activation: %v", err) + } else { + // NB: not exactly accurate, but should always lead us to *over* estimate, not under + duration := pci.Expiration - ts.Height() + sectorWeight = builtin.QAPowerForWeight(ssize, duration, w, vw) + } + + var ( + powerSmoothed builtin.FilterEstimate + pledgeCollateral abi.TokenAmount + ) + if act, found, err := state.GetActor(ctx, power.Address); err != nil || !found { + return big.Int{}, fmt.Errorf("loading miner actor: %v", err) + } else if s, err := power.Load(store, act); err != nil { + return big.Int{}, fmt.Errorf("loading power actor state: %v", err) + } else if p, err := s.TotalPowerSmoothed(); err != nil { + return big.Int{}, fmt.Errorf("failed to determine total power: %v", err) + } else if c, err := s.TotalLocked(); err != nil { + return big.Int{}, fmt.Errorf("failed to determine pledge collateral: %v", err) + } else { + powerSmoothed = p + pledgeCollateral = c + } + + rewardActor, found, err := state.GetActor(ctx, reward.Address) + if err != nil || !found { + return big.Int{}, fmt.Errorf("loading miner actor: %v", err) + } + + rewardState, err := reward.Load(store, rewardActor) + if err != nil { + return big.Int{}, fmt.Errorf("loading reward actor state: %v", err) + } + + circSupply, err := msa.StateVMCirculatingSupplyInternal(ctx, ts.Key()) + if err != nil { + return big.Zero(), fmt.Errorf("getting circulating supply: %v", err) + } + + initialPledge, err := 
rewardState.InitialPledgeForPower( + sectorWeight, + pledgeCollateral, + &powerSmoothed, + circSupply.FilCirculating, + ) + if err != nil { + return big.Zero(), fmt.Errorf("calculating initial pledge: %v", err) + } + + return big.Div(big.Mul(initialPledge, initialPledgeNum), initialPledgeDen), nil +} + +// StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset. +// This is the value reported by the runtime interface to actors code. +func (msa *minerStateAPI) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (types.CirculatingSupply, error) { + ts, err := msa.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return types.CirculatingSupply{}, err + } + + _, sTree, err := msa.Stmgr.ParentState(ctx, ts) + if err != nil { + return types.CirculatingSupply{}, err + } + + return msa.ChainReader.GetCirculatingSupplyDetailed(ctx, ts.Height(), sTree) +} + +// StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset. +// This is not used anywhere in the protocol itself, and is only for external consumption. 
+func (msa *minerStateAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { + // stmgr.ParentStateTsk make sure the parent state specified by 'tsk' exists + parent, _, err := msa.Stmgr.ParentStateTsk(ctx, tsk) + if err != nil { + return abi.TokenAmount{}, fmt.Errorf("tipset(%s) parent state failed:%v", + tsk.String(), err) + } + + return msa.ChainReader.StateCirculatingSupply(ctx, parent.Key()) +} + +// StateMarketDeals returns information about every deal in the Storage Market +func (msa *minerStateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]*types.MarketDeal, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%w", err) + } + return view.StateMarketDeals(ctx, tsk) +} + +// StateMinerActiveSectors returns info about sectors that a given miner is actively proving. +func (msa *minerStateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) { // TODO: only used in cli + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + return view.StateMinerActiveSectors(ctx, maddr, tsk) +} + +// StateLookupID retrieves the ID address of the given address +func (msa *minerStateAPI) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { + _, state, err := msa.Stmgr.ParentStateTsk(ctx, tsk) + if err != nil { + return address.Undef, fmt.Errorf("load state failed: %v", err) + } + + return state.LookupID(addr) +} + +func (msa *minerStateAPI) StateLookupRobustAddress(ctx context.Context, idAddr address.Address, tsk types.TipSetKey) (address.Address, error) { + idAddrDecoded, err := address.IDFromAddress(idAddr) + if err != nil { + return address.Undef, fmt.Errorf("failed to decode provided address as id addr: 
%w", err) + } + + cst := cbornode.NewCborStore(msa.ChainReader.Blockstore()) + wrapStore := adt.WrapStore(ctx, cst) + + _, state, err := msa.Stmgr.ParentStateTsk(ctx, tsk) + if err != nil { + return address.Undef, fmt.Errorf("load state failed: %w", err) + } + + initActor, found, err := state.GetActor(ctx, _init.Address) + if err != nil { + return address.Undef, fmt.Errorf("load init actor: %w", err) + } + if !found { + return address.Undef, fmt.Errorf("not found actor: %w", err) + } + + initState, err := _init.Load(wrapStore, initActor) + if err != nil { + return address.Undef, fmt.Errorf("load init state: %w", err) + } + robustAddr := address.Undef + + err = initState.ForEachActor(func(id abi.ActorID, addr address.Address) error { + if uint64(id) == idAddrDecoded { + robustAddr = addr + // Hacky way to early return from ForEach + return errors.New("robust address found") + } + return nil + }) + if robustAddr == address.Undef { + if err == nil { + return address.Undef, fmt.Errorf("address %s not found", idAddr.String()) + } + return address.Undef, fmt.Errorf("finding address: %w", err) + } + return robustAddr, nil +} + +// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor +func (msa *minerStateAPI) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + return view.StateListMiners(ctx, tsk) +} + +// StateListActors returns the addresses of every actor in the state +func (msa *minerStateAPI) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + _, stat, err := msa.Stmgr.TipsetStateTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("load tipset state from key:%s failed:%v", + tsk.String(), err) + } + var out []address.Address + err = stat.ForEach(func(addr tree.ActorKey, act *types.Actor) error { + out 
= append(out, addr) + return nil + }) + if err != nil { + return nil, err + } + + return out, nil +} + +// StateMinerPower returns the power of the indicated miner +func (msa *minerStateAPI) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.MinerPower, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + mp, net, hmp, err := view.StateMinerPower(ctx, addr, tsk) + if err != nil { + return nil, err + } + + return &types.MinerPower{ + MinerPower: mp, + TotalPower: net, + HasMinPower: hmp, + }, nil +} + +// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent +func (msa *minerStateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (big.Int, error) { + ts, err := msa.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return big.Int{}, fmt.Errorf("failed to get tipset for %s, %v", tsk.String(), err) + } + _, view, err := msa.Stmgr.ParentStateView(ctx, ts) + if err != nil { + return big.Int{}, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + return view.StateMinerAvailableBalance(ctx, maddr, ts) +} + +// StateSectorExpiration returns epoch at which given sector will expire +func (msa *minerStateAPI) StateSectorExpiration(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorExpiration, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + return view.StateSectorExpiration(ctx, maddr, sectorNumber, tsk) +} + +// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set +func (msa *minerStateAPI) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MinerSectors, error) { + _, view, err := 
msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return types.MinerSectors{}, fmt.Errorf("Stmgr.ParentStateViewTsk failed:%v", err) + } + + mas, err := view.LoadMinerState(ctx, addr) + if err != nil { + return types.MinerSectors{}, err + } + + var activeCount, liveCount, faultyCount uint64 + if err := mas.ForEachDeadline(func(_ uint64, dl lminer.Deadline) error { + return dl.ForEachPartition(func(_ uint64, part lminer.Partition) error { + if active, err := part.ActiveSectors(); err != nil { + return err + } else if count, err := active.Count(); err != nil { + return err + } else { + activeCount += count + } + if live, err := part.LiveSectors(); err != nil { + return err + } else if count, err := live.Count(); err != nil { + return err + } else { + liveCount += count + } + if faulty, err := part.FaultySectors(); err != nil { + return err + } else if count, err := faulty.Count(); err != nil { + return err + } else { + faultyCount += count + } + return nil + }) + }); err != nil { + return types.MinerSectors{}, err + } + return types.MinerSectors{Live: liveCount, Active: activeCount, Faulty: faultyCount}, nil +} + +// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market +func (msa *minerStateAPI) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MarketBalance, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return types.MarketBalanceNil, fmt.Errorf("loading view %s: %v", tsk, err) + } + + mstate, err := view.LoadMarketState(ctx) + if err != nil { + return types.MarketBalanceNil, err + } + + addr, err = view.LookupID(ctx, addr) + if err != nil { + return types.MarketBalanceNil, err + } + + var out types.MarketBalance + + et, err := mstate.EscrowTable() + if err != nil { + return types.MarketBalanceNil, err + } + out.Escrow, err = et.Get(addr) + if err != nil { + return types.MarketBalanceNil, fmt.Errorf("getting escrow balance: %v", 
err) + } + + lt, err := mstate.LockedTable() + if err != nil { + return types.MarketBalanceNil, err + } + out.Locked, err = lt.Get(addr) + if err != nil { + return types.MarketBalanceNil, fmt.Errorf("getting locked balance: %v", err) + } + + return out, nil +} + +var ( + dealProviderCollateralNum = types.NewInt(110) + dealProviderCollateralDen = types.NewInt(100) +) + +// StateDealProviderCollateralBounds returns the min and max collateral a storage provider +// can issue. It takes the deal size and verified status as parameters. +func (msa *minerStateAPI) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (types.DealCollateralBounds, error) { + ts, _, view, err := msa.Stmgr.StateViewTsk(ctx, tsk) + if err != nil { + return types.DealCollateralBounds{}, fmt.Errorf("loading state view %s: %v", tsk, err) + } + + pst, err := view.LoadPowerState(ctx) + if err != nil { + return types.DealCollateralBounds{}, fmt.Errorf("failed to load power actor state: %v", err) + } + + rst, err := view.LoadRewardState(ctx) + if err != nil { + return types.DealCollateralBounds{}, fmt.Errorf("failed to load reward actor state: %v", err) + } + + circ, err := msa.StateVMCirculatingSupplyInternal(ctx, ts.Key()) + if err != nil { + return types.DealCollateralBounds{}, fmt.Errorf("getting total circulating supply: %v", err) + } + + powClaim, err := pst.TotalPower() + if err != nil { + return types.DealCollateralBounds{}, fmt.Errorf("getting total power: %v", err) + } + + rewPow, err := rst.ThisEpochBaselinePower() + if err != nil { + return types.DealCollateralBounds{}, fmt.Errorf("getting reward baseline power: %v", err) + } + + min, max, err := policy.DealProviderCollateralBounds(size, + verified, + powClaim.RawBytePower, + powClaim.QualityAdjPower, + rewPow, + circ.FilCirculating, + msa.Fork.GetNetworkVersion(ctx, ts.Height())) + if err != nil { + return types.DealCollateralBounds{}, fmt.Errorf("getting deal provider coll 
bounds: %v", err) + } + return types.DealCollateralBounds{ + Min: types.BigDiv(types.BigMul(min, dealProviderCollateralNum), dealProviderCollateralDen), + Max: max, + }, nil +} + +// StateVerifiedClientStatus returns the data cap for the given address. +// Returns zero if there is no entry in the data cap table for the +// address. +func (msa *minerStateAPI) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { + _, _, view, err := msa.Stmgr.StateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading state view %s: %v", tsk, err) + } + + aid, err := view.LookupID(ctx, addr) + if err != nil { + return nil, fmt.Errorf("loook up id of %s : %v", addr, err) + } + + nv, err := msa.ChainSubmodule.API().StateNetworkVersion(ctx, tsk) + if err != nil { + return nil, err + } + + av, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return nil, err + } + + var dcap abi.StoragePower + var verified bool + if av <= 8 { + vrs, err := view.LoadVerifregActor(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load verified registry state: %v", err) + } + + verified, dcap, err = vrs.VerifiedClientDataCap(aid) + if err != nil { + return nil, fmt.Errorf("looking up verified client: %w", err) + } + } else { + dcs, err := view.LoadDatacapState(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load datacap actor state: %w", err) + } + + verified, dcap, err = dcs.VerifiedClientDataCap(aid) + if err != nil { + return nil, fmt.Errorf("looking up verified client: %w", err) + } + } + + if !verified { + return nil, nil + } + + return &dcap, nil +} + +func (msa *minerStateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid.Cid) (map[string]types.Actor, error) { + store := msa.ChainReader.Store(ctx) + + oldTree, err := tree.LoadState(ctx, store, old) + if err != nil { + return nil, fmt.Errorf("failed to load old state tree: %w", err) + } + + newTree, err := tree.LoadState(ctx, 
store, new) + if err != nil { + return nil, fmt.Errorf("failed to load new state tree: %w", err) + } + + return tree.Diff(oldTree, newTree) +} + +func (msa *minerStateAPI) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.ActorState, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset:%s parent state view: %v", tsk, err) + } + + act, err := view.LoadActor(ctx, actor) + if err != nil { + return nil, err + } + + blk, err := msa.ChainReader.Blockstore().Get(ctx, act.Head) + if err != nil { + return nil, fmt.Errorf("getting actor head: %w", err) + } + + oif, err := register.DumpActorState(register.GetDefaultActros(), act, blk.RawData()) + if err != nil { + return nil, fmt.Errorf("dumping actor state (a:%s): %w", actor, err) + } + + return &types.ActorState{ + Balance: act.Balance, + Code: act.Code, + State: oif, + }, nil +} + +func (msa *minerStateAPI) StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset:%s parent state view: %v", tsk, err) + } + + act, err := view.LoadActor(ctx, toAddr) + if err != nil { + return nil, err + } + + methodMeta, found := utils.MethodsMap[act.Code][method] + if !found { + return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code) + } + + paramType := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORUnmarshaler) + + if err = paramType.UnmarshalCBOR(bytes.NewReader(params)); err != nil { + return nil, err + } + + return paramType, nil +} + +func (msa *minerStateAPI) StateEncodeParams(ctx context.Context, toActCode cid.Cid, method abi.MethodNum, params json.RawMessage) ([]byte, error) { + methodMeta, found := utils.MethodsMap[toActCode][method] + if !found { + return nil, fmt.Errorf("method %d not found 
on actor %s", method, toActCode) + } + + paramType := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORUnmarshaler) + + if err := json.Unmarshal(params, ¶mType); err != nil { + return nil, fmt.Errorf("json unmarshal: %w", err) + } + + var cbb bytes.Buffer + if err := paramType.(cbor.Marshaler).MarshalCBOR(&cbb); err != nil { + return nil, fmt.Errorf("cbor marshal: %w", err) + } + + return cbb.Bytes(), nil +} + +func (msa *minerStateAPI) StateListMessages(ctx context.Context, match *types.MessageMatch, tsk types.TipSetKey, toheight abi.ChainEpoch) ([]cid.Cid, error) { + ts, err := msa.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + + if ts == nil { + ts = msa.ChainReader.GetHead() + } + + if match.To == address.Undef && match.From == address.Undef { + return nil, fmt.Errorf("must specify at least To or From in message filter") + } else if match.To != address.Undef { + _, err := msa.StateLookupID(ctx, match.To, tsk) + + // if the recipient doesn't exist at the start point, we're not gonna find any matches + if errors.Is(err, types.ErrActorNotFound) { + return nil, nil + } + + if err != nil { + return nil, fmt.Errorf("looking up match.To: %w", err) + } + } else if match.From != address.Undef { + _, err := msa.StateLookupID(ctx, match.From, tsk) + + // if the sender doesn't exist at the start point, we're not gonna find any matches + if errors.Is(err, types.ErrActorNotFound) { + return nil, nil + } + + if err != nil { + return nil, fmt.Errorf("looking up match.From: %w", err) + } + } + + // TODO: This should probably match on both ID and robust address, no? 
+ matchFunc := func(msg *types.Message) bool { + if match.From != address.Undef && match.From != msg.From { + return false + } + + if match.To != address.Undef && match.To != msg.To { + return false + } + + return true + } + + var out []cid.Cid + for ts.Height() >= toheight { + msgs, err := msa.MessageStore.MessagesForTipset(ts) + if err != nil { + return nil, fmt.Errorf("failed to get messages for tipset (%s): %w", ts.Key(), err) + } + + for _, msg := range msgs { + if matchFunc(msg.VMMessage()) { + out = append(out, msg.Cid()) + } + } + + if ts.Height() == 0 { + break + } + + next, err := msa.ChainReader.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, fmt.Errorf("loading next tipset: %w", err) + } + + ts = next + } + + return out, nil +} + +// StateMinerAllocated returns a bitfield containing all sector numbers marked as allocated in miner state +func (msa *minerStateAPI) StateMinerAllocated(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*bitfield.BitField, error) { + _, view, err := msa.Stmgr.ParentStateViewTsk(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset:%s parent state view: %v", tsk, err) + } + + act, err := view.LoadActor(ctx, addr) + if err != nil { + return nil, err + } + mas, err := lminer.Load(msa.ChainReader.Store(ctx), act) + if err != nil { + return nil, err + } + return mas.GetAllocatedSectors() +} diff --git a/app/submodule/chain/v0api/chaininfo_v0api.go b/app/submodule/chain/v0api/chaininfo_v0api.go new file mode 100644 index 0000000000..80c8d47bae --- /dev/null +++ b/app/submodule/chain/v0api/chaininfo_v0api.go @@ -0,0 +1,76 @@ +package v0api + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-address" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" + + 
"github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var _ v0api.IChain = &WrapperV1IChain{} + +type WrapperV1IChain struct { //nolint + v1api.IChain +} + +func (a *WrapperV1IChain) StateSearchMsg(ctx context.Context, msg cid.Cid) (*types.MsgLookup, error) { + return a.IChain.StateSearchMsg(ctx, types.EmptyTSK, msg, constants.LookbackNoLimit, true) +} + +func (a *WrapperV1IChain) StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*types.MsgLookup, error) { + return a.IChain.StateSearchMsg(ctx, types.EmptyTSK, msg, limit, true) +} + +func (a *WrapperV1IChain) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*types.MsgLookup, error) { + return a.IChain.StateWaitMsg(ctx, msg, confidence, constants.LookbackNoLimit, true) +} + +func (a *WrapperV1IChain) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, limit abi.ChainEpoch) (*types.MsgLookup, error) { + return a.IChain.StateWaitMsg(ctx, msg, confidence, limit, true) +} + +func (a *WrapperV1IChain) StateGetReceipt(ctx context.Context, msg cid.Cid, from types.TipSetKey) (*types.MessageReceipt, error) { + ml, err := a.IChain.StateSearchMsg(ctx, from, msg, constants.LookbackNoLimit, true) + if err != nil { + return nil, err + } + + if ml == nil { + return nil, nil + } + + return &ml.Receipt, nil +} + +func (a *WrapperV1IChain) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) { + return a.StateGetBeaconEntry(ctx, epoch) +} + +func (a *WrapperV1IChain) ChainGetRandomnessFromBeacon(ctx context.Context, key types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return a.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, key) +} + +func (a *WrapperV1IChain) ChainGetRandomnessFromTickets(ctx context.Context, key types.TipSetKey, personalization crypto.DomainSeparationTag, 
randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return a.StateGetRandomnessFromTickets(ctx, personalization, randEpoch, entropy, key) +} + +func (a *WrapperV1IChain) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (types.SectorPreCommitOnChainInfo, error) { + pi, err := a.IChain.StateSectorPreCommitInfo(ctx, maddr, n, tsk) + if err != nil { + return types.SectorPreCommitOnChainInfo{}, err + } + if pi == nil { + return types.SectorPreCommitOnChainInfo{}, fmt.Errorf("precommit info does not exist") + } + + return *pi, nil +} diff --git a/app/submodule/common/common.go b/app/submodule/common/common.go new file mode 100644 index 0000000000..7d0f1544cd --- /dev/null +++ b/app/submodule/common/common.go @@ -0,0 +1,128 @@ +package common + +import ( + "context" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + + chain2 "github.com/filecoin-project/venus/app/submodule/chain" + apiwrapper "github.com/filecoin-project/venus/app/submodule/common/v0api" + "github.com/filecoin-project/venus/app/submodule/network" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/net" + "github.com/filecoin-project/venus/venus-shared/api/chain" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var _ v1api.ICommon = (*CommonModule)(nil) + +type CommonModule struct { // nolint + chainModule *chain2.ChainSubmodule + netModule *network.NetworkSubmodule + blockDelaySecs uint64 + start time.Time +} + +func NewCommonModule(chainModule *chain2.ChainSubmodule, netModule *network.NetworkSubmodule, blockDelaySecs uint64) *CommonModule { + return &CommonModule{ + chainModule: chainModule, + netModule: netModule, + blockDelaySecs: blockDelaySecs, + start: time.Now(), + } +} + +func (cm *CommonModule) Version(ctx context.Context) 
(types.Version, error) { + return types.Version{ + Version: constants.UserVersion(), + APIVersion: chain.FullAPIVersion1, + }, nil +} + +func (cm *CommonModule) NodeStatus(ctx context.Context, inclChainStatus bool) (status types.NodeStatus, err error) { + curTS, err := cm.chainModule.API().ChainHead(ctx) + if err != nil { + return status, err + } + + status.SyncStatus.Epoch = uint64(curTS.Height()) + timestamp := time.Unix(int64(curTS.MinTimestamp()), 0) + delta := time.Since(timestamp).Seconds() + status.SyncStatus.Behind = uint64(delta / float64(cm.blockDelaySecs)) + + // get peers in the messages and blocks topics + peersMsgs := make(map[peer.ID]struct{}) + peersBlocks := make(map[peer.ID]struct{}) + + for _, p := range cm.netModule.Pubsub.ListPeers(types.MessageTopic(cm.netModule.NetworkName)) { + peersMsgs[p] = struct{}{} + } + + for _, p := range cm.netModule.Pubsub.ListPeers(types.BlockTopic(cm.netModule.NetworkName)) { + peersBlocks[p] = struct{}{} + } + + // get scores for all connected and recent peers + scores, err := cm.netModule.API().NetPubsubScores(ctx) + if err != nil { + return status, err + } + + for _, score := range scores { + if score.Score.Score > net.PublishScoreThreshold { + _, inMsgs := peersMsgs[score.ID] + if inMsgs { + status.PeerStatus.PeersToPublishMsgs++ + } + + _, inBlocks := peersBlocks[score.ID] + if inBlocks { + status.PeerStatus.PeersToPublishBlocks++ + } + } + } + + if inclChainStatus && status.SyncStatus.Epoch > uint64(constants.Finality) { + blockCnt := 0 + ts := curTS + + for i := 0; i < 100; i++ { + blockCnt += len(ts.Blocks()) + tsk := ts.Parents() + ts, err = cm.chainModule.API().ChainGetTipSet(ctx, tsk) + if err != nil { + return status, err + } + } + + status.ChainStatus.BlocksPerTipsetLast100 = float64(blockCnt) / 100 + + for i := 100; i < int(constants.Finality); i++ { + blockCnt += len(ts.Blocks()) + tsk := ts.Parents() + ts, err = cm.chainModule.API().ChainGetTipSet(ctx, tsk) + if err != nil { + return status, err + 
} + } + + status.ChainStatus.BlocksPerTipsetLastFinality = float64(blockCnt) / float64(constants.Finality) + } + + return status, nil +} + +func (cm *CommonModule) StartTime(ctx context.Context) (time.Time, error) { + return cm.start, nil +} + +func (cm *CommonModule) API() v1api.ICommon { + return cm +} + +func (cm *CommonModule) V0API() v0api.ICommon { + return &apiwrapper.WrapperV1ICommon{ICommon: cm} +} diff --git a/app/submodule/common/v0api/common_v0api.go b/app/submodule/common/v0api/common_v0api.go new file mode 100644 index 0000000000..25309a79cb --- /dev/null +++ b/app/submodule/common/v0api/common_v0api.go @@ -0,0 +1,27 @@ +package v0api + +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/api/chain" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var _ v0api.ICommon = (*WrapperV1ICommon)(nil) + +type WrapperV1ICommon struct { //nolint + v1api.ICommon +} + +func (a *WrapperV1ICommon) Version(ctx context.Context) (types.Version, error) { + ver, err := a.ICommon.Version(ctx) + if err != nil { + return types.Version{}, err + } + + ver.APIVersion = chain.FullAPIVersion0 + + return ver, nil +} diff --git a/app/submodule/config/config.go b/app/submodule/config/config.go new file mode 100644 index 0000000000..7841d0dd63 --- /dev/null +++ b/app/submodule/config/config.go @@ -0,0 +1,51 @@ +package config + +import ( + "context" + "sync" + + repo2 "github.com/filecoin-project/venus/pkg/repo" +) + +type IConfig interface { + ConfigSet(ctx context.Context, dottedPath string, paramJSON string) error + ConfigGet(ctx context.Context, dottedPath string) (interface{}, error) +} + +// configModule is plumbing implementation for setting and retrieving values from local config. +type ConfigModule struct { //nolint + repo repo2.Repo + lock sync.Mutex +} + +// NewConfig returns a new configModule. 
+func NewConfigModule(repo repo2.Repo) *ConfigModule { + return &ConfigModule{repo: repo} +} + +// Set sets a value in config +func (s *ConfigModule) Set(dottedKey string, jsonString string) error { + s.lock.Lock() + defer s.lock.Unlock() + + cfg := s.repo.Config() + if err := cfg.Set(dottedKey, jsonString); err != nil { + return err + } + + return s.repo.ReplaceConfig(cfg) +} + +// Get gets a value from config +func (s *ConfigModule) Get(dottedKey string) (interface{}, error) { + return s.repo.Config().Get(dottedKey) +} + +// API create a new config api implement +func (s *ConfigModule) API() IConfig { + return &configAPI{config: s} +} + +func (s *ConfigModule) V0API() IConfig { + return &configAPI{config: s} +} diff --git a/app/submodule/config/config_api.go b/app/submodule/config/config_api.go new file mode 100644 index 0000000000..969de8b2df --- /dev/null +++ b/app/submodule/config/config_api.go @@ -0,0 +1,27 @@ +package config + +import ( + "context" +) + +var _ IConfig = &configAPI{} + +type configAPI struct { //nolint + config *ConfigModule +} + +// ConfigSet sets the given parameters at the given path in the local config. +// The given path may be either a single field name, or a dotted path to a field. +// The JSON value may be either a single value or a whole data structure to be replace. +// For example: +// ConfigSet("datastore.path", "dev/null") and ConfigSet("datastore", "{\"path\":\"dev/null\"}") +// are the same operation. +func (ca *configAPI) ConfigSet(ctx context.Context, dottedPath string, paramJSON string) error { + return ca.config.Set(dottedPath, paramJSON) +} + +// ConfigGet gets config parameters from the given path. +// The path may be either a single field name, or a dotted path to a field. 
+func (ca *configAPI) ConfigGet(ctx context.Context, dottedPath string) (interface{}, error) { + return ca.config.Get(dottedPath) +} diff --git a/app/submodule/config/config_test.go b/app/submodule/config/config_test.go new file mode 100644 index 0000000000..ddbb9a8c58 --- /dev/null +++ b/app/submodule/config/config_test.go @@ -0,0 +1,117 @@ +package config + +import ( + "testing" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + "github.com/filecoin-project/go-address" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/config" + repo2 "github.com/filecoin-project/venus/pkg/repo" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestConfigGet(t *testing.T) { + tf.UnitTest(t) + + t.Run("emits the referenced config value", func(t *testing.T) { + repo := repo2.NewInMemoryRepo() + cfgAPI := NewConfigModule(repo) + + out, err := cfgAPI.Get("bootstrap") + + require.NoError(t, err) + expected := config.NewDefaultConfig().Bootstrap + assert.Equal(t, expected, out) + }) + + t.Run("failure cases fail", func(t *testing.T) { + repo := repo2.NewInMemoryRepo() + cfgAPI := NewConfigModule(repo) + + _, err := cfgAPI.Get("nonexistantkey") + assert.EqualError(t, err, "key: nonexistantkey invalid for config") + + _, err = cfgAPI.Get("bootstrap.nope") + assert.EqualError(t, err, "key: bootstrap.nope invalid for config") + + _, err = cfgAPI.Get(".inval.id-key") + assert.EqualError(t, err, "key: .inval.id-key invalid for config") + }) +} + +func TestConfigSet(t *testing.T) { + tf.UnitTest(t) + + t.Run("sets the config value", func(t *testing.T) { + defaultCfg := config.NewDefaultConfig() + + repo := repo2.NewInMemoryRepo() + cfgAPI := NewConfigModule(repo) + + jsonBlob := `{"addresses": ["bootup1", "bootup2"]}` + + err := cfgAPI.Set("bootstrap", jsonBlob) + require.NoError(t, err) + out, err := cfgAPI.Get("bootstrap") + require.NoError(t, err) + + // validate output + expected := 
config.NewDefaultConfig().Bootstrap + expected.Addresses = []string{"bootup1", "bootup2"} + assert.Equal(t, expected, out) + + // validate config write + cfg := repo.Config() + assert.Equal(t, expected, cfg.Bootstrap) + assert.Equal(t, defaultCfg.Datastore, cfg.Datastore) + + err = cfgAPI.Set("api.apiAddress", ":1234") + require.NoError(t, err) + assert.Equal(t, ":1234", cfg.API.APIAddress) + + testAddr := testhelpers.RequireIDAddress(t, 100).String() + err = cfgAPI.Set("walletModule.defaultAddress", testAddr) + require.NoError(t, err) + assert.Equal(t, testAddr, cfg.Wallet.DefaultAddress.String()) + + testSwarmAddr := "/ip4/0.0.0.0/tcp/0" + err = cfgAPI.Set("swarm.address", testSwarmAddr) + require.NoError(t, err) + assert.Equal(t, testSwarmAddr, cfg.Swarm.Address) + + err = cfgAPI.Set("datastore.path", "/dev/null") + require.NoError(t, err) + assert.Equal(t, "/dev/null", cfg.Datastore.Path) + }) + + t.Run("failure cases fail", func(t *testing.T) { + repo := repo2.NewInMemoryRepo() + cfgAPI := NewConfigModule(repo) + + // bad key + jsonBlob := `{"addresses": ["bootup1", "bootup2"]}` + + err := cfgAPI.Set("botstrap", jsonBlob) + assert.EqualError(t, err, "json: unknown field \"botstrap\"") + + // bad value type (bootstrap is a struct not a list) + jsonBlobBadType := `["bootup1", "bootup2"]` + err = cfgAPI.Set("bootstrap", jsonBlobBadType) + assert.Error(t, err) + + // bad JSON + jsonBlobInvalid := `{"addresses": [bootup1, "bootup2"]}` + + err = cfgAPI.Set("bootstrap", jsonBlobInvalid) + assert.EqualError(t, err, "json: cannot unmarshal string into Go struct field Config.bootstrap of type config.BootstrapConfig") + + // bad address + jsonBlobBadAddr := "f4cqnyc0muxjajygqavu645m8ja04vckk2kcorrupt" + err = cfgAPI.Set("walletModule.defaultAddress", jsonBlobBadAddr) + assert.EqualError(t, err, address.ErrUnknownProtocol.Error()) + }) +} diff --git a/app/submodule/dagservice/blockservice_api.go b/app/submodule/dagservice/blockservice_api.go new file mode 100644 index 
0000000000..46813707ef --- /dev/null +++ b/app/submodule/dagservice/blockservice_api.go @@ -0,0 +1,38 @@ +package dagservice + +import ( + "context" + "io" + + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" +) + +var _ IDagService = &dagServiceAPI{} + +type dagServiceAPI struct { //nolint + dagService *DagServiceSubmodule +} + +// DAGGetNode returns the associated DAG node for the passed in CID. +func (dagServiceAPI *dagServiceAPI) DAGGetNode(ctx context.Context, ref string) (interface{}, error) { + return dagServiceAPI.dagService.Dag.GetNode(ctx, ref) +} + +// DAGGetFileSize returns the file size for a given Cid +func (dagServiceAPI *dagServiceAPI) DAGGetFileSize(ctx context.Context, c cid.Cid) (uint64, error) { + return dagServiceAPI.dagService.Dag.GetFileSize(ctx, c) +} + +// DAGCat returns an iostream with a piece of data stored on the merkeldag with +// the given cid. +func (dagServiceAPI *dagServiceAPI) DAGCat(ctx context.Context, c cid.Cid) (io.Reader, error) { + return dagServiceAPI.dagService.Dag.Cat(ctx, c) +} + +// DAGImportData adds data from an io reader to the merkledag and returns the +// Cid of the given data. Once the data is in the DAG, it can fetched from the +// node via Bitswap and a copy will be kept in the blockstore. 
+func (dagServiceAPI *dagServiceAPI) DAGImportData(ctx context.Context, data io.Reader) (ipld.Node, error) { + return dagServiceAPI.dagService.Dag.ImportData(ctx, data) +} diff --git a/app/submodule/dagservice/blockservice_submoodule.go b/app/submodule/dagservice/blockservice_submoodule.go new file mode 100644 index 0000000000..41454a0a3d --- /dev/null +++ b/app/submodule/dagservice/blockservice_submoodule.go @@ -0,0 +1,53 @@ +package dagservice + +import ( + "context" + "io" + + "github.com/filecoin-project/venus/app/submodule/network" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/util/dag" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + + bserv "github.com/ipfs/go-blockservice" +) + +type IDagService interface { + DAGGetNode(ctx context.Context, ref string) (interface{}, error) + DAGGetFileSize(ctx context.Context, c cid.Cid) (uint64, error) + DAGCat(ctx context.Context, c cid.Cid) (io.Reader, error) + DAGImportData(ctx context.Context, data io.Reader) (ipld.Node, error) +} + +// DagServiceSubmodule enhances the `Node` with networked key/value fetching capabilities. +// - `BlockService` is shared by chain/graphsync and piece/bitswap data +type DagServiceSubmodule struct { //nolint + // dagservice is a higher level interface for fetching data + Blockservice bserv.BlockService + + Dag *dag.DAG +} + +type dagConfig interface { + Repo() repo.Repo +} + +// NewDagserviceSubmodule creates a new block service submodule. 
+func NewDagserviceSubmodule(ctx context.Context, dagCfg dagConfig, network *network.NetworkSubmodule) (*DagServiceSubmodule, error) { + bservice := bserv.New(dagCfg.Repo().Datastore(), network.Bitswap) + dag := dag.NewDAG(merkledag.NewDAGService(bservice)) + return &DagServiceSubmodule{ + Blockservice: bservice, + Dag: dag, + }, nil +} + +func (blockService *DagServiceSubmodule) API() IDagService { + return &dagServiceAPI{dagService: blockService} +} + +func (blockService *DagServiceSubmodule) V0API() IDagService { + return &dagServiceAPI{dagService: blockService} +} diff --git a/app/submodule/market/market_api.go b/app/submodule/market/market_api.go new file mode 100644 index 0000000000..3cd19718ef --- /dev/null +++ b/app/submodule/market/market_api.go @@ -0,0 +1,60 @@ +package market + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/pkg/statemanger" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type marketAPI struct { + chain v1api.IChain + stmgr statemanger.IStateManager +} + +func newMarketAPI(c v1api.IChain, stmgr statemanger.IStateManager) v1api.IMarket { + return &marketAPI{c, stmgr} +} + +// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market +func (m *marketAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]types.MarketBalance, error) { + out := map[string]types.MarketBalance{} + ts, err := m.chain.ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + + state, err := m.stmgr.GetMarketState(ctx, ts) + if err != nil { + return nil, err + } + escrow, err := state.EscrowTable() + if err != nil { + return nil, err + } + locked, err := state.LockedTable() + if err != nil { + return nil, err + } + + err = 
escrow.ForEach(func(a address.Address, es abi.TokenAmount) error { + lk, err := locked.Get(a) + if err != nil { + return err + } + + out[a.String()] = types.MarketBalance{ + Escrow: es, + Locked: lk, + } + return nil + }) + if err != nil { + return nil, err + } + return out, nil +} diff --git a/app/submodule/market/market_submodule.go b/app/submodule/market/market_submodule.go new file mode 100644 index 0000000000..4f9751b20a --- /dev/null +++ b/app/submodule/market/market_submodule.go @@ -0,0 +1,26 @@ +package market + +import ( + "github.com/filecoin-project/venus/pkg/statemanger" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" +) + +// MarketSubmodule enhances the `Node` with market capabilities. +type MarketSubmodule struct { //nolint + c v1api.IChain + sm statemanger.IStateManager +} + +// NewMarketModule create new market module +func NewMarketModule(c v1api.IChain, sm statemanger.IStateManager) *MarketSubmodule { //nolint + return &MarketSubmodule{c, sm} +} + +func (ms *MarketSubmodule) API() v1api.IMarket { + return newMarketAPI(ms.c, ms.sm) +} + +func (ms *MarketSubmodule) V0API() v0api.IMarket { + return newMarketAPI(ms.c, ms.sm) +} diff --git a/app/submodule/mining/mining_api.go b/app/submodule/mining/mining_api.go new file mode 100644 index 0000000000..7bf547b8dd --- /dev/null +++ b/app/submodule/mining/mining_api.go @@ -0,0 +1,315 @@ +package mining + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + acrypto "github.com/filecoin-project/go-state-types/crypto" + + "github.com/ipfs/go-cid" + + ffi "github.com/filecoin-project/filecoin-ffi" + + "github.com/filecoin-project/venus/pkg/beacon" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/crypto" + 
"github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var _ v1api.IMining = &MiningAPI{} + +type MiningAPI struct { //nolint + Ming *MiningModule +} + +// MinerGetBaseInfo get current miner information +func (miningAPI *MiningAPI) MinerGetBaseInfo(ctx context.Context, maddr address.Address, round abi.ChainEpoch, tsk types.TipSetKey) (*types.MiningBaseInfo, error) { + chainStore := miningAPI.Ming.ChainModule.ChainReader + ts, err := chainStore.GetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("failed to load tipset for mining base: %v", err) + } + pt, _, err := miningAPI.Ming.Stmgr.RunStateTransition(ctx, ts) + if err != nil { + return nil, fmt.Errorf("failed to get tipset root for mining base: %v", err) + } + prev, err := chainStore.GetLatestBeaconEntry(ctx, ts) + if err != nil { + if os.Getenv("VENUS_IGNORE_DRAND") != "_yes_" { + return nil, fmt.Errorf("failed to get latest beacon entry: %v", err) + } + + prev = &types.BeaconEntry{} + } + + nv := miningAPI.Ming.ChainModule.Fork.GetNetworkVersion(ctx, ts.Height()) + + entries, err := beacon.BeaconEntriesForBlock(ctx, miningAPI.Ming.ChainModule.Drand, nv, round, ts.Height(), *prev) + if err != nil { + return nil, err + } + + rbase := *prev + if len(entries) > 0 { + rbase = entries[len(entries)-1] + } + version := miningAPI.Ming.ChainModule.Fork.GetNetworkVersion(ctx, round) + lbts, lbst, err := miningAPI.Ming.ChainModule.ChainReader.GetLookbackTipSetForRound(ctx, ts, round, version) + if err != nil { + return nil, fmt.Errorf("getting lookback miner actor state: %v", err) + } + + view := state.NewView(chainStore.Store(ctx), lbst) + act, err := view.LoadActor(ctx, maddr) + if errors.Is(err, types.ErrActorNotFound) { + // todo why + view = state.NewView(chainStore.Store(ctx), ts.At(0).ParentStateRoot) + _, err := 
view.LoadActor(ctx, maddr) + if err != nil { + return nil, fmt.Errorf("loading miner in current state: %v", err) + } + + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to load miner actor: %v", err) + } + mas, err := miner.Load(chainStore.Store(ctx), act) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + + buf := new(bytes.Buffer) + if err := maddr.MarshalCBOR(buf); err != nil { + return nil, fmt.Errorf("failed to marshal miner address: %v", err) + } + + prand, err := chain.DrawRandomness(rbase.Data, acrypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to get randomness for winning post: %v", err) + } + + pv := miningAPI.Ming.proofVerifier + xsectors, err := view.GetSectorsForWinningPoSt(ctx, nv, pv, maddr, prand) + if err != nil { + return nil, fmt.Errorf("getting winning post proving set: %v", err) + } + + if len(xsectors) == 0 { + return nil, nil + } + + mpow, tpow, _, err := view.StateMinerPower(ctx, maddr, ts.Key()) + if err != nil { + return nil, fmt.Errorf("failed to get power: %v", err) + } + + info, err := mas.Info() + if err != nil { + return nil, err + } + + st, err := miningAPI.Ming.ChainModule.ChainReader.StateView(ctx, ts) + if err != nil { + return nil, fmt.Errorf("failed to load latest state: %v", err) + } + worker, err := st.ResolveToKeyAddr(ctx, info.Worker) + if err != nil { + return nil, fmt.Errorf("resolving worker address: %v", err) + } + + // TODO: Not ideal performance...This method reloads miner and power state (already looked up here and in GetPowerRaw) + eligible, err := miningAPI.Ming.SyncModule.BlockValidator.MinerEligibleToMine(ctx, maddr, pt, ts.Height(), lbts) + if err != nil { + return nil, fmt.Errorf("determining miner eligibility: %v", err) + } + + return &types.MiningBaseInfo{ + MinerPower: mpow.QualityAdjPower, + NetworkPower: tpow.QualityAdjPower, + Sectors: xsectors, + WorkerKey: worker, + 
SectorSize: info.SectorSize, + PrevBeaconEntry: *prev, + BeaconEntries: entries, + EligibleForMining: eligible, + }, nil +} + +// MinerCreateBlock create block base on template +func (miningAPI *MiningAPI) MinerCreateBlock(ctx context.Context, bt *types.BlockTemplate) (*types.BlockMsg, error) { + fblk, err := miningAPI.minerCreateBlock(ctx, bt) + if err != nil { + return nil, err + } + + var out types.BlockMsg + out.Header = fblk.Header + for _, msg := range fblk.BLSMessages { + out.BlsMessages = append(out.BlsMessages, msg.Cid()) + } + for _, msg := range fblk.SECPMessages { + out.SecpkMessages = append(out.SecpkMessages, msg.Cid()) + } + + return &out, nil +} + +func (miningAPI *MiningAPI) minerCreateBlock(ctx context.Context, bt *types.BlockTemplate) (*types.FullBlock, error) { + chainStore := miningAPI.Ming.ChainModule.ChainReader + messageStore := miningAPI.Ming.ChainModule.MessageStore + cfg := miningAPI.Ming.Config.Repo().Config() + pts, err := chainStore.GetTipSet(ctx, bt.Parents) + if err != nil { + return nil, fmt.Errorf("failed to load parent tipset: %v", err) + } + + st, receiptCid, err := miningAPI.Ming.Stmgr.RunStateTransition(ctx, pts) + if err != nil { + return nil, fmt.Errorf("failed to load tipset state: %v", err) + } + + version := miningAPI.Ming.ChainModule.Fork.GetNetworkVersion(ctx, bt.Epoch) + _, lbst, err := miningAPI.Ming.ChainModule.ChainReader.GetLookbackTipSetForRound(ctx, pts, bt.Epoch, version) + if err != nil { + return nil, fmt.Errorf("getting lookback miner actor state: %v", err) + } + + viewer := state.NewView(cbor.NewCborStore(miningAPI.Ming.BlockStore.Blockstore), lbst) + worker, err := viewer.GetMinerWorkerRaw(ctx, bt.Miner) + if err != nil { + return nil, fmt.Errorf("failed to get miner worker: %v", err) + } + + next := &types.BlockHeader{ + Miner: bt.Miner, + Parents: bt.Parents.Cids(), + Ticket: bt.Ticket, + ElectionProof: bt.Eproof, + + BeaconEntries: bt.BeaconValues, + Height: bt.Epoch, + Timestamp: bt.Timestamp, + 
WinPoStProof: bt.WinningPoStProof, + ParentStateRoot: st, + ParentMessageReceipts: receiptCid, + } + + var blsMessages []*types.Message + var secpkMessages []*types.SignedMessage + + var blsMsgCids, secpkMsgCids []cid.Cid + var blsSigs []crypto.Signature + for _, msg := range bt.Messages { + if msg.Signature.Type == crypto.SigTypeBLS { + blsSigs = append(blsSigs, msg.Signature) + blsMessages = append(blsMessages, &msg.Message) + c, err := messageStore.StoreMessage(&msg.Message) + if err != nil { + return nil, err + } + + blsMsgCids = append(blsMsgCids, c) + } else { + c, err := messageStore.StoreMessage(msg) + if err != nil { + return nil, err + } + + secpkMsgCids = append(secpkMsgCids, c) + secpkMessages = append(secpkMessages, msg) + + } + } + store := miningAPI.Ming.BlockStore.Blockstore + + mmcid, err := chain.ComputeMsgMeta(store, blsMsgCids, secpkMsgCids) + if err != nil { + return nil, err + } + next.Messages = mmcid + + aggSig, err := aggregateSignatures(blsSigs) + if err != nil { + return nil, err + } + + next.BLSAggregate = aggSig + + pweight, err := miningAPI.Ming.SyncModule.ChainSelector.Weight(ctx, pts) + if err != nil { + return nil, err + } + next.ParentWeight = pweight + + baseFee, err := messageStore.ComputeBaseFee(ctx, pts, cfg.NetworkParams.ForkUpgradeParam) + if err != nil { + return nil, fmt.Errorf("computing base fee: %v", err) + } + next.ParentBaseFee = baseFee + + bHas, err := miningAPI.Ming.Wallet.API().WalletHas(ctx, worker) + if err != nil { + return nil, fmt.Errorf("find wallet: %v", err) + } + + if bHas { + nosigbytes, err := next.SignatureData() + if err != nil { + return nil, err + } + sig, err := miningAPI.Ming.Wallet.API().WalletSign(ctx, worker, nosigbytes, types.MsgMeta{ + Type: types.MTBlock, + }) + if err != nil { + return nil, fmt.Errorf("failed to sign new block: %v", err) + } + + next.BlockSig = sig + } + + fullBlock := &types.FullBlock{ + Header: next, + BLSMessages: blsMessages, + SECPMessages: secpkMessages, + } + + return 
fullBlock, nil +} + +func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) { + sigsS := make([]ffi.Signature, len(sigs)) + for i := 0; i < len(sigs); i++ { + copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes]) + } + + aggSig := ffi.Aggregate(sigsS) + if aggSig == nil { + if len(sigs) > 0 { + return nil, fmt.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs)) + } + + zeroSig := ffi.CreateZeroSignature() + + // Note: for blst this condition should not happen - nil should not + // be returned + return &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: zeroSig[:], + }, nil + } + return &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: aggSig[:], + }, nil +} diff --git a/app/submodule/mining/mining_submodule.go b/app/submodule/mining/mining_submodule.go new file mode 100644 index 0000000000..6a9b3e035f --- /dev/null +++ b/app/submodule/mining/mining_submodule.go @@ -0,0 +1,62 @@ +package mining + +import ( + "github.com/filecoin-project/venus/app/submodule/blockstore" + chain2 "github.com/filecoin-project/venus/app/submodule/chain" + "github.com/filecoin-project/venus/app/submodule/network" + "github.com/filecoin-project/venus/app/submodule/syncer" + "github.com/filecoin-project/venus/app/submodule/wallet" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/statemanger" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" +) + +type miningConfig interface { + Repo() repo.Repo + Verifier() ffiwrapper.Verifier +} + +// MiningModule enhances the `Node` with miner capabilities. 
+type MiningModule struct { //nolint + Config miningConfig + ChainModule *chain2.ChainSubmodule + BlockStore *blockstore.BlockstoreSubmodule + NetworkModule *network.NetworkSubmodule + SyncModule *syncer.SyncerSubmodule + Wallet wallet.WalletSubmodule + proofVerifier ffiwrapper.Verifier + Stmgr *statemanger.Stmgr +} + +// API create new miningAPi implement +func (miningModule *MiningModule) API() v1api.IMining { + return &MiningAPI{Ming: miningModule} +} + +func (miningModule *MiningModule) V0API() v0api.IMining { + return &MiningAPI{Ming: miningModule} +} + +// NewMiningModule create new mining module +func NewMiningModule( + stmgr *statemanger.Stmgr, + conf miningConfig, + chainModule *chain2.ChainSubmodule, + blockStore *blockstore.BlockstoreSubmodule, + networkModule *network.NetworkSubmodule, + syncModule *syncer.SyncerSubmodule, + wallet wallet.WalletSubmodule, +) *MiningModule { + return &MiningModule{ + Stmgr: stmgr, + Config: conf, + ChainModule: chainModule, + BlockStore: blockStore, + NetworkModule: networkModule, + SyncModule: syncModule, + Wallet: wallet, + proofVerifier: conf.Verifier(), + } +} diff --git a/app/submodule/mpool/mpool_api.go b/app/submodule/mpool/mpool_api.go new file mode 100644 index 0000000000..8c6b8f8ad2 --- /dev/null +++ b/app/submodule/mpool/mpool_api.go @@ -0,0 +1,351 @@ +package mpool + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/pkg/messagepool" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +var _ v1api.IMessagePool = &MessagePoolAPI{} + +// MessagePoolAPI messsage pool api implement +type MessagePoolAPI struct { + pushLocks *messagepool.MpoolLocker + + mp *MessagePoolSubmodule +} + +// MpoolDeleteByAdress delete msg in mpool of addr +func (a *MessagePoolAPI) MpoolDeleteByAdress(ctx 
context.Context, addr address.Address) error { + return a.mp.MPool.DeleteByAdress(addr) +} + +// MpoolPublish publish message of address +func (a *MessagePoolAPI) MpoolPublishByAddr(ctx context.Context, addr address.Address) error { + return a.mp.MPool.PublishMsgForWallet(ctx, addr) +} + +func (a *MessagePoolAPI) MpoolPublishMessage(ctx context.Context, smsg *types.SignedMessage) error { + return a.mp.MPool.PublishMsg(ctx, smsg) +} + +// MpoolPush pushes a signed message to mempool. +func (a *MessagePoolAPI) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { + return a.mp.MPool.Push(ctx, smsg) +} + +// MpoolGetConfig returns (a copy of) the current mpool config +func (a *MessagePoolAPI) MpoolGetConfig(context.Context) (*types.MpoolConfig, error) { + cfg := a.mp.MPool.GetConfig() + return &types.MpoolConfig{ + PriorityAddrs: cfg.PriorityAddrs, + SizeLimitHigh: cfg.SizeLimitHigh, + SizeLimitLow: cfg.SizeLimitLow, + ReplaceByFeeRatio: cfg.ReplaceByFeeRatio, + PruneCooldown: cfg.PruneCooldown, + GasLimitOverestimation: cfg.GasLimitOverestimation, + }, nil +} + +// MpoolSetConfig sets the mpool config to (a copy of) the supplied config +func (a *MessagePoolAPI) MpoolSetConfig(ctx context.Context, cfg *types.MpoolConfig) error { + return a.mp.MPool.SetConfig(ctx, &messagepool.MpoolConfig{ + PriorityAddrs: cfg.PriorityAddrs, + SizeLimitHigh: cfg.SizeLimitHigh, + SizeLimitLow: cfg.SizeLimitLow, + ReplaceByFeeRatio: cfg.ReplaceByFeeRatio, + PruneCooldown: cfg.PruneCooldown, + GasLimitOverestimation: cfg.GasLimitOverestimation, + }) +} + +// MpoolSelect returns a list of pending messages for inclusion in the next block +func (a *MessagePoolAPI) MpoolSelect(ctx context.Context, tsk types.TipSetKey, ticketQuality float64) ([]*types.SignedMessage, error) { + ts, err := a.mp.chain.API().ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + + return a.mp.MPool.SelectMessages(ctx, ts, ticketQuality) 
+} + +// MpoolSelects The batch selection message is used when multiple blocks need to select messages at the same time +func (a *MessagePoolAPI) MpoolSelects(ctx context.Context, tsk types.TipSetKey, ticketQualitys []float64) ([][]*types.SignedMessage, error) { + ts, err := a.mp.chain.API().ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + + return a.mp.MPool.MultipleSelectMessages(ctx, ts, ticketQualitys) +} + +// MpoolPending returns pending mempool messages. +func (a *MessagePoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { + var ts *types.TipSet + var err error + if tsk.IsEmpty() { + ts, err = a.mp.chain.API().ChainHead(ctx) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + } else { + ts, err = a.mp.chain.API().ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + } + + pending, mpts := a.mp.MPool.Pending(ctx) + + haveCids := map[cid.Cid]struct{}{} + for _, m := range pending { + haveCids[m.Cid()] = struct{}{} + } + + mptsH := mpts.Height() + tsH := ts.Height() + if ts == nil || mptsH > tsH { + return pending, nil + } + + for { + mptsH = mpts.Height() + tsH = ts.Height() + if mptsH == tsH { + if mpts.Equals(ts) { + return pending, nil + } + // different blocks in tipsets + + have, err := a.mp.MPool.MessagesForBlocks(ctx, ts.Blocks()) + if err != nil { + return nil, fmt.Errorf("getting messages for base ts: %w", err) + } + + for _, m := range have { + haveCids[m.Cid()] = struct{}{} + } + } + + msgs, err := a.mp.MPool.MessagesForBlocks(ctx, ts.Blocks()) + if err != nil { + return nil, fmt.Errorf(": %w", err) + } + + for _, m := range msgs { + mc := m.Cid() + if _, ok := haveCids[mc]; ok { + continue + } + + haveCids[mc] = struct{}{} + pending = append(pending, m) + } + + mptsH = mpts.Height() + tsH = ts.Height() + if mptsH >= tsH { + return pending, nil + } + + ts, err 
= a.mp.chain.API().ChainGetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, fmt.Errorf("loading parent tipset: %w", err) + } + } +} + +// MpoolClear clears pending messages from the mpool +func (a *MessagePoolAPI) MpoolClear(ctx context.Context, local bool) error { + a.mp.MPool.Clear(ctx, local) + return nil +} + +// MpoolPushUntrusted pushes a signed message to mempool from untrusted sources. +func (a *MessagePoolAPI) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { + return a.mp.MPool.PushUntrusted(ctx, smsg) +} + +// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message +// to mempool. +// maxFee is only used when GasFeeCap/GasPremium fields aren't specified +// +// When maxFee is set to 0, MpoolPushMessage will guess appropriate fee +// based on current chain conditions +func (a *MessagePoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) (*types.SignedMessage, error) { + cp := *msg + msg = &cp + inMsg := *msg + fromA, err := a.mp.chain.API().StateAccountKey(ctx, msg.From, types.EmptyTSK) + if err != nil { + return nil, fmt.Errorf("getting key address: %w", err) + } + { + done, err := a.pushLocks.TakeLock(ctx, fromA) + if err != nil { + return nil, fmt.Errorf("taking lock: %w", err) + } + defer done() + } + + if msg.Nonce != 0 { + return nil, fmt.Errorf("MpoolPushMessage expects message nonce to be 0, was %d", msg.Nonce) + } + + msg, err = a.GasEstimateMessageGas(ctx, msg, spec, types.TipSetKey{}) + if err != nil { + return nil, fmt.Errorf("GasEstimateMessageGas error: %w", err) + } + + if msg.GasPremium.GreaterThan(msg.GasFeeCap) { + inJSON, err := json.Marshal(inMsg) + if err != nil { + return nil, err + } + outJSON, err := json.Marshal(msg) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("after estimation, GasPremium is greater than GasFeeCap, inmsg: %s, outmsg: %s", + inJSON, outJSON) + } + + if msg.From.Protocol() == address.ID { + 
log.Warnf("Push from ID address (%s), adjusting to %s", msg.From, fromA) + msg.From = fromA + } + + b, err := a.mp.walletAPI.WalletBalance(ctx, msg.From) + if err != nil { + return nil, fmt.Errorf("mpool push: getting origin balance: %w", err) + } + + requiredFunds := big.Add(msg.Value, msg.RequiredFunds()) + if b.LessThan(requiredFunds) { + return nil, fmt.Errorf("mpool push: not enough funds: %s < %s", b, requiredFunds) + } + + // Sign and push the message + return a.mp.msgSigner.SignMessage(ctx, msg, func(smsg *types.SignedMessage) error { + if _, err := a.MpoolPush(ctx, smsg); err != nil { + return fmt.Errorf("mpool push: failed to push message: %w", err) + } + return nil + }) +} + +// MpoolBatchPush batch pushes a unsigned message to mempool. +func (a *MessagePoolAPI) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { + var messageCids []cid.Cid + for _, smsg := range smsgs { + smsgCid, err := a.mp.MPool.Push(ctx, smsg) + if err != nil { + return messageCids, err + } + messageCids = append(messageCids, smsgCid) + } + return messageCids, nil +} + +// MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. +func (a *MessagePoolAPI) MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { + var messageCids []cid.Cid + for _, smsg := range smsgs { + smsgCid, err := a.mp.MPool.PushUntrusted(ctx, smsg) + if err != nil { + return messageCids, err + } + messageCids = append(messageCids, smsgCid) + } + return messageCids, nil +} + +// MpoolBatchPushMessage batch pushes a unsigned message to mempool. 
+func (a *MessagePoolAPI) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Message, spec *types.MessageSendSpec) ([]*types.SignedMessage, error) { + var smsgs []*types.SignedMessage + for _, msg := range msgs { + smsg, err := a.MpoolPushMessage(ctx, msg, spec) + if err != nil { + return smsgs, err + } + smsgs = append(smsgs, smsg) + } + return smsgs, nil +} + +// MpoolGetNonce gets next nonce for the specified sender. +// Note that this method may not be atomic. Use MpoolPushMessage instead. +func (a *MessagePoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) { + return a.mp.MPool.GetNonce(ctx, addr, types.EmptyTSK) +} + +func (a *MessagePoolAPI) MpoolSub(ctx context.Context) (<-chan types.MpoolUpdate, error) { + return a.mp.MPool.Updates(ctx) +} + +// GasEstimateMessageGas estimates gas values for unset message gas fields +func (a *MessagePoolAPI) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { + return a.mp.MPool.GasEstimateMessageGas(ctx, &types.EstimateMessage{Msg: msg, Spec: spec}, tsk) +} + +func (a *MessagePoolAPI) GasBatchEstimateMessageGas(ctx context.Context, estimateMessages []*types.EstimateMessage, fromNonce uint64, tsk types.TipSetKey) ([]*types.EstimateResult, error) { + return a.mp.MPool.GasBatchEstimateMessageGas(ctx, estimateMessages, fromNonce, tsk) +} + +// GasEstimateFeeCap estimates gas fee cap +func (a *MessagePoolAPI) GasEstimateFeeCap(ctx context.Context, msg *types.Message, maxqueueblks int64, tsk types.TipSetKey) (big.Int, error) { + return a.mp.MPool.GasEstimateFeeCap(ctx, msg, maxqueueblks, tsk) +} + +func (a *MessagePoolAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { + return a.mp.MPool.GasEstimateGasLimit(ctx, msgIn, tsk) +} + +// GasEstimateGasPremium estimates what gas price should be used for a +// message to have high likelihood of inclusion in 
`nblocksincl` epochs. +func (a *MessagePoolAPI) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (big.Int, error) { + return a.mp.MPool.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk, a.mp.MPool.PriceCache) +} + +func (a *MessagePoolAPI) MpoolCheckMessages(ctx context.Context, protos []*types.MessagePrototype) ([][]types.MessageCheckStatus, error) { + return a.mp.MPool.CheckMessages(ctx, protos) +} + +func (a *MessagePoolAPI) MpoolCheckPendingMessages(ctx context.Context, addr address.Address) ([][]types.MessageCheckStatus, error) { + return a.mp.MPool.CheckPendingMessages(ctx, addr) +} + +func (a *MessagePoolAPI) MpoolCheckReplaceMessages(ctx context.Context, msg []*types.Message) ([][]types.MessageCheckStatus, error) { + return a.mp.MPool.CheckReplaceMessages(ctx, msg) +} + +/*// WalletSign signs the given bytes using the given address. +func (a *MessagePoolAPI) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) { + head := a.mp.chain.ChainReader.GetHead() + view, err := a.mp.chain.ChainReader.StateView(head) + if err != nil { + return nil, err + } + + keyAddr, err := view.ResolveToKeyAddr(ctx, k) + if err != nil { + return nil, fmt.Errorf("failed to resolve ID address: %v", keyAddr) + } + //var meta wallet.MsgMeta + //if len(metas) > 0 { + // meta = metas[0] + //} else { + meta := wallet.MsgMeta{ + Type: wallet.MTUnknown, + } + //} + return a.mp.walletAPI.WalletSign(ctx, keyAddr, msg, meta) +} +*/ diff --git a/app/submodule/mpool/mpool_submodule.go b/app/submodule/mpool/mpool_submodule.go new file mode 100644 index 0000000000..ac7f8add2e --- /dev/null +++ b/app/submodule/mpool/mpool_submodule.go @@ -0,0 +1,262 @@ +package mpool + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "strconv" + "sync" + "time" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + + 
"github.com/filecoin-project/go-address" + logging "github.com/ipfs/go-log" + + "github.com/filecoin-project/venus/app/submodule/chain" + "github.com/filecoin-project/venus/app/submodule/network" + "github.com/filecoin-project/venus/app/submodule/wallet" + chainpkg "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/messagepool" + "github.com/filecoin-project/venus/pkg/messagepool/journal" + "github.com/filecoin-project/venus/pkg/repo" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var pubsubMsgsSyncEpochs = 10 + +func init() { + if s := os.Getenv("VENUS_MSGS_SYNC_EPOCHS"); s != "" { + val, err := strconv.Atoi(s) + if err != nil { + log.Errorf("failed to parse LOTUS_MSGS_SYNC_EPOCHS: %s", err) + return + } + pubsubMsgsSyncEpochs = val + } +} + +var log = logging.Logger("mpool") + +type messagepoolConfig interface { + Repo() repo.Repo +} + +// MessagingSubmodule enhances the `Node` with internal message capabilities. 
+type MessagePoolSubmodule struct { //nolint + // Network Fields + MessageSub *pubsub.Subscription + + MPool *messagepool.MessagePool + msgSigner *messagepool.MessageSigner + chain *chain.ChainSubmodule + network *network.NetworkSubmodule + walletAPI v1api.IWallet + networkCfg *config.NetworkParamsConfig +} + +func OpenFilesystemJournal(lr repo.Repo) (journal.Journal, error) { + jrnl, err := journal.OpenFSJournal(lr, journal.EnvDisabledEvents()) + if err != nil { + return nil, err + } + + return jrnl, err +} + +func NewMpoolSubmodule(ctx context.Context, cfg messagepoolConfig, + network *network.NetworkSubmodule, + chain *chain.ChainSubmodule, + wallet *wallet.WalletSubmodule, +) (*MessagePoolSubmodule, error) { + mpp := messagepool.NewProvider(chain.Stmgr, chain.ChainReader, chain.MessageStore, cfg.Repo().Config().NetworkParams, network.Pubsub) + + j, err := OpenFilesystemJournal(cfg.Repo()) + if err != nil { + return nil, err + } + mp, err := messagepool.New(ctx, mpp, chain.Stmgr, cfg.Repo().MetaDatastore(), cfg.Repo().Config().NetworkParams, + cfg.Repo().Config().Mpool, network.NetworkName, j) + if err != nil { + return nil, fmt.Errorf("constructing mpool: %s", err) + } + + return &MessagePoolSubmodule{ + MPool: mp, + chain: chain, + walletAPI: wallet.API(), + network: network, + networkCfg: cfg.Repo().Config().NetworkParams, + msgSigner: messagepool.NewMessageSigner(wallet.WalletIntersection(), mp, cfg.Repo().MetaDatastore()), + }, nil +} + +func (mp *MessagePoolSubmodule) handleIncomingMessage(ctx context.Context) { + for { + _, err := mp.MessageSub.Next(ctx) + if err != nil { + log.Warn("error from message subscription: ", err) + if ctx.Err() != nil { + log.Warn("quitting HandleIncomingMessages loop") + return + } + continue + } + } +} + +func (mp *MessagePoolSubmodule) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult { + if pid == mp.network.Host.ID() { + return mp.validateLocalMessage(ctx, msg) + } + + m := 
&types.SignedMessage{} + if err := m.UnmarshalCBOR(bytes.NewReader(msg.GetData())); err != nil { + log.Warnf("failed to decode incoming message: %s", err) + return pubsub.ValidationReject + } + + log.Debugf("validate incoming msg:%s", m.Cid().String()) + + if err := mp.MPool.Add(ctx, m); err != nil { + log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err) + switch { + case errors.Is(err, messagepool.ErrSoftValidationFailure): + fallthrough + case errors.Is(err, messagepool.ErrRBFTooLowPremium): + fallthrough + case errors.Is(err, messagepool.ErrTooManyPendingMessages): + fallthrough + case errors.Is(err, messagepool.ErrNonceGap): + fallthrough + case errors.Is(err, messagepool.ErrNonceTooLow): + return pubsub.ValidationIgnore + default: + return pubsub.ValidationReject + } + } + return pubsub.ValidationAccept +} + +func (mp *MessagePoolSubmodule) validateLocalMessage(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult { + m := &types.SignedMessage{} + if err := m.UnmarshalCBOR(bytes.NewReader(msg.GetData())); err != nil { + return pubsub.ValidationIgnore + } + + if m.ChainLength() > messagepool.MaxMessageSize { + log.Warnf("local message is too large! (%dB)", m.ChainLength()) + return pubsub.ValidationIgnore + } + + if m.Message.To == address.Undef { + log.Warn("local message has invalid destination address") + return pubsub.ValidationIgnore + } + + if !m.Message.Value.LessThan(types.TotalFilecoinInt) { + log.Warnf("local messages has too high value: %s", m.Message.Value) + return pubsub.ValidationIgnore + } + + if err := mp.MPool.VerifyMsgSig(m); err != nil { + log.Warnf("signature verification failed for local message: %s", err) + return pubsub.ValidationIgnore + } + + return pubsub.ValidationAccept +} + +// Start to the message pubsub topic to learn about messages to mine into blocks. 
+// Start registers the pubsub topic validator for this network's message
+// topic and joins the topic, but defers the actual subscription until the
+// chain is near-synced (see waitForSync), so gossip messages are not
+// processed while the node is far behind the network head.
+func (mp *MessagePoolSubmodule) Start(ctx context.Context) error {
+    topicName := types.MessageTopic(mp.network.NetworkName)
+    var err error
+    if err = mp.network.Pubsub.RegisterTopicValidator(topicName, mp.Validate); err != nil {
+        return err
+    }
+
+    msgTopic, err := mp.network.Pubsub.Join(topicName)
+    if err != nil {
+        return err
+    }
+
+    // subscribe is idempotent: waitForSync may fire it from the early check
+    // and/or from the head-change callback.
+    var once sync.Once
+    subscribe := func() {
+        once.Do(func() {
+            var err error
+            // NOTE(review): a Subscribe failure here is fatal (panic) because
+            // this runs in a goroutine with no error channel to report into.
+            if mp.MessageSub, err = msgTopic.Subscribe(); err != nil {
+                panic(err)
+            }
+            go mp.handleIncomingMessage(ctx)
+        })
+    }
+
+    // wait until we are synced within 10 epochs
+    go mp.waitForSync(pubsubMsgsSyncEpochs, subscribe)
+
+    return nil
+}
+
+// waitForSync invokes subscribe once the head tipset's timestamp is within
+// `epochs` block-delays of wall-clock time: immediately if already synced,
+// otherwise via a head-change notification that unregisters itself
+// (chainpkg.ErrNotifeeDone) after firing.
+func (mp *MessagePoolSubmodule) waitForSync(epochs int, subscribe func()) {
+    // the "near sync" window expressed in seconds
+    nearsync := time.Duration(epochs*int(mp.networkCfg.BlockDelay)) * time.Second
+
+    // early check, are we synced at start up?
+    ts := mp.chain.ChainReader.GetHead()
+    timestamp := ts.MinTimestamp()
+    timestampTime := time.Unix(int64(timestamp), 0)
+    if constants.Clock.Since(timestampTime) < nearsync {
+        subscribe()
+        return
+    }
+
+    // we are not synced, subscribe to head changes and wait for sync
+    mp.chain.ChainReader.SubscribeHeadChanges(func(rev, app []*types.TipSet) error {
+        if len(app) == 0 {
+            return nil
+        }
+
+        // take the newest timestamp among the applied tipsets
+        latest := app[0].MinTimestamp()
+        for _, ts := range app[1:] {
+            timestamp := ts.MinTimestamp()
+            if timestamp > latest {
+                latest = timestamp
+            }
+        }
+
+        latestTime := time.Unix(int64(latest), 0)
+        if constants.Clock.Since(latestTime) < nearsync {
+            subscribe()
+            // done waiting; unregister this notifee
+            return chainpkg.ErrNotifeeDone
+        }
+
+        return nil
+    })
+}
+
+// Stop closes the message pool and cancels the pubsub subscription, if any.
+func (mp *MessagePoolSubmodule) Stop(ctx context.Context) {
+    err := mp.MPool.Close()
+    if err != nil {
+        log.Errorf("failed to close mpool: %s", err)
+    }
+    if mp.MessageSub != nil {
+        mp.MessageSub.Cancel()
+    }
+}
+
+// API create a new mpool api implement
+func (mp *MessagePoolSubmodule) API() v1api.IMessagePool {
+    pushLocks := messagepool.NewMpoolLocker()
+    return &MessagePoolAPI{mp: mp, pushLocks: pushLocks}
+}
+
+// V0API create a new mpool api implement for the v0 API surface.
+func (mp *MessagePoolSubmodule) V0API() v0api.IMessagePool {
+    pushLocks := messagepool.NewMpoolLocker()
+    return &MessagePoolAPI{mp: mp, pushLocks: pushLocks}
+}
diff --git a/app/submodule/multisig/multisig_api.go b/app/submodule/multisig/multisig_api.go
new file mode 100644
index 0000000000..d7d6c7cd90
--- /dev/null
+++ b/app/submodule/multisig/multisig_api.go
@@ -0,0 +1,348 @@
+package multisig
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/filecoin-project/go-address"
+    "github.com/filecoin-project/go-state-types/abi"
+    actorstypes "github.com/filecoin-project/go-state-types/actors"
+    "github.com/filecoin-project/go-state-types/big"
+    multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
+
+    "github.com/filecoin-project/venus/venus-shared/actors"
+    "github.com/filecoin-project/venus/venus-shared/actors/builtin/multisig"
+    v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1"
+    "github.com/filecoin-project/venus/venus-shared/types"
+)
+
+var _ v1api.IMultiSig = &multiSig{}
+
+// multiSig implements v1api.IMultiSig on top of the multisig submodule.
+type multiSig struct {
+    *MultiSigSubmodule
+}
+
+// MsigProposeResponse selects between the approve and cancel paths in the
+// shared msigApproveOrCancel* helpers below.
+type MsigProposeResponse int
+
+const (
+    MsigApprove MsigProposeResponse = iota
+    MsigCancel
+)
+
+func newMultiSig(m *MultiSigSubmodule) v1api.IMultiSig {
+    return &multiSig{
+        MultiSigSubmodule: m,
+    }
+}
+
+// messageBuilder returns a multisig message builder for the actor version
+// matching the current network version, with `from` as the message sender.
+func (a *multiSig) messageBuilder(ctx context.Context, from address.Address) (multisig.MessageBuilder, error) {
+    nver, err := a.state.StateNetworkVersion(ctx, types.EmptyTSK)
+    if err != nil {
+        return nil, err
+    }
+    aver, err := actorstypes.VersionForNetwork(nver)
+    if err != nil {
+        return nil, err
+    }
+    return multisig.Message(aver, from), nil
+}
+
+// MsigCreate creates a multisig wallet message prototype.
+// It takes the following params: required number of approvals, signer
+// addresses, unlock duration, initial balance, sender address of the create
+// message, gas price.
+// NOTE(review): gp is accepted but never used when building the prototype.
+func (a *multiSig) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (*types.MessagePrototype, error) {
+    mb, err := a.messageBuilder(ctx, src)
+    if err != nil {
+        return nil, err
+    }
+
+    msg, err := mb.Create(addrs, req, 0, duration, val)
+    if err != nil {
+        return nil, err
+    }
+
+    return &types.MessagePrototype{
+        Message:    *msg,
+        ValidNonce: false,
+    }, nil
+}
+
+// MsigPropose proposes a multisig transaction.
+// It takes the following params: multisig address, recipient address, value
+// to transfer, sender address of the propose message, method to call in the
+// proposed message, params to include in the proposed message.
+func (a *multiSig) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*types.MessagePrototype, error) {
+    mb, err := a.messageBuilder(ctx, src)
+    if err != nil {
+        return nil, err
+    }
+
+    msg, err := mb.Propose(msig, to, amt, abi.MethodNum(method), params)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create proposal: %w", err)
+    }
+
+    return &types.MessagePrototype{
+        Message:    *msg,
+        ValidNonce: false,
+    }, nil
+}
+
+// MsigAddPropose proposes adding a signer to the multisig.
+// It takes the following params: multisig address, sender address of the
+// propose message, new signer, whether the number of required signers should
+// be increased.
+func (a *multiSig) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (*types.MessagePrototype, error) {
+    enc, actErr := serializeAddParams(newAdd, inc)
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc)
+}
+
+// MsigAddApprove approves a previously proposed AddSigner message.
+// It takes the following params: multisig address, sender address of the
+// approve message, proposed transaction ID, proposer address, new signer,
+// whether the number of required signers should be increased.
+func (a *multiSig) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (*types.MessagePrototype, error) {
+    enc, actErr := serializeAddParams(newAdd, inc)
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc)
+}
+
+// MsigAddCancel cancels a previously proposed AddSigner message.
+// It takes the following params: multisig address, sender address of the
+// cancel message, proposed transaction ID, new signer, whether the number of
+// required signers should be increased.
+func (a *multiSig) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (*types.MessagePrototype, error) {
+    enc, actErr := serializeAddParams(newAdd, inc)
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc)
+}
+
+// MsigCancelTxnHash cancels a previously-proposed multisig message, matched
+// by transaction ID and a hash of its parameters (src must be the original
+// proposer, and is passed as such to the shared helper).
+func (a *multiSig) MsigCancelTxnHash(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*types.MessagePrototype, error) {
+    return a.msigApproveOrCancelTxnHash(ctx, MsigCancel, msig, txID, src, to, amt, src, method, params)
+}
+
+// MsigSwapPropose proposes swapping 2 signers in the multisig
+// It takes the following params: multisig address, sender address of the
+// propose message, old signer, new signer.
+func (a *multiSig) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (*types.MessagePrototype, error) {
+    enc, actErr := serializeSwapParams(oldAdd, newAdd)
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc)
+}
+
+// MsigSwapApprove approves a previously proposed SwapSigner
+// It takes the following params: multisig address, sender address of the
+// approve message, proposed transaction ID, proposer address, old signer,
+// new signer.
+func (a *multiSig) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (*types.MessagePrototype, error) {
+    enc, actErr := serializeSwapParams(oldAdd, newAdd)
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc)
+}
+
+// MsigSwapCancel cancels a previously proposed SwapSigner message.
+// It takes the following params: multisig address, sender address of the
+// cancel message, proposed transaction ID, old signer, new signer.
+func (a *multiSig) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (*types.MessagePrototype, error) {
+    enc, actErr := serializeSwapParams(oldAdd, newAdd)
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc)
+}
+
+// MsigApprove approves a previously-proposed multisig transaction by
+// transaction ID alone (no parameter hash check — see MsigApproveTxnHash).
+// It takes the following params: multisig address, proposed transaction ID,
+// sender address of the approve message.
+func (a *multiSig) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*types.MessagePrototype, error) {
+    return a.msigApproveOrCancelSimple(ctx, MsigApprove, msig, txID, src)
+}
+
+// MsigApproveTxnHash approves a previously-proposed multisig message, specified
+// using both transaction ID and a hash of the parameters used in the
+// proposal. This method of approval can be used to ensure you only approve
+// exactly the transaction you think you are.
+// It takes the following params: multisig address, proposed message ID,
+// proposer address, recipient address, value to transfer, sender address of
+// the approve message, method to call in the proposed message, params to
+// include in the proposed message.
+func (a *multiSig) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*types.MessagePrototype, error) {
+    return a.msigApproveOrCancelTxnHash(ctx, MsigApprove, msig, txID, proposer, to, amt, src, method, params)
+}
+
+// MsigCancel cancels a previously-proposed multisig message by transaction
+// ID alone.
+// It takes the following params: multisig address, proposed transaction ID,
+// sender address of the cancel message.
+func (a *multiSig) MsigCancel(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*types.MessagePrototype, error) {
+    return a.msigApproveOrCancelSimple(ctx, MsigCancel, msig, txID, src)
+}
+
+// MsigRemoveSigner proposes the removal of a signer from the multisig.
+// It accepts the multisig to make the change on, the proposer address to
+// send the message from, the address to be removed, and a boolean
+// indicating whether or not the signing threshold should be lowered by one
+// along with the address removal.
+func (a *multiSig) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*types.MessagePrototype, error) {
+    enc, actErr := serializeRemoveParams(toRemove, decrease)
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return a.MsigPropose(ctx, msig, msig, types.NewInt(0), proposer, uint64(multisig.Methods.RemoveSigner), enc)
+}
+
+// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
+// It takes the following params: multisig address, start tipset key, end
+// tipset key. The result is lockedBalance(start) - lockedBalance(end),
+// both computed against the actor state at the end tipset.
+func (a *multiSig) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) {
+    startTS, err := a.state.ChainGetTipSet(ctx, start)
+    if err != nil {
+        return types.EmptyInt, fmt.Errorf("loading start tipset %s: %w", start, err)
+    }
+
+    endTS, err := a.state.ChainGetTipSet(ctx, end)
+    if err != nil {
+        return types.EmptyInt, fmt.Errorf("loading end tipset %s: %w", end, err)
+    }
+
+    if startTS.Height() > endTS.Height() {
+        return types.EmptyInt, fmt.Errorf("start tipset %d is after end tipset %d", startTS.Height(), endTS.Height())
+    } else if startTS.Height() == endTS.Height() {
+        // empty period: nothing can have vested
+        return big.Zero(), nil
+    }
+
+    // LoadActor(ctx, addr, endTs)
+    act, err := a.state.GetParentStateRootActor(ctx, endTS, addr)
+    if err != nil {
+        return types.EmptyInt, fmt.Errorf("failed to load multisig actor at end epoch: %w", err)
+    }
+
+    msas, err := multisig.Load(a.store.Store(ctx), act)
+    if err != nil {
+        return types.EmptyInt, fmt.Errorf("failed to load multisig actor state: %w", err)
+    }
+
+    startLk, err := msas.LockedBalance(startTS.Height())
+    if err != nil {
+        return types.EmptyInt, fmt.Errorf("failed to compute locked balance at start height: %w", err)
+    }
+
+    endLk, err := msas.LockedBalance(endTS.Height())
+    if err != nil {
+        return types.EmptyInt, fmt.Errorf("failed to compute locked balance at end height: %w", err)
+    }
+
+    // locked balance decreases as funds vest, so start - end is the vested amount
+    return types.BigSub(startLk, endLk), nil
+}
+
+// msigApproveOrCancelSimple builds an Approve or Cancel message that matches
+// a pending transaction by ID only (nil proposal hash).
+func (a *multiSig) msigApproveOrCancelSimple(ctx context.Context, operation MsigProposeResponse, msig address.Address, txID uint64, src address.Address) (*types.MessagePrototype, error) {
+    if msig == address.Undef {
+        return nil, fmt.Errorf("must provide multisig address")
+    }
+
+    if src == address.Undef {
+        return nil, fmt.Errorf("must provide source address")
+    }
+
+    mb, err := a.messageBuilder(ctx, src)
+    if err != nil {
+        return nil, err
+    }
+
+    var msg *types.Message
+    switch operation {
+    case MsigApprove:
+        msg, err = mb.Approve(msig, txID, nil)
+    case MsigCancel:
+        msg, err = mb.Cancel(msig, txID, nil)
+    default:
+        return nil, fmt.Errorf("invalid operation for msigApproveOrCancel")
+    }
+    if err != nil {
+        return nil, err
+    }
+
+    return &types.MessagePrototype{Message: *msg, ValidNonce: false}, nil
+}
+
+// msigApproveOrCancelTxnHash builds an Approve or Cancel message that matches
+// a pending transaction by ID and by a hash of its proposal data.
+func (a *multiSig) msigApproveOrCancelTxnHash(ctx context.Context, operation MsigProposeResponse, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*types.MessagePrototype, error) {
+    if msig == address.Undef {
+        return nil, fmt.Errorf("must provide multisig address")
+    }
+
+    if src == address.Undef {
+        return nil, fmt.Errorf("must provide source address")
+    }
+
+    // resolve the proposer to an ID address before hashing
+    // NOTE(review): presumably the on-chain proposal hash is computed over
+    // the proposer's ID address — confirm against the actor implementation.
+    if proposer.Protocol() != address.ID {
+        proposerID, err := a.state.StateLookupID(ctx, proposer, types.EmptyTSK)
+        if err != nil {
+            return nil, err
+        }
+        proposer = proposerID
+    }
+
+    p := multisig.ProposalHashData{
+        Requester: proposer,
+        To:        to,
+        Value:     amt,
+        Method:    abi.MethodNum(method),
+        Params:    params,
+    }
+
+    mb, err := a.messageBuilder(ctx, src)
+    if err != nil {
+        return nil, err
+    }
+
+    var msg *types.Message
+    switch operation {
+    case MsigApprove:
+        msg, err = mb.Approve(msig, txID, &p)
+    case MsigCancel:
+        msg, err = mb.Cancel(msig, txID, &p)
+    default:
+        return nil, fmt.Errorf("invalid operation for msigApproveOrCancel")
+    }
+    if err != nil {
+        return nil, err
+    }
+
+    return &types.MessagePrototype{
+        Message:    *msg,
+        ValidNonce: false,
+    }, nil
+}
+
+// serializeAddParams CBOR-encodes AddSigner params.
+func serializeAddParams(new address.Address, inc bool) ([]byte, error) {
+    enc, actErr := actors.SerializeParams(&multisig2.AddSignerParams{
+        Signer:   new,
+        Increase: inc,
+    })
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return enc, nil
+}
+
+// serializeSwapParams CBOR-encodes SwapSigner params.
+func serializeSwapParams(old address.Address, new address.Address) ([]byte, error) {
+    enc, actErr := actors.SerializeParams(&multisig2.SwapSignerParams{
+        From: old,
+        To:   new,
+    })
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return enc, nil
+}
+
+// serializeRemoveParams CBOR-encodes RemoveSigner params.
+func serializeRemoveParams(rem address.Address, dec bool) ([]byte, error) {
+    enc, actErr := actors.SerializeParams(&multisig2.RemoveSignerParams{
+        Signer:   rem,
+        Decrease: dec,
+    })
+    if actErr != nil {
+        return nil, actErr
+    }
+
+    return enc, nil
+}
diff --git a/app/submodule/multisig/multisig_submodule.go b/app/submodule/multisig/multisig_submodule.go
new file mode 100644
index 0000000000..398fed72c3
--- /dev/null
+++ b/app/submodule/multisig/multisig_submodule.go
@@ -0,0 +1,31 @@
+package multisig
+
+import (
+    apiwrapper "github.com/filecoin-project/venus/app/submodule/multisig/v0api"
+    chain2 "github.com/filecoin-project/venus/pkg/chain"
+    v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0"
+    v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1"
+)
+
+// MultiSigSubmodule enhances the node with multisig capabilities.
+type MultiSigSubmodule struct { //nolint
+    state v1api.IChain       // chain state queries (network version, tipsets, actors)
+    mpool v1api.IMessagePool // used by the v0 API wrapper to push messages
+    store *chain2.Store      // backing store for loading multisig actor state
+}
+
+// NewMultiSigSubmodule creates a new multisig submodule.
+func NewMultiSigSubmodule(chainState v1api.IChain, msgPool v1api.IMessagePool, store *chain2.Store) *MultiSigSubmodule { + return &MultiSigSubmodule{state: chainState, mpool: msgPool, store: store} +} + +// API create a new multisig implement +func (sb *MultiSigSubmodule) API() v1api.IMultiSig { + return newMultiSig(sb) +} + +func (sb *MultiSigSubmodule) V0API() v0api.IMultiSig { + return &apiwrapper.WrapperV1IMultiSig{ + IMultiSig: newMultiSig(sb), + IMessagePool: sb.mpool, + } +} diff --git a/app/submodule/multisig/v0api/multisig_v0api.go b/app/submodule/multisig/v0api/multisig_v0api.go new file mode 100644 index 0000000000..ef32a8f1dc --- /dev/null +++ b/app/submodule/multisig/v0api/multisig_v0api.go @@ -0,0 +1,147 @@ +package v0api + +import ( + "context" + "fmt" + + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" +) + +type WrapperV1IMultiSig struct { + v1api.IMultiSig + v1api.IMessagePool +} + +var _ v0api.IMultiSig = (*WrapperV1IMultiSig)(nil) + +func (a *WrapperV1IMultiSig) executePrototype(ctx context.Context, p *types.MessagePrototype) (cid.Cid, error) { + sm, err := a.IMessagePool.MpoolPushMessage(ctx, &p.Message, nil) + if err != nil { + return cid.Undef, fmt.Errorf("pushing message: %w", err) + } + + return sm.Cid(), nil +} + +func (a *WrapperV1IMultiSig) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) { + p, err := a.IMultiSig.MsigCreate(ctx, req, addrs, duration, val, src, gp) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigPropose(ctx context.Context, msig 
address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + p, err := a.IMultiSig.MsigPropose(ctx, msig, to, amt, src, method, params) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) { + p, err := a.IMultiSig.MsigApprove(ctx, msig, txID, src) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + p, err := a.IMultiSig.MsigApproveTxnHash(ctx, msig, txID, proposer, to, amt, src, method, params) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigCancel(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) { + p, err := a.IMultiSig.MsigCancel(ctx, msig, txID, src) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigCancelTxnHash(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + p, err := a.IMultiSig.MsigCancelTxnHash(ctx, msig, txID, to, amt, src, method, params) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, 
inc bool) (cid.Cid, error) { + p, err := a.IMultiSig.MsigAddPropose(ctx, msig, src, newAdd, inc) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { + p, err := a.IMultiSig.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) { + p, err := a.IMultiSig.MsigAddCancel(ctx, msig, src, txID, newAdd, inc) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + p, err := a.IMultiSig.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + p, err := a.IMultiSig.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, 
error) { + p, err := a.IMultiSig.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} + +func (a *WrapperV1IMultiSig) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) { + p, err := a.IMultiSig.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease) + if err != nil { + return cid.Undef, fmt.Errorf("creating prototype: %w", err) + } + + return a.executePrototype(ctx, p) +} diff --git a/app/submodule/network/libp2p.go b/app/submodule/network/libp2p.go new file mode 100644 index 0000000000..908f3cae3e --- /dev/null +++ b/app/submodule/network/libp2p.go @@ -0,0 +1,157 @@ +package network + +import ( + "context" + "crypto/rand" + + "github.com/go-errors/errors" + "github.com/jbenet/goprocess" + "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/event" + net "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + "github.com/multiformats/go-multiaddr" +) + +type noopLibP2PHost struct { + peerId peer.ID //nolint +} + +// nolint +func NewNoopLibP2PHost() noopLibP2PHost { + pk, _, _ := crypto.GenerateEd25519Key(rand.Reader) //nolint + pid, _ := peer.IDFromPrivateKey(pk) + return noopLibP2PHost{pid} +} + +func (h noopLibP2PHost) ID() peer.ID { + return h.peerId +} + +func (noopLibP2PHost) Peerstore() (peerstore.Peerstore, error) { + return pstoremem.NewPeerstore() +} + +func (noopLibP2PHost) Addrs() []multiaddr.Multiaddr { + return []multiaddr.Multiaddr{} +} + +func (noopLibP2PHost) EventBus() event.Bus { + return eventbus.NewBus() +} + +func 
(noopLibP2PHost) Network() net.Network { + return noopLibP2PNetwork{} +} + +func (noopLibP2PHost) Mux() protocol.Switch { + panic("implement me") +} + +func (noopLibP2PHost) Connect(ctx context.Context, pi peer.AddrInfo) error { + return errors.New("Connect called on noopLibP2PHost") +} + +func (noopLibP2PHost) SetStreamHandler(pid protocol.ID, handler net.StreamHandler) { +} + +func (noopLibP2PHost) SetStreamHandlerMatch(protocol.ID, func(string) bool, net.StreamHandler) { +} + +func (noopLibP2PHost) RemoveStreamHandler(pid protocol.ID) { + panic("implement me") +} + +func (noopLibP2PHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (net.Stream, error) { + return nil, errors.New("NewStream on noopLibP2PHost") +} + +func (noopLibP2PHost) Close() error { + return nil +} + +func (noopLibP2PHost) ConnManager() connmgr.ConnManager { + return &connmgr.NullConnMgr{} +} + +type noopLibP2PNetwork struct{} + +func (n noopLibP2PNetwork) ResourceManager() net.ResourceManager { + panic("implement me") +} + +func (noopLibP2PNetwork) Peerstore() peerstore.Peerstore { + panic("implement me") +} + +func (noopLibP2PNetwork) LocalPeer() peer.ID { + panic("implement me") +} + +func (noopLibP2PNetwork) DialPeer(context.Context, peer.ID) (net.Conn, error) { + panic("implement me") +} + +func (noopLibP2PNetwork) ClosePeer(peer.ID) error { + panic("implement me") +} + +func (noopLibP2PNetwork) Connectedness(peer.ID) net.Connectedness { + panic("implement me") +} + +func (noopLibP2PNetwork) Peers() []peer.ID { + return []peer.ID{} +} + +func (noopLibP2PNetwork) Conns() []net.Conn { + return []net.Conn{} +} + +func (noopLibP2PNetwork) ConnsToPeer(p peer.ID) []net.Conn { + return []net.Conn{} +} + +func (noopLibP2PNetwork) Notify(net.Notifiee) { +} + +func (noopLibP2PNetwork) StopNotify(net.Notifiee) { + panic("implement me") +} + +func (noopLibP2PNetwork) Close() error { + panic("implement me") +} + +func (noopLibP2PNetwork) SetStreamHandler(net.StreamHandler) { + 
panic("implement me") +} + +func (noopLibP2PNetwork) NewStream(context.Context, peer.ID) (net.Stream, error) { + panic("implement me") +} + +func (noopLibP2PNetwork) Listen(...multiaddr.Multiaddr) error { + panic("implement me") +} + +func (noopLibP2PNetwork) ListenAddresses() []multiaddr.Multiaddr { + panic("implement me") +} + +func (noopLibP2PNetwork) InterfaceListenAddresses() ([]multiaddr.Multiaddr, error) { + panic("implement me") +} + +func (noopLibP2PNetwork) Process() goprocess.Process { + panic("implement me") +} + +func (noopLibP2PNetwork) API() interface{} { + return nil +} diff --git a/app/submodule/network/network_api.go b/app/submodule/network/network_api.go new file mode 100644 index 0000000000..18899aa9ab --- /dev/null +++ b/app/submodule/network/network_api.go @@ -0,0 +1,148 @@ +package network + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/libp2p/go-libp2p/p2p/protocol/ping" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/metrics" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" +) + +var _ v1api.INetwork = &networkAPI{} + +type networkAPI struct { //nolint + network *NetworkSubmodule +} + +// NetBandwidthStats gets stats on the current bandwidth usage of the network +func (na *networkAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) { + return na.network.Network.GetBandwidthStats(), nil +} + +// NetBandwidthStatsByPeer returns statistics about the nodes bandwidth +// usage and current rate per peer +func (na *networkAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) { + return na.network.Network.GetBandwidthStatsByPeer() +} + +// NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth +// usage and current rate per 
protocol +func (na *networkAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { + return na.network.Network.GetBandwidthStatsByProtocol() +} + +// ID returns the current peer id of the node +func (na *networkAPI) ID(ctx context.Context) (peer.ID, error) { + return na.network.Network.GetPeerID(), nil +} + +// NetFindProvidersAsync issues a findProviders query to the filecoin network content router. +func (na *networkAPI) NetFindProvidersAsync(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo { + return na.network.Network.Router.FindProvidersAsync(ctx, key, count) +} + +// NetGetClosestPeers issues a getClosestPeers query to the filecoin network. +func (na *networkAPI) NetGetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) { + return na.network.Network.GetClosestPeers(ctx, key) +} + +// NetFindPeer searches the libp2p router for a given peer id +func (na *networkAPI) NetFindPeer(ctx context.Context, peerID peer.ID) (peer.AddrInfo, error) { + return na.network.Network.FindPeer(ctx, peerID) +} + +// NetPeerInfo searches the peer info for a given peer id +func (na *networkAPI) NetPeerInfo(ctx context.Context, peerID peer.ID) (*types.ExtendedPeerInfo, error) { + return na.network.Network.PeerInfo(ctx, peerID) +} + +// NetConnect connects to peer at the given address +func (na *networkAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error { + return na.network.Network.Connect(ctx, p) +} + +// NetPeers lists peers currently available on the network +func (na *networkAPI) NetPeers(ctx context.Context) ([]peer.AddrInfo, error) { + return na.network.Network.Peers(ctx) +} + +// NetAgentVersion returns agent version for a given peer id +func (na *networkAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { + return na.network.Network.AgentVersion(ctx, p) +} + +// NetPing returns result of a ping attempt +func (na *networkAPI) NetPing(ctx context.Context, p peer.ID) (time.Duration, 
error) { + result, ok := <-ping.Ping(ctx, na.network.Host, p) + if !ok { + return 0, fmt.Errorf("didn't get ping result: %w", ctx.Err()) + } + return result.RTT, result.Error +} + +// NetAddrsListen return local p2p address info +func (na *networkAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) { + return peer.AddrInfo{ + ID: na.network.Host.ID(), + Addrs: na.network.Host.Addrs(), + }, nil +} + +// NetDisconnect disconnect to peer at the given address +func (na *networkAPI) NetDisconnect(_ context.Context, p peer.ID) error { + return na.network.Network.Disconnect(p) +} + +// NetProtectAdd protect peer at the given peers id +func (na *networkAPI) NetProtectAdd(ctx context.Context, peers []peer.ID) error { + return na.network.Network.ProtectAdd(peers) +} + +// NetProtectRemove unprotect peer at the given peers id +func (na *networkAPI) NetProtectRemove(ctx context.Context, peers []peer.ID) error { + return na.network.Network.ProtectRemove(peers) +} + +// NetProtectList returns the peers that are protected +func (na *networkAPI) NetProtectList(ctx context.Context) ([]peer.ID, error) { + return na.network.Network.ProtectList() +} + +// NetConnectedness returns a state signaling connection capabilities +func (na *networkAPI) NetConnectedness(ctx context.Context, p peer.ID) (network.Connectedness, error) { + return na.network.Network.Connectedness(p) +} + +// NetAutoNatStatus return a struct with current NAT status and public dial address +func (na *networkAPI) NetAutoNatStatus(context.Context) (types.NatInfo, error) { + return na.network.Network.AutoNatStatus() +} + +// NetPubsubScores return scores for all connected and recent peers +func (na *networkAPI) NetPubsubScores(context.Context) ([]types.PubsubScore, error) { + scores := na.network.ScoreKeeper.Get() + out := make([]types.PubsubScore, len(scores)) + i := 0 + for k, v := range scores { + out[i] = types.PubsubScore{ID: k, Score: v} + i++ + } + + sort.Slice(out, func(i, j int) bool { + return 
strings.Compare(string(out[i].ID), string(out[j].ID)) > 0 + }) + + return out, nil +} diff --git a/app/submodule/network/network_submodule.go b/app/submodule/network/network_submodule.go new file mode 100644 index 0000000000..e657c9102b --- /dev/null +++ b/app/submodule/network/network_submodule.go @@ -0,0 +1,409 @@ +package network + +import ( + "bytes" + "context" + "fmt" + "os" + "time" + + "github.com/filecoin-project/venus/pkg/net/helloprotocol" + + "github.com/dchest/blake2b" + "github.com/ipfs/go-bitswap" + bsnet "github.com/ipfs/go-bitswap/network" + blocks "github.com/ipfs/go-block-format" + bserv "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/ipfs/go-graphsync" + graphsyncimpl "github.com/ipfs/go-graphsync/impl" + gsnet "github.com/ipfs/go-graphsync/network" + "github.com/ipfs/go-graphsync/storeutil" + exchange "github.com/ipfs/go-ipfs-exchange-interface" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log" + "github.com/libp2p/go-libp2p" + dht "github.com/libp2p/go-libp2p-kad-dht" + libp2pps "github.com/libp2p/go-libp2p-pubsub" + pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/libp2p/go-libp2p/core/host" + p2pmetrics "github.com/libp2p/go-libp2p/core/metrics" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + routedhost "github.com/libp2p/go-libp2p/p2p/host/routed" + yamux "github.com/libp2p/go-libp2p/p2p/muxer/yamux" + ma "github.com/multiformats/go-multiaddr" + "github.com/pkg/errors" + + datatransfer "github.com/filecoin-project/go-data-transfer" + dtimpl "github.com/filecoin-project/go-data-transfer/impl" + dtnet "github.com/filecoin-project/go-data-transfer/network" + dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + 
"github.com/filecoin-project/venus/pkg/net" + filexchange "github.com/filecoin-project/venus/pkg/net/exchange" + "github.com/filecoin-project/venus/pkg/net/peermgr" + "github.com/filecoin-project/venus/pkg/repo" + appstate "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/venus-shared/types" + + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" +) + +var networkLogger = logging.Logger("network_module") + +// NetworkSubmodule enhances the `Node` with networking capabilities. +type NetworkSubmodule struct { //nolint + NetworkName string + + Host host.Host + RawHost types.RawHost + + // Router is a router from IPFS + Router routing.Routing + + Pubsub *libp2pps.PubSub + + // TODO: split chain bitswap from storage bitswap (issue: ???) + Bitswap exchange.Interface + + Network *net.Network + + GraphExchange graphsync.GraphExchange + + HelloHandler *helloprotocol.HelloProtocolHandler + + PeerMgr peermgr.IPeerMgr + ExchangeClient filexchange.Client + // data transfer + DataTransfer datatransfer.Manager + DataTransferHost dtnet.DataTransferNetwork + + ScoreKeeper *net.ScoreKeeper + + cfg networkConfig +} + +// API create a new network implement +func (networkSubmodule *NetworkSubmodule) API() v1api.INetwork { + return &networkAPI{network: networkSubmodule} +} + +func (networkSubmodule *NetworkSubmodule) V0API() v0api.INetwork { + return &networkAPI{network: networkSubmodule} +} + +func (networkSubmodule *NetworkSubmodule) Stop(ctx context.Context) { + networkLogger.Infof("closing bitswap") + if err := networkSubmodule.Bitswap.Close(); err != nil { + networkLogger.Errorf("error closing bitswap: %s", err.Error()) + } + networkLogger.Infof("closing host") + if err := networkSubmodule.Host.Close(); err != nil { + networkLogger.Errorf("error closing host: %s", err.Error()) + } + if err := networkSubmodule.Router.(*dht.IpfsDHT).Close(); err != nil { + 
networkLogger.Errorf("error closing dht: %s", err.Error()) + } +} + +type networkConfig interface { + GenesisCid() cid.Cid + OfflineMode() bool + IsRelay() bool + Libp2pOpts() []libp2p.Option + Repo() repo.Repo +} + +// NewNetworkSubmodule creates a new network submodule. +func NewNetworkSubmodule(ctx context.Context, chainStore *chain.Store, + messageStore *chain.MessageStore, config networkConfig, +) (*NetworkSubmodule, error) { + bandwidthTracker := p2pmetrics.NewBandwidthCounter() + libP2pOpts := append(config.Libp2pOpts(), libp2p.BandwidthReporter(bandwidthTracker), makeSmuxTransportOption()) + var networkName string + var err error + if !config.Repo().Config().NetworkParams.DevNet { + networkName = "testnetnet" + } else { + config.Repo().ChainDatastore() + networkName, err = retrieveNetworkName(ctx, config.GenesisCid(), cbor.NewCborStore(config.Repo().Datastore())) + if err != nil { + return nil, err + } + } + + // peer manager + bootNodes, err := net.ParseAddresses(ctx, config.Repo().Config().Bootstrap.Addresses) + if err != nil { + return nil, err + } + + // set up host + rawHost, err := buildHost(ctx, config, libP2pOpts, config.Repo().Config()) + if err != nil { + return nil, err + } + + router, err := makeDHT(ctx, rawHost, config, networkName, bootNodes) + if err != nil { + return nil, err + } + + peerHost := routedHost(rawHost, router) + period, err := time.ParseDuration(config.Repo().Config().Bootstrap.Period) + if err != nil { + return nil, err + } + + peerMgr, err := peermgr.NewPeerMgr(peerHost, router.(*dht.IpfsDHT), period, bootNodes) + if err != nil { + return nil, err + } + + sk := net.NewScoreKeeper() + gsub, err := net.NewGossipSub(ctx, peerHost, sk, networkName, config.Repo().Config().NetworkParams.DrandSchedule, bootNodes) + if err != nil { + return nil, errors.Wrap(err, "failed to set up network") + } + + // set up bitswap + nwork := bsnet.NewFromIpfsHost(peerHost, router, bsnet.Prefix("/chain")) + bitswapOptions := 
[]bitswap.Option{bitswap.ProvideEnabled(false)} + bswap := bitswap.New(ctx, nwork, config.Repo().Datastore(), bitswapOptions...) + + // set up graphsync + graphsyncNetwork := gsnet.NewFromLibp2pHost(peerHost) + lsys := storeutil.LinkSystemForBlockstore(config.Repo().Datastore()) + gsync := graphsyncimpl.New(ctx, graphsyncNetwork, lsys, graphsyncimpl.RejectAllRequestsByDefault()) + + // dataTransger + // sc := storedcounter.New(repo.ChainDatastore(), datastore.NewKey("/datatransfer/api/counter")) + // go-data-transfer protocol retries: + // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour + dtRetryParams := dtnet.RetryParameters(time.Second, 5*time.Minute, 15, 5) + dtn := dtnet.NewFromLibp2pHost(peerHost, dtRetryParams) + + dtNet := dtnet.NewFromLibp2pHost(peerHost) + dtDs := namespace.Wrap(config.Repo().ChainDatastore(), datastore.NewKey("/datatransfer/api/transfers")) + transport := dtgstransport.NewTransport(peerHost.ID(), gsync) + + dt, err := dtimpl.NewDataTransfer(dtDs, dtn, transport) + if err != nil { + return nil, err + } + // build network + network := net.New(peerHost, rawHost, net.NewRouter(router), bandwidthTracker) + exchangeClient := filexchange.NewClient(peerHost, peerMgr) + helloHandler := helloprotocol.NewHelloProtocolHandler(peerHost, peerMgr, exchangeClient, chainStore, messageStore, config.GenesisCid(), time.Duration(config.Repo().Config().NetworkParams.BlockDelay)*time.Second) + // build the network submdule + return &NetworkSubmodule{ + NetworkName: networkName, + Host: peerHost, + RawHost: rawHost, + Router: router, + Pubsub: gsub, + Bitswap: bswap, + GraphExchange: gsync, + ExchangeClient: exchangeClient, + Network: network, + DataTransfer: dt, + DataTransferHost: dtNet, + PeerMgr: peerMgr, + HelloHandler: helloHandler, + cfg: config, + ScoreKeeper: sk, + }, nil +} + +func (networkSubmodule *NetworkSubmodule) Start(ctx context.Context) error { + // do NOT start `peerMgr` in `offline` mode + if !networkSubmodule.cfg.OfflineMode() { + go 
networkSubmodule.PeerMgr.Run(ctx) + } + return nil +} + +func (networkSubmodule *NetworkSubmodule) FetchMessagesByCids( + ctx context.Context, + service bserv.BlockService, + cids []cid.Cid, +) ([]*types.Message, error) { + out := make([]*types.Message, len(cids)) + err := networkSubmodule.fetchCids(ctx, service, cids, func(idx int, blk blocks.Block) error { + var msg types.Message + if err := msg.UnmarshalCBOR(bytes.NewReader(blk.RawData())); err != nil { + return err + } + out[idx] = &msg + return nil + }) + return out, err +} + +func (networkSubmodule *NetworkSubmodule) FetchSignedMessagesByCids( + ctx context.Context, + service bserv.BlockService, + cids []cid.Cid, +) ([]*types.SignedMessage, error) { + out := make([]*types.SignedMessage, len(cids)) + err := networkSubmodule.fetchCids(ctx, service, cids, func(idx int, blk blocks.Block) error { + var msg types.SignedMessage + if err := msg.UnmarshalCBOR(bytes.NewReader(blk.RawData())); err != nil { + return err + } + out[idx] = &msg + return nil + }) + return out, err +} + +func (networkSubmodule *NetworkSubmodule) fetchCids( + ctx context.Context, + srv bserv.BlockService, + cids []cid.Cid, + onfetchOneBlock func(int, blocks.Block) error, +) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + cidIndex := make(map[cid.Cid]int) + for i, c := range cids { + cidIndex[c] = i + } + + if len(cids) != len(cidIndex) { + return fmt.Errorf("duplicate CIDs in fetchCids input") + } + + msgBlocks := make([]blocks.Block, len(cids)) + for block := range srv.GetBlocks(ctx, cids) { + ix, ok := cidIndex[block.Cid()] + if !ok { + // Ignore duplicate/unexpected blocks. This shouldn't + // happen, but we can be safe. + networkLogger.Errorw("received duplicate/unexpected block when syncing", "cid", block.Cid()) + continue + } + + // Record that we've received the block. 
+ delete(cidIndex, block.Cid()) + msgBlocks[ix] = block + if onfetchOneBlock != nil { + if err := onfetchOneBlock(ix, block); err != nil { + return err + } + } + } + + // 'cidIndex' should be 0 here, that means we had fetched all blocks in 'cids'. + if len(cidIndex) > 0 { + err := ctx.Err() + if err == nil { + err = fmt.Errorf("failed to fetch %d messages for unknown reasons", len(cidIndex)) + } + return err + } + + return nil +} + +func retrieveNetworkName(ctx context.Context, genCid cid.Cid, cborStore cbor.IpldStore) (string, error) { + var genesis types.BlockHeader + err := cborStore.Get(ctx, genCid, &genesis) + if err != nil { + return "", errors.Wrapf(err, "failed to get block %s", genCid.String()) + } + + return appstate.NewView(cborStore, genesis.ParentStateRoot).InitNetworkName(ctx) +} + +// address determines if we are publically dialable. If so use public +// address, if not configure node to announce relay address. +func buildHost(ctx context.Context, config networkConfig, libP2pOpts []libp2p.Option, cfg *config.Config) (types.RawHost, error) { + if config.IsRelay() { + publicAddr, err := ma.NewMultiaddr(cfg.Swarm.PublicRelayAddress) + if err != nil { + return nil, err + } + publicAddrFactory := func(lc *libp2p.Config) error { + lc.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr { + if cfg.Swarm.PublicRelayAddress == "" { + return addrs + } + return append(addrs, publicAddr) + } + return nil + } + + relayHost, err := libp2p.New( + libp2p.EnableRelay(), + libp2p.EnableAutoRelay(), + publicAddrFactory, + libp2p.ChainOptions(libP2pOpts...), + libp2p.Ping(true), + libp2p.EnableNATService(), + ) + if err != nil { + return nil, err + } + return relayHost, nil + } + + opts := []libp2p.Option{ + libp2p.UserAgent("venus"), + libp2p.ChainOptions(libP2pOpts...), + libp2p.Ping(true), + libp2p.DisableRelay(), + } + + return libp2p.New(opts...) 
+} + +func makeDHT(ctx context.Context, h types.RawHost, config networkConfig, networkName string, bootNodes []peer.AddrInfo) (routing.Routing, error) { + mode := dht.ModeAuto + opts := []dht.Option{ + dht.Mode(mode), + dht.Datastore(config.Repo().ChainDatastore()), + dht.ProtocolPrefix(net.FilecoinDHT(networkName)), + dht.QueryFilter(dht.PublicQueryFilter), + dht.RoutingTableFilter(dht.PublicRoutingTableFilter), + dht.DisableProviders(), //do not add dht bootstrap.make the peer-mgr unable to work + dht.DisableValues(), + } + r, err := dht.New( + ctx, h, opts..., + ) + if err != nil { + return nil, errors.Wrap(err, "failed to setup routing") + } + + return r, nil +} + +func routedHost(rh types.RawHost, r routing.Routing) host.Host { + return routedhost.Wrap(rh, r) +} + +func makeSmuxTransportOption() libp2p.Option { + const yamuxID = "/yamux/1.0.0" + + ymxtpt := *yamux.DefaultTransport + ymxtpt.AcceptBacklog = 512 + + if os.Getenv("YAMUX_DEBUG") != "" { + ymxtpt.LogOutput = os.Stderr + } + + return libp2p.Muxer(yamuxID, &ymxtpt) +} + +func HashMsgId(m *pubsub_pb.Message) string { + hash := blake2b.Sum256(m.Data) + return string(hash[:]) +} diff --git a/app/submodule/paych/paych_api.go b/app/submodule/paych/paych_api.go new file mode 100644 index 0000000000..f91cda8e5a --- /dev/null +++ b/app/submodule/paych/paych_api.go @@ -0,0 +1,178 @@ +package paych + +import ( + "context" + "fmt" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/venus/pkg/paychmgr" +) + +type PaychAPI struct { //nolint + paychMgr *paychmgr.Manager +} + +func NewPaychAPI(p *paychmgr.Manager) *PaychAPI { + return &PaychAPI{p} +} + +func (a *PaychAPI) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt, opts types.PaychGetOpts) (*types.ChannelInfo, 
error) { + ch, mcid, err := a.paychMgr.GetPaych(ctx, from, to, amt, paychmgr.GetOpts{ + Reserve: true, + OffChain: opts.OffChain, + }) + if err != nil { + return nil, err + } + + return &types.ChannelInfo{ + Channel: ch, + WaitSentinel: mcid, + }, nil +} + +func (a *PaychAPI) PaychFund(ctx context.Context, from, to address.Address, amt types.BigInt) (*types.ChannelInfo, error) { + ch, mcid, err := a.paychMgr.GetPaych(ctx, from, to, amt, paychmgr.GetOpts{ + Reserve: false, + OffChain: false, + }) + if err != nil { + return nil, err + } + + return &types.ChannelInfo{ + Channel: ch, + WaitSentinel: mcid, + }, nil +} + +func (a *PaychAPI) PaychAvailableFunds(ctx context.Context, ch address.Address) (*types.ChannelAvailableFunds, error) { + return a.paychMgr.AvailableFunds(ctx, ch) +} + +func (a *PaychAPI) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*types.ChannelAvailableFunds, error) { + return a.paychMgr.AvailableFundsByFromTo(ctx, from, to) +} + +func (a *PaychAPI) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) { + return a.paychMgr.GetPaychWaitReady(ctx, sentinel) +} + +func (a *PaychAPI) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) { + return a.paychMgr.AllocateLane(ctx, ch) +} + +func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []types.VoucherSpec) (*types.PaymentInfo, error) { + amount := vouchers[len(vouchers)-1].Amount + + // TODO: Fix free fund tracking in PaychGet + // TODO: validate voucher spec before locking funds + ch, err := a.PaychGet(ctx, from, to, amount, types.PaychGetOpts{OffChain: false}) + if err != nil { + return nil, err + } + + lane, err := a.paychMgr.AllocateLane(ctx, ch.Channel) + if err != nil { + return nil, err + } + + svs := make([]*paych.SignedVoucher, len(vouchers)) + + for i, v := range vouchers { + sv, err := a.paychMgr.CreateVoucher(ctx, ch.Channel, paych.SignedVoucher{ + Amount: v.Amount, + 
Lane: lane, + + Extra: v.Extra, + TimeLockMin: v.TimeLockMin, + TimeLockMax: v.TimeLockMax, + MinSettleHeight: v.MinSettle, + }) + if err != nil { + return nil, err + } + if sv.Voucher == nil { + return nil, fmt.Errorf("could not create voucher - shortfall of %d", sv.Shortfall) + } + + svs[i] = sv.Voucher + } + + return &types.PaymentInfo{ + Channel: ch.Channel, + WaitSentinel: ch.WaitSentinel, + Vouchers: svs, + }, nil +} + +func (a *PaychAPI) PaychList(ctx context.Context) ([]address.Address, error) { + return a.paychMgr.ListChannels(ctx) +} + +func (a *PaychAPI) PaychStatus(ctx context.Context, pch address.Address) (*types.Status, error) { + ci, err := a.paychMgr.GetChannelInfo(ctx, pch) + if err != nil { + return nil, err + } + return &types.Status{ + ControlAddr: ci.Control, + Direction: types.PCHDir(ci.Direction), + }, nil +} + +func (a *PaychAPI) PaychSettle(ctx context.Context, addr address.Address) (cid.Cid, error) { + return a.paychMgr.Settle(ctx, addr) +} + +func (a *PaychAPI) PaychCollect(ctx context.Context, addr address.Address) (cid.Cid, error) { + return a.paychMgr.Collect(ctx, addr) +} + +func (a *PaychAPI) PaychVoucherCheckValid(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) error { + return a.paychMgr.CheckVoucherValid(ctx, ch, sv) +} + +func (a *PaychAPI) PaychVoucherCheckSpendable(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) { + return a.paychMgr.CheckVoucherSpendable(ctx, ch, sv, secret, proof) +} + +func (a *PaychAPI) PaychVoucherAdd(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, proof []byte, minDelta big.Int) (big.Int, error) { + return a.paychMgr.AddVoucherInbound(ctx, ch, sv, proof, minDelta) +} + +// PaychVoucherCreate creates a new signed voucher on the given payment channel +// with the given lane and amount. 
The value passed in is exactly the value +// that will be used to create the voucher, so if previous vouchers exist, the +// actual additional value of this voucher will only be the difference between +// the two. +// If there are insufficient funds in the channel to create the voucher, +// returns a nil voucher and the shortfall. +func (a *PaychAPI) PaychVoucherCreate(ctx context.Context, pch address.Address, amt big.Int, lane uint64) (*types.VoucherCreateResult, error) { + return a.paychMgr.CreateVoucher(ctx, pch, paych.SignedVoucher{Amount: amt, Lane: lane}) +} + +func (a *PaychAPI) PaychVoucherList(ctx context.Context, pch address.Address) ([]*paych.SignedVoucher, error) { + vi, err := a.paychMgr.ListVouchers(ctx, pch) + if err != nil { + return nil, err + } + + out := make([]*paych.SignedVoucher, len(vi)) + for k, v := range vi { + out[k] = v.Voucher + } + + return out, nil +} + +func (a *PaychAPI) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) { + return a.paychMgr.SubmitVoucher(ctx, ch, sv, secret, proof) +} diff --git a/app/submodule/paych/paych_submodule.go b/app/submodule/paych/paych_submodule.go new file mode 100644 index 0000000000..67b937588f --- /dev/null +++ b/app/submodule/paych/paych_submodule.go @@ -0,0 +1,40 @@ +package paych + +import ( + "context" + + "github.com/ipfs/go-datastore" + + v0api2 "github.com/filecoin-project/venus/app/submodule/paych/v0api" + "github.com/filecoin-project/venus/pkg/paychmgr" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" +) + +// PaychSubmodule support paych related functions, including paych construction, extraction, query and other functions +type PaychSubmodule struct { //nolint + pmgr *paychmgr.Manager +} + +// PaychSubmodule enhances the `Node` with paych capabilities. 
+func NewPaychSubmodule(ctx context.Context, ds datastore.Batching, params *paychmgr.ManagerParams) (*PaychSubmodule, error) { + mgr, err := paychmgr.NewManager(ctx, ds, params) + return &PaychSubmodule{mgr}, err +} + +func (ps *PaychSubmodule) Start(ctx context.Context) error { + return ps.pmgr.Start(ctx) +} + +func (ps *PaychSubmodule) Stop() { + ps.pmgr.Stop() +} + +// API create a new paych implement +func (ps *PaychSubmodule) API() v1api.IPaychan { + return NewPaychAPI(ps.pmgr) +} + +func (ps *PaychSubmodule) V0API() v0api.IPaychan { + return &v0api2.WrapperV1IPaych{IPaychan: ps.API()} +} diff --git a/app/submodule/paych/v0api/v1_wrapper.go b/app/submodule/paych/v0api/v1_wrapper.go new file mode 100644 index 0000000000..b84f87936d --- /dev/null +++ b/app/submodule/paych/v0api/v1_wrapper.go @@ -0,0 +1,21 @@ +package v0api + +import ( + "context" + + "github.com/filecoin-project/go-address" + + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type WrapperV1IPaych struct { + v1api.IPaychan +} + +func (w *WrapperV1IPaych) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*types.ChannelInfo, error) { + return w.PaychFund(ctx, from, to, amt) +} + +var _ v0api.IPaychan = &WrapperV1IPaych{} diff --git a/app/submodule/storagenetworking/storage_networking_api.go b/app/submodule/storagenetworking/storage_networking_api.go new file mode 100644 index 0000000000..f1a9f4f018 --- /dev/null +++ b/app/submodule/storagenetworking/storage_networking_api.go @@ -0,0 +1,9 @@ +package storagenetworking + +type IStorageNetworking interface{} + +var _ IStorageNetworking = &storageNetworkingAPI{} + +type storageNetworkingAPI struct { //nolint + storageNetworking *StorageNetworkingSubmodule +} diff --git a/app/submodule/storagenetworking/storage_networking_submodule.go 
b/app/submodule/storagenetworking/storage_networking_submodule.go new file mode 100644 index 0000000000..bcb9db2d15 --- /dev/null +++ b/app/submodule/storagenetworking/storage_networking_submodule.go @@ -0,0 +1,31 @@ +package storagenetworking + +import ( + "context" + + "github.com/filecoin-project/venus/app/submodule/network" + + exchange "github.com/ipfs/go-ipfs-exchange-interface" +) + +// StorageNetworkingSubmodule enhances the `Node` with data transfer capabilities. +type StorageNetworkingSubmodule struct { //nolint + // Exchange is the interface for fetching data from other nodes. + Exchange exchange.Interface +} + +// NewStorgeNetworkingSubmodule creates a new storage networking submodule. +func NewStorgeNetworkingSubmodule(ctx context.Context, network *network.NetworkSubmodule) (*StorageNetworkingSubmodule, error) { + return &StorageNetworkingSubmodule{ + Exchange: network.Bitswap, + }, nil +} + +// API create a new storage implement +func (storageNetworking *StorageNetworkingSubmodule) API() IStorageNetworking { + return &storageNetworkingAPI{storageNetworking: storageNetworking} +} + +func (storageNetworking *StorageNetworkingSubmodule) V0API() IStorageNetworking { + return &storageNetworkingAPI{storageNetworking: storageNetworking} +} diff --git a/app/submodule/syncer/chain_sync.go b/app/submodule/syncer/chain_sync.go new file mode 100644 index 0000000000..2f4923c4ac --- /dev/null +++ b/app/submodule/syncer/chain_sync.go @@ -0,0 +1,37 @@ +package syncer + +import ( + "time" + + "github.com/filecoin-project/venus/pkg/chainsync" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type chainSync interface { + BlockProposer() chainsync.BlockProposer +} + +// ChainSyncProvider provides access to chain sync operations and their status. +type ChainSyncProvider struct { + sync chainSync +} + +// NewChainSyncProvider returns a new ChainSyncProvider. 
+func NewChainSyncProvider(chainSyncer chainSync) *ChainSyncProvider { + return &ChainSyncProvider{ + sync: chainSyncer, + } +} + +// HandleNewTipSet extends the Syncer's chain store with the given tipset if they +// represent a valid extension. It limits the length of new chains it will +// attempt to validate and caches invalid blocks it has encountered to +// help prevent DOS. +func (chs *ChainSyncProvider) HandleNewTipSet(ci *types.ChainInfo) error { + return chs.sync.BlockProposer().SendOwnBlock(ci) +} + +const ( + incomeBlockLargeDelayDuration = time.Second * 5 + slowFetchMessageDuration = time.Second * 3 +) diff --git a/app/submodule/syncer/syncer_api.go b/app/submodule/syncer/syncer_api.go new file mode 100644 index 0000000000..2876013ca8 --- /dev/null +++ b/app/submodule/syncer/syncer_api.go @@ -0,0 +1,206 @@ +package syncer + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + "github.com/filecoin-project/go-state-types/big" + syncTypes "github.com/filecoin-project/venus/pkg/chainsync/types" + "github.com/filecoin-project/venus/pkg/fvm" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" + logging "github.com/ipfs/go-log/v2" +) + +var syncAPILog = logging.Logger("syncAPI") + +var _ v1api.ISyncer = &syncerAPI{} + +type syncerAPI struct { //nolint + syncer *SyncerSubmodule +} + +// SyncerTracker returns the TargetTracker of syncing. 
+func (sa *syncerAPI) SyncerTracker(ctx context.Context) *types.TargetTracker { + tracker := sa.syncer.ChainSyncManager.BlockProposer().SyncTracker() + tt := &types.TargetTracker{ + History: make([]*types.Target, 0), + Buckets: make([]*types.Target, 0), + } + convertTarget := func(src *syncTypes.Target) *types.Target { + return &types.Target{ + State: convertSyncStateStage(src.State), + Base: src.Base, + Current: src.Current, + Start: src.Start, + End: src.End, + Err: src.Err, + ChainInfo: src.ChainInfo, + } + } + for _, target := range tracker.History() { + tt.History = append(tt.History, convertTarget(target)) + } + for _, target := range tracker.Buckets() { + tt.Buckets = append(tt.Buckets, convertTarget(target)) + } + + return tt +} + +func convertSyncStateStage(srtState syncTypes.SyncStateStage) types.SyncStateStage { + var state types.SyncStateStage + switch srtState { + case syncTypes.StageIdle: + state = types.StageIdle + case syncTypes.StageSyncErrored: + state = types.StageSyncErrored + case syncTypes.StageSyncComplete: + state = types.StageSyncComplete + case syncTypes.StateInSyncing: + state = types.StageMessages + } + + return state +} + +// SetConcurrent set the syncer worker(go-routine) number of chain syncing +func (sa *syncerAPI) SetConcurrent(ctx context.Context, concurrent int64) error { + sa.syncer.ChainSyncManager.BlockProposer().SetConcurrent(concurrent) + return nil +} + +// Concurrent get the syncer worker(go-routine) number of chain syncing. +func (sa *syncerAPI) Concurrent(ctx context.Context) int64 { + return sa.syncer.ChainSyncManager.BlockProposer().Concurrent() +} + +// ChainTipSetWeight computes weight for the specified tipset. 
+func (sa *syncerAPI) ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (big.Int, error) { + ts, err := sa.syncer.ChainModule.ChainReader.GetTipSet(ctx, tsk) + if err != nil { + return big.Int{}, err + } + return sa.syncer.ChainSelector.Weight(ctx, ts) +} + +// ChainSyncHandleNewTipSet submits a chain head to the syncer for processing. +func (sa *syncerAPI) ChainSyncHandleNewTipSet(ctx context.Context, ci *types.ChainInfo) error { + return sa.syncer.SyncProvider.HandleNewTipSet(ci) +} + +// SyncSubmitBlock can be used to submit a newly created block to the. +// network through this node +func (sa *syncerAPI) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error { + // todo many dot. how to get directly + chainModule := sa.syncer.ChainModule + parent, err := chainModule.ChainReader.GetBlock(ctx, blk.Header.Parents[0]) + if err != nil { + return fmt.Errorf("loading parent block: %v", err) + } + + if err := sa.syncer.SlashFilter.MinedBlock(ctx, blk.Header, parent.Height); err != nil { + log.Errorf(" SLASH FILTER ERROR: %s", err) + return fmt.Errorf(" SLASH FILTER ERROR: %v", err) + } + + // TODO: should we have some sort of fast path to adding a local block? 
+ bmsgs, err := chainModule.MessageStore.LoadUnsignedMessagesFromCids(ctx, blk.BlsMessages) + if err != nil { + return fmt.Errorf("failed to load bls messages: %v", err) + } + smsgs, err := chainModule.MessageStore.LoadSignedMessagesFromCids(ctx, blk.SecpkMessages) + if err != nil { + return fmt.Errorf("failed to load secpk message: %v", err) + } + + fb := &types.FullBlock{ + Header: blk.Header, + BLSMessages: bmsgs, + SECPMessages: smsgs, + } + + if err := sa.syncer.BlockValidator.ValidateMsgMeta(ctx, fb); err != nil { + return fmt.Errorf("provided messages did not match block: %v", err) + } + + ts, err := types.NewTipSet([]*types.BlockHeader{blk.Header}) + if err != nil { + return fmt.Errorf("somehow failed to make a tipset out of a single block: %v", err) + } + + if _, err := chainModule.ChainReader.PutObject(ctx, blk.Header); err != nil { + return err + } + localPeer := sa.syncer.NetworkModule.Network.GetPeerID() + ci := types.NewChainInfo(localPeer, localPeer, ts) + if err := sa.syncer.SyncProvider.HandleNewTipSet(ci); err != nil { + return fmt.Errorf("sync to submitted block failed: %v", err) + } + + b, err := blk.Serialize() + if err != nil { + return fmt.Errorf("serializing block for pubsub publishing failed: %v", err) + } + go func() { + tCtx, tCancel := context.WithTimeout(context.TODO(), time.Minute) + defer tCancel() + err = sa.syncer.BlockTopic.Publish(tCtx, b) //nolint:staticcheck + if err != nil { + syncAPILog.Warnf("publish block failed: %s, %v", blk.Cid(), err) + } + }() + return nil +} + +// SyncState just compatible code lotus +func (sa *syncerAPI) SyncState(ctx context.Context) (*types.SyncState, error) { + tracker := sa.syncer.ChainSyncManager.BlockProposer().SyncTracker() + tracker.History() + + syncState := &types.SyncState{ + VMApplied: atomic.LoadUint64(&fvm.StatApplied), + } + + count := 0 + toActiveSync := func(t *syncTypes.Target) types.ActiveSync { + currentHeight := t.Base.Height() + if t.Current != nil { + currentHeight = 
t.Current.Height() + } + + msg := "" + if t.Err != nil { + msg = t.Err.Error() + } + count++ + + activeSync := types.ActiveSync{ + WorkerID: uint64(count), + Base: t.Base, + Target: t.Head, + Stage: convertSyncStateStage(t.State), + Height: currentHeight, + Start: t.Start, + End: t.End, + Message: msg, + } + return activeSync + } + // current + for _, t := range tracker.Buckets() { + if t.State != syncTypes.StageSyncErrored { + syncState.ActiveSyncs = append(syncState.ActiveSyncs, toActiveSync(t)) + } + } + // history + for _, t := range tracker.History() { + if t.State != syncTypes.StageSyncErrored { + syncState.ActiveSyncs = append(syncState.ActiveSyncs, toActiveSync(t)) + } + } + + return syncState, nil +} diff --git a/app/submodule/syncer/syncer_submodule.go b/app/submodule/syncer/syncer_submodule.go new file mode 100644 index 0000000000..f0736a07c9 --- /dev/null +++ b/app/submodule/syncer/syncer_submodule.go @@ -0,0 +1,294 @@ +package syncer + +import ( + "bytes" + "context" + "reflect" + "runtime" + "time" + + chain2 "github.com/filecoin-project/venus/app/submodule/chain" + "github.com/filecoin-project/venus/pkg/clock" + "github.com/filecoin-project/venus/pkg/statemanger" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + cbor "github.com/ipfs/go-ipld-cbor" + + fbig "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/pkg/errors" + "go.opencensus.io/trace" + + "github.com/filecoin-project/venus/app/submodule/blockstore" + "github.com/filecoin-project/venus/app/submodule/network" + "github.com/filecoin-project/venus/pkg/beacon" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/chainsync" + "github.com/filecoin-project/venus/pkg/chainsync/slashfilter" + "github.com/filecoin-project/venus/pkg/consensus" 
+ "github.com/filecoin-project/venus/pkg/net/blocksub" + "github.com/filecoin-project/venus/pkg/net/pubsub" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-blockservice" +) + +var log = logging.Logger("sync.module") // nolint: deadcode + +// SyncerSubmodule enhances the node with chain syncing capabilities +type SyncerSubmodule struct { //nolint + BlockstoreModule *blockstore.BlockstoreSubmodule + ChainModule *chain2.ChainSubmodule + NetworkModule *network.NetworkSubmodule + + // todo: use the 'Topic' and 'Subscription' defined in + // "github.com/libp2p/go-libp2p-pubsub" replace which defined in + // 'venus/pkg/net/pubsub/topic.go' + BlockTopic *pubsub.Topic + BlockSub pubsub.Subscription + ChainSelector nodeChainSelector + Stmgr *statemanger.Stmgr + ChainSyncManager *chainsync.Manager + Drand beacon.Schedule + SyncProvider ChainSyncProvider + SlashFilter slashfilter.ISlashFilter + BlockValidator *consensus.BlockValidator + + // cancelChainSync cancels the context for chain sync subscriptions and handlers. + CancelChainSync context.CancelFunc +} + +type syncerConfig interface { + GenesisCid() cid.Cid + BlockTime() time.Duration + ChainClock() clock.ChainEpochClock + Repo() repo.Repo + Verifier() ffiwrapper.Verifier +} + +type nodeChainSelector interface { + Weight(context.Context, *types.TipSet) (fbig.Int, error) + IsHeavier(ctx context.Context, a, b *types.TipSet) (bool, error) +} + +// NewSyncerSubmodule creates a new chain submodule. 
+func NewSyncerSubmodule(ctx context.Context, + config syncerConfig, + blockstore *blockstore.BlockstoreSubmodule, + network *network.NetworkSubmodule, + chn *chain2.ChainSubmodule, + circulatingSupplyCalculator chain.ICirculatingSupplyCalcualtor, +) (*SyncerSubmodule, error) { + // setup validation + gasPriceSchedule := gas.NewPricesSchedule(config.Repo().Config().NetworkParams.ForkUpgradeParam) + + tickets := consensus.NewTicketMachine(chn.ChainReader) + cborStore := cbor.NewCborStore(config.Repo().Datastore()) + stateViewer := consensus.AsDefaultStateViewer(state.NewViewer(cborStore)) + nodeChainSelector := consensus.NewChainSelector(cborStore, &stateViewer) + + blkValid := consensus.NewBlockValidator(tickets, + blockstore.Blockstore, + chn.MessageStore, + chn.Drand, + cborStore, + config.Verifier(), + &stateViewer, + chn.ChainReader, + nodeChainSelector, + chn.Fork, + config.Repo().Config().NetworkParams, + gasPriceSchedule) + + // register block validation on pubsub + btv := blocksub.NewBlockTopicValidator(blkValid) + if err := network.Pubsub.RegisterTopicValidator(btv.Topic(network.NetworkName), btv.Validator(), btv.Opts()...); err != nil { + return nil, errors.Wrap(err, "failed to register block validator") + } + + rnd := chn.API() + nodeConsensus := consensus.NewExpected(cborStore, + blockstore.Blockstore, + chn.ChainReader, + rnd, + chn.MessageStore, + chn.Fork, + gasPriceSchedule, + blkValid, + chn.SystemCall, + circulatingSupplyCalculator, + ) + + stmgr := statemanger.NewStateManger(chn.ChainReader, nodeConsensus, rnd, + chn.Fork, gasPriceSchedule, chn.SystemCall) + + blkValid.Stmgr = stmgr + chn.Stmgr = stmgr + chn.Waiter.Stmgr = stmgr + + chainSyncManager, err := chainsync.NewManager(stmgr, blkValid, chn, nodeChainSelector, + blockstore.Blockstore, network.ExchangeClient, config.ChainClock(), chn.Fork) + if err != nil { + return nil, err + } + + var slashFilter slashfilter.ISlashFilter + if config.Repo().Config().SlashFilterDs.Type == "local" { + 
slashFilter = slashfilter.NewLocalSlashFilter(config.Repo().ChainDatastore()) + } else { + slashFilter, err = slashfilter.NewMysqlSlashFilter(config.Repo().Config().SlashFilterDs.MySQL) + if err != nil { + return nil, err + } + } + + network.HelloHandler.Register(func(ci *types.ChainInfo) { + err := chainSyncManager.BlockProposer().SendHello(ci) + if err != nil { + log.Errorf("error receiving chain info from hello %s: %s", ci, err) + return + } + }) + + return &SyncerSubmodule{ + Stmgr: stmgr, + BlockstoreModule: blockstore, + ChainModule: chn, + NetworkModule: network, + SlashFilter: slashFilter, + ChainSelector: nodeChainSelector, + ChainSyncManager: &chainSyncManager, + Drand: chn.Drand, + SyncProvider: *NewChainSyncProvider(&chainSyncManager), + BlockValidator: blkValid, + }, nil +} + +func (syncer *SyncerSubmodule) handleIncomingBlocks(ctx context.Context, msg pubsub.Message) error { + sender := msg.GetSender() + source := msg.GetSource() + // ignore messages from self + if sender == syncer.NetworkModule.Host.ID() || source == syncer.NetworkModule.Host.ID() { + return nil + } + + ctx, span := trace.StartSpan(ctx, "Node.handleIncomingBlocks") + + var bm types.BlockMsg + err := bm.UnmarshalCBOR(bytes.NewReader(msg.GetData())) + if err != nil { + return errors.Wrapf(err, "failed to decode blocksub payload from source: %s, sender: %s", source, sender) + } + + header := bm.Header + span.AddAttributes(trace.StringAttribute("block", header.Cid().String())) + + log.Infof("received new block %s height %d from peer %s age %v", header.Cid(), header.Height, sender, time.Since(time.Unix(int64(header.Timestamp), 0))) + + _, err = syncer.ChainModule.ChainReader.PutObject(ctx, bm.Header) + if err != nil { + log.Errorf("failed to save block %s", err) + } + go func() { + start := time.Now() + + if delay := time.Since(time.Unix(int64(bm.Header.Timestamp), 0)); delay > incomeBlockLargeDelayDuration { + log.Warnf("received block(%d, %s) with large delay : %s", + bm.Header.Height, 
bm.Header.Cid(), delay.String()) + } + + blkSvc := blockservice.New(syncer.BlockstoreModule.Blockstore, syncer.NetworkModule.Bitswap) + + if _, err := syncer.NetworkModule.FetchMessagesByCids(ctx, blkSvc, bm.BlsMessages); err != nil { + log.Errorf("fetch block bls messages failed:%s", err.Error()) + return + } + if _, err := syncer.NetworkModule.FetchSignedMessagesByCids(ctx, blkSvc, bm.SecpkMessages); err != nil { + log.Errorf("fetch block signed messages failed:%s", err.Error()) + return + } + + if cost := time.Since(start); cost > slowFetchMessageDuration { + log.Warnw("fetch message slow", "block", bm.Header.Cid().String(), "height", bm.Header.Height, "took", cost) + } else { + log.Debugw("fetch message", "block", bm.Header.Cid().String(), "height", bm.Header.Height, "took", cost) + } + + syncer.NetworkModule.Host.ConnManager().TagPeer(sender, "new-block", 20) + + ts, _ := types.NewTipSet([]*types.BlockHeader{header}) + chainInfo := types.NewChainInfo(source, sender, ts) + + if err = syncer.ChainSyncManager.BlockProposer().SendGossipBlock(chainInfo); err != nil { + log.Errorf("failed to notify syncer of new block, block: %s", err) + } + }() + return nil +} + +// Start starts the syncer submodule for a node. 
+func (syncer *SyncerSubmodule) Start(ctx context.Context) error { + // setup topic + topic, err := syncer.NetworkModule.Pubsub.Join(types.BlockTopic(syncer.NetworkModule.NetworkName)) + if err != nil { + return err + } + syncer.BlockTopic = pubsub.NewTopic(topic) + + syncer.BlockSub, err = syncer.BlockTopic.Subscribe() + if err != nil { + return errors.Wrapf(err, "failed to subscribe block topic") + } + + // process incoming blocks + go func() { + for { + received, err := syncer.BlockSub.Next(ctx) + if err != nil { + if ctx.Err() != context.Canceled { + log.Errorf("error reading message from topic %s: %s", syncer.BlockSub.Topic(), err) + } + return + } + + if err := syncer.handleIncomingBlocks(ctx, received); err != nil { + handlerName := runtime.FuncForPC(reflect.ValueOf(syncer.handleIncomingBlocks).Pointer()).Name() + if err != context.Canceled { + log.Debugf("error in handler %s for topic %s: %s", handlerName, syncer.BlockSub.Topic(), err) + } + } + } + }() + + err = syncer.ChainModule.Start(ctx) + if err != nil { + return err + } + + return syncer.ChainSyncManager.Start(ctx) +} + +func (syncer *SyncerSubmodule) Stop(ctx context.Context) { + if syncer.CancelChainSync != nil { + syncer.CancelChainSync() + } + if syncer.BlockSub != nil { + syncer.BlockSub.Cancel() + } + if syncer.Stmgr != nil { + syncer.Stmgr.Close(ctx) + } +} + +// API create a new sync api implement +func (syncer *SyncerSubmodule) API() v1api.ISyncer { + return &syncerAPI{syncer: syncer} +} + +func (syncer *SyncerSubmodule) V0API() v0api.ISyncer { + return &syncerAPI{syncer: syncer} +} diff --git a/app/submodule/wallet/remotewallet/api.go b/app/submodule/wallet/remotewallet/api.go new file mode 100644 index 0000000000..3d722f2456 --- /dev/null +++ b/app/submodule/wallet/remotewallet/api.go @@ -0,0 +1,82 @@ +// Code from github.com/filecoin-project/venus-wallet/storage/wallet/wallet.go & api/api_wallet.go & api/remotecli/cli.go . DO NOT EDIT. 
+ +package remotewallet + +import ( + "context" + "net/http" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/pkg/wallet" +) + +type IWallet interface { + WalletNew(context.Context, types.KeyType) (address.Address, error) + WalletHas(ctx context.Context, address address.Address) (bool, error) + WalletList(ctx context.Context) ([]address.Address, error) + WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta types.MsgMeta) (*wallet.Signature, error) + WalletExport(ctx context.Context, addr address.Address) (*types.KeyInfo, error) + WalletImport(context.Context, *types.KeyInfo) (address.Address, error) + WalletDelete(context.Context, address.Address) error +} + +var _ IWallet = &WalletAPIAdapter{} + +// wallet API permissions constraints +type WalletAPIAdapter struct { + Internal struct { + WalletNew func(ctx context.Context, kt types.KeyType) (address.Address, error) `perm:"admin"` + WalletHas func(ctx context.Context, address address.Address) (bool, error) `perm:"write"` + WalletList func(ctx context.Context) ([]address.Address, error) `perm:"write"` + WalletSign func(ctx context.Context, signer address.Address, toSign []byte, meta types.MsgMeta) (*wallet.Signature, error) `perm:"sign"` + WalletExport func(ctx context.Context, addr address.Address) (*types.KeyInfo, error) `perm:"admin"` + WalletImport func(ctx context.Context, ki *types.KeyInfo) (address.Address, error) `perm:"admin"` + WalletDelete func(ctx context.Context, addr address.Address) error `perm:"admin"` + } +} + +func (c *WalletAPIAdapter) WalletNew(ctx context.Context, keyType types.KeyType) (address.Address, error) { + return c.Internal.WalletNew(ctx, keyType) +} + +func (c *WalletAPIAdapter) WalletHas(ctx context.Context, addr address.Address) (bool, error) { + return c.Internal.WalletHas(ctx, addr) +} + +func (c *WalletAPIAdapter) 
WalletList(ctx context.Context) ([]address.Address, error) { + return c.Internal.WalletList(ctx) +} + +func (c *WalletAPIAdapter) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta types.MsgMeta) (*wallet.Signature, error) { + return c.Internal.WalletSign(ctx, signer, toSign, meta) +} + +func (c *WalletAPIAdapter) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) { + return c.Internal.WalletExport(ctx, a) +} + +func (c *WalletAPIAdapter) WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) { + return c.Internal.WalletImport(ctx, ki) +} + +func (c *WalletAPIAdapter) WalletDelete(ctx context.Context, addr address.Address) error { + return c.Internal.WalletDelete(ctx, addr) +} + +// NewWalletRPC RPCClient returns an RPC client connected to a node +// @addr reference ./httpparse/ParseApiInfo() +// @requestHeader reference ./httpparse/ParseApiInfo() +func NewWalletRPC(ctx context.Context, addr string, requestHeader http.Header) (IWallet, jsonrpc.ClientCloser, error) { + var res WalletAPIAdapter + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", + []interface{}{ + &res.Internal, + }, + requestHeader, + ) + return &res, closer, err +} diff --git a/app/submodule/wallet/remotewallet/keymap.go b/app/submodule/wallet/remotewallet/keymap.go new file mode 100644 index 0000000000..3dd67deee1 --- /dev/null +++ b/app/submodule/wallet/remotewallet/keymap.go @@ -0,0 +1,36 @@ +package remotewallet + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var keyMapper = map[address.Protocol]types.KeyType{ + address.SECP256K1: types.KTSecp256k1, + address.BLS: types.KTBLS, +} + +func GetKeyType(p address.Protocol) types.KeyType { + k, ok := keyMapper[p] + if ok { + return k + } + return types.KTUnknown +} + +func ConvertRemoteKeyInfo(key *crypto.KeyInfo) *types.KeyInfo { + return 
&types.KeyInfo{ + PrivateKey: key.Key(), + Type: types.SignType2Key(key.SigType), + } +} + +func ConvertLocalKeyInfo(key *types.KeyInfo) *crypto.KeyInfo { + ki := &crypto.KeyInfo{ + SigType: types.KeyType2Sign(key.Type), + } + ki.SetPrivateKey(key.PrivateKey) + + return ki +} diff --git a/app/submodule/wallet/remotewallet/parse.go b/app/submodule/wallet/remotewallet/parse.go new file mode 100644 index 0000000000..3418c59f0d --- /dev/null +++ b/app/submodule/wallet/remotewallet/parse.go @@ -0,0 +1,51 @@ +package remotewallet + +import ( + "net/http" + "regexp" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +var ( + regJWTToken = regexp.MustCompile(`[a-zA-Z0-9\-_]{5,}\.[a-zA-Z0-9\-_]{5,}\.[a-zA-Z0-9\-_]{5,}`) + regUUID = regexp.MustCompile(`[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}`) + regIPv4 = regexp.MustCompile(`/ip4/(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/tcp/[0-9]{4,5}/http`) +) + +const ( + ServiceToken = "Authorization" + WalletStrategyToken = "StrategyToken" +) + +// APIInfo parse URL string to +type APIInfo struct { + Addr string + Token []byte + StrategyToken []byte +} + +func ParseAPIInfo(s string) (*APIInfo, error) { + token := []byte(regJWTToken.FindString(s)) + strategyToken := []byte(regUUID.FindString(s)) + addr := regIPv4.FindString(s) + return &APIInfo{ + Addr: addr, + Token: token, + StrategyToken: strategyToken, + }, nil +} + +func (a APIInfo) DialArgs() (string, error) { + return api.DialArgs(a.Addr, "v0") +} + +func (a APIInfo) AuthHeader() http.Header { + if len(a.Token) != 0 { + headers := http.Header{} + headers.Add(ServiceToken, "Bearer "+string(a.Token)) + headers.Add(WalletStrategyToken, string(a.StrategyToken)) + return headers + } + return nil +} diff --git a/app/submodule/wallet/remotewallet/remote.go b/app/submodule/wallet/remotewallet/remote.go new file mode 100644 
index 0000000000..2e9df35b6d --- /dev/null +++ b/app/submodule/wallet/remotewallet/remote.go @@ -0,0 +1,83 @@ +package remotewallet + +import ( + "context" + "fmt" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/wallet" +) + +var _ wallet.WalletIntersection = &remoteWallet{} + +type remoteWallet struct { + IWallet + Cancel func() +} + +func (w *remoteWallet) Addresses(ctx context.Context) []address.Address { + wallets, err := w.IWallet.WalletList(ctx) + if err != nil { + return make([]address.Address, 0) + } + return wallets +} + +func (w *remoteWallet) HasPassword(ctx context.Context) bool { + return true +} + +func SetupRemoteWallet(info string) (wallet.WalletIntersection, error) { + ai, err := ParseAPIInfo(info) + if err != nil { + return nil, err + } + url, err := ai.DialArgs() + if err != nil { + return nil, err + } + wapi, closer, err := NewWalletRPC(context.Background(), url, ai.AuthHeader()) + if err != nil { + return nil, fmt.Errorf("creating jsonrpc client: %w", err) + } + return &remoteWallet{ + IWallet: wapi, + Cancel: closer, + }, nil +} + +func (w *remoteWallet) HasAddress(ctx context.Context, addr address.Address) bool { + exist, err := w.IWallet.WalletHas(ctx, addr) + if err != nil { + return false + } + return exist +} + +func (w *remoteWallet) NewAddress(ctx context.Context, protocol address.Protocol) (address.Address, error) { + return w.IWallet.WalletNew(ctx, GetKeyType(protocol)) +} + +func (w *remoteWallet) DeleteAddress(ctx context.Context, addr address.Address) error { + return w.IWallet.WalletDelete(ctx, addr) +} + +func (w *remoteWallet) Import(ctx context.Context, key *crypto.KeyInfo) (address.Address, error) { + return w.IWallet.WalletImport(ctx, ConvertRemoteKeyInfo(key)) +} + +func (w *remoteWallet) Export(ctx context.Context, addr address.Address, password string) (*crypto.KeyInfo, error) 
{ + key, err := w.IWallet.WalletExport(ctx, addr) + if err != nil { + return nil, err + } + return ConvertLocalKeyInfo(key), nil +} + +func (w *remoteWallet) WalletSign(ctx context.Context, keyAddr address.Address, msg []byte, meta types.MsgMeta) (*crypto.Signature, error) { + return w.IWallet.WalletSign(ctx, keyAddr, msg, meta) +} diff --git a/app/submodule/wallet/wallet_api.go b/app/submodule/wallet/wallet_api.go new file mode 100644 index 0000000000..f56f0b500a --- /dev/null +++ b/app/submodule/wallet/wallet_api.go @@ -0,0 +1,165 @@ +package wallet + +import ( + "context" + "errors" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/app/submodule/wallet/remotewallet" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/wallet" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var _ v1api.IWallet = &WalletAPI{} + +var ErrNoDefaultFromAddress = errors.New("unable to determine a default walletModule address") + +type WalletAPI struct { // nolint + walletModule *WalletSubmodule + adapter wallet.WalletIntersection +} + +// WalletBalance returns the current balance of the given wallet address. +func (walletAPI *WalletAPI) WalletBalance(ctx context.Context, addr address.Address) (abi.TokenAmount, error) { + actor, err := walletAPI.walletModule.Chain.Stmgr.GetActorAtTsk(ctx, addr, types.EmptyTSK) + if err != nil { + if errors.Is(err, types.ErrActorNotFound) { + return abi.NewTokenAmount(0), nil + } + return abi.NewTokenAmount(0), err + } + + return actor.Balance, nil +} + +// WalletHas indicates whether the given address is in the wallet. 
+func (walletAPI *WalletAPI) WalletHas(ctx context.Context, addr address.Address) (bool, error) { + return walletAPI.adapter.HasAddress(ctx, addr), nil +} + +// SetWalletDefaultAddress set the specified address as the default in the config. +func (walletAPI *WalletAPI) WalletDefaultAddress(ctx context.Context) (address.Address, error) { + ret, err := walletAPI.walletModule.Config.Get("walletModule.defaultAddress") + addr := ret.(address.Address) + if err != nil || !addr.Empty() { + return addr, err + } + + // No default is set; pick the 0th and make it the default. + if len(walletAPI.WalletAddresses(ctx)) > 0 { + addr := walletAPI.WalletAddresses(ctx)[0] + err := walletAPI.walletModule.Config.Set("walletModule.defaultAddress", addr.String()) + if err != nil { + return address.Undef, err + } + + return addr, nil + } + + return address.Undef, nil +} + +// WalletAddresses gets addresses from the walletModule +func (walletAPI *WalletAPI) WalletAddresses(ctx context.Context) []address.Address { + return walletAPI.adapter.Addresses(ctx) +} + +// SetWalletDefaultAddress set the specified address as the default in the config. 
+func (walletAPI *WalletAPI) WalletSetDefault(ctx context.Context, addr address.Address) error { + localAddrs := walletAPI.WalletAddresses(ctx) + for _, localAddr := range localAddrs { + if localAddr == addr { + err := walletAPI.walletModule.Config.Set("walletModule.defaultAddress", addr.String()) + if err != nil { + return err + } + return nil + } + } + return errors.New("addr not in the walletModule list") +} + +// WalletNewAddress generates a new walletModule address +func (walletAPI *WalletAPI) WalletNewAddress(ctx context.Context, protocol address.Protocol) (address.Address, error) { + return walletAPI.adapter.NewAddress(ctx, protocol) +} + +// WalletImport adds a given set of KeyInfos to the walletModule +func (walletAPI *WalletAPI) WalletImport(ctx context.Context, key *types.KeyInfo) (address.Address, error) { + addr, err := walletAPI.adapter.Import(ctx, remotewallet.ConvertLocalKeyInfo(key)) + if err != nil { + return address.Undef, err + } + return addr, nil +} + +// WalletExport returns the KeyInfos for the given walletModule addresses +func (walletAPI *WalletAPI) WalletExport(ctx context.Context, addr address.Address, password string) (*types.KeyInfo, error) { + ki, err := walletAPI.adapter.Export(ctx, addr, password) + if err != nil { + return nil, err + } + return remotewallet.ConvertRemoteKeyInfo(ki), nil +} + +// WalletDelete delete the given walletModule address +func (walletAPI *WalletAPI) WalletDelete(ctx context.Context, addr address.Address) error { + return walletAPI.adapter.DeleteAddress(ctx, addr) +} + +// WalletSign signs the given bytes using the given address. 
+func (walletAPI *WalletAPI) WalletSign(ctx context.Context, k address.Address, msg []byte, meta types.MsgMeta) (*crypto.Signature, error) { + keyAddr, err := walletAPI.walletModule.Chain.Stmgr.ResolveToKeyAddress(ctx, k, nil) + if err != nil { + return nil, fmt.Errorf("ResolveTokeyAddress failed:%v", err) + } + return walletAPI.adapter.WalletSign(ctx, keyAddr, msg, meta) +} + +// WalletSignMessage signs the given message using the given address. +func (walletAPI *WalletAPI) WalletSignMessage(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) { + mb, err := msg.ToStorageBlock() + if err != nil { + return nil, fmt.Errorf("serializing message: %w", err) + } + + sign, err := walletAPI.WalletSign(ctx, k, mb.Cid().Bytes(), types.MsgMeta{Type: types.MTChainMsg}) + if err != nil { + return nil, fmt.Errorf("failed to sign message: %w", err) + } + + return &types.SignedMessage{ + Message: *msg, + Signature: *sign, + }, nil +} + +// LockWallet lock wallet +func (walletAPI *WalletAPI) LockWallet(ctx context.Context) error { + return walletAPI.walletModule.Wallet.LockWallet(ctx) +} + +// UnLockWallet unlock wallet +func (walletAPI *WalletAPI) UnLockWallet(ctx context.Context, password []byte) error { + return walletAPI.walletModule.Wallet.UnLockWallet(ctx, password) +} + +// SetPassword set wallet password +func (walletAPI *WalletAPI) SetPassword(ctx context.Context, password []byte) error { + return walletAPI.walletModule.Wallet.SetPassword(ctx, password) +} + +// HasPassword return whether the wallet has password +func (walletAPI *WalletAPI) HasPassword(ctx context.Context) bool { + return walletAPI.adapter.HasPassword(ctx) +} + +// WalletState return wallet state +func (walletAPI *WalletAPI) WalletState(ctx context.Context) int { + return walletAPI.walletModule.Wallet.WalletState(ctx) +} diff --git a/app/submodule/wallet/wallet_submodule.go b/app/submodule/wallet/wallet_submodule.go new file mode 100644 index 0000000000..eb80af227a --- 
/dev/null +++ b/app/submodule/wallet/wallet_submodule.go @@ -0,0 +1,102 @@ +package wallet + +import ( + "context" + + v0api "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + + logging "github.com/ipfs/go-log" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/app/submodule/chain" + "github.com/filecoin-project/venus/app/submodule/config" + "github.com/filecoin-project/venus/app/submodule/wallet/remotewallet" + pconfig "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var log = logging.Logger("wallet") + +// WalletSubmodule enhances the `Node` with a "wallet" and FIL transfer capabilities. +type WalletSubmodule struct { // nolint + Chain *chain.ChainSubmodule + Wallet *wallet.Wallet + adapter wallet.WalletIntersection + Signer types.Signer + Config *config.ConfigModule +} + +type walletRepo interface { + Config() *pconfig.Config + WalletDatastore() repo.Datastore +} + +// NewWalletSubmodule creates a new storage protocol submodule. 
+func NewWalletSubmodule(ctx context.Context, + repo walletRepo, + cfgModule *config.ConfigModule, + chain *chain.ChainSubmodule, + password []byte, +) (*WalletSubmodule, error) { + passphraseCfg, err := getPassphraseConfig(repo.Config()) + if err != nil { + return nil, errors.Wrap(err, "failed to get passphrase config") + } + backend, err := wallet.NewDSBackend(ctx, repo.WalletDatastore(), passphraseCfg, password) + if err != nil { + return nil, errors.Wrap(err, "failed to set up walletModule backend") + } + fcWallet := wallet.New(backend) + headSigner := state.NewHeadSignView(chain.ChainReader) + + var adapter wallet.WalletIntersection + if repo.Config().Wallet.RemoteEnable { + if repo.Config().Wallet.RemoteBackend == wallet.StringEmpty { + return nil, errors.New("remote backend is empty") + } + adapter, err = remotewallet.SetupRemoteWallet(repo.Config().Wallet.RemoteBackend) + if err != nil { + return nil, errors.Wrap(err, "failed to set up remote wallet") + } + log.Info("remote wallet set up") + } else { + adapter = fcWallet + } + return &WalletSubmodule{ + Config: cfgModule, + Chain: chain, + Wallet: fcWallet, + adapter: adapter, + Signer: state.NewSigner(headSigner, fcWallet), + }, nil +} + +// API create a new wallet api implement +func (wallet *WalletSubmodule) API() v1api.IWallet { + return &WalletAPI{ + walletModule: wallet, + adapter: wallet.adapter, + } +} + +func (wallet *WalletSubmodule) V0API() v0api.IWallet { + return &WalletAPI{ + walletModule: wallet, + adapter: wallet.adapter, + } +} + +func (wallet *WalletSubmodule) WalletIntersection() wallet.WalletIntersection { + return wallet.adapter +} + +func getPassphraseConfig(cfg *pconfig.Config) (pconfig.PassphraseConfig, error) { + return pconfig.PassphraseConfig{ + ScryptN: cfg.Wallet.PassphraseConfig.ScryptN, + ScryptP: cfg.Wallet.PassphraseConfig.ScryptP, + }, nil +} diff --git a/appveyor.yml b/appveyor.yml index 8047992a75..273ec58ae7 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -2,7 +2,7 @@ 
version: "{build}" # Source Config -clone_folder: c:\gopath\src\github.com\filecoin-project\go-filecoin +clone_folder: c:\gopath\src\github.com\filecoin-project\venus # Build host diff --git a/bin/container_daemon b/bin/container_daemon index 5de44443c0..063a7365f7 100755 --- a/bin/container_daemon +++ b/bin/container_daemon @@ -14,7 +14,7 @@ fi if [ -e "$repo/config.json" ]; then echo "Found Filecoin fs-repo at $repo" else - go-filecoin init + venus daemon # TODO configure custom API and address here fi @@ -25,4 +25,4 @@ else echo "ERROR: arguments have been set but the first argument isn't 'daemon'" >&2 fi -exec go-filecoin daemon "$@" +exec venus daemon "$@" diff --git a/bin/node_restart b/bin/node_restart index 9b828bb5f0..c6833ad1c9 100755 --- a/bin/node_restart +++ b/bin/node_restart @@ -1,7 +1,7 @@ #!/bin/sh set -e filecoin_repo="/var/local/filecoin/repo" -filecoin_exec="go-filecoin --repodir=${filecoin_repo}" +filecoin_exec="venus --repo=${filecoin_repo}" # Number of time to check before giving up limit=3600 diff --git a/build/README.md b/build/README.md deleted file mode 100644 index be4f068e7b..0000000000 --- a/build/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Build Scripts - -This package contains cross platform scripts to make development easy on all operating systems. diff --git a/build/flags/flags.go b/build/flags/flags.go deleted file mode 100644 index 7a037ffb34..0000000000 --- a/build/flags/flags.go +++ /dev/null @@ -1,7 +0,0 @@ -package flags - -// GitCommit is the current git commit, this is injected through ldflags. -var GitCommit string - -// GitRoot is the git root, this is injected through ldflags. 
-var GitRoot string diff --git a/build/internal/helpers/gitroot.go b/build/internal/helpers/gitroot.go deleted file mode 100644 index 494d1919c4..0000000000 --- a/build/internal/helpers/gitroot.go +++ /dev/null @@ -1,17 +0,0 @@ -package helpers - -import ( - "os/exec" - "strings" -) - -// GetGitRoot return the project root joined with any path fragments -func GetGitRoot() string { - cmd := exec.Command("git", "rev-parse", "--show-toplevel") - out, err := cmd.CombinedOutput() - if err != nil { - panic("could not find git root") - } - - return strings.Trim(string(out), "\n") -} diff --git a/build/internal/tools/README.md b/build/internal/tools/README.md deleted file mode 100644 index 9a6853306c..0000000000 --- a/build/internal/tools/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This package depends on build tools that should be fetched with `go mod`. It -should not be built or linted. diff --git a/build/internal/tools/tools.go b/build/internal/tools/tools.go deleted file mode 100644 index a5885b557d..0000000000 --- a/build/internal/tools/tools.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build tools - -// Build tools for go mod -package tools - -import ( - _ "github.com/golangci/golangci-lint/cmd/golangci-lint" - _ "github.com/jstemmer/go-junit-report" -) diff --git a/build/internal/version/version.go b/build/internal/version/version.go deleted file mode 100644 index 96bbdc97c0..0000000000 --- a/build/internal/version/version.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build !go1.14 - -package version - -import ( - "strconv" - "strings" -) - -// Check ensures that we are using the correct version of go -func Check(version string) bool { - pieces := strings.Split(version, ".") - - if pieces[0] != "go1" { - return false - } - - minorVersion, _ := strconv.Atoi(pieces[1]) - - if minorVersion > 13 { - return true - } - - if minorVersion < 13 { - return false - } - - if len(pieces) < 3 { - return false - } - - patchVersion, _ := strconv.Atoi(pieces[2]) - return patchVersion >= 1 -} diff 
--git a/build/internal/version/version_114.go b/build/internal/version/version_114.go deleted file mode 100644 index 4902987013..0000000000 --- a/build/internal/version/version_114.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build go1.14 - -package version - -// Check ensures that we are using the correct version of go -func Check(version string) bool { - return true -} diff --git a/build/internal/version/version_test.go b/build/internal/version/version_test.go deleted file mode 100644 index a3221b0343..0000000000 --- a/build/internal/version/version_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !go1.14 - -package version_test - -import ( - "github.com/filecoin-project/go-filecoin/build/internal/version" - "testing" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/stretchr/testify/assert" -) - -func TestCheck(t *testing.T) { - tf.UnitTest(t) - - // Filecoin currently requires go >= 1.13.1 - assert.True(t, version.Check("go1.13.1")) - assert.True(t, version.Check("go1.13.2")) - - assert.False(t, version.Check("go1.12.1")) - assert.False(t, version.Check("go1.12.2")) - - assert.False(t, version.Check("go1.11")) - assert.False(t, version.Check("go1.11.1")) - assert.False(t, version.Check("go1.11.2")) - assert.False(t, version.Check("go1.10")) - assert.False(t, version.Check("go2")) -} diff --git a/build/main.go b/build/main.go deleted file mode 100644 index 18196f9d56..0000000000 --- a/build/main.go +++ /dev/null @@ -1,368 +0,0 @@ -package main - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - pf "github.com/filecoin-project/go-paramfetch" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/build/internal/helpers" - "github.com/filecoin-project/go-filecoin/build/internal/version" -) - -var lineBreak = "\n" - -func init() { - log.SetFlags(0) - if runtime.GOOS == "windows" { - lineBreak = "\r\n" - } - // We build with go 
modules. - if err := os.Setenv("GO111MODULE", "on"); err != nil { - fmt.Println("Failed to set GO111MODULE env") - os.Exit(1) - } -} - -// command is a structure representing a shell command to be run in the -// specified directory -type command struct { - dir string - parts []string -} - -// cmd creates a new command using the pwd and its cwd -func cmd(parts ...string) command { - return cmdWithDir("./", parts...) -} - -// cmdWithDir creates a new command using the specified directory as its cwd -func cmdWithDir(dir string, parts ...string) command { - return command{ - dir: dir, - parts: parts, - } -} - -func runCmd(c command) { - parts := c.parts - if len(parts) == 1 { - parts = strings.Split(parts[0], " ") - } - - name := strings.Join(parts, " ") - cmd := exec.Command(parts[0], parts[1:]...) // #nosec - cmd.Dir = c.dir - log.Println(name) - - stderr, err := cmd.StderrPipe() - if err != nil { - panic(err) - } - stdout, err := cmd.StdoutPipe() - if err != nil { - panic(err) - } - - var wg sync.WaitGroup - wg.Add(2) - go func() { - defer wg.Done() - if _, err = io.Copy(os.Stderr, stderr); err != nil { - panic(err) - } - }() - go func() { - defer wg.Done() - if _, err = io.Copy(os.Stdout, stdout); err != nil { - panic(err) - } - }() - - if err := cmd.Start(); err != nil { - panic(err) - } - - wg.Wait() - if err := cmd.Wait(); err != nil { - log.Fatalf("Command '%s' failed: %s\n", name, err) - } -} - -func runCapture(name string) string { - args := strings.Split(name, " ") - cmd := exec.Command(args[0], args[1:]...) 
// #nosec - log.Println(name) - - output, err := cmd.CombinedOutput() - if err != nil { - log.Fatalf("Command '%s' failed: %s\n", name, err) - } - - return strings.Trim(string(output), lineBreak) -} - -// deps installs all dependencies -func deps() { - runCmd(cmd("pkg-config --version")) - - log.Println("Installing dependencies...") - - runCmd(cmd("go mod download")) - - dat, err := ioutil.ReadFile("./parameters.json") - if err != nil { - panic(errors.Wrap(err, "failed to read contents of ./parameters.json")) - } - - log.Println("Getting parameters...") - err = pf.GetParams(dat, 2048) - if err != nil { - panic(errors.Wrap(err, "failed to acquire Groth parameters for development sectors")) - } - - runCmd(cmd("./scripts/install-filecoin-ffi.sh")) -} - -// lint runs linting using golangci-lint -func lint(packages ...string) { - if len(packages) == 0 { - packages = []string{"./..."} - } - - log.Printf("Linting %s ...\n", strings.Join(packages, " ")) - - runCmd(cmd("go", "run", "github.com/golangci/golangci-lint/cmd/golangci-lint", - "--exclude", "(comment on exported (method|function|type|const|var)|should have( a package)? comment|comment should be of the form)", - "run")) -} - -func build() { - buildFilecoin() - buildGengen() - buildFaucet() - buildGenesisFileServer() - generateGenesis() - buildMigrations() - buildPrereleaseTool() -} - -func forcebuild() { - forceBuildFC() - buildGengen() - buildFaucet() - buildGenesisFileServer() - generateGenesis() - buildMigrations() - buildPrereleaseTool() -} - -func forceBuildFC() { - log.Println("Force building go-filecoin...") - - runCmd(cmd([]string{ - "bash", "-c", fmt.Sprintf("go build %s -a -v -o go-filecoin .", flags()), - }...)) -} - -// cleanDirectory removes the child of a directly wihtout removing the directory itself, unlike `RemoveAll`. -// There is also an additional parameter to ignore dot files which is important for directories which are normally -// empty. 
Git has no concept of directories, so for a directory to automatically be created on checkout, a file must -// exist in side of it. We use this pattern in a few places, so the need to keep the dot files around is impotant. -func cleanDirectory(dir string, ignoredots bool) error { - if abs := filepath.IsAbs(dir); !abs { - return fmt.Errorf("Directory %s is not an absolute path, could not clean directory", dir) - } - - files, err := ioutil.ReadDir(dir) - if err != nil { - return err - } - - for _, file := range files { - fname := file.Name() - if ignoredots && []rune(fname)[0] == '.' { - continue - } - - fpath := filepath.Join(dir, fname) - - fmt.Println("Removing", fpath) - if err := os.RemoveAll(fpath); err != nil { - return err - } - } - - return nil -} - -func generateGenesis() { - log.Println("Generating genesis...") - - liveFixtures, err := filepath.Abs("./fixtures/live") - if err != nil { - panic(err) - } - - if err := cleanDirectory(liveFixtures, true); err != nil { - panic(err) - } - - runCmd(cmd([]string{ - "./tools/gengen/gengen", - "--keypath", liveFixtures, - "--out-car", filepath.Join(liveFixtures, "genesis.car"), - "--out-json", filepath.Join(liveFixtures, "gen.json"), - "--config", "./fixtures/setup.json", - }...)) - - testFixtures, err := filepath.Abs("./fixtures/test") - if err != nil { - panic(err) - } - - if err := cleanDirectory(testFixtures, true); err != nil { - panic(err) - } - - runCmd(cmd([]string{ - "./tools/gengen/gengen", - "--keypath", testFixtures, - "--out-car", filepath.Join(testFixtures, "genesis.car"), - "--out-json", filepath.Join(testFixtures, "gen.json"), - "--config", "./fixtures/setup.json", - }...)) -} - -func flags() string { - return fmt.Sprintf("-ldflags=github.com/filecoin-project/go-filecoin=\"%s\"", strings.Join([]string{ - fmt.Sprintf("-X github.com/filecoin-project/go-filecoin/build/flags.GitRoot=%s", helpers.GetGitRoot()), - fmt.Sprintf("-X github.com/filecoin-project/go-filecoin/build/flags.GitCommit=%s", 
getCommitSha()), - }, " ")) -} - -func buildFilecoin() { - log.Println("Building go-filecoin...") - - runCmd(cmd([]string{ - "bash", "-c", fmt.Sprintf("go build %s -v -o go-filecoin .", flags()), - }...)) -} - -func buildGengen() { - log.Println("Building gengen utils...") - - runCmd(cmd([]string{"go", "build", "-o", "./tools/gengen/gengen", "./tools/gengen"}...)) -} - -func buildFaucet() { - log.Println("Building faucet...") - - runCmd(cmd([]string{"go", "build", "-o", "./tools/faucet/faucet", "./tools/faucet/"}...)) -} - -func buildGenesisFileServer() { - log.Println("Building genesis file server...") - - runCmd(cmd([]string{"go", "build", "-o", "./tools/genesis-file-server/genesis-file-server", "./tools/genesis-file-server/"}...)) -} - -func buildMigrations() { - log.Println("Building migrations...") - runCmd(cmd([]string{ - "go", "build", "-o", "./tools/migration/go-filecoin-migrate", "./tools/migration/main.go"}...)) -} - -func buildPrereleaseTool() { - log.Println("Building prerelease-tool...") - - runCmd(cmd([]string{"go", "build", "-o", "./tools/prerelease-tool/prerelease-tool", "./tools/prerelease-tool/"}...)) -} - -func install() { - log.Println("Installing...") - - runCmd(cmd( - "bash", "-c", fmt.Sprintf("go install %s", flags()), - )) -} - -// test executes tests and passes along all additional arguments to `go test`. -func test(userArgs ...string) { - log.Println("Running tests...") - - // Consult environment for test packages, in order to support CI container-level parallelism. - packages, ok := os.LookupEnv("TEST_PACKAGES") - if !ok { - packages = "./..." 
- } - - begin := time.Now() - runCmd(cmd( - "bash", "-c", fmt.Sprintf("go test %s %s", - strings.Replace(packages, "\n", " ", -1), - strings.Join(userArgs, " ")))) - end := time.Now() - log.Printf("Tests finished in %.1f seconds\n", end.Sub(begin).Seconds()) -} - -func main() { - args := os.Args[1:] - - if len(args) == 0 { - log.Fatalf("Missing command") - } - - if !version.Check(runtime.Version()) { - log.Fatalf("Invalid go version: %s", runtime.Version()) - } - - cmd := args[0] - - switch cmd { - case "deps", "smartdeps": - deps() - case "lint": - lint(args[1:]...) - case "build-filecoin": - buildFilecoin() - case "build-gengen": - buildGengen() - case "generate-genesis": - generateGenesis() - case "build-migrations": - buildMigrations() - case "build": - build() - case "fbuild": - forcebuild() - case "test": - test(args[1:]...) - case "install": - install() - case "best": - build() - test(args[1:]...) - case "all": - deps() - lint() - build() - test(args[1:]...) - default: - log.Fatalf("Unknown command: %s\n", cmd) - } -} - -func getCommitSha() string { - return runCapture("git log -n 1 --format=%H") -} diff --git a/build/project/project.go b/build/project/project.go deleted file mode 100644 index 17667a24af..0000000000 --- a/build/project/project.go +++ /dev/null @@ -1,19 +0,0 @@ -package project - -import ( - "path/filepath" - - "github.com/filecoin-project/go-filecoin/build/flags" - "github.com/filecoin-project/go-filecoin/build/internal/helpers" -) - -// Root return the project root joined with any path fragments -func Root(paths ...string) string { - if flags.GitRoot == "" { - // load the root if flag not present - // Note: in some environments (i.e. IDE's) it wont be present - flags.GitRoot = helpers.GetGitRoot() - } - allPaths := append([]string{flags.GitRoot}, paths...) - return filepath.Join(allPaths...) 
-} diff --git a/cmd/address.go b/cmd/address.go new file mode 100644 index 0000000000..b0fabea0af --- /dev/null +++ b/cmd/address.go @@ -0,0 +1,432 @@ +package cmd + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "github.com/howeyc/gopass" + + cmds "github.com/ipfs/go-ipfs-cmds" + files "github.com/ipfs/go-ipfs-files" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/cmd/tablewriter" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var ( + errMissPassword = errors.New("the wallet is missing password, please use command `venus wallet set-password` to set password") + errWalletLocked = errors.New("the wallet is locked, please use command `venus wallet unlock` to unlock") +) + +var walletCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Manage your filecoin wallets", + }, + Subcommands: map[string]*cmds.Command{ + "balance": balanceCmd, + "import": walletImportCmd, + "export": walletExportCmd, + "ls": addrsLsCmd, + "new": addrsNewCmd, + "default": defaultAddressCmd, + "delete": addrsDeleteCmd, + "set-default": setDefaultAddressCmd, + "lock": lockedCmd, + "unlock": unlockedCmd, + "set-password": setWalletPassword, + }, +} + +type AddressResult struct { + Address address.Address +} + +// AddressLsResult is the result of running the address list command. 
+type AddressLsResult struct { + Addresses []address.Address +} + +var addrsNewCmd = &cmds.Command{ + Options: []cmds.Option{ + cmds.StringOption("type", "The type of address to create: bls (default) or secp256k1").WithDefault("bls"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + protocolName := req.Options["type"].(string) + var protocol address.Protocol + switch protocolName { + case "secp256k1": + protocol = address.SECP256K1 + case "bls": + protocol = address.BLS + default: + return fmt.Errorf("unrecognized address protocol %s", protocolName) + } + + if !env.(*node.Env).WalletAPI.HasPassword(req.Context) { + return errMissPassword + } + if env.(*node.Env).WalletAPI.WalletState(req.Context) == wallet.Lock { + return errWalletLocked + } + + addr, err := env.(*node.Env).WalletAPI.WalletNewAddress(req.Context, protocol) + if err != nil { + return err + } + + return printOneString(re, addr.String()) + }, +} + +var addrsDeleteCmd = &cmds.Command{ + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "wallet address"), + }, + Options: []cmds.Option{ + cmds.BoolOption("really-do-it", "Actually send transaction performing the action").WithDefault(false), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if really := req.Options["really-do-it"].(bool); !really { + return fmt.Errorf("pass --really-do-it to actually execute this action") + } + + addr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + if !env.(*node.Env).WalletAPI.HasPassword(req.Context) { + return errMissPassword + } + if env.(*node.Env).WalletAPI.WalletState(req.Context) == wallet.Lock { + return errWalletLocked + } + + err = env.(*node.Env).WalletAPI.WalletDelete(req.Context, addr) + if err != nil { + return err + } + + return printOneString(re, "Delete successfully!") + }, +} + +var addrsLsCmd = &cmds.Command{ + Options: []cmds.Option{ + 
cmds.BoolOption("addr-only", "Only print addresses"), + cmds.BoolOption("id", "Output ID addresses"), + cmds.BoolOption("market", "Output market balances"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + api := env.(*node.Env) + ctx := req.Context + + addrs := api.WalletAPI.WalletAddresses(req.Context) + + // Assume an error means no default key is set + def, _ := api.WalletAPI.WalletDefaultAddress(req.Context) + + buf := new(bytes.Buffer) + tw := tablewriter.New( + tablewriter.Col("Address"), + tablewriter.Col("ID"), + tablewriter.Col("Balance"), + tablewriter.Col("Market(Avail)"), + tablewriter.Col("Market(Locked)"), + tablewriter.Col("Nonce"), + tablewriter.Col("Default"), + tablewriter.NewLineCol("Error")) + + addrOnly := false + if _, ok := req.Options["addr-only"]; ok { + addrOnly = true + } + for _, addr := range addrs { + if addrOnly { + writer := NewSilentWriter(buf) + writer.WriteStringln(addr.String()) + } else { + a, err := api.ChainAPI.StateGetActor(ctx, addr, types.EmptyTSK) + if err != nil { + if !strings.Contains(err.Error(), "actor not found") { + tw.Write(map[string]interface{}{ + "Address": addr, + "Error": err, + }) + continue + } + + a = &types.Actor{ + Balance: big.Zero(), + } + } + + row := map[string]interface{}{ + "Address": addr, + "Balance": types.FIL(a.Balance), + "Nonce": a.Nonce, + } + if addr == def { + row["Default"] = "X" + } + + if _, ok := req.Options["id"]; ok { + id, err := api.ChainAPI.StateLookupID(ctx, addr, types.EmptyTSK) + if err != nil { + row["ID"] = "n/a" + } else { + row["ID"] = id + } + } + + if _, ok := req.Options["market"]; ok { + mbal, err := api.ChainAPI.StateMarketBalance(ctx, addr, types.EmptyTSK) + if err == nil { + row["Market(Avail)"] = types.FIL(types.BigSub(mbal.Escrow, mbal.Locked)) + row["Market(Locked)"] = types.FIL(mbal.Locked) + } + } + + tw.Write(row) + } + } + + if !addrOnly { + _ = tw.Flush(buf) + } + + return re.Emit(buf) + }, +} + +var 
defaultAddressCmd = &cmds.Command{ + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + addr, err := env.(*node.Env).WalletAPI.WalletDefaultAddress(req.Context) + if err != nil { + return err + } + + return printOneString(re, addr.String()) + }, +} + +var setDefaultAddressCmd = &cmds.Command{ + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "address to set default for"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if env.(*node.Env).WalletAPI.WalletState(req.Context) == wallet.Lock { + return errWalletLocked + } + addr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + err = env.(*node.Env).WalletAPI.WalletSetDefault(context.TODO(), addr) + if err != nil { + return err + } + + return printOneString(re, addr.String()) + }, +} + +var balanceCmd = &cmds.Command{ + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "APIAddress to get balance for"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + addr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + balance, err := env.(*node.Env).WalletAPI.WalletBalance(req.Context, addr) + if err != nil { + return err + } + + return printOneString(re, (types.FIL)(balance).String()) + }, +} + +// WalletSerializeResult is the type wallet export and import return and expect. 
+type WalletSerializeResult struct { + KeyInfo []*crypto.KeyInfo +} + +var walletImportCmd = &cmds.Command{ + Arguments: []cmds.Argument{ + cmds.FileArg("walletFile", true, false, "File containing wallet data to import").EnableStdin(), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if !env.(*node.Env).WalletAPI.HasPassword(req.Context) { + return errMissPassword + } + if env.(*node.Env).WalletAPI.WalletState(req.Context) == wallet.Lock { + return errWalletLocked + } + iter := req.Files.Entries() + if !iter.Next() { + return fmt.Errorf("no file given: %s", iter.Err()) + } + + fi, ok := iter.Node().(files.File) + if !ok { + return fmt.Errorf("given file was not a files.File") + } + + var key types.KeyInfo + err := json.NewDecoder(hex.NewDecoder(fi)).Decode(&key) + if err != nil { + return err + } + + addr, err := env.(*node.Env).WalletAPI.WalletImport(req.Context, &key) + if err != nil { + return err + } + + return printOneString(re, addr.String()) + }, +} + +var walletExportCmd = &cmds.Command{ + Arguments: []cmds.Argument{ + cmds.StringArg("addr", true, true, "address of key to export"), + cmds.StringArg("password", false, false, "Password to be locked"), + }, + PreRun: func(req *cmds.Request, env cmds.Environment) error { + // for testing, skip manual password entry + if len(req.Arguments) == 2 && len(req.Arguments[1]) != 0 { + return nil + } + pw, err := gopass.GetPasswdPrompt("Password:", true, os.Stdin, os.Stdout) + if err != nil { + return err + } + fmt.Println(req.Arguments) + req.Arguments = []string{req.Arguments[0], string(pw)} + + return nil + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if env.(*node.Env).WalletAPI.WalletState(req.Context) == wallet.Lock { + return errWalletLocked + } + if len(req.Arguments) != 2 { + return re.Emit("Two parameter is required.") + } + addr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + pw := 
req.Arguments[1] + ki, err := env.(*node.Env).WalletAPI.WalletExport(req.Context, addr, pw) + if err != nil { + return err + } + + kiBytes, err := json.Marshal(ki) + if err != nil { + return err + } + + return printOneString(re, hex.EncodeToString(kiBytes)) + }, +} + +var setWalletPassword = &cmds.Command{ + Arguments: []cmds.Argument{ + cmds.StringArg("password", false, false, "Password to be locked"), + }, + PreRun: func(req *cmds.Request, env cmds.Environment) error { + pw, err := gopass.GetPasswdPrompt("Password:", true, os.Stdin, os.Stdout) + if err != nil { + return err + } + pw2, err := gopass.GetPasswdPrompt("Enter Password again:", true, os.Stdin, os.Stdout) + if err != nil { + return err + } + if !bytes.Equal(pw, pw2) { + return errors.New("the input passwords are inconsistent") + } + + req.Arguments = []string{string(pw)} + + return nil + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 1 { + return re.Emit("A parameter is required.") + } + + pw := req.Arguments[0] + if len(pw) == 0 { + return re.Emit("Do not enter an empty string") + } + + err := env.(*node.Env).WalletAPI.SetPassword(req.Context, []byte(pw)) + if err != nil { + return err + } + + return printOneString(re, "Password set successfully \n"+ + "You must REMEMBER your password! 
Without the password, it's impossible to decrypt the key!") + }, +} + +var lockedCmd = &cmds.Command{ + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + err := env.(*node.Env).WalletAPI.LockWallet(req.Context) + if err != nil { + return err + } + + return re.Emit("locked success") + }, +} + +var unlockedCmd = &cmds.Command{ + Arguments: []cmds.Argument{ + cmds.StringArg("password", false, false, "Password to be locked"), + }, + PreRun: func(req *cmds.Request, env cmds.Environment) error { + pw, err := gopass.GetPasswdPrompt("Password:", true, os.Stdin, os.Stdout) + if err != nil { + return err + } + req.Arguments = []string{string(pw)} + + return nil + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 1 { + return re.Emit("A parameter is required.") + } + + pw := req.Arguments[0] + + err := env.(*node.Env).WalletAPI.UnLockWallet(req.Context, []byte(pw)) + if err != nil { + return err + } + + return re.Emit("unlocked success") + }, +} diff --git a/cmd/address_integration_test.go b/cmd/address_integration_test.go new file mode 100644 index 0000000000..0e90883006 --- /dev/null +++ b/cmd/address_integration_test.go @@ -0,0 +1,143 @@ +package cmd_test + +import ( + "context" + "encoding/hex" + "encoding/json" + "os" + "strings" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/filecoin-project/venus/app/node/test" + "github.com/filecoin-project/venus/cmd" + "github.com/filecoin-project/venus/fixtures/fortest" + "github.com/filecoin-project/venus/pkg/crypto" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/wallet" +) + +func TestAddressNewAndList(t *testing.T) { + tf.IntegrationTest(t) + + ctx := context.Background() + builder := 
test.NewNodeBuilder(t) + + n, cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + addrs := make([]address.Address, 10) + var err error + for i := 0; i < 10; i++ { + addrs[i], err = n.Wallet().API().WalletNewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + } + + list := cmdClient.RunSuccess(ctx, "wallet", "ls").ReadStdout() + for _, addr := range addrs { + assert.Contains(t, list, addr.String()) + } +} + +func TestWalletBalance(t *testing.T) { + tf.IntegrationTest(t) + ctx := context.Background() + + builder := test.NewNodeBuilder(t) + cs := test.FixtureChainSeed(t) + builder.WithGenesisInit(cs.GenesisInitFunc) + + n, cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + addr, err := n.Wallet().API().WalletNewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + + t.Log("[success] not found, zero") + balance := cmdClient.RunSuccess(ctx, "wallet", "balance", addr.String()).ReadStdout() + assert.Equal(t, "0 FIL\n", balance) + + t.Log("[success] balance 1394000000000000000000000000") + balance = cmdClient.RunSuccess(ctx, "wallet", "balance", builtin.RewardActorAddr.String()).ReadStdout() + assert.Equal(t, "1394000000 FIL\n", balance) + + t.Log("[success] newly generated one") + var addrNew cmd.AddressResult + cmdClient.RunSuccessFirstLine(ctx, "wallet", "new") + balance = cmdClient.RunSuccess(ctx, "wallet", "balance", addrNew.Address.String()).ReadStdout() + assert.Equal(t, "0 FIL\n", balance) +} + +func TestWalletLoadFromFile(t *testing.T) { + tf.IntegrationTest(t) + ctx := context.Background() + + builder := test.NewNodeBuilder(t) + cs := test.FixtureChainSeed(t) + builder.WithGenesisInit(cs.GenesisInitFunc) + + _, cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + for _, p := range fortest.KeyFilePaths() { + cmdClient.RunSuccess(ctx, "wallet", "import", p) + } + + list := cmdClient.RunSuccess(ctx, "wallet", "ls").ReadStdout() + for _, addr := range fortest.TestAddresses { + // assert we loaded the test 
address from the file + assert.Contains(t, list, addr.String()) + } + + // assert default amount of funds were allocated to address during genesis + balance := cmdClient.RunSuccess(ctx, "wallet", "balance", fortest.TestAddresses[0].String()).ReadStdout() + assert.Equal(t, "1000000 FIL\n", balance) +} + +func TestWalletExportImportRoundTrip(t *testing.T) { + tf.IntegrationTest(t) + + ctx := context.Background() + builder := test.NewNodeBuilder(t) + + n, cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + addr, err := n.Wallet().API().WalletNewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + + // ./venus wallet ls + // eg: + // Address Balance Nonce Default + // t3wzm53n4ui4zdgwenf7jflrtsejgpsus7rswlkvbffxhdpkixpzfzidbvinrpnjx7dgvs72ilsnpiu7yjhela 0 FIL 0 X + result := cmdClient.RunSuccessLines(ctx, "wallet", "ls") + require.Len(t, result, 2) // include the header `Address Balance Nonce Default` + resultAddr := strings.Split(result[1], " ")[0] + require.Equal(t, addr.String(), resultAddr) + + exportJSON := cmdClient.RunSuccess(ctx, "wallet", "export", resultAddr, string(wallet.TestPassword)).ReadStdoutTrimNewlines() + data, err := hex.DecodeString(exportJSON) + require.NoError(t, err) + var exportResult crypto.KeyInfo + err = json.Unmarshal(data, &exportResult) + require.NoError(t, err) + + wf, err := os.Create("walletFileTest") + require.NoError(t, err) + defer func() { + require.NoError(t, os.Remove("walletFileTest")) + }() + + keyInfoByte, err := json.Marshal(exportResult) + require.NoError(t, err) + _, err = wf.WriteString(hex.EncodeToString(keyInfoByte)) + require.NoError(t, err) + require.NoError(t, wf.Close()) + + importResult := cmdClient.RunSuccessFirstLine(ctx, "wallet", "import", wf.Name()) + assert.Equal(t, resultAddr, importResult) +} diff --git a/cmd/chain.go b/cmd/chain.go new file mode 100644 index 0000000000..f624875837 --- /dev/null +++ b/cmd/chain.go @@ -0,0 +1,443 @@ +// Package commands implements the command to print 
the blockchain. +package cmd + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + cmds "github.com/ipfs/go-ipfs-cmds" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/constants" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var chainCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Interact with filecoin blockchain", + }, + Subcommands: map[string]*cmds.Command{ + "head": chainHeadCmd, + "ls": chainLsCmd, + "set-head": chainSetHeadCmd, + "get-block": chainGetBlockCmd, + "get-message": chainGetMessageCmd, + "get-block-messages": chainGetBlockMessagesCmd, + "get-receipts": chainGetReceiptsCmd, + "disputer": chainDisputeSetCmd, + "export": chainExportCmd, + }, +} + +type ChainHeadResult struct { + Height abi.ChainEpoch + ParentWeight big.Int + Cids []cid.Cid + Timestamp string +} + +var chainHeadCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Get heaviest tipset info", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + head, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + h := head.Height() + pw := head.ParentWeight() + + strTt := time.Unix(int64(head.MinTimestamp()), 0).Format("2006-01-02 15:04:05") + + return re.Emit(&ChainHeadResult{Height: h, ParentWeight: pw, Cids: head.Key().Cids(), Timestamp: strTt}) + }, + Type: &ChainHeadResult{}, +} + +type BlockResult struct { + Cid cid.Cid + Miner address.Address +} + +type ChainLsResult struct { + Height abi.ChainEpoch + Timestamp string + Blocks []BlockResult +} + +var chainLsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List blocks in the blockchain", + 
ShortDescription: `Provides a list of blocks in order from head to genesis. By default, only CIDs are returned for each block.`, + }, + Options: []cmds.Option{ + cmds.Int64Option("height", "Start height of the query").WithDefault(int64(-1)), + cmds.UintOption("count", "Number of queries").WithDefault(uint(10)), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + count, _ := req.Options["count"].(uint) + if count < 1 { + return nil + } + + var err error + + startTS, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + height, _ := req.Options["height"].(int64) + if height >= 0 && abi.ChainEpoch(height) < startTS.Height() { + startTS, err = env.(*node.Env).ChainAPI.ChainGetTipSetByHeight(req.Context, abi.ChainEpoch(height), startTS.Key()) + if err != nil { + return err + } + } + + if abi.ChainEpoch(count) > startTS.Height()+1 { + count = uint(startTS.Height() + 1) + } + tipSetKeys, err := env.(*node.Env).ChainAPI.ChainList(req.Context, startTS.Key(), int(count)) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + tpInfoStr := "" + for _, key := range tipSetKeys { + tp, err := env.(*node.Env).ChainAPI.ChainGetTipSet(req.Context, key) + if err != nil { + return err + } + + strTt := time.Unix(int64(tp.MinTimestamp()), 0).Format("2006-01-02 15:04:05") + + oneTpInfoStr := fmt.Sprintf("%v: (%s) [ ", tp.Height(), strTt) + for _, blk := range tp.Blocks() { + oneTpInfoStr += fmt.Sprintf("%s: %s,", blk.Cid().String(), blk.Miner) + } + oneTpInfoStr += " ]" + + tpInfoStr += oneTpInfoStr + "\n" + } + + writer.WriteString(tpInfoStr) + + return re.Emit(buf) + }, +} + +var chainSetHeadCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Set the chain head to a specific tipset key.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cids", true, true, "CID's of the blocks of the tipset to set the chain head to."), + }, + Run: func(req 
*cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + headCids, err := cidsFromSlice(req.Arguments) + if err != nil { + return err + } + maybeNewHead := types.NewTipSetKey(headCids...) + return env.(*node.Env).ChainAPI.ChainSetHead(req.Context, maybeNewHead) + }, +} + +var chainGetBlockCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Get a block and print its details.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, true, "CID of the block to show."), + }, + Options: []cmds.Option{ + cmds.BoolOption("raw", "print just the raw block header"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + bcid, err := cid.Decode(req.Arguments[0]) + if err != nil { + return err + } + + ctx := req.Context + blk, err := env.(*node.Env).ChainAPI.ChainGetBlock(ctx, bcid) + if err != nil { + return fmt.Errorf("get block failed: %w", err) + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + if _, ok := req.Options["raw"].(bool); ok { + out, err := json.MarshalIndent(blk, "", " ") + if err != nil { + return err + } + + _ = writer.Write(out) + + return re.Emit(buf) + } + + msgs, err := env.(*node.Env).ChainAPI.ChainGetBlockMessages(ctx, bcid) + if err != nil { + return fmt.Errorf("failed to get messages: %v", err) + } + + pmsgs, err := env.(*node.Env).ChainAPI.ChainGetParentMessages(ctx, bcid) + if err != nil { + return fmt.Errorf("failed to get parent messages: %v", err) + } + + recpts, err := env.(*node.Env).ChainAPI.ChainGetParentReceipts(ctx, bcid) + if err != nil { + log.Warn(err) + } + + cblock := struct { + types.BlockHeader + BlsMessages []*types.Message + SecpkMessages []*types.SignedMessage + ParentReceipts []*types.MessageReceipt + ParentMessages []cid.Cid + }{} + + cblock.BlockHeader = *blk + cblock.BlsMessages = msgs.BlsMessages + cblock.SecpkMessages = msgs.SecpkMessages + cblock.ParentReceipts = recpts + cblock.ParentMessages = apiMsgCids(pmsgs) + + out, err := 
json.MarshalIndent(cblock, "", " ") + if err != nil { + return err + } + + _ = writer.Write(out) + + return re.Emit(buf) + }, +} + +var chainGetMessageCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show a filecoin message by its CID", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, false, "CID of message to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + cid, err := cid.Decode(req.Arguments[0]) + if err != nil { + return err + } + + msg, err := env.(*node.Env).ChainAPI.ChainGetMessage(req.Context, cid) + if err != nil { + return err + } + + return re.Emit(msg) + }, + Type: types.Message{}, +} + +var chainGetBlockMessagesCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show a filecoin message collection by block CID", + ShortDescription: "Prints info for all messages in a collection, at the given block CID.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, false, "CID of block to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + cid, err := cid.Decode(req.Arguments[0]) + if err != nil { + return err + } + + bmsg, err := env.(*node.Env).ChainAPI.ChainGetBlockMessages(req.Context, cid) + if err != nil { + return err + } + + return re.Emit(bmsg) + }, + Type: &types.BlockMessages{}, +} + +var chainGetReceiptsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show a filecoin receipt collection by its CID", + ShortDescription: `Prints info for all receipts in a collection, +at the given CID. 
MessageReceipt collection CIDs are found in the "ParentMessageReceipts" +field of the filecoin block header.`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, false, "CID of receipt collection to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + cid, err := cid.Decode(req.Arguments[0]) + if err != nil { + return err + } + + receipts, err := env.(*node.Env).ChainAPI.ChainGetReceipts(req.Context, cid) + if err != nil { + return err + } + + return re.Emit(receipts) + }, + Type: []types.MessageReceipt{}, +} + +func apiMsgCids(in []types.MessageCID) []cid.Cid { + out := make([]cid.Cid, len(in)) + for k, v := range in { + out[k] = v.Cid + } + return out +} + +var chainExportCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "export chain to a car file", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("outputPath", true, false, ""), + }, + Options: []cmds.Option{ + cmds.StringOption("tipset").WithDefault(""), + cmds.Int64Option("recent-stateroots", "specify the number of recent state roots to include in the export").WithDefault(int64(0)), + cmds.BoolOption("skip-old-msgs").WithDefault(false), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 1 { + return errors.New("must specify filename to export chain to") + } + + rsrs := abi.ChainEpoch(req.Options["recent-stateroots"].(int64)) + if rsrs > 0 && rsrs < constants.Finality { + return fmt.Errorf("\"recent-stateroots\" has to be greater than %d", constants.Finality) + } + + fi, err := os.Create(req.Arguments[0]) + if err != nil { + return err + } + defer func() { + err := fi.Close() + if err != nil { + fmt.Printf("error closing output file: %+v", err) + } + }() + + ts, err := LoadTipSet(req.Context, req, env.(*node.Env).ChainAPI) + if err != nil { + return err + } + + skipold := req.Options["skip-old-msgs"].(bool) + + if rsrs == 0 && skipold { + return fmt.Errorf("must pass 
recent stateroots along with skip-old-msgs") + } + + stream, err := env.(*node.Env).ChainAPI.ChainExport(req.Context, rsrs, skipold, ts.Key()) + if err != nil { + return err + } + + var last bool + for b := range stream { + last = len(b) == 0 + + _, err := fi.Write(b) + if err != nil { + return err + } + } + + if !last { + return fmt.Errorf("incomplete export (remote connection lost?)") + } + + return nil + }, +} + +// LoadTipSet gets the tipset from the context, or the head from the API. +// +// It always gets the head from the API so commands use a consistent tipset even if time pases. +func LoadTipSet(ctx context.Context, req *cmds.Request, chainAPI v1api.IChain) (*types.TipSet, error) { + tss := req.Options["tipset"].(string) + if tss == "" { + return chainAPI.ChainHead(ctx) + } + + return ParseTipSetRef(ctx, chainAPI, tss) +} + +func ParseTipSetRef(ctx context.Context, chainAPI v1api.IChain, tss string) (*types.TipSet, error) { + if tss[0] == '@' { + if tss == "@head" { + return chainAPI.ChainHead(ctx) + } + + var h uint64 + if _, err := fmt.Sscanf(tss, "@%d", &h); err != nil { + return nil, fmt.Errorf("parsing height tipset ref: %w", err) + } + + return chainAPI.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(h), types.EmptyTSK) + } + + cids, err := ParseTipSetString(tss) + if err != nil { + return nil, err + } + + if len(cids) == 0 { + return nil, nil + } + + k := types.NewTipSetKey(cids...) 
+ ts, err := chainAPI.ChainGetTipSet(ctx, k) + if err != nil { + return nil, err + } + + return ts, nil +} + +func ParseTipSetString(ts string) ([]cid.Cid, error) { + strs := strings.Split(ts, ",") + + var cids []cid.Cid + for _, s := range strs { + c, err := cid.Parse(strings.TrimSpace(s)) + if err != nil { + return nil, err + } + cids = append(cids, c) + } + + return cids, nil +} diff --git a/cmd/chain_integration_test.go b/cmd/chain_integration_test.go new file mode 100644 index 0000000000..2e2276e2e3 --- /dev/null +++ b/cmd/chain_integration_test.go @@ -0,0 +1,47 @@ +package cmd_test + +import ( + "context" + "encoding/json" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/app/node/test" + "github.com/filecoin-project/venus/cmd" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestChainHead(t *testing.T) { + tf.IntegrationTest(t) + + ctx := context.Background() + builder := test.NewNodeBuilder(t) + + _, cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + jsonResult := cmdClient.RunSuccess(ctx, "chain", "head", "--enc", "json").ReadStdoutTrimNewlines() + var cidsFromJSON cmd.ChainHeadResult + err := json.Unmarshal([]byte(jsonResult), &cidsFromJSON) + assert.NoError(t, err) +} + +func TestChainLs(t *testing.T) { + tf.IntegrationTest(t) + ctx := context.Background() + + t.Run("chain ls returns the specified number of tipsets modified by the count", func(t *testing.T) { + seed, cfg, chainClk := test.CreateBootstrapSetup(t) + n := test.CreateBootstrapMiner(ctx, t, seed, chainClk, cfg) + + cmdClient, apiDone := test.RunNodeAPI(ctx, n, t) + defer apiDone() + + result := cmdClient.RunSuccess(ctx, "chain", "ls", "--count", "2").ReadStdoutTrimNewlines() + rows := strings.Count(result, "\n") + require.Equal(t, rows, 0) + }) +} diff --git a/cmd/cid.go b/cmd/cid.go new file mode 100644 index 0000000000..f9418555f0 --- /dev/null +++ 
b/cmd/cid.go @@ -0,0 +1,74 @@ +package cmd + +import ( + "bytes" + "fmt" + "os" + + "github.com/filecoin-project/venus/cmd/tablewriter" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + cmds "github.com/ipfs/go-ipfs-cmds" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-car" +) + +var cidCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Cid command", + }, + Subcommands: map[string]*cmds.Command{ + "inspect-bundle": inspectBundleCmd, + }, +} + +var inspectBundleCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Get the manifest CID from a car file, as well as the actor code CIDs", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, ""), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + + f, err := os.OpenFile(req.Arguments[0], os.O_RDONLY, 0o664) + if err != nil { + return fmt.Errorf("opening the car file: %w", err) + } + + bs := blockstoreutil.NewMemory() + wrapBs := adt.WrapStore(ctx, cbor.NewCborStore(bs)) + + hdr, err := car.LoadCar(ctx, bs, f) + if err != nil { + return fmt.Errorf("error loading car file: %w", err) + } + + manifestCid := hdr.Roots[0] + + if err := re.Emit("Manifest CID: " + manifestCid.String()); err != nil { + return err + } + + entries, err := actors.ReadManifest(ctx, wrapBs, manifestCid) + if err != nil { + return fmt.Errorf("error loading manifest: %w", err) + } + + buf := &bytes.Buffer{} + tw := tablewriter.New(tablewriter.Col("Actor"), tablewriter.Col("CID")) + for name, cid := range entries { + tw.Write(map[string]interface{}{ + "Actor": name, + "CID": cid.String(), + }) + } + if err := tw.Flush(buf); err != nil { + return err + } + + return re.Emit(buf) + }, +} diff --git a/cmd/daemon.go b/cmd/daemon.go new file mode 100644 index 0000000000..bd771102b5 --- /dev/null +++ 
b/cmd/daemon.go @@ -0,0 +1,289 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/filecoin-project/venus/fixtures/assets" + "github.com/filecoin-project/venus/fixtures/networks" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/utils" + + "github.com/filecoin-project/venus/pkg/util/ulimit" + + paramfetch "github.com/filecoin-project/go-paramfetch" + + _ "net/http/pprof" // nolint: golint + + cmds "github.com/ipfs/go-ipfs-cmds" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/app/paths" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/genesis" + "github.com/filecoin-project/venus/pkg/journal" + "github.com/filecoin-project/venus/pkg/migration" + "github.com/filecoin-project/venus/pkg/repo" +) + +var log = logging.Logger("daemon") + +const ( + makeGenFlag = "make-genesis" + preTemplateFlag = "genesis-template" +) + +var daemonCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Initialize a venus repo, Start a long-running daemon process", + }, + Options: []cmds.Option{ + cmds.StringOption(makeGenFlag, "make genesis"), + cmds.StringOption(preTemplateFlag, "template for make genesis"), + cmds.StringOption(SwarmAddress, "multiaddress to listen on for filecoin network connections"), + cmds.StringOption(SwarmPublicRelayAddress, "public multiaddress for routing circuit relay traffic. 
Necessary for relay nodes to provide this if they are not publically dialable"), + cmds.BoolOption(OfflineMode, "start the node without networking"), + cmds.BoolOption(ELStdout), + cmds.BoolOption(ULimit, "manage open file limit").WithDefault(true), + cmds.StringOption(AuthServiceURL, "venus auth service URL"), + cmds.BoolOption(IsRelay, "advertise and allow venus network traffic to be relayed through this node"), + cmds.StringOption(ImportSnapshot, "import chain state from a given chain export file or url"), + cmds.StringOption(GenesisFile, "path of file or HTTP(S) URL containing archive of genesis block DAG data"), + cmds.StringOption(Network, "when set, populates config with network specific parameters, eg. mainnet,2k,calibrationnet,interopnet,butterflynet").WithDefault("mainnet"), + cmds.StringOption(Password, "set wallet password"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if limit, _ := req.Options[ULimit].(bool); limit { + if _, _, err := ulimit.ManageFdLimit(); err != nil { + log.Errorf("setting file descriptor limit: %s", err) + } + } + + repoDir, _ := req.Options[OptionRepoDir].(string) + repoDir, err := paths.GetRepoPath(repoDir) + if err != nil { + return err + } + ps, err := assets.GetProofParams() + if err != nil { + return err + } + srs, err := assets.GetSrs() + if err != nil { + return err + } + if err := paramfetch.GetParams(req.Context, ps, srs, 0); err != nil { + return fmt.Errorf("fetching proof parameters: %w", err) + } + + exist, err := repo.Exists(repoDir) + if err != nil { + return err + } + if !exist { + defer func() { + if err != nil { + log.Infof("Failed to initialize venus, cleaning up %s after attempt...", repoDir) + if err := os.RemoveAll(repoDir); err != nil { + log.Errorf("Failed to clean up failed repo: %s", err) + } + } + }() + log.Infof("Initializing repo at '%s'", repoDir) + + if err := re.Emit(repoDir); err != nil { + return err + } + if err := repo.InitFSRepo(repoDir, 
repo.LatestVersion, config.NewDefaultConfig()); err != nil { + return err + } + + if err = initRun(req); err != nil { + return err + } + } + + return daemonRun(req, re) + }, +} + +func initRun(req *cmds.Request) error { + rep, err := getRepo(req) + if err != nil { + return err + } + // The only error Close can return is that the repo has already been closed. + defer func() { + _ = rep.Close() + }() + var genesisFunc genesis.InitFunc + cfg := rep.Config() + network, _ := req.Options[Network].(string) + if err := networks.SetConfigFromOptions(cfg, network); err != nil { + return fmt.Errorf("setting config %v", err) + } + // genesis node + if mkGen, ok := req.Options[makeGenFlag].(string); ok { + preTp := req.Options[preTemplateFlag] + if preTp == nil { + return fmt.Errorf("must also pass file with genesis template to `--%s`", preTemplateFlag) + } + + node.SetNetParams(cfg.NetworkParams) + if err := actors.SetNetworkBundle(int(cfg.NetworkParams.NetworkType)); err != nil { + return err + } + utils.ReloadMethodsMap() + + genesisFunc = genesis.MakeGenesis(req.Context, rep, mkGen, preTp.(string), cfg.NetworkParams.ForkUpgradeParam) + } else { + genesisFileSource, _ := req.Options[GenesisFile].(string) + genesisFunc, err = genesis.LoadGenesis(req.Context, rep, genesisFileSource, network) + if err != nil { + return err + } + } + if authServiceURL, ok := req.Options[AuthServiceURL].(string); ok && len(authServiceURL) > 0 { + cfg.API.VenusAuthURL = authServiceURL + } + + if err := rep.ReplaceConfig(cfg); err != nil { + log.Errorf("Error replacing config %s", err) + return err + } + + if err := node.Init(req.Context, rep, genesisFunc); err != nil { + log.Errorf("Error initializing node %s", err) + return err + } + + return nil +} + +func daemonRun(req *cmds.Request, re cmds.ResponseEmitter) error { + // third precedence is config file. 
+ rep, err := getRepo(req) + if err != nil { + return err + } + + config := rep.Config() + if err := networks.SetConfigFromNetworkType(config, config.NetworkParams.NetworkType); err != nil { + return fmt.Errorf("set config failed %v %v", config.NetworkParams.NetworkType, err) + } + log.Infof("network params: %+v", config.NetworkParams) + log.Infof("upgrade params: %+v", config.NetworkParams.ForkUpgradeParam) + + if err := actors.SetNetworkBundle(int(config.NetworkParams.NetworkType)); err != nil { + return err + } + utils.ReloadMethodsMap() + + // second highest precedence is env vars. + if envAPI := os.Getenv("VENUS_API"); envAPI != "" { + config.API.APIAddress = envAPI + } + + // highest precedence is cmd line flag. + if flagAPI, ok := req.Options[OptionAPI].(string); ok && flagAPI != "" { + config.API.APIAddress = flagAPI + } + + if swarmAddress, ok := req.Options[SwarmAddress].(string); ok && swarmAddress != "" { + config.Swarm.Address = swarmAddress + } + + if publicRelayAddress, ok := req.Options[SwarmPublicRelayAddress].(string); ok && publicRelayAddress != "" { + config.Swarm.PublicRelayAddress = publicRelayAddress + } + + if authURL, ok := req.Options[AuthServiceURL].(string); ok && len(authURL) > 0 { + config.API.VenusAuthURL = authURL + } + + opts, err := node.OptionsFromRepo(rep) + if err != nil { + return err + } + + if offlineMode, ok := req.Options[OfflineMode].(bool); ok { // nolint + opts = append(opts, node.OfflineMode(offlineMode)) + } + + if isRelay, ok := req.Options[IsRelay].(bool); ok && isRelay { + opts = append(opts, node.IsRelay()) + } + importPath, _ := req.Options[ImportSnapshot].(string) + if len(importPath) != 0 { + err := Import(req.Context, rep, importPath) + if err != nil { + log.Errorf("failed to import snapshot, import path: %s, error: %s", importPath, err.Error()) + return err + } + } + + if password, _ := req.Options[Password].(string); len(password) > 0 { + opts = append(opts, node.SetWalletPassword([]byte(password))) + } + + 
journal, err := journal.NewZapJournal(rep.JournalPath()) // nolint + if err != nil { + return err + } + opts = append(opts, node.JournalConfigOption(journal)) + + // Monkey-patch network parameters option will set package variables during node build + opts = append(opts, node.MonkeyPatchNetworkParamsOption(config.NetworkParams)) + + // Instantiate the node. + fcn, err := node.New(req.Context, opts...) + if err != nil { + return err + } + + if fcn.OfflineMode() { + _ = re.Emit("Filecoin node running in offline mode (libp2p is disabled)\n") + } else { + _ = re.Emit(fmt.Sprintf("My peer ID is %s\n", fcn.Network().Host.ID().Pretty())) + for _, a := range fcn.Network().Host.Addrs() { + _ = re.Emit(fmt.Sprintf("Swarm listening on: %s\n", a)) + } + } + + if _, ok := req.Options[ELStdout].(bool); ok { + _ = re.Emit("--" + ELStdout + " option is deprecated\n") + } + + // Start the node. + if err := fcn.Start(req.Context); err != nil { + return err + } + + // Run API server around the node. + ready := make(chan interface{}, 1) + go func() { + <-ready + lines := []string{ + fmt.Sprintf("API server listening on %s\n", config.API.APIAddress), + } + _ = re.Emit(lines) + }() + + // The request is expected to remain open so the daemon uses the request context. + // Pass a new context here if the flow changes such that the command should exit while leaving + // a forked deamon running. 
+ return fcn.RunRPCAndWait(req.Context, RootCmdDaemon, ready) +} + +func getRepo(req *cmds.Request) (repo.Repo, error) { + repoDir, _ := req.Options[OptionRepoDir].(string) + repoDir, err := paths.GetRepoPath(repoDir) + if err != nil { + return nil, err + } + if err = migration.TryToMigrate(repoDir); err != nil { + return nil, err + } + return repo.OpenFSRepo(repoDir, repo.LatestVersion) +} diff --git a/cmd/daemon_daemon_test.go b/cmd/daemon_daemon_test.go new file mode 100644 index 0000000000..d3273b3362 --- /dev/null +++ b/cmd/daemon_daemon_test.go @@ -0,0 +1,166 @@ +package cmd_test + +import ( + "context" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "testing" + + manet "github.com/multiformats/go-multiaddr/net" + + th "github.com/filecoin-project/venus/pkg/testhelpers" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDownloadGenesis(t *testing.T) { + tf.IntegrationTest(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + port, err := th.GetFreePort() + require.NoError(t, err) + + err = exec.CommandContext( + ctx, + th.Root("/genesis-file-server"), + "--genesis-file-path", + th.Root("fixtures/test/genesis.car"), + "--port", + strconv.Itoa(port), + ).Start() + require.NoError(t, err) + td := th.NewDaemon(t, th.GenesisFile(fmt.Sprintf("http://127.0.0.1:%d/genesis.car", port))).Start() + + td.ShutdownSuccess() +} + +func TestDaemonStartupMessage(t *testing.T) { + tf.IntegrationTest(t) + + daemon := th.NewDaemon(t).Start() + daemon.ShutdownSuccess() + + out := daemon.ReadStdout() + assert.Regexp(t, "\"My peer ID is [a-zA-Z0-9]*", out) + assert.Regexp(t, "\\n\"Swarm listening on.*", out) +} + +func TestDaemonApiFile(t *testing.T) { + tf.IntegrationTest(t) + + daemon := th.NewDaemon(t).Start() + + apiPath := filepath.Join(daemon.RepoDir(), "api") + assert.FileExists(t, apiPath) + + 
daemon.ShutdownEasy() + + _, err := os.Lstat(apiPath) + assert.Error(t, err, "Expect api file to be deleted on shutdown") + assert.True(t, os.IsNotExist(err)) +} + +func TestDaemonCORS(t *testing.T) { + tf.IntegrationTest(t) + + t.Run("default allowed origins work", func(t *testing.T) { + td := th.NewDaemon(t).Start() + defer td.ShutdownSuccess() + + maddr, err := td.CmdAddr() + assert.NoError(t, err) + + _, host, err := manet.DialArgs(maddr) //nolint + assert.NoError(t, err) + + url := fmt.Sprintf("http://%s/api/swarm/id", host) + + token, err := td.CmdToken() + assert.NoError(t, err) + + req, err := http.NewRequest("POST", url, nil) + assert.NoError(t, err) + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Add("Origin", "http://localhost:8080") + res, err := http.DefaultClient.Do(req) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + + req, err = http.NewRequest("POST", url, nil) + assert.NoError(t, err) + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Add("Origin", "https://localhost:8080") + res, err = http.DefaultClient.Do(req) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + + req, err = http.NewRequest("POST", url, nil) + assert.NoError(t, err) + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Add("Origin", "http://127.0.0.1:8080") + res, err = http.DefaultClient.Do(req) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + + req, err = http.NewRequest("POST", url, nil) + assert.NoError(t, err) + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Add("Origin", "https://127.0.0.1:8080") + res, err = http.DefaultClient.Do(req) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + }) + + t.Run("non-configured origin fails", func(t *testing.T) { + td := th.NewDaemon(t).Start() + defer td.ShutdownSuccess() + + maddr, err := td.CmdAddr() + assert.NoError(t, err) + + _, host, err := manet.DialArgs(maddr) //nolint + 
assert.NoError(t, err) + token, err := td.CmdToken() + assert.NoError(t, err) + + url := fmt.Sprintf("http://%s/api/swarm/id", host) + req, err := http.NewRequest("POST", url, nil) + assert.NoError(t, err) + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Add("Origin", "http://disallowed.origin") + res, err := http.DefaultClient.Do(req) + assert.NoError(t, err) + assert.Equal(t, http.StatusForbidden, res.StatusCode) + }) +} + +func TestDaemonOverHttp(t *testing.T) { + tf.IntegrationTest(t) + + td := th.NewDaemon(t).Start() + defer td.ShutdownSuccess() + + maddr, err := td.CmdAddr() + require.NoError(t, err) + + _, host, err := manet.DialArgs(maddr) //nolint + require.NoError(t, err) + token, err := td.CmdToken() + assert.NoError(t, err) + + url := fmt.Sprintf("http://%s/api/daemon", host) + req, err := http.NewRequest("POST", url, nil) + require.NoError(t, err) + req.Header.Set("Authorization", "Bearer "+token) + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusNotFound, res.StatusCode) +} diff --git a/cmd/dispute.go b/cmd/dispute.go new file mode 100644 index 0000000000..91e5f6c79a --- /dev/null +++ b/cmd/dispute.go @@ -0,0 +1,430 @@ +package cmd + +import ( + "bytes" + "context" + "errors" + "fmt" + "strconv" + "time" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + cmds "github.com/ipfs/go-ipfs-cmds" + + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/venus-shared/actors" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var disputeLog = logging.Logger("disputer") + +const Confidence = 10 + +type 
minerDeadline struct { + miner address.Address + index uint64 +} + +var chainDisputeSetCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "interact with the window post disputer", + ShortDescription: `interact with the window post disputer`, + }, + Options: []cmds.Option{ + cmds.StringOption("max-fee", "Spend up to X FIL per DisputeWindowedPoSt message"), + cmds.StringOption("from", "optionally specify the account to send messages from"), + }, + Subcommands: map[string]*cmds.Command{ + "start": disputerStartCmd, + "dispute": disputerMsgCmd, + }, +} + +var disputerMsgCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Send a specific DisputeWindowedPoSt message", + ShortDescription: `[minerAddress index postIndex]`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("minerAddress", true, false, "address for miner"), + cmds.StringArg("index", true, false, ""), + cmds.StringArg("postIndex", true, false, ""), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 3 { + return errors.New("usage: dispute [minerAddress index postIndex]") + } + + toa, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return fmt.Errorf("given 'miner' address %q was invalid: %w", req.Arguments[0], err) + } + + deadline, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + postIndex, err := strconv.ParseUint(req.Arguments[2], 10, 64) + if err != nil { + return err + } + + fromStr := req.Options["from"].(string) + fromAddr, err := getSender(req.Context, env.(*node.Env).WalletAPI, fromStr) + if err != nil { + return err + } + + dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{ + Deadline: deadline, + PoStIndex: postIndex, + }) + + if aerr != nil { + return fmt.Errorf("failed to serailize params: %w", aerr) + } + + dmsg := &types.Message{ + To: toa, + From: fromAddr, + Value: big.Zero(), + Method: builtin3.MethodsMiner.DisputeWindowedPoSt, + 
Params: dpp, + } + + rslt, err := env.(*node.Env).ChainAPI.StateCall(req.Context, dmsg, types.EmptyTSK) + if err != nil { + return fmt.Errorf("failed to simulate dispute: %w", err) + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + if rslt.MsgRct.ExitCode == 0 { + mss, err := getMaxFee(req.Options["max-fee"].(string)) + if err != nil { + return err + } + + sm, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(req.Context, dmsg, mss) + if err != nil { + return err + } + + _ = writer.WriteString(fmt.Sprintf("dispute message %v", sm.Cid())) + + } else { + _ = writer.WriteString("dispute is unsuccessful") + } + + return re.Emit(buf) + }, +} + +var disputerStartCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Start the window post disputer", + ShortDescription: `[minerAddress]`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("minerAddress", true, false, "address for miner"), + }, + Options: []cmds.Option{ + cmds.Uint64Option("start-epoch", "only start disputing PoSts after this epoch").WithDefault(uint64(0)), + cmds.Uint64Option("height", ""), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + + fromStr := req.Options["from"].(string) + fromAddr, err := getSender(ctx, env.(*node.Env).WalletAPI, fromStr) + if err != nil { + return err + } + + mss, err := getMaxFee(req.Options["max-fee"].(string)) + if err != nil { + return err + } + + startEpoch := abi.ChainEpoch(0) + height := req.Options["height"].(uint64) + if height > 0 { + startEpoch = abi.ChainEpoch(height) + } + + disputeLog.Info("setting up window post disputer") + + // subscribe to head changes and validate the current value + + headChanges, err := env.(*node.Env).ChainAPI.ChainNotify(ctx) + if err != nil { + return err + } + head, ok := <-headChanges + if !ok { + return fmt.Errorf("notify stream was invalid") + } + + if len(head) != 1 { + return fmt.Errorf("notify first entry should have been one item") + } + + 
if head[0].Type != types.HCCurrent { + return fmt.Errorf("expected current head on Notify stream (got %s)", head[0].Type) + } + + lastEpoch := head[0].Val.Height() + lastStatusCheckEpoch := lastEpoch + + // build initial deadlineMap + + minerList, err := env.(*node.Env).ChainAPI.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return err + } + + knownMiners := make(map[address.Address]struct{}) + deadlineMap := make(map[abi.ChainEpoch][]minerDeadline) + for _, miner := range minerList { + dClose, dl, err := makeMinerDeadline(ctx, env.(*node.Env).ChainAPI, miner) + if err != nil { + return fmt.Errorf("making deadline: %w", err) + } + + deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) + + knownMiners[miner] = struct{}{} + } + + // when this fires, check for newly created miners, and purge any "missed" epochs from deadlineMap + statusCheckTicker := time.NewTicker(time.Hour) + defer statusCheckTicker.Stop() + + disputeLog.Info("starting up window post disputer") + + applyTsk := func(tsk types.TipSetKey) error { + disputeLog.Infow("last checked epoch", "epoch", lastEpoch) + dls, ok := deadlineMap[lastEpoch] + delete(deadlineMap, lastEpoch) + if !ok || startEpoch >= lastEpoch { + // no deadlines closed at this epoch - Confidence, or we haven't reached the start cutoff yet + return nil + } + + dpmsgs := make([]*types.Message, 0) + + startTime := time.Now() + proofsChecked := uint64(0) + + // TODO: Parallelizeable + for _, dl := range dls { + fullDeadlines, err := env.(*node.Env).ChainAPI.StateMinerDeadlines(ctx, dl.miner, tsk) + if err != nil { + return fmt.Errorf("failed to load deadlines: %w", err) + } + + if int(dl.index) >= len(fullDeadlines) { + return fmt.Errorf("deadline index %d not found in deadlines", dl.index) + } + + disputableProofs := fullDeadlines[dl.index].DisputableProofCount + proofsChecked += disputableProofs + + ms, err := makeDisputeWindowedPosts(ctx, env.(*node.Env).ChainAPI, dl, disputableProofs, fromAddr) + if 
err != nil { + return fmt.Errorf("failed to check for disputes: %w", err) + } + + dpmsgs = append(dpmsgs, ms...) + + dClose, dl, err := makeMinerDeadline(ctx, env.(*node.Env).ChainAPI, dl.miner) + if err != nil { + return fmt.Errorf("making deadline: %w", err) + } + + deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) + } + + disputeLog.Infow("checked proofs", "count", proofsChecked, "duration", time.Since(startTime)) + + // TODO: Parallelizeable / can be integrated into the previous deadline-iterating for loop + for _, dpmsg := range dpmsgs { + disputeLog.Infow("disputing a PoSt", "miner", dpmsg.To) + m, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, dpmsg, mss) + if err != nil { + disputeLog.Errorw("failed to dispute post message", "err", err.Error(), "miner", dpmsg.To) + } else { + disputeLog.Infow("submited dispute", "mcid", m.Cid(), "miner", dpmsg.To) + } + } + + return nil + } + + disputeLoop := func() error { + select { + case notif, ok := <-headChanges: + if !ok { + return fmt.Errorf("head change channel errored") + } + + for _, val := range notif { + switch val.Type { + case types.HCApply: + for ; lastEpoch <= val.Val.Height(); lastEpoch++ { + err := applyTsk(val.Val.Key()) + if err != nil { + return err + } + } + case types.HCRevert: + // do nothing + default: + return fmt.Errorf("unexpected head change type %s", val.Type) + } + } + case <-statusCheckTicker.C: + disputeLog.Infof("running status check") + + minerList, err = env.(*node.Env).ChainAPI.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting miner list: %w", err) + } + + for _, m := range minerList { + _, ok := knownMiners[m] + if !ok { + dClose, dl, err := makeMinerDeadline(ctx, env.(*node.Env).ChainAPI, m) + if err != nil { + return fmt.Errorf("making deadline: %w", err) + } + + deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) + + knownMiners[m] = struct{}{} + } + } + + for ; lastStatusCheckEpoch < 
lastEpoch; lastStatusCheckEpoch++ { + // if an epoch got "skipped" from the deadlineMap somehow, just fry it now instead of letting it sit around forever + _, ok := deadlineMap[lastStatusCheckEpoch] + if ok { + disputeLog.Infow("epoch skipped during execution, deleting it from deadlineMap", "epoch", lastStatusCheckEpoch) + delete(deadlineMap, lastStatusCheckEpoch) + } + } + + log.Infof("status check complete") + case <-ctx.Done(): + return ctx.Err() + } + + return nil + } + + for { + err := disputeLoop() + if err == context.Canceled { + disputeLog.Info("disputer shutting down") + break + } + if err != nil { + disputeLog.Errorw("disputer shutting down", "err", err) + return err + } + } + + return nil + }, +} + +// for a given miner, index, and maxPostIndex, tries to dispute posts from 0...postsSnapshotted-1 +// returns a list of DisputeWindowedPoSt msgs that are expected to succeed if sent +func makeDisputeWindowedPosts(ctx context.Context, api v1api.IChain, dl minerDeadline, postsSnapshotted uint64, sender address.Address) ([]*types.Message, error) { + disputes := make([]*types.Message, 0) + + for i := uint64(0); i < postsSnapshotted; i++ { + + dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{ + Deadline: dl.index, + PoStIndex: i, + }) + + if aerr != nil { + return nil, fmt.Errorf("failed to serailize params: %w", aerr) + } + + dispute := &types.Message{ + To: dl.miner, + From: sender, + Value: big.Zero(), + Method: builtin3.MethodsMiner.DisputeWindowedPoSt, + Params: dpp, + } + + rslt, err := api.StateCall(ctx, dispute, types.EmptyTSK) + if err == nil && rslt.MsgRct.ExitCode == 0 { + disputes = append(disputes, dispute) + } + + } + + return disputes, nil +} + +func makeMinerDeadline(ctx context.Context, api v1api.IChain, mAddr address.Address) (abi.ChainEpoch, *minerDeadline, error) { + dl, err := api.StateMinerProvingDeadline(ctx, mAddr, types.EmptyTSK) + if err != nil { + return -1, nil, fmt.Errorf("getting proving index list: %w", err) + } 
+ + return dl.Close, &minerDeadline{ + miner: mAddr, + index: dl.Index, + }, nil +} + +func getSender(ctx context.Context, api v1api.IWallet, fromStr string) (address.Address, error) { + if fromStr == "" { + return api.WalletDefaultAddress(ctx) + } + + addr, err := address.NewFromString(fromStr) + if err != nil { + return address.Undef, err + } + + has, err := api.WalletHas(ctx, addr) + if err != nil { + return address.Undef, err + } + + if !has { + return address.Undef, fmt.Errorf("wallet doesn't contain: %s ", addr) + } + + return addr, nil +} + +func getMaxFee(maxStr string) (*types.MessageSendSpec, error) { + if maxStr != "" { + maxFee, err := types.ParseFIL(maxStr) + if err != nil { + return nil, fmt.Errorf("parsing max-fee: %w", err) + } + return &types.MessageSendSpec{ + MaxFee: types.BigInt(maxFee), + }, nil + } + + return nil, nil +} diff --git a/cmd/drand.go b/cmd/drand.go new file mode 100644 index 0000000000..7c465fd1f0 --- /dev/null +++ b/cmd/drand.go @@ -0,0 +1,28 @@ +package cmd + +import ( + "github.com/filecoin-project/go-state-types/abi" + cmds "github.com/ipfs/go-ipfs-cmds" + + "github.com/filecoin-project/venus/app/node" +) + +var drandCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Retrieve randomness from drand server", + }, + Options: []cmds.Option{ + cmds.Uint64Option("height", "chain epoch (default 0)"), + cmds.Uint64Option("round", "retrieve randomness at requested round (default 0)"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + round, _ := req.Options["round"].(uint64) + height, _ := req.Options["height"].(uint64) + + entry, err := env.(*node.Env).ChainAPI.GetEntry(req.Context, abi.ChainEpoch(height), round) + if err != nil { + return err + } + return re.Emit(entry) + }, +} diff --git a/cmd/fetch.go b/cmd/fetch.go new file mode 100644 index 0000000000..09d5d8af20 --- /dev/null +++ b/cmd/fetch.go @@ -0,0 +1,39 @@ +package cmd + +import ( + cmds "github.com/ipfs/go-ipfs-cmds" + 
"github.com/pkg/errors" + + paramfetch "github.com/filecoin-project/go-paramfetch" + + "github.com/filecoin-project/venus/fixtures/assets" +) + +var fetchCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "fetch paramsters", + }, + Options: []cmds.Option{ + cmds.Uint64Option(Size, "size to fetch"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + // highest precedence is cmd line flag. + if size, ok := req.Options[Size].(uint64); ok { + ps, err := assets.GetProofParams() + if err != nil { + return err + } + + srs, err := assets.GetSrs() + if err != nil { + return err + } + + if err := paramfetch.GetParams(req.Context, ps, srs, size); err != nil { + return errors.Wrapf(err, "fetching proof parameters: %v", err) + } + return nil + } + return errors.New("uncorrect parameters") + }, +} diff --git a/cmd/go-filecoin/actor.go b/cmd/go-filecoin/actor.go deleted file mode 100644 index b7c6a4ff7d..0000000000 --- a/cmd/go-filecoin/actor.go +++ /dev/null @@ -1,78 +0,0 @@ -package commands - -import ( - "encoding/json" - "io" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" -) - -// ActorView represents a generic way to represent details about any actor to the user. -type ActorView struct { - Address string `json:"address"` - Code cid.Cid `json:"code,omitempty"` - Nonce uint64 `json:"nonce"` - Balance types.AttoFIL `json:"balance"` - Head cid.Cid `json:"head,omitempty"` -} - -var actorCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Interact with actors. 
Actors are built-in smart contracts.", - }, - Subcommands: map[string]*cmds.Command{ - "ls": actorLsCmd, - }, -} - -var actorLsCmd = &cmds.Command{ - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - results, err := GetPorcelainAPI(env).ActorLs(req.Context) - if err != nil { - return err - } - - for result := range results { - if result.Error != nil { - return result.Error - } - - output := makeActorView(result.Actor, result.Key) - if err := re.Emit(output); err != nil { - return err - } - } - return nil - }, - Type: &ActorView{}, - Encoders: cmds.EncoderMap{ - cmds.JSON: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, a *ActorView) error { - marshaled, err := json.Marshal(a) - if err != nil { - return err - } - _, err = w.Write(marshaled) - if err != nil { - return err - } - _, err = w.Write([]byte("\n")) - return err - }), - }, -} - -func makeActorView(act *actor.Actor, addr address.Address) *ActorView { - return &ActorView{ - Address: addr.String(), - Code: act.Code.Cid, - Nonce: act.CallSeqNum, - Balance: act.Balance, - Head: act.Head.Cid, - } -} diff --git a/cmd/go-filecoin/actor_integration_test.go b/cmd/go-filecoin/actor_integration_test.go deleted file mode 100644 index 8b807ff97b..0000000000 --- a/cmd/go-filecoin/actor_integration_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package commands_test - -import ( - "bytes" - "context" - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestActorDaemon(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - t.Run("actor ls --enc json returns NDJSON containing all actors in the state tree", func(t *testing.T) { - builder := test.NewNodeBuilder(t) - - _, 
cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - op1 := cmdClient.RunSuccess(ctx, "actor", "ls", "--enc", "json") - result1 := op1.ReadStdoutTrimNewlines() - - var avs []commands.ActorView - for _, line := range bytes.Split([]byte(result1), []byte{'\n'}) { - // unmarshall JSON to actor view an add to slice - var av commands.ActorView - err := json.Unmarshal(line, &av) - require.NoError(t, err) - avs = append(avs, av) - } - - assert.NotZero(t, len(avs)) - }) -} diff --git a/cmd/go-filecoin/address.go b/cmd/go-filecoin/address.go deleted file mode 100644 index b0534a7841..0000000000 --- a/cmd/go-filecoin/address.go +++ /dev/null @@ -1,186 +0,0 @@ -package commands - -import ( - "encoding/json" - "fmt" - - "github.com/filecoin-project/go-address" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - files "github.com/ipfs/go-ipfs-files" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -var walletCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Manage your filecoin wallets", - }, - Subcommands: map[string]*cmds.Command{ - "balance": balanceCmd, - "import": walletImportCmd, - "export": walletExportCmd, - }, -} - -var addrsCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Interact with addresses", - }, - Subcommands: map[string]*cmds.Command{ - "ls": addrsLsCmd, - "new": addrsNewCmd, - "default": defaultAddressCmd, - }, -} - -type AddressResult struct { - Address address.Address -} - -// AddressLsResult is the result of running the address list command. 
-type AddressLsResult struct { - Addresses []address.Address -} - -var addrsNewCmd = &cmds.Command{ - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - protocolName := req.Options["type"].(string) - var protocol address.Protocol - switch protocolName { - case "secp256k1": - protocol = address.SECP256K1 - case "bls": - protocol = address.BLS - default: - return fmt.Errorf("unrecognized address protocol %s", protocolName) - } - addr, err := GetPorcelainAPI(env).WalletNewAddress(protocol) - if err != nil { - return err - } - return re.Emit(&AddressResult{addr}) - }, - Options: []cmdkit.Option{ - cmdkit.StringOption("type", "The type of address to create: bls or secp256k1 (default)").WithDefault("secp256k1"), - }, - Type: &AddressResult{}, -} - -var addrsLsCmd = &cmds.Command{ - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - addrs := GetPorcelainAPI(env).WalletAddresses() - - var alr AddressLsResult - for _, addr := range addrs { - alr.Addresses = append(alr.Addresses, addr) - } - - return re.Emit(&alr) - }, - Type: &AddressLsResult{}, -} - -var defaultAddressCmd = &cmds.Command{ - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - addr, err := GetPorcelainAPI(env).WalletDefaultAddress() - if err != nil { - return err - } - - return re.Emit(&AddressResult{addr}) - }, - Type: &AddressResult{}, -} - -var balanceCmd = &cmds.Command{ - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("address", true, false, "Address to get balance for"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - addr, err := address.NewFromString(req.Arguments[0]) - if err != nil { - return err - } - - balance, err := GetPorcelainAPI(env).WalletBalance(req.Context, addr) - if err != nil { - return err - } - return re.Emit(balance) - }, - Type: &types.AttoFIL{}, -} - -// WalletSerializeResult is the type wallet export and import return and expect. 
-type WalletSerializeResult struct { - KeyInfo []*crypto.KeyInfo -} - -var walletImportCmd = &cmds.Command{ - Arguments: []cmdkit.Argument{ - cmdkit.FileArg("walletFile", true, false, "File containing wallet data to import").EnableStdin(), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - iter := req.Files.Entries() - if !iter.Next() { - return fmt.Errorf("no file given: %s", iter.Err()) - } - - fi, ok := iter.Node().(files.File) - if !ok { - return fmt.Errorf("given file was not a files.File") - } - - var wir *WalletSerializeResult - if err := json.NewDecoder(fi).Decode(&wir); err != nil { - return err - } - keyInfos := wir.KeyInfo - - if len(keyInfos) == 0 { - return fmt.Errorf("no keys in wallet file") - } - - addrs, err := GetPorcelainAPI(env).WalletImport(keyInfos...) - if err != nil { - return err - } - - var alr AddressLsResult - for _, addr := range addrs { - alr.Addresses = append(alr.Addresses, addr) - } - - return re.Emit(&alr) - }, - Type: &AddressLsResult{}, -} - -var walletExportCmd = &cmds.Command{ - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("addresses", true, true, "Addresses of keys to export").EnableStdin(), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - addrs := make([]address.Address, len(req.Arguments)) - for i, arg := range req.Arguments { - addr, err := address.NewFromString(arg) - if err != nil { - return err - } - addrs[i] = addr - } - - kis, err := GetPorcelainAPI(env).WalletExport(addrs) - if err != nil { - return err - } - - var klr WalletSerializeResult - klr.KeyInfo = append(klr.KeyInfo, kis...) 
- - return re.Emit(klr) - }, - Type: &WalletSerializeResult{}, -} diff --git a/cmd/go-filecoin/address_integration_test.go b/cmd/go-filecoin/address_integration_test.go deleted file mode 100644 index 88702f4461..0000000000 --- a/cmd/go-filecoin/address_integration_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package commands_test - -import ( - "context" - "encoding/json" - "os" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestAddressNewAndList(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - addrs := make([]address.Address, 10) - var err error - for i := 0; i < 10; i++ { - addrs[i], err = n.PorcelainAPI.WalletNewAddress(address.SECP256K1) - require.NoError(t, err) - } - - list := cmdClient.RunSuccess(ctx, "address", "ls").ReadStdout() - for _, addr := range addrs { - assert.Contains(t, list, addr.String()) - } -} - -func TestWalletBalance(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - builder := test.NewNodeBuilder(t) - cs := node.FixtureChainSeed(t) - builder.WithGenesisInit(cs.GenesisInitFunc) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - addr, err := n.PorcelainAPI.WalletNewAddress(address.SECP256K1) - require.NoError(t, err) - - t.Log("[success] not found, zero") - var balance abi.TokenAmount - 
cmdClient.RunMarshaledJSON(ctx, &balance, "wallet", "balance", addr.String()) - assert.Equal(t, "0", balance.String()) - - t.Log("[success] balance 1394000000000000000000000000") - cmdClient.RunMarshaledJSON(ctx, &balance, "wallet", "balance", builtin.RewardActorAddr.String()) - assert.Equal(t, "1394000000000000000000000000", balance.String()) - - t.Log("[success] newly generated one") - var addrNew commands.AddressResult - cmdClient.RunMarshaledJSON(ctx, &addrNew, "address", "new") - cmdClient.RunMarshaledJSON(ctx, &balance, "wallet", "balance", addrNew.Address.String()) - assert.Equal(t, "0", balance.String()) -} - -func TestWalletLoadFromFile(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - builder := test.NewNodeBuilder(t) - cs := node.FixtureChainSeed(t) - builder.WithGenesisInit(cs.GenesisInitFunc) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - for _, p := range fortest.KeyFilePaths() { - cmdClient.RunSuccess(ctx, "wallet", "import", p) - } - - var addrs commands.AddressLsResult - cmdClient.RunMarshaledJSON(ctx, &addrs, "address", "ls") - - for _, addr := range fortest.TestAddresses { - // assert we loaded the test address from the file - assert.Contains(t, addrs.Addresses, addr) - } - - // assert default amount of funds were allocated to address during genesis - var balance abi.TokenAmount - cmdClient.RunMarshaledJSON(ctx, &balance, "wallet", "balance", fortest.TestAddresses[0].String()) - assert.Equal(t, "1000000000000000000000000", balance.String()) -} - -func TestWalletExportImportRoundTrip(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - var lsResult commands.AddressLsResult - cmdClient.RunMarshaledJSON(ctx, &lsResult, "address", "ls") - require.Len(t, lsResult.Addresses, 1) - - exportJSON := cmdClient.RunSuccess(ctx, "wallet", "export", 
lsResult.Addresses[0].String()).ReadStdout() - var exportResult commands.WalletSerializeResult - err := json.Unmarshal([]byte(exportJSON), &exportResult) - require.NoError(t, err) - - wf, err := os.Create("walletFileTest") - require.NoError(t, err) - defer func() { - require.NoError(t, os.Remove("walletFileTest")) - }() - - _, err = wf.WriteString(exportJSON) - require.NoError(t, err) - require.NoError(t, wf.Close()) - - var importResult commands.AddressLsResult - cmdClient.RunMarshaledJSON(ctx, &importResult, "wallet", "import", wf.Name()) - assert.Len(t, importResult.Addresses, 1) - assert.Equal(t, lsResult.Addresses[0], importResult.Addresses[0]) -} diff --git a/cmd/go-filecoin/bootstrap.go b/cmd/go-filecoin/bootstrap.go deleted file mode 100644 index 5f0b60bc94..0000000000 --- a/cmd/go-filecoin/bootstrap.go +++ /dev/null @@ -1,32 +0,0 @@ -package commands - -import ( - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" -) - -// BootstrapLsResult is the result of the bootstrap listing command. 
-type BootstrapLsResult struct { - Peers []string -} - -var bootstrapCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Interact with bootstrap addresses", - }, - Subcommands: map[string]*cmds.Command{ - "ls": bootstrapLsCmd, - }, -} - -var bootstrapLsCmd = &cmds.Command{ - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - peers, err := GetPorcelainAPI(env).ConfigGet("bootstrap.addresses") - if err != nil { - return err - } - - return re.Emit(&BootstrapLsResult{peers.([]string)}) - }, - Type: &BootstrapLsResult{}, -} diff --git a/cmd/go-filecoin/bootstrap_integration_test.go b/cmd/go-filecoin/bootstrap_integration_test.go deleted file mode 100644 index 20259e099a..0000000000 --- a/cmd/go-filecoin/bootstrap_integration_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestBootstrapList(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - builder := test.NewNodeBuilder(t) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - bs := cmdClient.RunSuccess(ctx, "bootstrap", "ls") - - assert.Equal(t, "{\n\t\"Peers\": []\n}\n", bs.ReadStdout()) -} diff --git a/cmd/go-filecoin/chain.go b/cmd/go-filecoin/chain.go deleted file mode 100644 index 9abcc643b8..0000000000 --- a/cmd/go-filecoin/chain.go +++ /dev/null @@ -1,187 +0,0 @@ -// Package commands implements the command to print the blockchain. 
-package commands - -import ( - "fmt" - "os" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - files "github.com/ipfs/go-ipfs-files" - "github.com/libp2p/go-libp2p-core/peer" -) - -var chainCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Inspect the filecoin blockchain", - }, - Subcommands: map[string]*cmds.Command{ - "export": storeExportCmd, - "head": storeHeadCmd, - "import": storeImportCmd, - "ls": storeLsCmd, - "status": storeStatusCmd, - "set-head": storeSetHeadCmd, - "sync": storeSyncCmd, - }, -} - -var storeHeadCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Get heaviest tipset CIDs", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - head, err := GetPorcelainAPI(env).ChainHead() - if err != nil { - return err - } - return re.Emit(head.Key()) - }, - Type: []cid.Cid{}, -} - -var storeLsCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "List blocks in the blockchain", - ShortDescription: `Provides a list of blocks in order from head to genesis. 
By default, only CIDs are returned for each block.`, - }, - Options: []cmdkit.Option{ - cmdkit.BoolOption("long", "l", "List blocks in long format, including CID, Miner, StateRoot, block height and message count respectively"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - iter, err := GetPorcelainAPI(env).ChainLs(req.Context) - if err != nil { - return err - } - for ; !iter.Complete(); err = iter.Next() { - if err != nil { - return err - } - if !iter.Value().Defined() { - panic("tipsets from this iterator should have at least one member") - } - if err := re.Emit(iter.Value().ToSlice()); err != nil { - return err - } - } - return nil - }, - Type: []block.Block{}, -} - -var storeStatusCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show status of chain sync operation.", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - syncStatus := GetPorcelainAPI(env).SyncerStatus() - if err := re.Emit(syncStatus); err != nil { - return err - } - return nil - }, -} - -var storeSetHeadCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Set the chain head to a specific tipset key.", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cids", true, true, "CID's of the blocks of the tipset to set the chain head to."), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - headCids, err := cidsFromSlice(req.Arguments) - if err != nil { - return err - } - maybeNewHead := block.NewTipSetKey(headCids...) 
- return GetPorcelainAPI(env).ChainSetHead(req.Context, maybeNewHead) - }, -} - -var storeSyncCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Instruct the chain syncer to sync a specific chain head, going to network if required.", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("peerid", true, false, "Base58-encoded libp2p peer ID to sync from"), - cmdkit.StringArg("cids", true, true, "CID's of the blocks of the tipset to sync."), - }, - Options: []cmdkit.Option{}, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - syncPid, err := peer.Decode(req.Arguments[0]) - if err != nil { - return err - } - - syncCids, err := cidsFromSlice(req.Arguments[1:]) - if err != nil { - return err - } - - syncKey := block.NewTipSetKey(syncCids...) - ci := &block.ChainInfo{ - Source: syncPid, - Sender: syncPid, - Height: 0, // only checked when trusted is false. - Head: syncKey, - } - return GetPorcelainAPI(env).ChainSyncHandleNewTipSet(ci) - }, -} - -var storeExportCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Export the chain store to a car file.", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("file", true, false, "File to export chain data to."), - cmdkit.StringArg("cids", true, true, "CID's of the blocks of the tipset to export from."), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - f, err := os.Create(req.Arguments[0]) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - expCids, err := cidsFromSlice(req.Arguments[1:]) - if err != nil { - return err - } - expKey := block.NewTipSetKey(expCids...) 
- - if err := GetPorcelainAPI(env).ChainExport(req.Context, expKey, f); err != nil { - return err - } - return nil - }, -} - -var storeImportCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Import the chain from a car file.", - }, - Arguments: []cmdkit.Argument{ - cmdkit.FileArg("file", true, false, "File to import chain data from.").EnableStdin(), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - iter := req.Files.Entries() - if !iter.Next() { - return fmt.Errorf("no file given: %s", iter.Err()) - } - - fi, ok := iter.Node().(files.File) - if !ok { - return fmt.Errorf("given file was not a files.File") - } - defer func() { _ = fi.Close() }() - headKey, err := GetPorcelainAPI(env).ChainImport(req.Context, fi) - if err != nil { - return err - } - return re.Emit(headKey) - }, -} diff --git a/cmd/go-filecoin/chain_integration_test.go b/cmd/go-filecoin/chain_integration_test.go deleted file mode 100644 index 2701d411c9..0000000000 --- a/cmd/go-filecoin/chain_integration_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package commands_test - -import ( - "bytes" - "context" - "encoding/json" - "testing" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestChainHead(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - jsonResult := cmdClient.RunSuccess(ctx, "chain", "head", "--enc", "json").ReadStdoutTrimNewlines() - var cidsFromJSON []cid.Cid - err := json.Unmarshal([]byte(jsonResult), &cidsFromJSON) - assert.NoError(t, err) -} - -func 
TestChainLs(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - t.Run("chain ls with json encoding returns the whole chain as json", func(t *testing.T) { - seed, cfg, fakeClk, chainClk := test.CreateBootstrapSetup(t) - n := test.CreateBootstrapMiner(ctx, t, seed, chainClk, cfg) - - cmdClient, apiDone := test.RunNodeAPI(ctx, n, t) - defer apiDone() - - blk := test.RequireMineOnce(ctx, t, fakeClk, n) - c := blk.Cid() - - result2 := cmdClient.RunSuccess(ctx, "chain", "ls", "--enc", "json").ReadStdoutTrimNewlines() - var bs [][]block.Block - for _, line := range bytes.Split([]byte(result2), []byte{'\n'}) { - var b []block.Block - err := json.Unmarshal(line, &b) - require.NoError(t, err) - bs = append(bs, b) - require.Equal(t, 1, len(b)) - } - - assert.Equal(t, 2, len(bs)) - assert.True(t, bs[1][0].Parents.Empty()) - assert.True(t, c.Equals(bs[0][0].Cid())) - }) - - t.Run("chain ls with chain of size 1 returns genesis block", func(t *testing.T) { - builder := test.NewNodeBuilder(t) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - op := cmdClient.RunSuccess(ctx, "chain", "ls", "--enc", "json") - result := op.ReadStdoutTrimNewlines() - - var b []block.Block - err := json.Unmarshal([]byte(result), &b) - require.NoError(t, err) - - assert.True(t, b[0].Parents.Empty()) - }) - - t.Run("chain ls --long returns CIDs, Miner, block height and message count", func(t *testing.T) { - seed, cfg, fakeClk, chainClk := test.CreateBootstrapSetup(t) - n := test.CreateBootstrapMiner(ctx, t, seed, chainClk, cfg) - - cmdClient, apiDone := test.RunNodeAPI(ctx, n, t) - defer apiDone() - - test.RequireMineOnce(ctx, t, fakeClk, n) - - chainLsResult := cmdClient.RunSuccess(ctx, "chain", "ls", "--long").ReadStdoutTrimNewlines() - - assert.Contains(t, chainLsResult, fortest.TestMiners[0].String()) - assert.Contains(t, chainLsResult, "1") - assert.Contains(t, chainLsResult, "0") - }) - - t.Run("chain ls --long with JSON encoding returns integer string 
block height", func(t *testing.T) { - seed, cfg, fakeClk, chainClk := test.CreateBootstrapSetup(t) - n := test.CreateBootstrapMiner(ctx, t, seed, chainClk, cfg) - - cmdClient, apiDone := test.RunNodeAPI(ctx, n, t) - defer apiDone() - - test.RequireMineOnce(ctx, t, fakeClk, n) - - chainLsResult := cmdClient.RunSuccess(ctx, "chain", "ls", "--long", "--enc", "json").ReadStdoutTrimNewlines() - assert.Contains(t, chainLsResult, `"height":0`) - assert.Contains(t, chainLsResult, `"height":1`) - }) -} diff --git a/cmd/go-filecoin/client.go b/cmd/go-filecoin/client.go deleted file mode 100644 index 70f898e12a..0000000000 --- a/cmd/go-filecoin/client.go +++ /dev/null @@ -1,287 +0,0 @@ -package commands - -import ( - "fmt" - "strconv" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/pkg/errors" - - "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - files "github.com/ipfs/go-ipfs-files" - p2pcore "github.com/libp2p/go-libp2p-core/peer" -) - -var clientCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Make deals, store data, retrieve data", - }, - Subcommands: map[string]*cmds.Command{ - "cat": clientCatCmd, - "import": clientImportDataCmd, - "propose-storage-deal": ClientProposeStorageDealCmd, - "query-storage-deal": ClientQueryStorageDealCmd, - "verify-storage-deal": clientVerifyStorageDealCmd, - "list-asks": clientListAsksCmd, - }, -} - -var clientCatCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Read out data stored on the network", - ShortDescription: ` -Prints data from the storage market specified with a given CID to stdout. The -only argument should be the CID to return. The data will be returned in whatever -format was provided with the data initially. 
-`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "CID of data to read"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - c, err := cid.Decode(req.Arguments[0]) - if err != nil { - return err - } - - dr, err := GetPorcelainAPI(env).DAGCat(req.Context, c) - if err != nil { - return err - } - - return re.Emit(dr) - }, -} - -var clientImportDataCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Import data into the local node", - ShortDescription: ` -Imports data previously exported with the client cat command into the storage -market. This command takes only one argument, the path of the file to import. -See the go-filecoin client cat command for more details. -`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.FileArg("file", true, false, "Path to file to import").EnableStdin(), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - iter := req.Files.Entries() - if !iter.Next() { - return fmt.Errorf("no file given: %s", iter.Err()) - } - - fi, ok := iter.Node().(files.File) - if !ok { - return fmt.Errorf("given file was not a files.File") - } - - out, err := GetPorcelainAPI(env).DAGImportData(req.Context, fi) - if err != nil { - return err - } - - return re.Emit(out.Cid()) - }, - Type: cid.Cid{}, -} - -var ClientProposeStorageDealCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Propose a storage deal with a storage miner", - ShortDescription: `Sends a storage deal proposal to a miner`, - LongDescription: ` -Send a storage deal proposal to a miner. - -Start and end should be specified with the number of blocks for which to store the -data. New blocks are generated about every 30 seconds, so the time given should -be represented as a count of 30 second intervals. For example, 1 minute would -be 2, 1 hour would be 120, and 1 day would be 2880. 
-`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("miner", true, false, "Address of miner to send storage proposal"), - cmdkit.StringArg("data", true, false, "CID of the data to be stored"), - cmdkit.StringArg("start", true, false, "Chain epoch at which deal should start"), - cmdkit.StringArg("end", true, false, "Chain epoch at which deal should end"), - cmdkit.StringArg("price", true, false, "Storage price per epoch of all data in FIL (e.g. 0.01)"), - cmdkit.StringArg("collateral", true, false, "Collateral of deal in FIL (e.g. 0.01)"), - }, - Options: []cmdkit.Option{ - cmdkit.StringOption("peerid", "Override miner's peer id stored on chain"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - addr, err := GetPorcelainAPI(env).WalletDefaultAddress() - if err != nil { - return err - } - - maddr, err := address.NewFromString(req.Arguments[0]) - if err != nil { - return err - } - - chainHead := GetPorcelainAPI(env).ChainHeadKey() - - dataCID, err := cid.Decode(req.Arguments[1]) - if err != nil { - return errors.Wrap(err, "could not decode data cid") - } - - data := &storagemarket.DataRef{ - TransferType: "graphsync", - Root: dataCID, - } - - status, err := GetPorcelainAPI(env).MinerGetStatus(req.Context, maddr, chainHead) - if err != nil { - return err - } - - peerID := status.PeerID - peerIDStr, _ := req.Options["peerid"].(string) - if peerIDStr != "" { - peerID, err = p2pcore.Decode(peerIDStr) - if err != nil { - return err - } - } - - providerInfo := &storagemarket.StorageProviderInfo{ - Address: maddr, - Owner: status.OwnerAddress, - Worker: status.WorkerAddress, - SectorSize: uint64(status.SectorSize), - PeerID: peerID, - } - - start, err := strconv.ParseUint(req.Arguments[2], 10, 64) - if err != nil { - return errors.Wrap(err, "could not parse deal start") - } - - end, err := strconv.ParseUint(req.Arguments[3], 10, 64) - if err != nil { - return errors.Wrap(err, "could not parse deal end") - } - - price, valid 
:= types.NewAttoFILFromFILString(req.Arguments[4]) - if !valid { - return errors.Errorf("could not parse price %s", req.Arguments[5]) - } - - collateral, valid := types.NewAttoFILFromFILString(req.Arguments[5]) - if !valid { - return errors.Errorf("could not parse collateral %s", req.Arguments[6]) - } - - resp, err := GetStorageAPI(env).ProposeStorageDeal( - req.Context, - addr, - providerInfo, - data, - abi.ChainEpoch(start), - abi.ChainEpoch(end), - price, - collateral, - status.SealProofType, - ) - if err != nil { - return err - } - - return re.Emit(resp) - }, - Type: storagemarket.ProposeStorageDealResult{}, -} - -var ClientQueryStorageDealCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Query a storage deal's status", - ShortDescription: ` -Checks the status of the storage deal proposal specified by the id. The deal -status and deal message will be returned as a formatted string unless another -format is specified with the --enc flag. -`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("id", true, false, "CID of deal to query"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - dealCID, err := cid.Decode(req.Arguments[0]) - if err != nil { - return errors.Wrap(err, "could not decode deal cid") - } - - deal, err := GetStorageAPI(env).GetStorageDeal(req.Context, dealCID) - if err != nil { - return err - } - - return re.Emit(deal) - }, - Type: storagemarket.ClientDeal{}, -} - -// VerifyStorageDealResult wraps the success in an interface type -type VerifyStorageDealResult struct { -} - -var clientVerifyStorageDealCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Verify a storage deal", - ShortDescription: ` -Returns an error if the deal is not in the Complete state. Returns nil otherwise. 
-`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("id", true, false, "CID of deal to query"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - dealCID, err := cid.Decode(req.Arguments[0]) - if err != nil { - return errors.Wrap(err, "could not decode deal cid") - } - - deal, err := GetStorageAPI(env).GetStorageDeal(req.Context, dealCID) - if err != nil { - return err - } - - if deal.State != storagemarket.StorageDealActive { - return errors.New("storage deal not in Active state") - } - - // TODO: Check for slashes - return re.Emit(&VerifyStorageDealResult{}) - }, - Type: &VerifyStorageDealResult{}, -} - -var clientListAsksCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "List all asks in the storage market", - ShortDescription: ` -Lists all asks in the storage market. This command takes no arguments. -`, - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - minerAddr, err := GetBlockAPI(env).MinerAddress() - if err != nil { - return err - } - - asks, err := GetStorageAPI(env).ListAsks(minerAddr) - if err != nil { - return err - } - - return re.Emit(asks) - }, - Type: []*storagemarket.SignedStorageAsk{}, -} diff --git a/cmd/go-filecoin/client_daemon_test.go b/cmd/go-filecoin/client_daemon_test.go deleted file mode 100644 index 6c9b87b311..0000000000 --- a/cmd/go-filecoin/client_daemon_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/stretchr/testify/assert" -) - -func TestListAsks(t *testing.T) { - 
tf.IntegrationTest(t) - ctx := context.TODO() - - seed, cfg, fakeClk, chainClk := test.CreateBootstrapSetup(t) - n := test.CreateBootstrapMiner(ctx, t, seed, chainClk, cfg) - - minerDaemon, apiDone := test.RunNodeAPI(ctx, n, t) - defer apiDone() - - minerDaemon.RunSuccess(ctx, "miner", "set-price", "20", "10") - - test.RequireMineOnce(ctx, t, fakeClk, n) - - var asks []*storagemarket.SignedStorageAsk - minerDaemon.RunMarshaledJSON(ctx, &asks, "client", "list-asks") - assert.Len(t, asks, 1) - ask := asks[0].Ask - assert.Equal(t, fortest.TestMiners[0], ask.Miner) - assert.Equal(t, uint64(1), ask.SeqNo) - assert.Equal(t, types.NewAttoFILFromFIL(20), ask.Price) - assert.Equal(t, abi.ChainEpoch(10), ask.Expiry) -} diff --git a/cmd/go-filecoin/client_integration_test.go b/cmd/go-filecoin/client_integration_test.go deleted file mode 100644 index a5d43b8765..0000000000 --- a/cmd/go-filecoin/client_integration_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package commands_test - -import ( - "bytes" - "context" - "testing" - "time" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestProposeDeal(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - nodes, cancel := test.MustCreateNodesWithBootstrap(ctx, t, 1) - defer cancel() - - miner := nodes[0] - - maddr, err := miner.BlockMining.BlockMiningAPI.MinerAddress() - require.NoError(t, err) - - client := nodes[1] - clientAPI, clientStop := test.RunNodeAPI(ctx, client, t) - defer clientStop() - - clientAddr, err := client.PorcelainAPI.WalletDefaultAddress() - require.NoError(t, err) - - // Add enough funds (1 FIL) for client and miner to to cover deal - provider, err := miner.StorageProtocol.Provider() - require.NoError(t, err) - 
- err = provider.AddStorageCollateral(ctx, types.NewAttoFILFromFIL(1)) - require.NoError(t, err) - err = client.StorageProtocol.Client().AddPaymentEscrow(ctx, clientAddr, types.NewAttoFILFromFIL(1)) - require.NoError(t, err) - - // import empty 1K of bytes to create piece - input := bytes.NewBuffer(make([]byte, 1024)) - node, err := client.PorcelainAPI.DAGImportData(ctx, input) - require.NoError(t, err) - - // propose deal - var result storagemarket.ProposeStorageDealResult - clientAPI.RunMarshaledJSON(ctx, &result, "client", "propose-storage-deal", - "--peerid", miner.Host().ID().String(), - maddr.String(), - node.Cid().String(), - "1000", - "2000", - ".0000000000001", - "1", - ) - - // wait for deal to process - var dealStatus storagemarket.ClientDeal - for i := 0; i < 30; i++ { - clientAPI.RunMarshaledJSON(ctx, &dealStatus, "client", "query-storage-deal", result.ProposalCid.String()) - switch dealStatus.State { - case storagemarket.StorageDealProposalAccepted, - storagemarket.StorageDealStaged, - storagemarket.StorageDealSealing, - storagemarket.StorageDealActive: - // Deal accepted. Test passed. - return - default: - time.Sleep(1 * time.Second) // in progress, wait and continue - } - } - t.Error("timeout waiting for deal status update") -} diff --git a/cmd/go-filecoin/commands_test.go b/cmd/go-filecoin/commands_test.go deleted file mode 100644 index a162d1d598..0000000000 --- a/cmd/go-filecoin/commands_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package commands_test - -import ( - "testing" - - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" -) - -// create a basic new TestDaemon, with a miner and the KeyInfo it needs to sign -// tickets and blocks. 
This does not set a DefaultAddress in the Wallet; in this -// case, node/init.go Init generates a new address in the wallet and sets it to -// the default address. -func makeTestDaemonWithMinerAndStart(t *testing.T) *th.TestDaemon { - daemon := th.NewDaemon( - t, - th.WithMiner(fortest.TestMiners[0]), - th.KeyFile(fortest.KeyFilePaths()[0]), - ).Start() - return daemon -} - -func buildWithMiner(t *testing.T, builder *test.NodeBuilder) { - // bundle together common init options for node test state - cs := node.FixtureChainSeed(t) - builder.WithGenesisInit(cs.GenesisInitFunc) - builder.WithConfig(cs.MinerConfigOpt(0)) - builder.WithInitOpt(cs.MinerInitOpt(0)) -} diff --git a/cmd/go-filecoin/config.go b/cmd/go-filecoin/config.go deleted file mode 100644 index ba463b77e6..0000000000 --- a/cmd/go-filecoin/config.go +++ /dev/null @@ -1,102 +0,0 @@ -package commands - -import ( - "strings" - - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" -) - -var configCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Get and set filecoin config values", - ShortDescription: ` -go-filecoin config controls configuration variables. These variables are stored -in a config file inside your filecoin repo. When getting values, a key should be -provided, like so: - -go-filecoin config KEY - -When setting values, the key should be given first followed by the value and -separated by a space, like so: - -go-filecoin config KEY VALUE - -The key should be specified as a period separated string of keys. The value may -be either a bare string or any valid json compatible with the given key.`, - LongDescription: ` -go-filecoin config controls configuration variables. The configuration values -are stored as a JSON config file in your filecoin repo. When using go-filecoin -config, a key and value may be provided to set variables, or just a key may be -provided to fetch it's associated value without modifying it. 
- -Keys should be listed with a dot separation for each layer of nesting within The -JSON config. For example, the "addresses" key resides within an object under the -"bootstrap" key, therefore it should be addressed with the string -"bootstrap.addresses" like so: - -$ go-filecoin config bootstrap.addresses -[ - "newaddr" -] - -Values may be either bare strings (be sure to quote said string if they contain -spaces to avoid arguments being separated by your shell) or as encoded JSON -compatible with the associated keys. For example, "bootstrap.addresses" expects -an array of strings, so it should be set with something like so: - -$ go-filecoin config bootstrap.addresses '["newaddr"]' - -When setting keys with subkeys, such as the "bootstrap" key which has 3 keys -underneath it, period, minPeerThreshold, and addresses, the given JSON value -will be merged with existing values to avoid unintentionally resetting other -configuration variables under "bootstrap". For example, setting period then -setting addresses, like so, will not change the value of "period": - -$ go-filecoin config bootstrap -{ - "addresses": [], - "minPeerThreshold": 0, - "period": "1m" -} -$ go-filecoin config bootstrap '{"period": "5m"}' -$ go-filecoin config bootstrap '{"addresses": ["newaddr"]}' -$ go-filecoin config bootstrap -{ - "addresses": ["newaddr"], - "minPeerThreshold": 0, - "period": "5m" -} -`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("key", true, false, "The key of the config entry (e.g. 
\"api.address\")"), - cmdkit.StringArg("value", false, false, "Optionally, a value with which to set the config entry"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - api := GetPorcelainAPI(env) - key := req.Arguments[0] - var value string - - if len(req.Arguments) == 2 { - value = req.Arguments[1] - } else if strings.Contains(key, "=") { - args := strings.Split(key, "=") - key = args[0] - value = args[1] - } - - if value != "" { - err := api.ConfigSet(key, value) - if err != nil { - return err - } - } - res, err := api.ConfigGet(key) - if err != nil { - return err - } - - return re.Emit(res) - }, -} diff --git a/cmd/go-filecoin/config_integration_test.go b/cmd/go-filecoin/config_integration_test.go deleted file mode 100644 index 23a55bbda6..0000000000 --- a/cmd/go-filecoin/config_integration_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestConfigDaemon(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - t.Run("config prints config value", func(t *testing.T) { - builder := test.NewNodeBuilder(t) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - wrapped1 := config.NewDefaultConfig().Datastore - - var decodedOutput1 config.DatastoreConfig - cmdClient.RunMarshaledJSON(ctx, &decodedOutput1, "config", "datastore") - assert.Equal(t, wrapped1, &decodedOutput1) - - var path string - cmdClient.RunMarshaledJSON(ctx, &path, "config", "datastore.path") - assert.Equal(t, config.NewDefaultConfig().Datastore.Path, path) - }) - - t.Run("config simple_value updates config", func(t *testing.T) { - builder := 
test.NewNodeBuilder(t) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - period := "1m" - // check writing default does not error - cmdClient.RunSuccess(ctx, "config", "bootstrap.period", period) - - // validate output - var retrievedPeriod string - cmdClient.RunMarshaledJSON(ctx, &retrievedPeriod, "config", "bootstrap.period") - assert.Equal(t, period, retrievedPeriod) - - // validate config write - nbci, err := n.PorcelainAPI.ConfigGet("bootstrap.period") - require.NoError(t, err) - nbc, ok := nbci.(string) - require.True(t, ok) - assert.Equal(t, nbc, period) - }) - - t.Run("config updates config", func(t *testing.T) { - builder := test.NewNodeBuilder(t) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - cmdClient.RunSuccess(ctx, "config", "bootstrap", `{"addresses": ["fake1", "fake2"], "period": "1m", "minPeerThreshold": 0}`) - - var bootstrapConfig config.BootstrapConfig - cmdClient.RunMarshaledJSON(ctx, &bootstrapConfig, "config", "bootstrap") - - // validate output - require.Len(t, bootstrapConfig.Addresses, 2) - assert.Equal(t, "fake1", bootstrapConfig.Addresses[0]) - assert.Equal(t, "fake2", bootstrapConfig.Addresses[1]) - - // validate config write - nbci, err := n.PorcelainAPI.ConfigGet("bootstrap") - require.NoError(t, err) - nbc, ok := nbci.(*config.BootstrapConfig) - require.True(t, ok) - - assert.Equal(t, nbc, &bootstrapConfig) - }) -} diff --git a/cmd/go-filecoin/daemon.go b/cmd/go-filecoin/daemon.go deleted file mode 100644 index 02a8f14c70..0000000000 --- a/cmd/go-filecoin/daemon.go +++ /dev/null @@ -1,239 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "net/http" - _ "net/http/pprof" // nolint: golint - "os" - "os/signal" - "syscall" - "time" - - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - cmdhttp "github.com/ipfs/go-ipfs-cmds/http" - ma "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr-net" - "github.com/pkg/errors" 
- - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/journal" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" -) - -var daemonCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Start a long-running daemon process", - }, - Options: []cmdkit.Option{ - cmdkit.StringOption(SwarmAddress, "multiaddress to listen on for filecoin network connections"), - cmdkit.StringOption(SwarmPublicRelayAddress, "public multiaddress for routing circuit relay traffic. Necessary for relay nodes to provide this if they are not publically dialable"), - cmdkit.BoolOption(OfflineMode, "start the node without networking"), - cmdkit.BoolOption(ELStdout), - cmdkit.BoolOption(IsRelay, "advertise and allow filecoin network traffic to be relayed through this node"), - cmdkit.StringOption(BlockTime, "period a node waits between mining successive blocks").WithDefault(clock.DefaultEpochDuration.String()), - cmdkit.StringOption(PropagationDelay, "time a node waits after the start of an epoch for blocks to arrive").WithDefault(clock.DefaultPropagationDelay.String()), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - return daemonRun(req, re) - }, -} - -func daemonRun(req *cmds.Request, re cmds.ResponseEmitter) error { - // third precedence is config file. - rep, err := getRepo(req) - if err != nil { - return err - } - config := rep.Config() - - // second highest precedence is env vars. - if envAPI := os.Getenv("FIL_API"); envAPI != "" { - config.API.Address = envAPI - } - - // highest precedence is cmd line flag. 
- if flagAPI, ok := req.Options[OptionAPI].(string); ok && flagAPI != "" { - config.API.Address = flagAPI - } - - if swarmAddress, ok := req.Options[SwarmAddress].(string); ok && swarmAddress != "" { - config.Swarm.Address = swarmAddress - } - - if publicRelayAddress, ok := req.Options[SwarmPublicRelayAddress].(string); ok && publicRelayAddress != "" { - config.Swarm.PublicRelayAddress = publicRelayAddress - } - - opts, err := node.OptionsFromRepo(rep) - if err != nil { - return err - } - - if offlineMode, ok := req.Options[OfflineMode].(bool); ok { - opts = append(opts, node.OfflineMode(offlineMode)) - } - - if isRelay, ok := req.Options[IsRelay].(bool); ok && isRelay { - opts = append(opts, node.IsRelay()) - } - - durStr, ok := req.Options[BlockTime].(string) - if !ok { - return fmt.Errorf("invalid %s: %v", BlockTime, req.Options[BlockTime]) - } - blockTime, err := time.ParseDuration(durStr) - if err != nil { - return fmt.Errorf("invalid %s: %s", BlockTime, durStr) - } - opts = append(opts, node.BlockTime(blockTime)) - - delayStr, ok := req.Options[PropagationDelay].(string) - if !ok { - return fmt.Errorf("invalid %s: %v", PropagationDelay, req.Options[PropagationDelay]) - } - propDelay, err := time.ParseDuration(delayStr) - if err != nil { - return fmt.Errorf("invalid %s: %s", PropagationDelay, delayStr) - } - opts = append(opts, node.PropagationDelay(propDelay)) - - journal, err := journal.NewZapJournal(rep.JournalPath()) - if err != nil { - return err - } - opts = append(opts, node.JournalConfigOption(journal)) - - // Monkey-patch network parameters option will set package variables during node build - opts = append(opts, node.MonkeyPatchNetworkParamsOption(config.NetworkParams)) - - // Instantiate the node. - fcn, err := node.New(req.Context, opts...) 
- if err != nil { - return err - } - - if fcn.OfflineMode { - _ = re.Emit("Filecoin node running in offline mode (libp2p is disabled)\n") - } else { - _ = re.Emit(fmt.Sprintf("My peer ID is %s\n", fcn.Host().ID().Pretty())) - for _, a := range fcn.Host().Addrs() { - _ = re.Emit(fmt.Sprintf("Swarm listening on: %s\n", a)) - } - } - - if _, ok := req.Options[ELStdout].(bool); ok { - _ = re.Emit("--" + ELStdout + " option is deprecated\n") - } - - // Start the node. - if err := fcn.Start(req.Context); err != nil { - return err - } - defer fcn.Stop(req.Context) - - // Run API server around the node. - ready := make(chan interface{}, 1) - go func() { - <-ready - _ = re.Emit(fmt.Sprintf("API server listening on %s\n", config.API.Address)) - }() - - var terminate = make(chan os.Signal, 1) - signal.Notify(terminate, os.Interrupt, syscall.SIGTERM) - defer signal.Stop(terminate) - - // The request is expected to remain open so the daemon uses the request context. - // Pass a new context here if the flow changes such that the command should exit while leaving - // a forked deamon running. - return RunAPIAndWait(req.Context, fcn, config.API, ready, terminate) -} - -func getRepo(req *cmds.Request) (repo.Repo, error) { - repoDir, _ := req.Options[OptionRepoDir].(string) - repoDir, err := paths.GetRepoPath(repoDir) - if err != nil { - return nil, err - } - return repo.OpenFSRepo(repoDir, repo.Version) -} - -// RunAPIAndWait starts an API server and waits for it to finish. -// The `ready` channel is closed when the server is running and its API address has been -// saved to the node's repo. -// A message sent to or closure of the `terminate` channel signals the server to stop. -func RunAPIAndWait(ctx context.Context, nd *node.Node, config *config.APIConfig, ready chan interface{}, terminate chan os.Signal) error { - servenv := CreateServerEnv(ctx, nd) - - cfg := cmdhttp.NewServerConfig() - cfg.APIPath = APIPrefix - cfg.SetAllowedOrigins(config.AccessControlAllowOrigin...) 
- cfg.SetAllowedMethods(config.AccessControlAllowMethods...) - cfg.SetAllowCredentials(config.AccessControlAllowCredentials) - - maddr, err := ma.NewMultiaddr(config.Address) - if err != nil { - return err - } - - // Listen on the configured address in order to bind the port number in case it has - // been configured as zero (i.e. OS-provided) - apiListener, err := manet.Listen(maddr) - if err != nil { - return err - } - - handler := http.NewServeMux() - handler.Handle("/debug/pprof/", http.DefaultServeMux) - handler.Handle(APIPrefix+"/", cmdhttp.NewHandler(servenv, rootCmdDaemon, cfg)) - - apiserv := http.Server{ - Handler: handler, - } - - go func() { - err := apiserv.Serve(manet.NetListener(apiListener)) - if err != nil && err != http.ErrServerClosed { - panic(err) - } - }() - - // Write the resolved API address to the repo - config.Address = apiListener.Multiaddr().String() - if err := nd.Repo.SetAPIAddr(config.Address); err != nil { - return errors.Wrap(err, "Could not save API address to repo") - } - // Signal that the sever has started and then wait for a signal to stop. - close(ready) - received := <-terminate - if received != nil { - fmt.Println("Received signal", received) - } - fmt.Println("Shutting down...") - - // Allow a grace period for clean shutdown. 
- ctx, cancel := context.WithTimeout(ctx, time.Second*5) - defer cancel() - - if err := apiserv.Shutdown(ctx); err != nil { - fmt.Println("Error shutting down API server:", err) - } - - return nil -} - -func CreateServerEnv(ctx context.Context, nd *node.Node) *Env { - return &Env{ - blockMiningAPI: nd.BlockMining.BlockMiningAPI, - drandAPI: nd.DrandAPI, - ctx: ctx, - inspectorAPI: NewInspectorAPI(nd.Repo), - porcelainAPI: nd.PorcelainAPI, - retrievalAPI: nd.RetrievalProtocol, - storageAPI: nd.StorageAPI, - } -} diff --git a/cmd/go-filecoin/daemon_daemon_test.go b/cmd/go-filecoin/daemon_daemon_test.go deleted file mode 100644 index f9168bed2b..0000000000 --- a/cmd/go-filecoin/daemon_daemon_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package commands_test - -import ( - "fmt" - "net/http" - "os" - "path/filepath" - "testing" - - manet "github.com/multiformats/go-multiaddr-net" - - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDaemonStartupMessage(t *testing.T) { - tf.IntegrationTest(t) - - daemon := th.NewDaemon(t).Start() - daemon.ShutdownSuccess() - - out := daemon.ReadStdout() - assert.Regexp(t, "^\"My peer ID is [a-zA-Z0-9]*", out) - assert.Regexp(t, "\\n\"Swarm listening on.*", out) -} - -func TestDaemonApiFile(t *testing.T) { - tf.IntegrationTest(t) - - daemon := th.NewDaemon(t).Start() - - apiPath := filepath.Join(daemon.RepoDir(), "api") - assert.FileExists(t, apiPath) - - daemon.ShutdownEasy() - - _, err := os.Lstat(apiPath) - assert.Error(t, err, "Expect api file to be deleted on shutdown") - assert.True(t, os.IsNotExist(err)) -} - -func TestDaemonCORS(t *testing.T) { - tf.IntegrationTest(t) - - t.Run("default allowed origins work", func(t *testing.T) { - td := th.NewDaemon(t).Start() - defer td.ShutdownSuccess() - - maddr, err := td.CmdAddr() - assert.NoError(t, 
err) - - _, host, err := manet.DialArgs(maddr) - assert.NoError(t, err) - - url := fmt.Sprintf("http://%s/api/id", host) - req, err := http.NewRequest("GET", url, nil) - assert.NoError(t, err) - req.Header.Add("Origin", "http://localhost:8080") - res, err := http.DefaultClient.Do(req) - assert.NoError(t, err) - assert.Equal(t, http.StatusOK, res.StatusCode) - - req, err = http.NewRequest("GET", url, nil) - assert.NoError(t, err) - req.Header.Add("Origin", "https://localhost:8080") - res, err = http.DefaultClient.Do(req) - assert.NoError(t, err) - assert.Equal(t, http.StatusOK, res.StatusCode) - - req, err = http.NewRequest("GET", url, nil) - assert.NoError(t, err) - req.Header.Add("Origin", "http://127.0.0.1:8080") - res, err = http.DefaultClient.Do(req) - assert.NoError(t, err) - assert.Equal(t, http.StatusOK, res.StatusCode) - - req, err = http.NewRequest("GET", url, nil) - assert.NoError(t, err) - req.Header.Add("Origin", "https://127.0.0.1:8080") - res, err = http.DefaultClient.Do(req) - assert.NoError(t, err) - assert.Equal(t, http.StatusOK, res.StatusCode) - }) - - t.Run("non-configured origin fails", func(t *testing.T) { - td := th.NewDaemon(t).Start() - defer td.ShutdownSuccess() - - maddr, err := td.CmdAddr() - assert.NoError(t, err) - - _, host, err := manet.DialArgs(maddr) - assert.NoError(t, err) - - url := fmt.Sprintf("http://%s/api/id", host) - req, err := http.NewRequest("GET", url, nil) - assert.NoError(t, err) - req.Header.Add("Origin", "http://disallowed.origin") - res, err := http.DefaultClient.Do(req) - assert.NoError(t, err) - assert.Equal(t, http.StatusForbidden, res.StatusCode) - }) -} - -func TestDaemonOverHttp(t *testing.T) { - tf.IntegrationTest(t) - - td := th.NewDaemon(t).Start() - defer td.ShutdownSuccess() - - maddr, err := td.CmdAddr() - require.NoError(t, err) - - _, host, err := manet.DialArgs(maddr) - require.NoError(t, err) - - url := fmt.Sprintf("http://%s/api/daemon", host) - req, err := http.NewRequest("POST", url, nil) - 
require.NoError(t, err) - res, err := http.DefaultClient.Do(req) - require.NoError(t, err) - require.Equal(t, http.StatusNotFound, res.StatusCode) -} diff --git a/cmd/go-filecoin/dag.go b/cmd/go-filecoin/dag.go deleted file mode 100644 index b93324f54b..0000000000 --- a/cmd/go-filecoin/dag.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package commands implements the command to print the blockchain. -package commands - -import ( - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" -) - -var dagCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Interact with IPLD DAG objects.", - }, - Subcommands: map[string]*cmds.Command{ - "get": dagGetCmd, - }, -} - -var dagGetCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Get a DAG node by its CID", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("ref", true, false, "CID of object to get"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - out, err := GetPorcelainAPI(env).DAGGetNode(req.Context, req.Arguments[0]) - if err != nil { - return err - } - - return re.Emit(out) - }, -} diff --git a/cmd/go-filecoin/dag_integration_test.go b/cmd/go-filecoin/dag_integration_test.go deleted file mode 100644 index 24139f2a9f..0000000000 --- a/cmd/go-filecoin/dag_integration_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package commands_test - -import ( - "bytes" - "context" - "testing" - - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestDagDaemon(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - t.Run("dag get returning the genesis 
block", func(t *testing.T) { - builder := test.NewNodeBuilder(t) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - c := n.PorcelainAPI.ChainHeadKey().Iter().Value() - - // get an IPLD node from the DAG by its CID - op := cmdClient.RunSuccess(ctx, "dag", "get", c.String(), "--enc", "json") - result2 := op.ReadStdoutTrimNewlines() - - ipldnode, err := cbor.FromJSON(bytes.NewReader([]byte(result2)), constants.DefaultHashFunction, -1) - require.NoError(t, err) - - // CBOR decode the IPLD node's raw data into a Filecoin block - - var actual block.Block - encoding.Decode(ipldnode.RawData(), &actual) // nolint: errcheck - // assert.NoError(err) - // TODO Enable ^^ and debug why Block.Miner isn't being de/encoded properly. - - // CIDs should be equal - - // TODO: reenable once cbor versions are matching! - // types.AssertHaveSameCid(assert, &expected, &actual) - }) -} diff --git a/cmd/go-filecoin/deals.go b/cmd/go-filecoin/deals.go deleted file mode 100644 index 672e968dd5..0000000000 --- a/cmd/go-filecoin/deals.go +++ /dev/null @@ -1,116 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/pkg/errors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" -) - -const ( - clientOnly = "client" - minerOnly = "miner" -) - -var dealsCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Manage and inspect deals made by or with this node", - }, - Subcommands: map[string]*cmds.Command{ - "list": dealsListCmd, - "show": dealsShowCmd, - }, -} - -// DealsListResult represents the subset of deal data returned by deals list -type DealsListResult struct { - Miner address.Address `json:"minerAddress"` - PieceCid cid.Cid `json:"pieceCid"` - ProposalCid cid.Cid `json:"proposalCid"` - IsMiner bool `json:"isMiner"` - State string `json:"state"` - Message string `json:"message"` -} - -var 
dealsListCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "List all deals", - ShortDescription: ` -Lists all recorded deals made by or with this node. This may include pending -deals, active deals, finished deals and cancelled deals. -`, - }, - Options: []cmdkit.Option{ - cmdkit.BoolOption(clientOnly, "c", "only return deals made as a client"), - cmdkit.BoolOption(minerOnly, "m", "only return deals made as a miner"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - isClientOnly, _ := req.Options[clientOnly].(bool) - isMinerOnly, _ := req.Options[minerOnly].(bool) - var clientDeals []storagemarket.ClientDeal - var minerDeals []storagemarket.MinerDeal - var err error - if !isMinerOnly { - clientDeals, err = GetStorageAPI(env).GetClientDeals(req.Context) - if err != nil { - return fmt.Errorf("error reading client deals: %w", err) - } - } - if !isClientOnly { - minerDeals, err = GetStorageAPI(env).GetProviderDeals(req.Context) - if err != nil { - return fmt.Errorf("error reading miner deals: %w", err) - } - } - formattedDeals := []DealsListResult{} - for _, deal := range clientDeals { - formattedDeals = append(formattedDeals, DealsListResult{ - Miner: deal.Proposal.Provider, - PieceCid: deal.Proposal.PieceCID, - ProposalCid: deal.ProposalCid, - IsMiner: false, - State: storagemarket.DealStates[deal.State], - Message: deal.Message, - }) - } - for _, deal := range minerDeals { - formattedDeals = append(formattedDeals, DealsListResult{ - Miner: deal.Proposal.Provider, - PieceCid: deal.Proposal.PieceCID, - ProposalCid: deal.ProposalCid, - IsMiner: true, - State: storagemarket.DealStates[deal.State], - Message: deal.Message, - }) - } - return re.Emit(formattedDeals) - }, - Type: []DealsListResult{}, -} - -var dealsShowCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show deal details for CID ", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "CID of deal to query"), - }, - Run: 
func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - dealCid, err := cid.Parse(req.Arguments[0]) - if err != nil { - return errors.Wrap(err, "invalid cid "+req.Arguments[0]) - } - - deal, err := GetStorageAPI(env).GetStorageDeal(req.Context, dealCid) - if err != nil { - return err - } - return re.Emit(deal) - }, - Type: storagemarket.ClientDeal{}, -} diff --git a/cmd/go-filecoin/deals_integration_test.go b/cmd/go-filecoin/deals_integration_test.go deleted file mode 100644 index d212443335..0000000000 --- a/cmd/go-filecoin/deals_integration_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package commands_test - -import ( - "bytes" - "context" - "fmt" - "testing" - - "github.com/multiformats/go-multihash" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestDealsList(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - minerAPI, clientAPI, done, deal1Cid, deal2Cid := createDeal(ctx, t) - defer done() - - var dealResults []commands.DealsListResult - var cidsInList [2]cid.Cid - t.Run("with no filters", func(t *testing.T) { - // Client fails cause no miner is started for the client - clientAPI.RunFail(ctx, "Error: error reading miner deals: Mining has not been started so storage provider is not available", "deals", "list") - - // Miner sees the deal - minerAPI.RunMarshaledJSON(ctx, &dealResults, "deals", "list") - require.Len(t, dealResults, 2) - cidsInList[0] = dealResults[0].ProposalCid - cidsInList[1] = dealResults[1].ProposalCid - 
require.Contains(t, cidsInList, deal1Cid) - require.Contains(t, cidsInList, deal2Cid) - }) - - t.Run("with --miner", func(t *testing.T) { - // Client fails cause no miner is started for the client - clientAPI.RunFail(ctx, "Error: error reading miner deals: Mining has not been started so storage provider is not available", "deals", "list") - - // Miner sees the deal - minerAPI.RunMarshaledJSON(ctx, &dealResults, "deals", "list", "--miner") - require.Len(t, dealResults, 2) - cidsInList[0] = dealResults[0].ProposalCid - cidsInList[1] = dealResults[1].ProposalCid - require.Contains(t, cidsInList, deal1Cid) - require.Contains(t, cidsInList, deal2Cid) - }) - - t.Run("with --client", func(t *testing.T) { - // Client sees both deals - clientAPI.RunMarshaledJSON(ctx, &dealResults, "deals", "list", "--client") - require.Len(t, dealResults, 2) - cidsInList[0] = dealResults[0].ProposalCid - cidsInList[1] = dealResults[1].ProposalCid - require.Contains(t, cidsInList, deal1Cid) - require.Contains(t, cidsInList, deal2Cid) - - // Miner sees no client deals, but does not error - minerOutput := minerAPI.RunSuccessFirstLine(ctx, "deals", "list", "--client") - require.Equal(t, "[]", minerOutput) - }) - - t.Run("with --help", func(t *testing.T) { - clientOutput := clientAPI.RunSuccess(ctx, "deals", "list", "--help").ReadStdoutTrimNewlines() - require.Contains(t, clientOutput, "only return deals made as a client") - require.Contains(t, clientOutput, "only return deals made as a miner") - }) -} - -func TestDealShow(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - _, clientAPI, done, deal1Cid, _ := createDeal(ctx, t) - defer done() - - var res storagemarket.ClientDeal - t.Run("showDeal outputs correct information", func(t *testing.T) { - clientAPI.RunMarshaledJSON(ctx, &res, "deals", "show", deal1Cid.String()) - - assert.Equal(t, abi.ChainEpoch(2000), res.ClientDealProposal.Proposal.EndEpoch) - assert.Equal(t, "t0106", 
res.ClientDealProposal.Proposal.Provider.String()) - assert.LessOrEqual(t, storagemarket.StorageDealProposalAccepted, res.State) - - assert.Equal(t, abi.NewTokenAmount(100000), res.ClientDealProposal.Proposal.StoragePricePerEpoch) - }) - - t.Run("When deal does not exist errors with useful message", func(t *testing.T) { - nonDealCid := requireTestCID(t, []byte("anything")) - expectedErr := fmt.Sprintf("No state for /%s", nonDealCid.String()) - clientAPI.RunFail(ctx, expectedErr, "deals", "show", nonDealCid.String()) - }) -} - -func createDeal(ctx context.Context, t *testing.T) (*test.Client, *test.Client, func(), cid.Cid, cid.Cid) { - nodes, cancel := test.MustCreateNodesWithBootstrap(ctx, t, 1) - - miner := nodes[0] - minerAPI, minerStop := test.RunNodeAPI(ctx, miner, t) - - maddr, err := miner.BlockMining.BlockMiningAPI.MinerAddress() - require.NoError(t, err) - - client := nodes[1] - clientAPI, clientStop := test.RunNodeAPI(ctx, client, t) - done := func() { - cancel() - minerStop() - clientStop() - } - - clientAddr, err := client.PorcelainAPI.WalletDefaultAddress() - require.NoError(t, err) - - // Add enough funds (1 FIL) for client and miner to to cover deal - provider, err := miner.StorageProtocol.Provider() - require.NoError(t, err) - - err = provider.AddStorageCollateral(ctx, types.NewAttoFILFromFIL(1)) - require.NoError(t, err) - err = client.StorageProtocol.Client().AddPaymentEscrow(ctx, clientAddr, types.NewAttoFILFromFIL(1)) - require.NoError(t, err) - - // import some data to create first piece - input1 := bytes.NewBuffer([]byte("HODLHODLHODL")) - node1, err := client.PorcelainAPI.DAGImportData(ctx, input1) - require.NoError(t, err) - - // import some data to create second piece - input2 := bytes.NewBuffer([]byte("FREEASINBEER")) - node2, err := client.PorcelainAPI.DAGImportData(ctx, input2) - require.NoError(t, err) - - // propose 2 deals - var result storagemarket.ProposeStorageDealResult - clientAPI.RunMarshaledJSON(ctx, &result, "client", 
"propose-storage-deal", - "--peerid", miner.Host().ID().String(), - maddr.String(), - node1.Cid().String(), - "1000", - "2000", - ".0000000000001", - "1", - ) - require.NotEqual(t, cid.Undef, result.ProposalCid) - deal1Cid := result.ProposalCid - - clientAPI.RunMarshaledJSON(ctx, &result, "client", "propose-storage-deal", - "--peerid", miner.Host().ID().String(), - maddr.String(), - node2.Cid().String(), - "1000", - "2000", - ".0000000000001", - "1", - ) - require.NotEqual(t, cid.Undef, result.ProposalCid) - deal2Cid := result.ProposalCid - return minerAPI, clientAPI, done, deal1Cid, deal2Cid -} - -func requireTestCID(t *testing.T, data []byte) cid.Cid { - hash, err := multihash.Sum(data, multihash.SHA2_256, -1) - require.NoError(t, err) - return cid.NewCidV1(cid.DagCBOR, hash) -} diff --git a/cmd/go-filecoin/dht.go b/cmd/go-filecoin/dht.go deleted file mode 100644 index f3a0d5619f..0000000000 --- a/cmd/go-filecoin/dht.go +++ /dev/null @@ -1,165 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "time" - - "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/routing" -) - -const ( - dhtVerboseOptionName = "verbose" - numProvidersOptionName = "num-providers" -) - -// Note, most of this is copied directly from go-ipfs (https://github.com/ipfs/go-ipfs/blob/master/core/commands/dht.go). -// A few simple modifications have been adapted for filecoin. 
-var dhtCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Explore and manipulate the libp2p DHT.", - ShortDescription: ``, - }, - - Subcommands: map[string]*cmds.Command{ - "findprovs": findProvidersDhtCmd, - "findpeer": findPeerDhtCmd, - "query": queryDhtCmd, - }, -} - -var queryDhtCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Find the closest Peer IDs to a given Peer ID by querying the DHT.", - ShortDescription: "Outputs a list of newline-delimited Peer IDs.", - }, - - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("peerID", true, false, "The peerID to run the query against."), - }, - Options: []cmdkit.Option{ - cmdkit.BoolOption(dhtVerboseOptionName, "v", "Print extra information."), - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - - id, err := peer.Decode(req.Arguments[0]) - if err != nil { - return cmds.ClientError("invalid peer ID") - } - - ctx, cancel := context.WithCancel(req.Context) - ctx, events := routing.RegisterForQueryEvents(ctx) - - closestPeers, err := GetPorcelainAPI(env).NetworkGetClosestPeers(ctx, string(id)) - if err != nil { - cancel() - return err - } - - go func() { - defer cancel() - for p := range closestPeers { - routing.PublishQueryEvent(ctx, &routing.QueryEvent{ - ID: p, - Type: routing.FinalPeer, - }) - } - }() - - for e := range events { - if err := res.Emit(e); err != nil { - return err - } - } - - return nil - }, - Type: routing.QueryEvent{}, -} - -var findProvidersDhtCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Find peers that can provide a given key's value.", - ShortDescription: "Outputs a list of newline-delimited provider Peer IDs for a given key.", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("key", true, false, "The key whose provider Peer IDs are output.").EnableStdin(), - }, - Options: []cmdkit.Option{ - cmdkit.BoolOption(dhtVerboseOptionName, "v", "Print extra information."), - cmdkit.IntOption(numProvidersOptionName, 
"n", "The max number of providers to find.").WithDefault(20), - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - numProviders, _ := req.Options[numProvidersOptionName].(int) - if numProviders < 1 { - return fmt.Errorf("number of providers must be greater than 0") - } - - c, err := cid.Parse(req.Arguments[0]) - if err != nil { - return err - } - - ctx, cancel := context.WithTimeout(req.Context, time.Minute) - ctx, events := routing.RegisterForQueryEvents(ctx) - - pchan := GetPorcelainAPI(env).NetworkFindProvidersAsync(ctx, c, numProviders) - - go func() { - defer cancel() - for p := range pchan { - np := p - // Note that the peer IDs in these Provider - // events are the main output of this command. - // These results are piped back into the event - // system so that they can be read alongside - // other routing events which are output in - // verbose mode but otherwise filtered. - routing.PublishQueryEvent(ctx, &routing.QueryEvent{ - Type: routing.Provider, - Responses: []*peer.AddrInfo{&np}, - }) - } - }() - for e := range events { - if err := res.Emit(e); err != nil { - return err - } - } - - return nil - }, - Type: routing.QueryEvent{}, -} - -var findPeerDhtCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Find the multiaddresses associated with a Peer ID.", - ShortDescription: "Outputs a list of newline-delimited multiaddresses.", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("peerID", true, false, "The ID of the peer to search for."), - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - peerID, err := peer.Decode(req.Arguments[0]) - if err != nil { - return err - } - - out, err := GetPorcelainAPI(env).NetworkFindPeer(req.Context, peerID) - if err != nil { - return err - } - - for _, addr := range out.Addrs { - if err := res.Emit(addr.String()); err != nil { - return err - } - } - return nil - }, -} diff --git a/cmd/go-filecoin/dht_integration_test.go 
b/cmd/go-filecoin/dht_integration_test.go deleted file mode 100644 index 90a9a3fa15..0000000000 --- a/cmd/go-filecoin/dht_integration_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestDhtFindPeer(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - builder1 := test.NewNodeBuilder(t) - n1 := builder1.BuildAndStart(ctx) - defer n1.Stop(ctx) - cmdClient, done := test.RunNodeAPI(ctx, n1, t) - defer done() - - builder2 := test.NewNodeBuilder(t) - n2 := builder2.BuildAndStart(ctx) - defer n2.Stop(ctx) - - node.ConnectNodes(t, n1, n2) - - n2Id := n2.PorcelainAPI.NetworkGetPeerID() - findpeerOutput := cmdClient.RunSuccess(ctx, "dht", "findpeer", n2Id.String()).ReadStdoutTrimNewlines() - n2Addr := n2.PorcelainAPI.NetworkGetPeerAddresses()[0] - - assert.Contains(t, findpeerOutput, n2Addr.String()) -} - -// TODO: findprovs will have to be untested until -// https://github.com/filecoin-project/go-filecoin/issues/2357 -// original tests were flaky; testing may need to be omitted entirely -// unless it can consistently pass. 
diff --git a/cmd/go-filecoin/drand.go b/cmd/go-filecoin/drand.go deleted file mode 100644 index 4f659943f5..0000000000 --- a/cmd/go-filecoin/drand.go +++ /dev/null @@ -1,62 +0,0 @@ -package commands - -import ( - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" -) - -var drandCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Explore access and configure drand.", - ShortDescription: ``, - }, - - Subcommands: map[string]*cmds.Command{ - "configure": drandConfigure, - "random": drandRandom, - }, -} - -var drandConfigure = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Configure drand client", - ShortDescription: `Fetches drand group configuration from one or more server. When found, it updates - drand client to use configuration and persists configuration in node config`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("addresses", true, true, "Addresses used to contact drand group for configuration."), - }, - Options: []cmdkit.Option{ - cmdkit.BoolOption("override-addrs", "use the provided addresses rather than the retrieved config to contact drand"), - cmdkit.BoolOption("insecure", "use insecure protocol to contact drand"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - insecure, _ := req.Options["insecure"].(bool) - override, _ := req.Options["override-addrs"].(bool) - - err := GetDrandAPI(env).Configure(req.Arguments, !insecure, override) - if err != nil { - return err - } - return re.Emit("drand group key configured") - }, -} - -var drandRandom = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Retrieve randomness round from drand group", - }, - Options: []cmdkit.Option{ - cmdkit.Uint64Option("round", "retrieve randomness at given round (default 0)"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - round, _ := req.Options["round"].(uint64) - - 
entry, err := GetDrandAPI(env).GetEntry(req.Context, drand.Round(round)) - if err != nil { - return err - } - return re.Emit(entry) - }, -} diff --git a/cmd/go-filecoin/env.go b/cmd/go-filecoin/env.go deleted file mode 100644 index 5ac0a8203a..0000000000 --- a/cmd/go-filecoin/env.go +++ /dev/null @@ -1,73 +0,0 @@ -package commands - -import ( - "context" - - cmds "github.com/ipfs/go-ipfs-cmds" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/mining" - "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/retrieval" - "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/storage" -) - -// Env is the environment for command API handlers. -type Env struct { - blockMiningAPI *mining.API - ctx context.Context - drandAPI *drand.API - porcelainAPI *porcelain.API - retrievalAPI retrieval.API - storageAPI *storage.API - inspectorAPI *Inspector -} - -var _ cmds.Environment = (*Env)(nil) - -// NewClientEnv returns a new environment for command API clients. -// This environment lacks direct access to any internal APIs. -func NewClientEnv(ctx context.Context) *Env { - return &Env{ctx: ctx} -} - -// Context returns the context of the environment. -func (ce *Env) Context() context.Context { - return ce.ctx -} - -// GetPorcelainAPI returns the porcelain.API interface from the environment. -func GetPorcelainAPI(env cmds.Environment) *porcelain.API { - ce := env.(*Env) - return ce.porcelainAPI -} - -// GetBlockAPI returns the block protocol api from the given environment. -func GetBlockAPI(env cmds.Environment) *mining.API { - ce := env.(*Env) - return ce.blockMiningAPI -} - -// GetRetrievalAPI returns the retrieval protocol api from the given environment. 
-func GetRetrievalAPI(env cmds.Environment) retrieval.API { - ce := env.(*Env) - return ce.retrievalAPI -} - -// GetStorageAPI returns the storage protocol api from the given environment. -func GetStorageAPI(env cmds.Environment) *storage.API { - ce := env.(*Env) - return ce.storageAPI -} - -// GetInspectorAPI returns the inspector api from the given environment. -func GetInspectorAPI(env cmds.Environment) *Inspector { - ce := env.(*Env) - return ce.inspectorAPI -} - -// GetDrandAPI returns the drand api from the given environment. -func GetDrandAPI(env cmds.Environment) *drand.API { - ce := env.(*Env) - return ce.drandAPI -} diff --git a/cmd/go-filecoin/errors.go b/cmd/go-filecoin/errors.go deleted file mode 100644 index c93c697666..0000000000 --- a/cmd/go-filecoin/errors.go +++ /dev/null @@ -1,32 +0,0 @@ -package commands - -import ( - "fmt" - "github.com/pkg/errors" -) - -var ( - // ErrInvalidSize indicates that the provided size was invalid. - ErrInvalidSize = fmt.Errorf("invalid size") - - // ErrInvalidPrice indicates that the provided price was invalid. - ErrInvalidPrice = fmt.Errorf("invalid price") - - // ErrInvalidAmount indicates that the provided amount was invalid. - ErrInvalidAmount = fmt.Errorf("invalid amount") - - // ErrInvalidCollateral indicates that provided collateral was invalid. - ErrInvalidCollateral = fmt.Errorf("invalid collateral") - - // ErrInvalidPledge indicates that provided pledge was invalid. - ErrInvalidPledge = fmt.Errorf("invalid pledge") - - // ErrInvalidBlockHeight indicates that the provided block height was invalid. - ErrInvalidBlockHeight = fmt.Errorf("invalid block height") - - // ErrMissingDaemon is the error returned when trying to execute a command that requires the daemon to be started. - ErrMissingDaemon = errors.New("daemon must be started before using this command") - - // ErrNoWalletAddresses indicates that there are no addresses in wallet to mine to. 
- ErrNoWalletAddresses = fmt.Errorf("no addresses in wallet to mine to") -) diff --git a/cmd/go-filecoin/id.go b/cmd/go-filecoin/id.go deleted file mode 100644 index 2bb967417a..0000000000 --- a/cmd/go-filecoin/id.go +++ /dev/null @@ -1,129 +0,0 @@ -package commands - -import ( - "encoding/base64" - "encoding/json" - "fmt" - - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/libp2p/go-libp2p-core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -// IDDetails is a collection of information about a node. -type IDDetails struct { - Addresses []ma.Multiaddr - ID peer.ID - AgentVersion string - ProtocolVersion string - PublicKey []byte // raw bytes -} - -var idCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show info about the network peers", - }, - Options: []cmdkit.Option{ - // TODO: ideally copy this from the `ipfs id` command - cmdkit.StringOption("format", "f", "Specify an output format"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - addrs := GetPorcelainAPI(env).NetworkGetPeerAddresses() - hostID := GetPorcelainAPI(env).NetworkGetPeerID() - - details := IDDetails{ - Addresses: make([]ma.Multiaddr, len(addrs)), - ID: hostID, - } - - for i, addr := range addrs { - subaddr, err := ma.NewMultiaddr(fmt.Sprintf("/ipfs/%s", hostID.Pretty())) - if err != nil { - return err - } - details.Addresses[i] = addr.Encapsulate(subaddr) - } - - return re.Emit(&details) - }, - Type: IDDetails{}, -} - -// MarshalJSON implements json.Marshaler -func (idd IDDetails) MarshalJSON() ([]byte, error) { - addressStrings := make([]string, len(idd.Addresses)) - for i, addr := range idd.Addresses { - addressStrings[i] = addr.String() - } - - v := map[string]interface{}{ - "Addresses": addressStrings, - } - - if idd.ID != "" { - v["ID"] = idd.ID.Pretty() - } - if idd.AgentVersion != "" { - v["AgentVersion"] = idd.AgentVersion - } - if idd.ProtocolVersion != "" { - v["ProtocolVersion"] = 
idd.ProtocolVersion - } - if idd.PublicKey != nil { - // Base64-encode the public key explicitly. - // This is what the built-in JSON encoder does to []byte too. - v["PublicKey"] = base64.StdEncoding.EncodeToString(idd.PublicKey) - } - return json.Marshal(v) -} - -// UnmarshalJSON implements Unmarshaler -func (idd *IDDetails) UnmarshalJSON(data []byte) error { - var v map[string]*json.RawMessage - var err error - if err = json.Unmarshal(data, &v); err != nil { - return err - } - - var addresses []string - if err := decode(v, "Addresses", &addresses); err != nil { - return err - } - idd.Addresses = make([]ma.Multiaddr, len(addresses)) - for i, addr := range addresses { - a, err := ma.NewMultiaddr(addr) - if err != nil { - return err - } - idd.Addresses[i] = a - } - - var id string - if err := decode(v, "ID", &id); err != nil { - return err - } - if idd.ID, err = peer.Decode(id); err != nil { - return err - } - - if err := decode(v, "AgentVersion", &idd.AgentVersion); err != nil { - return err - } - if err := decode(v, "ProtocolVersion", &idd.ProtocolVersion); err != nil { - return err - } - if err := decode(v, "PublicKey", &idd.PublicKey); err != nil { - return err - } - return nil -} - -func decode(idd map[string]*json.RawMessage, key string, dest interface{}) error { - if raw := idd[key]; raw != nil { - if err := json.Unmarshal(*raw, &dest); err != nil { - return err - } - } - return nil -} diff --git a/cmd/go-filecoin/id_daemon_test.go b/cmd/go-filecoin/id_daemon_test.go deleted file mode 100644 index 8a39df3686..0000000000 --- a/cmd/go-filecoin/id_daemon_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package commands_test - -import ( - "context" - "io/ioutil" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf 
"github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestId(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - - builder := test.NewNodeBuilder(t) - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - id := cmdClient.RunSuccess(ctx, "id") - - idContent := id.ReadStdout() - assert.Containsf(t, idContent, "/ip4/127.0.0.1/tcp/", "default addr") - assert.Contains(t, idContent, "ID") -} - -func TestPersistId(t *testing.T) { - tf.IntegrationTest(t) - - // we need to control this - dir, err := ioutil.TempDir("", "go-fil-test") - require.NoError(t, err) - - // Start a demon in dir - d1 := th.NewDaemon(t, th.ContainerDir(dir)).Start() - - // get the id and kill it - id1 := d1.GetID() - d1.Stop() - - // restart the daemon - d2 := th.NewDaemon(t, th.ShouldInit(false), th.ContainerDir(dir)).Start() - - // get the id and compare to previous - id2 := d2.GetID() - d2.ShutdownSuccess() - t.Logf("d1: %s", d1.ReadStdout()) - t.Logf("d2: %s", d2.ReadStdout()) - assert.Equal(t, id1, id2) -} diff --git a/cmd/go-filecoin/init.go b/cmd/go-filecoin/init.go deleted file mode 100644 index dc54e8fa73..0000000000 --- a/cmd/go-filecoin/init.go +++ /dev/null @@ -1,303 +0,0 @@ -package commands - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - - "github.com/filecoin-project/go-address" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-car" - "github.com/libp2p/go-libp2p-core/crypto" - - "github.com/filecoin-project/go-filecoin/fixtures/networks" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/genesis" - drandapi "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -var logInit = logging.Logger("commands/init") - -var initCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Initialize a filecoin repo", - }, - Options: []cmdkit.Option{ - cmdkit.StringOption(GenesisFile, "path of file or HTTP(S) URL containing archive of genesis block DAG data"), - cmdkit.StringOption(PeerKeyFile, "path of file containing key to use for new node's libp2p identity"), - cmdkit.StringOption(WalletKeyFile, "path of file containing keys to import into the wallet on initialization"), - cmdkit.StringOption(OptionSectorDir, "path of directory into which staged and sealed sectors will be written"), - cmdkit.StringOption(MinerActorAddress, "when set, sets the daemons's miner actor address to the provided address"), - cmdkit.UintOption(AutoSealIntervalSeconds, "when set to a number > 0, configures the daemon to check for and seal any staged sectors on an interval.").WithDefault(uint(120)), - cmdkit.StringOption(Network, "when set, populates config with network specific parameters"), - cmdkit.StringOption(OptionPresealedSectorDir, "when set to the path of a directory, imports pre-sealed sector data from that directory"), - cmdkit.StringOption(OptionDrandConfigAddr, "configure drand with given address, uses secure contact protocol and no override. 
If you need different settings use daemon drand command"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - repoDir, _ := req.Options[OptionRepoDir].(string) - repoDir, err := paths.GetRepoPath(repoDir) - if err != nil { - return err - } - - if err := re.Emit(repoDir); err != nil { - return err - } - if err := repo.InitFSRepo(repoDir, repo.Version, config.NewDefaultConfig()); err != nil { - return err - } - rep, err := repo.OpenFSRepo(repoDir, repo.Version) - if err != nil { - return err - } - // The only error Close can return is that the repo has already been closed. - defer func() { _ = rep.Close() }() - - genesisFileSource, _ := req.Options[GenesisFile].(string) - gif, err := loadGenesis(req.Context, rep, genesisFileSource) - if err != nil { - return err - } - - peerKeyFile, _ := req.Options[PeerKeyFile].(string) - walletKeyFile, _ := req.Options[WalletKeyFile].(string) - initopts, err := getNodeInitOpts(peerKeyFile, walletKeyFile) - if err != nil { - return err - } - - cfg := rep.Config() - if err := setConfigFromOptions(cfg, req.Options); err != nil { - logInit.Errorf("Error setting config %s", err) - return err - } - - if err := setDrandConfig(rep, req.Options); err != nil { - logInit.Error("Error configuring drand config %s", err) - return err - } - if err := rep.ReplaceConfig(cfg); err != nil { - logInit.Errorf("Error replacing config %s", err) - return err - } - - logInit.Info("Initializing node") - if err := node.Init(req.Context, rep, gif, initopts...); err != nil { - logInit.Errorf("Error initializing node %s", err) - return err - } - - return nil - }, -} - -func setConfigFromOptions(cfg *config.Config, options cmdkit.OptMap) error { - var err error - if dir, ok := options[OptionSectorDir].(string); ok { - cfg.SectorBase.RootDirPath = dir - } - - if autoSealIntervalSeconds, ok := options[AutoSealIntervalSeconds]; ok { - cfg.Mining.AutoSealIntervalSeconds = autoSealIntervalSeconds.(uint) - } - - if ma, ok := 
options[MinerActorAddress].(string); ok { - if cfg.Mining.MinerAddress, err = address.NewFromString(ma); err != nil { - return err - } - } - - if dir, ok := options[OptionPresealedSectorDir].(string); ok { - if cfg.Mining.MinerAddress == address.Undef { - return fmt.Errorf("if --%s is provided, --%s must also be provided", OptionPresealedSectorDir, MinerActorAddress) - } - - cfg.SectorBase.PreSealedSectorsDirPath = dir - } - - // Setup devnet specific config options. - netName, _ := options[Network].(string) - var netcfg *networks.NetworkConf - if netName == "interop" { - netcfg = networks.Interop() - } else if netName == "testnet" { - netcfg = networks.Testnet() - } else if netName != "" { - return fmt.Errorf("unknown network name %s", netName) - } - if netcfg != nil { - cfg.Bootstrap = &netcfg.Bootstrap - cfg.Drand = &netcfg.Drand - cfg.NetworkParams = &netcfg.Network - } - - return nil -} - -// helper type to implement plumbing subset -type setWrapper struct { - cfg *config.Config -} - -func (w *setWrapper) ConfigSet(dottedKey string, jsonString string) error { - return w.cfg.Set(dottedKey, jsonString) -} - -func setDrandConfig(repo repo.Repo, options cmdkit.OptMap) error { - drandAddrStr, ok := options[OptionDrandConfigAddr].(string) - if !ok { - // skip configuring drand during init - return nil - } - - // Arbitrary filecoin genesis time, it will be set correctly when daemon runs - // It is not needed to set config properly - dGRPC, err := node.DefaultDrandIfaceFromConfig(repo.Config(), 0) - if err != nil { - return err - } - d := drandapi.New(dGRPC, &setWrapper{repo.Config()}) - return d.Configure([]string{drandAddrStr}, true, false) -} - -func loadGenesis(ctx context.Context, rep repo.Repo, sourceName string) (genesis.InitFunc, error) { - if sourceName == "" { - return gengen.MakeGenesisFunc(), nil - } - - source, err := openGenesisSource(sourceName) - if err != nil { - return nil, err - } - defer func() { _ = source.Close() }() - - genesisBlk, err := 
extractGenesisBlock(source, rep) - if err != nil { - return nil, err - } - - gif := func(cst cbor.IpldStore, bs blockstore.Blockstore) (*block.Block, error) { - return genesisBlk, err - } - - return gif, nil - -} - -func getNodeInitOpts(peerKeyFile string, walletKeyFile string) ([]node.InitOpt, error) { - var initOpts []node.InitOpt - if peerKeyFile != "" { - data, err := ioutil.ReadFile(peerKeyFile) - if err != nil { - return nil, err - } - peerKey, err := crypto.UnmarshalPrivateKey(data) - if err != nil { - return nil, err - } - initOpts = append(initOpts, node.PeerKeyOpt(peerKey)) - } - - if walletKeyFile != "" { - f, err := os.Open(walletKeyFile) - if err != nil { - return nil, err - } - - var wir *WalletSerializeResult - if err := json.NewDecoder(f).Decode(&wir); err != nil { - return nil, err - } - - if len(wir.KeyInfo) > 0 { - initOpts = append(initOpts, node.DefaultKeyOpt(wir.KeyInfo[0])) - } - - for _, k := range wir.KeyInfo[1:] { - initOpts = append(initOpts, node.ImportKeyOpt(k)) - } - } - - return initOpts, nil -} - -func openGenesisSource(sourceName string) (io.ReadCloser, error) { - sourceURL, err := url.Parse(sourceName) - if err != nil { - return nil, fmt.Errorf("invalid filepath or URL for genesis file: %s", sourceURL) - } - var source io.ReadCloser - if sourceURL.Scheme == "http" || sourceURL.Scheme == "https" { - // NOTE: This code is temporary. It allows downloading a genesis block via HTTP(S) to be able to join a - // recently deployed staging devnet. 
- response, err := http.Get(sourceName) - if err != nil { - return nil, err - } - source = response.Body - } else if sourceURL.Scheme != "" { - return nil, fmt.Errorf("unsupported protocol for genesis file: %s", sourceURL.Scheme) - } else { - file, err := os.Open(sourceName) - if err != nil { - return nil, err - } - source = file - } - return source, nil -} - -func extractGenesisBlock(source io.ReadCloser, rep repo.Repo) (*block.Block, error) { - bs := blockstore.NewBlockstore(rep.Datastore()) - ch, err := car.LoadCar(bs, source) - if err != nil { - return nil, err - } - - // need to check if we are being handed a car file with a single genesis block or an entire chain. - bsBlk, err := bs.Get(ch.Roots[0]) - if err != nil { - return nil, err - } - cur, err := block.DecodeBlock(bsBlk.RawData()) - if err != nil { - return nil, err - } - - // the root block of the car file has parents, this file must contain a chain. - var gensisBlk *block.Block - if !cur.Parents.Equals(block.UndefTipSet.Key()) { - // walk back up the chain until we hit a block with no parents, the genesis block. 
- for !cur.Parents.Equals(block.UndefTipSet.Key()) { - bsBlk, err := bs.Get(cur.Parents.ToSlice()[0]) - if err != nil { - return nil, err - } - cur, err = block.DecodeBlock(bsBlk.RawData()) - if err != nil { - return nil, err - } - } - - gensisBlk = cur - - logInit.Infow("initialized go-filecoin with genesis file containing partial chain", "genesisCID", gensisBlk.Cid().String(), "headCIDs", ch.Roots) - } else { - gensisBlk = cur - } - return gensisBlk, nil -} diff --git a/cmd/go-filecoin/init_daemon_test.go b/cmd/go-filecoin/init_daemon_test.go deleted file mode 100644 index 6b7f82b4c5..0000000000 --- a/cmd/go-filecoin/init_daemon_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package commands_test - -import ( - "context" - "fmt" - "net/http" - "os/exec" - "strconv" - "testing" - - manet "github.com/multiformats/go-multiaddr-net" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/build/project" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestInitOverHttp(t *testing.T) { - tf.IntegrationTest(t) - - td := th.NewDaemon(t).Start() - defer td.ShutdownSuccess() - - maddr, err := td.CmdAddr() - require.NoError(t, err) - - _, host, err := manet.DialArgs(maddr) - require.NoError(t, err) - - url := fmt.Sprintf("http://%s/api/init", host) - req, err := http.NewRequest("POST", url, nil) - require.NoError(t, err) - res, err := http.DefaultClient.Do(req) - require.NoError(t, err) - require.Equal(t, http.StatusNotFound, res.StatusCode) -} - -func TestDownloadGenesis(t *testing.T) { - tf.IntegrationTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - port, err := th.GetFreePort() - require.NoError(t, err) - - err = exec.CommandContext( - ctx, - project.Root("tools/genesis-file-server/genesis-file-server"), - "--genesis-file-path", - project.Root("fixtures/test/genesis.car"), - "--port", - 
strconv.Itoa(port), - ).Start() - require.NoError(t, err) - - td := th.NewDaemon(t, th.GenesisFile(fmt.Sprintf("http://127.0.0.1:%d/genesis.car", port))).Start() - - td.ShutdownSuccess() -} diff --git a/cmd/go-filecoin/inspector.go b/cmd/go-filecoin/inspector.go deleted file mode 100644 index ec882f2bf1..0000000000 --- a/cmd/go-filecoin/inspector.go +++ /dev/null @@ -1,261 +0,0 @@ -package commands - -import ( - "os" - "runtime" - - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - sysi "github.com/whyrusleeping/go-sysinfo" - - "github.com/filecoin-project/go-filecoin/build/flags" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" -) - -var inspectCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show info about the filecoin node", - }, - Subcommands: map[string]*cmds.Command{ - "all": allInspectCmd, - "runtime": runtimeInspectCmd, - "disk": diskInspectCmd, - "memory": memoryInspectCmd, - "config": configInspectCmd, - "environment": envInspectCmd, - }, -} -var allInspectCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Print all diagnostic information.", - ShortDescription: ` -Prints out information about filecoin process and its environment. 
-`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - var allInfo AllInspectorInfo - allInfo.Runtime = GetInspectorAPI(env).Runtime() - - dsk, err := GetInspectorAPI(env).Disk() - if err != nil { - return err - } - allInfo.Disk = dsk - - mem, err := GetInspectorAPI(env).Memory() - if err != nil { - return err - } - allInfo.Memory = mem - allInfo.Config = GetInspectorAPI(env).Config() - allInfo.Environment = GetInspectorAPI(env).Environment() - allInfo.FilecoinVersion = GetInspectorAPI(env).FilecoinVersion() - return cmds.EmitOnce(res, allInfo) - }, - Type: AllInspectorInfo{}, -} - -var runtimeInspectCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Print runtime diagnostic information.", - ShortDescription: ` -Prints out information about the golang runtime. -`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - out := GetInspectorAPI(env).Runtime() - return cmds.EmitOnce(res, out) - }, - Type: RuntimeInfo{}, -} - -var diskInspectCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Print filesystem usage information.", - ShortDescription: ` -Prints out information about the filesystem. -`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - out, err := GetInspectorAPI(env).Disk() - if err != nil { - return err - } - return cmds.EmitOnce(res, out) - }, - Type: DiskInfo{}, -} - -var memoryInspectCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Print memory usage information.", - ShortDescription: ` -Prints out information about memory usage. 
-`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - out, err := GetInspectorAPI(env).Memory() - if err != nil { - return err - } - return cmds.EmitOnce(res, out) - }, - Type: MemoryInfo{}, -} - -var configInspectCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Print in-memory config information.", - ShortDescription: ` -Prints out information about your filecoin nodes config. -`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - out := GetInspectorAPI(env).Config() - return cmds.EmitOnce(res, out) - }, - Type: config.Config{}, -} - -var envInspectCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Print filecoin environment information.", - ShortDescription: ` -Prints out information about your filecoin nodes environment. -`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - out := GetInspectorAPI(env).Environment() - return cmds.EmitOnce(res, out) - }, - Type: EnvironmentInfo{}, -} - -// NewInspectorAPI returns a `Inspector` used to inspect the go-filecoin node. -func NewInspectorAPI(r repo.Repo) *Inspector { - return &Inspector{ - repo: r, - } -} - -// Inspector contains information used to inspect the go-filecoin node. -type Inspector struct { - repo repo.Repo -} - -// AllInspectorInfo contains all information the inspector can gather. -type AllInspectorInfo struct { - Config *config.Config - Runtime *RuntimeInfo - Environment *EnvironmentInfo - Disk *DiskInfo - Memory *MemoryInfo - FilecoinVersion string -} - -// RuntimeInfo contains information about the golang runtime. -type RuntimeInfo struct { - OS string - Arch string - Version string - Compiler string - NumProc int - GoMaxProcs int - NumGoRoutines int - NumCGoCalls int64 -} - -// EnvironmentInfo contains information about the environment filecoin is running in. 
-type EnvironmentInfo struct { - FilAPI string `json:"FIL_API"` - FilPath string `json:"FIL_PATH"` - GoPath string `json:"GOPATH"` -} - -// DiskInfo contains information about disk usage and type. -type DiskInfo struct { - Free uint64 - Total uint64 - FSType string -} - -// MemoryInfo contains information about memory usage. -type MemoryInfo struct { - Swap uint64 - Virtual uint64 -} - -// Runtime returns infrormation about the golang runtime. -func (g *Inspector) Runtime() *RuntimeInfo { - return &RuntimeInfo{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, - Version: runtime.Version(), - Compiler: runtime.Compiler, - NumProc: runtime.NumCPU(), - GoMaxProcs: runtime.GOMAXPROCS(0), - NumGoRoutines: runtime.NumGoroutine(), - NumCGoCalls: runtime.NumCgoCall(), - } -} - -// Environment returns information about the environment filecoin is running in. -func (g *Inspector) Environment() *EnvironmentInfo { - return &EnvironmentInfo{ - FilAPI: os.Getenv("FIL_API"), - FilPath: os.Getenv("FIL_PATH"), - GoPath: os.Getenv("GOPATH"), - } -} - -// Disk return information about filesystem the filecoin nodes repo is on. -func (g *Inspector) Disk() (*DiskInfo, error) { - fsr, ok := g.repo.(*repo.FSRepo) - if !ok { - // we are using a in memory repo - return &DiskInfo{ - Free: 0, - Total: 0, - FSType: "0", - }, nil - } - - p, err := fsr.Path() - if err != nil { - return nil, err - } - - dinfo, err := sysi.DiskUsage(p) - if err != nil { - return nil, err - } - - return &DiskInfo{ - Free: dinfo.Free, - Total: dinfo.Total, - FSType: dinfo.FsType, - }, nil -} - -// Memory return information about system meory usage. -func (g *Inspector) Memory() (*MemoryInfo, error) { - meminfo, err := sysi.MemoryInfo() - if err != nil { - return nil, err - } - return &MemoryInfo{ - Swap: meminfo.Swap, - Virtual: meminfo.Used, - }, nil -} - -// Config return the current config values of the filecoin node. 
-func (g *Inspector) Config() *config.Config { - return g.repo.Config() -} - -// FilecoinVersion returns the version of go-filecoin. -func (g *Inspector) FilecoinVersion() string { - return flags.GitCommit -} diff --git a/cmd/go-filecoin/inspector_test.go b/cmd/go-filecoin/inspector_test.go deleted file mode 100644 index 2a3b893f44..0000000000 --- a/cmd/go-filecoin/inspector_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package commands_test - -import ( - "runtime" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestRuntime(t *testing.T) { - tf.UnitTest(t) - - mr := repo.NewInMemoryRepo() - g := commands.NewInspectorAPI(mr) - rt := g.Runtime() - - assert.Equal(t, runtime.GOOS, rt.OS) - assert.Equal(t, runtime.GOARCH, rt.Arch) - assert.Equal(t, runtime.Version(), rt.Version) - assert.Equal(t, runtime.Compiler, rt.Compiler) - assert.Equal(t, runtime.NumCPU(), rt.NumProc) - assert.Equal(t, runtime.GOMAXPROCS(0), rt.GoMaxProcs) - assert.Equal(t, runtime.NumCgoCall(), rt.NumCGoCalls) -} - -func TestDisk(t *testing.T) { - tf.UnitTest(t) - - mr := repo.NewInMemoryRepo() - g := commands.NewInspectorAPI(mr) - d, err := g.Disk() - - assert.NoError(t, err) - assert.Equal(t, uint64(0), d.Free) - assert.Equal(t, uint64(0), d.Total) - assert.Equal(t, "0", d.FSType) -} - -func TestMemory(t *testing.T) { - tf.UnitTest(t) - - mr := repo.NewInMemoryRepo() - g := commands.NewInspectorAPI(mr) - - _, err := g.Memory() - assert.NoError(t, err) -} - -func TestConfig(t *testing.T) { - tf.UnitTest(t) - - mr := repo.NewInMemoryRepo() - g := commands.NewInspectorAPI(mr) - c := g.Config() - assert.Equal(t, config.NewDefaultConfig(), c) -} diff --git a/cmd/go-filecoin/leb128.go b/cmd/go-filecoin/leb128.go deleted file 
mode 100644 index 2de50c0b52..0000000000 --- a/cmd/go-filecoin/leb128.go +++ /dev/null @@ -1,55 +0,0 @@ -package commands - -import ( - "strconv" - - "github.com/filecoin-project/go-leb128" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" -) - -var leb128Cmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Leb128 cli encode/decode", - ShortDescription: `Decode and encode leb128 text/uint64.`, - }, - Subcommands: map[string]*cmds.Command{ - "decode": decodeLeb128Cmd, - "encode": encodeLeb128Cmd, - }, -} - -var decodeLeb128Cmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "decode leb128", - ShortDescription: `Decode leb128 text`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("text", true, false, `The leb128 encoded text`), - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - text := req.Arguments[0] - val := leb128.ToUInt64([]byte(text)) - return cmds.EmitOnce(res, val) - }, - Type: uint64(0), -} - -var encodeLeb128Cmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "encode leb128", - ShortDescription: `Encode leb128 uint64`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("number", true, false, `The number to encode`), - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - num, err := strconv.ParseUint(req.Arguments[0], 10, 64) - if err != nil { - return err - } - out := leb128.FromUInt64(num) - return cmds.EmitOnce(res, out) - }, - Type: []byte{}, -} diff --git a/cmd/go-filecoin/leb128_test.go b/cmd/go-filecoin/leb128_test.go deleted file mode 100644 index ebaa119cd3..0000000000 --- a/cmd/go-filecoin/leb128_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - 
-func TestLeb128Decode(t *testing.T) { - tf.IntegrationTest(t) - - decodeTests := []struct { - Text string - Want string - }{ - {"A==", "65"}, - } - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - for _, tt := range decodeTests { - output := cmdClient.RunSuccess(ctx, "leb128", "decode", tt.Text).ReadStdoutTrimNewlines() - - require.Equal(t, tt.Want, output) - } -} - -func TestLeb128Encode(t *testing.T) { - tf.IntegrationTest(t) - - encodeTests := []struct { - Text string - Want string - }{ - {"65", "QQ=="}, - } - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - for _, tt := range encodeTests { - output := cmdClient.RunSuccess(ctx, "leb128", "encode", tt.Text).ReadStdoutTrimNewlines() - - require.Contains(t, output, tt.Want) - } -} diff --git a/cmd/go-filecoin/log.go b/cmd/go-filecoin/log.go deleted file mode 100644 index 04ae89e513..0000000000 --- a/cmd/go-filecoin/log.go +++ /dev/null @@ -1,112 +0,0 @@ -package commands - -import ( - "fmt" - "strings" - - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - logging "github.com/ipfs/go-log/v2" -) - -var loglogger = logging.Logger("commands/log") - -var logCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Interact with the daemon subsystems log output.", - ShortDescription: ` -'go-filecoin log' contains utility commands to affect the subsystems logging -output of a running daemon. -`, - }, - - Subcommands: map[string]*cmds.Command{ - "level": logLevelCmd, - "ls": logLsCmd, - "tail": logTailCmd, - }, -} - -var logTailCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Read subsystems log output.", - ShortDescription: ` -Outputs subsystems log output as it is generated. 
-`, - }, - - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - r := logging.NewPipeReader() - go func() { - defer r.Close() // nolint: errcheck - <-req.Context.Done() - }() - - return re.Emit(r) - }, -} - -var logLevelCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Change the logging level.", - ShortDescription: ` -Change the verbosity of one or all subsystems log output. This does not affect -the event log. -`, - }, - - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("level", true, false, `The log level, with 'debug' the most verbose and 'panic' the least verbose. - One of: debug, info, warning, error, fatal, panic. - `), - }, - - Options: []cmdkit.Option{ - cmdkit.StringOption("subsystem", "The subsystem logging identifier"), - cmdkit.StringOption("expression", "Subsystem identifier by regular expression"), - }, - - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - level := strings.ToLower(req.Arguments[0]) - - var s string - if subsystem, ok := req.Options["subsystem"].(string); ok { - if err := logging.SetLogLevel(subsystem, level); err != nil { - return err - } - s = fmt.Sprintf("Changed log level of '%s' to '%s'", subsystem, level) - loglogger.Info(s) - } else if expression, ok := req.Options["expression"].(string); ok { - if err := logging.SetLogLevelRegex(expression, level); err != nil { - return err - } - s = fmt.Sprintf("Changed log level matching expression '%s' to '%s'", subsystem, level) - loglogger.Info(s) - } else { - lvl, err := logging.LevelFromString(level) - if err != nil { - return err - } - logging.SetAllLoggers(lvl) - s = fmt.Sprintf("Changed log level of all subsystems to: %s", level) - loglogger.Info(s) - } - - return cmds.EmitOnce(res, s) - }, - Type: string(""), -} - -var logLsCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "List the logging subsystems.", - ShortDescription: ` -'go-filecoin log ls' is a utility command used to list the 
logging -subsystems of a running daemon. -`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - return cmds.EmitOnce(res, logging.GetSubsystems()) - }, - Type: []string{}, -} diff --git a/cmd/go-filecoin/main.go b/cmd/go-filecoin/main.go deleted file mode 100644 index eec25272ba..0000000000 --- a/cmd/go-filecoin/main.go +++ /dev/null @@ -1,387 +0,0 @@ -package commands - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net" - "net/url" - "os" - "syscall" - - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/ipfs/go-ipfs-cmds/cli" - cmdhttp "github.com/ipfs/go-ipfs-cmds/http" - ma "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr-net" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -const ( - // OptionAPI is the name of the option for specifying the api port. - OptionAPI = "cmdapiaddr" - - // OptionRepoDir is the name of the option for specifying the directory of the repo. - OptionRepoDir = "repodir" - - // OptionSectorDir is the name of the option for specifying the directory into which staged and sealed sectors will be written. - OptionSectorDir = "sectordir" - - // OptionPresealedSectorDir is the name of the option for specifying the directory from which presealed sectors should be pulled when initializing. - OptionPresealedSectorDir = "presealed-sectordir" - - // OptionDrandConfigAddr is the init option for configuring drand to a given network address at init time - OptionDrandConfigAddr = "drand-config-addr" - - // APIPrefix is the prefix for the http version of the api. 
- APIPrefix = "/api" - - // OfflineMode tells us if we should try to connect this Filecoin node to the network - OfflineMode = "offline" - - // ELStdout tells the daemon to write event logs to stdout. - ELStdout = "elstdout" - - // AutoSealIntervalSeconds configures the daemon to check for and seal any staged sectors on an interval. - AutoSealIntervalSeconds = "auto-seal-interval-seconds" - - // SwarmAddress is the multiaddr for this Filecoin node - SwarmAddress = "swarmlisten" - - // SwarmPublicRelayAddress is a public address that the filecoin node - // will listen on if it is operating as a relay. We use this to specify - // the public ip:port of a relay node that is sitting behind a static - // NAT mapping. - SwarmPublicRelayAddress = "swarmrelaypublic" - - // BlockTime is the duration string of the block time the daemon will - // run with. TODO: this should eventually be more explicitly grouped - // with testing as we won't be able to set blocktime in production. - BlockTime = "block-time" - - // PropagationDelay is the duration the miner will wait for blocks to arrive before attempting to mine a new one - PropagationDelay = "prop-delay" - - // PeerKeyFile is the path of file containing key to use for new nodes libp2p identity - PeerKeyFile = "peerkeyfile" - - // WalletKeyFile is the path of file containing wallet keys that may be imported on initialization - WalletKeyFile = "wallet-keyfile" - - // MinerActorAddress when set, sets the daemons's miner address to the provided address - MinerActorAddress = "miner-actor-address" - - // GenesisFile is the path of file containing archive of genesis block DAG data - GenesisFile = "genesisfile" - - // Network populates config with network-specific parameters for a known network (e.g. testnet2) - Network = "network" - - // IsRelay when set causes the the daemon to provide libp2p relay - // services allowing other filecoin nodes behind NATs to talk directly. 
- IsRelay = "is-relay" -) - -func init() { - // add pretty json as an encoding type - cmds.Encoders["pretty-json"] = func(req *cmds.Request) func(io.Writer) cmds.Encoder { - return func(w io.Writer) cmds.Encoder { - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - return enc - } - } -} - -// command object for the local cli -var RootCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "A decentralized storage network", - Subcommands: ` -START RUNNING FILECOIN - go-filecoin init - Initialize a filecoin repo - go-filecoin config [] - Get and set filecoin config values - go-filecoin daemon - Start a long-running daemon process - go-filecoin wallet - Manage your filecoin wallets - go-filecoin address - Interact with addresses - -STORE AND RETRIEVE DATA - go-filecoin client - Make deals, store data, retrieve data - go-filecoin retrieval-client - Manage retrieval client operations - -MINE - go-filecoin miner - Manage a single miner actor - go-filecoin mining - Manage all mining operations for a node - -VIEW DATA STRUCTURES - go-filecoin chain - Inspect the filecoin blockchain - go-filecoin dag - Interact with IPLD DAG objects - go-filecoin deals - Manage deals made by or with this node - go-filecoin show - Get human-readable representations of filecoin objects - -NETWORK COMMANDS - go-filecoin bootstrap - Interact with bootstrap addresses - go-filecoin dht - Interact with the dht - go-filecoin id - Show info about the network peers - go-filecoin ping ... - Send echo request packets to p2p network members - go-filecoin swarm - Interact with the swarm - go-filecoin stats - Monitor statistics on your network usage - go-filecion drand configure - Configure drand server connection - go-filecoin drand random - retrieve drand randomness - -ACTOR COMMANDS - go-filecoin actor - Interact with actors. 
Actors are built-in smart contracts - go-filecoin paych - Payment channel operations - -MESSAGE COMMANDS - go-filecoin message - Manage messages - go-filecoin mpool - Manage the message pool - go-filecoin outbox - Manage the outbound message queue - -TOOL COMMANDS - go-filecoin inspect - Show info about the go-filecoin node - go-filecoin leb128 - Leb128 cli encode/decode - go-filecoin log - Interact with the daemon event log output - go-filecoin protocol - Show protocol parameter details - go-filecoin version - Show go-filecoin version information -`, - }, - Options: []cmdkit.Option{ - cmdkit.StringOption(OptionAPI, "set the api port to use"), - cmdkit.StringOption(OptionRepoDir, "set the repo directory, defaults to ~/.filecoin/repo"), - cmdkit.StringOption(cmds.EncLong, cmds.EncShort, "The encoding type the output should be encoded with (pretty-json or json)").WithDefault("pretty-json"), - cmdkit.BoolOption("help", "Show the full command help text."), - cmdkit.BoolOption("h", "Show a short version of the command help text."), - }, - Subcommands: make(map[string]*cmds.Command), -} - -// command object for the daemon -var rootCmdDaemon = &cmds.Command{ - Subcommands: make(map[string]*cmds.Command), -} - -// all top level commands, not available to daemon -var rootSubcmdsLocal = map[string]*cmds.Command{ - "daemon": daemonCmd, - "init": initCmd, - "version": versionCmd, - "leb128": leb128Cmd, -} - -// all top level commands, available on daemon. set during init() to avoid configuration loops. 
-var rootSubcmdsDaemon = map[string]*cmds.Command{ - "actor": actorCmd, - "address": addrsCmd, - "bootstrap": bootstrapCmd, - "chain": chainCmd, - "config": configCmd, - "client": clientCmd, - "drand": drandCmd, - "dag": dagCmd, - "deals": dealsCmd, - "dht": dhtCmd, - "id": idCmd, - "inspect": inspectCmd, - "leb128": leb128Cmd, - "log": logCmd, - "message": msgCmd, - "miner": minerCmd, - "mining": miningCmd, - "mpool": mpoolCmd, - "outbox": outboxCmd, - "ping": pingCmd, - "protocol": protocolCmd, - "retrieval-client": retrievalClientCmd, - "show": showCmd, - "stats": statsCmd, - "swarm": swarmCmd, - "wallet": walletCmd, - "version": versionCmd, -} - -func init() { - for k, v := range rootSubcmdsLocal { - RootCmd.Subcommands[k] = v - } - - for k, v := range rootSubcmdsDaemon { - RootCmd.Subcommands[k] = v - rootCmdDaemon.Subcommands[k] = v - } -} - -// Run processes the arguments and stdin -func Run(ctx context.Context, args []string, stdin, stdout, stderr *os.File) (int, error) { - err := cli.Run(ctx, RootCmd, args, stdin, stdout, stderr, buildEnv, makeExecutor) - if err == nil { - return 0, nil - } - if exerr, ok := err.(cli.ExitError); ok { - return int(exerr), nil - } - return 1, err -} - -func buildEnv(ctx context.Context, _ *cmds.Request) (cmds.Environment, error) { - return NewClientEnv(ctx), nil -} - -type executor struct { - api string - exec cmds.Executor -} - -func (e *executor) Execute(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - if e.api == "" { - return e.exec.Execute(req, re, env) - } - - client := cmdhttp.NewClient(e.api, cmdhttp.ClientWithAPIPrefix(APIPrefix)) - - res, err := client.Send(req) - if err != nil { - if isConnectionRefused(err) { - return cmdkit.Errorf(cmdkit.ErrFatal, "Connection Refused. 
Is the daemon running?") - } - if cmdKitErr, ok := err.(*cmdkit.Error); ok && cmdKitErr.Code == cmdkit.ErrNormal { - return re.CloseWithError(err) - } - if urlErr, ok := err.(*url.Error); ok && urlErr.Timeout() { - return re.CloseWithError(err) - } - return cmdkit.Errorf(cmdkit.ErrFatal, err.Error()) - } - - // copy received result into cli emitter - err = cmds.Copy(re, res) - if err != nil { - return cmdkit.Errorf(cmdkit.ErrFatal|cmdkit.ErrNormal, err.Error()) - } - return nil -} - -func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) { - isDaemonRequired := requiresDaemon(req) - var api string - if isDaemonRequired { - var err error - api, err = getAPIAddress(req) - if err != nil { - return nil, err - } - } - - if api == "" && isDaemonRequired { - return nil, ErrMissingDaemon - } - - return &executor{ - api: api, - exec: cmds.NewExecutor(RootCmd), - }, nil -} - -func getAPIAddress(req *cmds.Request) (string, error) { - var rawAddr string - var err error - // second highest precedence is env vars. - if envapi := os.Getenv("FIL_API"); envapi != "" { - rawAddr = envapi - } - - // first highest precedence is cmd flag. - if apiAddress, ok := req.Options[OptionAPI].(string); ok && apiAddress != "" { - rawAddr = apiAddress - } - - // we will read the api file if no other option is given. 
- if len(rawAddr) == 0 { - repoDir, _ := req.Options[OptionRepoDir].(string) - repoDir, err = paths.GetRepoPath(repoDir) - if err != nil { - return "", err - } - rawAddr, err = repo.APIAddrFromRepoPath(repoDir) - if err != nil { - return "", errors.Wrap(err, "can't find API endpoint address in environment, command-line, or local repo (is the daemon running?)") - } - } - - maddr, err := ma.NewMultiaddr(rawAddr) - if err != nil { - return "", errors.Wrap(err, fmt.Sprintf("unable to convert API endpoint address %s to a multiaddr", rawAddr)) - } - - _, host, err := manet.DialArgs(maddr) - if err != nil { - return "", errors.Wrap(err, fmt.Sprintf("unable to dial API endpoint address %s", maddr)) - } - - return host, nil -} - -func requiresDaemon(req *cmds.Request) bool { - for cmd := range rootSubcmdsLocal { - if len(req.Path) > 0 && req.Path[0] == cmd { - return false - } - } - return true -} - -func isConnectionRefused(err error) bool { - urlErr, ok := err.(*url.Error) - if !ok { - return false - } - - opErr, ok := urlErr.Err.(*net.OpError) - if !ok { - return false - } - - syscallErr, ok := opErr.Err.(*os.SyscallError) - if !ok { - return false - } - return syscallErr.Err == syscall.ECONNREFUSED -} - -var priceOption = cmdkit.StringOption("gas-price", "Price (FIL e.g. 
0.00013) to pay for each GasUnit consumed mining this message") -var limitOption = cmdkit.Int64Option("gas-limit", "Maximum GasUnits this message is allowed to consume") -var previewOption = cmdkit.BoolOption("preview", "Preview the Gas cost of this command without actually executing it") - -func parseGasOptions(req *cmds.Request) (types.AttoFIL, gas.Unit, bool, error) { - priceOption := req.Options["gas-price"] - if priceOption == nil { - return types.ZeroAttoFIL, gas.Zero, false, errors.New("gas-price option is required") - } - - price, ok := types.NewAttoFILFromFILString(priceOption.(string)) - if !ok { - return types.ZeroAttoFIL, gas.NewGas(0), false, errors.New("invalid gas price (specify FIL as a decimal number)") - } - - limitOption := req.Options["gas-limit"] - if limitOption == nil { - return types.ZeroAttoFIL, gas.NewGas(0), false, errors.New("gas-limit option is required") - } - - gasLimitInt, ok := limitOption.(int64) - if !ok { - msg := fmt.Sprintf("invalid gas limit: %s", limitOption) - return types.ZeroAttoFIL, gas.NewGas(0), false, errors.New(msg) - } - - preview, _ := req.Options["preview"].(bool) - - return price, gas.NewGas(gasLimitInt), preview, nil -} diff --git a/cmd/go-filecoin/main_test.go b/cmd/go-filecoin/main_test.go deleted file mode 100644 index 80ee9cd829..0000000000 --- a/cmd/go-filecoin/main_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package commands - -import ( - "context" - "testing" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/stretchr/testify/assert" -) - -func TestRequiresDaemon(t *testing.T) { - tf.UnitTest(t) - - reqWithDaemon, err := cmds.NewRequest(context.Background(), []string{"chain", "head"}, nil, []string{}, nil, rootCmdDaemon) - assert.NoError(t, err) - assert.True(t, requiresDaemon(reqWithDaemon)) - - reqWithoutDaemon, err := cmds.NewRequest(context.Background(), []string{"daemon"}, nil, []string{}, nil, RootCmd) - 
assert.NoError(t, err) - assert.False(t, requiresDaemon(reqWithoutDaemon)) - - reqSubcmdDaemon, err := cmds.NewRequest(context.Background(), []string{"leb128", "decode"}, nil, []string{"A=="}, nil, RootCmd) - assert.NoError(t, err) - assert.False(t, requiresDaemon(reqSubcmdDaemon)) -} diff --git a/cmd/go-filecoin/message.go b/cmd/go-filecoin/message.go deleted file mode 100644 index 49ec3d06fd..0000000000 --- a/cmd/go-filecoin/message.go +++ /dev/null @@ -1,285 +0,0 @@ -package commands - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cst" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -var msgCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Send and monitor messages", - }, - Subcommands: map[string]*cmds.Command{ - "send": msgSendCmd, - "sendsigned": signedMsgSendCmd, - "status": msgStatusCmd, - "wait": msgWaitCmd, - }, -} - -// MessageSendResult is the return type for message send command -type MessageSendResult struct { - Cid cid.Cid - GasUsed gas.Unit - Preview bool -} - -var msgSendCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Send a message", // This feels too generic... 
- }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("target", true, false, "Address of the actor to send the message to"), - cmdkit.StringArg("method", false, false, "The method to invoke on the target actor"), - }, - Options: []cmdkit.Option{ - cmdkit.StringOption("value", "Value to send with message in FIL"), - cmdkit.StringOption("from", "Address to send message from"), - priceOption, - limitOption, - previewOption, - // TODO: (per dignifiedquire) add an option to set the nonce and method explicitly - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - target, err := address.NewFromString(req.Arguments[0]) - if err != nil { - return err - } - - rawVal := req.Options["value"] - if rawVal == nil { - rawVal = "0" - } - val, ok := types.NewAttoFILFromFILString(rawVal.(string)) - if !ok { - return errors.New("mal-formed value") - } - - fromAddr, err := fromAddrOrDefault(req, env) - if err != nil { - return err - } - - gasPrice, gasLimit, preview, err := parseGasOptions(req) - if err != nil { - return err - } - - methodID := builtin.MethodSend - methodInput, ok := req.Options["method"].(uint64) - if ok { - methodID = abi.MethodNum(methodInput) - } - - if preview { - usedGas, err := GetPorcelainAPI(env).MessagePreview( - req.Context, - fromAddr, - target, - methodID, - ) - if err != nil { - return err - } - return re.Emit(&MessageSendResult{ - Cid: cid.Cid{}, - GasUsed: usedGas, - Preview: true, - }) - } - - c, _, err := GetPorcelainAPI(env).MessageSend( - req.Context, - fromAddr, - target, - val, - gasPrice, - gasLimit, - methodID, - adt.Empty, - ) - if err != nil { - return err - } - - return re.Emit(&MessageSendResult{ - Cid: c, - GasUsed: gas.NewGas(0), - Preview: false, - }) - }, - Type: &MessageSendResult{}, -} - -var signedMsgSendCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Send a signed message", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("message", true, false, "Signed Json message"), - 
}, - Options: []cmdkit.Option{}, - - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - msg := req.Arguments[0] - - m := types.SignedMessage{} - - bmsg := []byte(msg) - err := json.Unmarshal(bmsg, &m) - if err != nil { - return err - } - signed := &m - - c, _, err := GetPorcelainAPI(env).SignedMessageSend( - req.Context, - signed, - ) - if err != nil { - return err - } - - return re.Emit(&MessageSendResult{ - Cid: c, - GasUsed: gas.NewGas(0), - Preview: false, - }) - }, - Type: &MessageSendResult{}, -} - -// WaitResult is the result of a message wait call. -type WaitResult struct { - Message *types.SignedMessage - Receipt *vm.MessageReceipt - Signature vm.ActorMethodSignature -} - -var msgWaitCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Wait for a message to appear in a mined block", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "CID of the message to wait for"), - }, - Options: []cmdkit.Option{ - cmdkit.BoolOption("message", "Print the whole message").WithDefault(true), - cmdkit.BoolOption("receipt", "Print the whole message receipt").WithDefault(true), - cmdkit.BoolOption("return", "Print the return value from the receipt").WithDefault(false), - cmdkit.Uint64Option("lookback", "Number of previous tipsets to be checked before waiting").WithDefault(msg.DefaultMessageWaitLookback), - cmdkit.StringOption("timeout", "Maximum time to wait for message. 
e.g., 300ms, 1.5h, 2h45m.").WithDefault("10m"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - msgCid, err := cid.Parse(req.Arguments[0]) - if err != nil { - return errors.Wrap(err, "invalid cid "+req.Arguments[0]) - } - - fmt.Printf("waiting for: %s\n", req.Arguments[0]) - - found := false - - timeoutDuration, err := time.ParseDuration(req.Options["timeout"].(string)) - if err != nil { - return errors.Wrap(err, "Invalid timeout string") - } - - lookback, _ := req.Options["lookback"].(uint64) - - ctx, cancel := context.WithTimeout(req.Context, timeoutDuration) - defer cancel() - - err = GetPorcelainAPI(env).MessageWait(ctx, msgCid, lookback, func(blk *block.Block, msg *types.SignedMessage, receipt *vm.MessageReceipt) error { - found = true - sig, err := GetPorcelainAPI(env).ActorGetSignature(req.Context, msg.Message.To, msg.Message.Method) - if err != nil && err != cst.ErrNoMethod && err != cst.ErrNoActorImpl { - return errors.Wrap(err, "Couldn't get signature for message") - } - - res := WaitResult{ - Message: msg, - Receipt: receipt, - // Signature is required to decode the output. 
- Signature: sig, - } - re.Emit(&res) // nolint: errcheck - - return nil - }) - - if err != nil && !found { - return err - } - return nil - }, - Type: WaitResult{}, -} - -// MessageStatusResult is the status of a message on chain or in the message queue/pool -type MessageStatusResult struct { - InPool bool // Whether the message is found in the mpool - PoolMsg *types.SignedMessage - InOutbox bool // Whether the message is found in the outbox - OutboxMsg *message.Queued - ChainMsg *msg.ChainMessage -} - -var msgStatusCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show status of a message", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "CID of the message to inspect"), - }, - Options: []cmdkit.Option{}, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - msgCid, err := cid.Parse(req.Arguments[0]) - if err != nil { - return errors.Wrap(err, "invalid cid "+req.Arguments[0]) - } - - api := GetPorcelainAPI(env) - result := MessageStatusResult{} - - // Look in message pool - result.PoolMsg, result.InPool = api.MessagePoolGet(msgCid) - - // Look in outbox - for _, addr := range api.OutboxQueues() { - for _, qm := range api.OutboxQueueLs(addr) { - cid, err := qm.Msg.Cid() - if err != nil { - return err - } - if cid.Equals(msgCid) { - result.InOutbox = true - result.OutboxMsg = qm - } - } - } - - return re.Emit(&result) - }, - Type: &MessageStatusResult{}, -} diff --git a/cmd/go-filecoin/message_integration_test.go b/cmd/go-filecoin/message_integration_test.go deleted file mode 100644 index fabc546de8..0000000000 --- a/cmd/go-filecoin/message_integration_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package commands_test - -import ( - "context" - "encoding/json" - "strconv" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - 
"github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestMessageSend(t *testing.T) { - t.Skip("This can be unskipped with fake proofs") - tf.IntegrationTest(t) - ctx := context.Background() - builder := test.NewNodeBuilder(t) - defaultAddr := fortest.TestAddresses[0] - - cs := node.FixtureChainSeed(t) - builder.WithGenesisInit(cs.GenesisInitFunc) - builder.WithConfig(cs.MinerConfigOpt(0)) - builder.WithConfig(node.DefaultAddressConfigOpt(defaultAddr)) - builder.WithInitOpt(cs.KeyInitOpt(1)) - builder.WithInitOpt(cs.KeyInitOpt(0)) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - _, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - from, err := n.PorcelainAPI.WalletDefaultAddress() // this should = fixtures.TestAddresses[0] - require.NoError(t, err) - - t.Log("[failure] invalid target") - cmdClient.RunFail( - ctx, - address.ErrUnknownNetwork.Error(), - "message", "send", - "--from", from.String(), - "--gas-price", "0", "--gas-limit", "300", - "--value=10", "xyz", - ) - - t.Log("[success] with from") - cmdClient.RunSuccess( - ctx, - "message", "send", - "--from", from.String(), - "--gas-price", "1", - "--gas-limit", "300", - fortest.TestAddresses[3].String(), - ) - - t.Log("[success] with from and int value") - cmdClient.RunSuccess( - ctx, - "message", "send", - "--from", from.String(), - "--gas-price", "1", - "--gas-limit", "300", - "--value", "10", - fortest.TestAddresses[3].String(), - ) - - t.Log("[success] with from and decimal value") - cmdClient.RunSuccess( - ctx, - "message", "send", - "--from", from.String(), - "--gas-price", "1", - "--gas-limit", "300", - "--value", "5.5", - 
fortest.TestAddresses[3].String(), - ) -} - -func TestMessageWait(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - seed, genCfg, fakeClock, chainClock := test.CreateBootstrapSetup(t) - node := test.CreateBootstrapMiner(ctx, t, seed, chainClock, genCfg) - - cmdClient, clientStop := test.RunNodeAPI(ctx, node, t) - defer clientStop() - - t.Run("[success] transfer only", func(t *testing.T) { - var sendResult commands.MessageSendResult - cmdClient.RunMarshaledJSON(ctx, &sendResult, "message", "send", - "--gas-price", "1", - "--gas-limit", "300", - fortest.TestAddresses[1].String(), - ) - - // Fail with timeout before the message has been mined - cmdClient.RunFail( - ctx, - "deadline exceeded", - "message", "wait", - "--message=false", - "--receipt=false", - "--timeout=100ms", - "--return", - sendResult.Cid.String(), - ) - - test.RequireMineOnce(ctx, t, fakeClock, node) - - var waitResult commands.WaitResult - cmdClient.RunMarshaledJSON( - ctx, - &waitResult, - "message", "wait", - "--message=false", - "--receipt=false", - "--timeout=1m", - "--return", - sendResult.Cid.String(), - ) - assert.Equal(t, fortest.TestAddresses[1], waitResult.Message.Message.To) - }) - - t.Run("[success] lookback", func(t *testing.T) { - var sendResult commands.MessageSendResult - cmdClient.RunMarshaledJSON(ctx, &sendResult, "message", "send", - "--from", fortest.TestAddresses[0].String(), - "--gas-price", "1", - "--gas-limit", "300", - fortest.TestAddresses[1].String(), - ) - - // mine 4 times so message is on the chain a few tipsets back - for i := 0; i < 4; i++ { - test.RequireMineOnce(ctx, t, fakeClock, node) - } - - // Fail with timeout because the message is too early for the default lookback (2) - cmdClient.RunFail( - ctx, - "deadline exceeded", - "message", "wait", - "--message=false", - "--receipt=false", - "--timeout=1s", - "--return", - sendResult.Cid.String(), - ) - - // succeed by specifying a higher lookback - var waitResult commands.WaitResult - 
cmdClient.RunMarshaledJSON( - ctx, - &waitResult, - "message", "wait", - "--message=false", - "--receipt=false", - "--lookback=10", - "--timeout=1m", - "--return", - sendResult.Cid.String(), - ) - }) -} - -func TestMessageSendBlockGasLimit(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("Unskip using fake proofs") - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - defaultAddr := fortest.TestAddresses[0] - - buildWithMiner(t, builder) - builder.WithConfig(node.DefaultAddressConfigOpt(defaultAddr)) - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - doubleTheBlockGasLimit := strconv.Itoa(int(types.BlockGasLimit) * 2) - halfTheBlockGasLimit := strconv.Itoa(int(types.BlockGasLimit) / 2) - result := struct{ Messages types.TxMeta }{} - - t.Run("when the gas limit is above the block limit, the message fails", func(t *testing.T) { - cmdClient.RunFail( - ctx, - "block gas limit", - "message", "send", - "--gas-price", "1", "--gas-limit", doubleTheBlockGasLimit, - "--value=10", fortest.TestAddresses[1].String(), - ) - }) - - t.Run("when the gas limit is below the block limit, the message succeeds", func(t *testing.T) { - cmdClient.RunSuccess( - ctx, - "message", "send", - "--gas-price", "1", "--gas-limit", halfTheBlockGasLimit, - "--value=10", fortest.TestAddresses[1].String(), - ) - - blk, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - blkCid := blk.Cid().String() - - blockInfo := cmdClient.RunSuccess(ctx, "show", "header", blkCid, "--enc", "json").ReadStdoutTrimNewlines() - - require.NoError(t, json.Unmarshal([]byte(blockInfo), &result)) - assert.NotEmpty(t, result.Messages.SecpRoot, "msg under the block gas limit passes validation and is run in the block") - }) -} - -func TestMessageStatus(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("Unskip with fake proofs") - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - - buildWithMiner(t, builder) - n, cmdClient, done := 
builder.BuildAndStartAPI(ctx) - defer done() - - t.Run("queue then on chain", func(t *testing.T) { - msg := cmdClient.RunSuccess( - ctx, - "message", "send", - "--from", fortest.TestAddresses[0].String(), - "--gas-price", "1", "--gas-limit", "300", - "--value=1234", - fortest.TestAddresses[1].String(), - ) - - msgcid := msg.ReadStdoutTrimNewlines() - status := cmdClient.RunSuccess(ctx, "message", "status", msgcid).ReadStdout() - - assert.Contains(t, status, "In outbox") - assert.Contains(t, status, "In mpool") - assert.NotContains(t, status, "On chain") // not found on chain (yet) - assert.Contains(t, status, "1234") // the "value" - - _, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - status = cmdClient.RunSuccess(ctx, "message", "status", msgcid).ReadStdout() - - assert.NotContains(t, status, "In outbox") - assert.NotContains(t, status, "In mpool") - assert.Contains(t, status, "On chain") - assert.Contains(t, status, "1234") // the "value" - - status = cmdClient.RunSuccess(ctx, "message", "status", "QmPVkJMTeRC6iBByPWdrRkD3BE5UXsj5HPzb4kPqL186mS").ReadStdout() - assert.NotContains(t, status, "In outbox") - assert.NotContains(t, status, "In mpool") - assert.NotContains(t, status, "On chain") - }) -} diff --git a/cmd/go-filecoin/miner.go b/cmd/go-filecoin/miner.go deleted file mode 100644 index 4e25fb4081..0000000000 --- a/cmd/go-filecoin/miner.go +++ /dev/null @@ -1,326 +0,0 @@ -package commands - -import ( - "fmt" - "math/big" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - cid "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/pkg/errors" - - 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -var minerCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Manage a single miner actor", - }, - Subcommands: map[string]*cmds.Command{ - "create": minerCreateCmd, - "status": minerStatusCommand, - "set-price": minerSetPriceCmd, - "update-peerid": minerUpdatePeerIDCmd, - "set-worker": minerSetWorkerAddressCmd, - }, -} - -// MinerCreateResult is the type returned when creating a miner. -type MinerCreateResult struct { - Address address.Address - GasUsed gas.Unit - Preview bool -} - -var minerCreateCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Create a new file miner with FIL", - ShortDescription: `Issues a new message to the network to create the miner, then waits for the -message to be mined as this is required to return the address of the new miner. -Collateral will be committed at the rate of 0.001FIL per sector. 
When the -miner's collateral drops below 0.001FIL, the miner will not be able to commit -additional sectors.`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("collateral", true, false, "The amount of collateral, in FIL."), - }, - Options: []cmdkit.Option{ - cmdkit.StringOption("sectorsize", "size of the sectors which this miner will commit, in bytes"), - cmdkit.StringOption("from", "address to send from"), - cmdkit.StringOption("peerid", "Base58-encoded libp2p peer ID that the miner will operate"), - priceOption, - limitOption, - previewOption, - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - var err error - - sectorSize, err := optionalSectorSizeWithDefault(req.Options["sectorsize"], constants.DevSectorSize) - if err != nil { - return err - } - - sealProofType, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize) - if err != nil { - return err - } - - fromAddr, err := fromAddrOrDefault(req, env) - if err != nil { - return err - } - - var pid peer.ID - peerid := req.Options["peerid"] - if peerid != nil { - pid, err = peer.Decode(peerid.(string)) - if err != nil { - return errors.Wrap(err, "invalid peer id") - } - } - if pid == "" { - pid = GetPorcelainAPI(env).NetworkGetPeerID() - } - - collateral, ok := types.NewAttoFILFromFILString(req.Arguments[0]) - if !ok { - return ErrInvalidCollateral - } - - gasPrice, gasLimit, preview, err := parseGasOptions(req) - if err != nil { - return err - } - - if preview { - usedGas, err := GetPorcelainAPI(env).MinerPreviewCreate( - req.Context, - fromAddr, - sectorSize, - pid, - ) - if err != nil { - return err - } - return re.Emit(&MinerCreateResult{ - Address: address.Undef, - GasUsed: usedGas, - Preview: true, - }) - } - - addr, err := GetPorcelainAPI(env).MinerCreate( - req.Context, - fromAddr, - gasPrice, - gasLimit, - sealProofType, - pid, - collateral, - ) - if err != nil { - return errors.Wrap(err, "Could not create miner. 
Please consult the documentation to setup your wallet and genesis block correctly") - } - - return re.Emit(&MinerCreateResult{ - Address: addr, - GasUsed: gas.NewGas(0), - Preview: false, - }) - }, - Type: &MinerCreateResult{}, -} - -// MinerSetPriceResult is the return type for miner set-price command -type MinerSetPriceResult struct { - MinerAddress address.Address - Price types.AttoFIL -} - -var minerSetPriceCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Set the minimum price for storage", - ShortDescription: `Sets the mining.minimumPrice in config and creates a new ask for the given price. -This command waits for the ask to be mined.`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("storageprice", true, false, "The new price of storage in FIL per byte per block"), - cmdkit.StringArg("duration", true, false, "How long this ask is valid for in epochs"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - price, ok := types.NewAttoFILFromFILString(req.Arguments[0]) - if !ok { - return ErrInvalidPrice - } - - expiry, ok := big.NewInt(0).SetString(req.Arguments[1], 10) - if !ok { - return fmt.Errorf("expiry must be a valid integer") - } - - err := GetStorageAPI(env).AddAsk(price, abi.ChainEpoch(expiry.Uint64())) - if err != nil { - return err - } - - minerAddr, err := GetBlockAPI(env).MinerAddress() - if err != nil { - return err - } - - return re.Emit(&MinerSetPriceResult{minerAddr, price}) - }, - Type: &MinerSetPriceResult{}, -} - -// MinerUpdatePeerIDResult is the return type for miner update-peerid command -type MinerUpdatePeerIDResult struct { - Cid cid.Cid - GasUsed gas.Unit - Preview bool -} - -var minerUpdatePeerIDCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Change the libp2p identity that a miner is operating", - ShortDescription: `Issues a new message to the network to update the miner's libp2p identity.`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("address", 
true, false, "Miner address to update peer ID for"), - cmdkit.StringArg("peerid", true, false, "Base58-encoded libp2p peer ID that the miner will operate"), - }, - Options: []cmdkit.Option{ - cmdkit.StringOption("from", "Address to send from"), - priceOption, - limitOption, - previewOption, - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - minerAddr, err := address.NewFromString(req.Arguments[0]) - if err != nil { - return err - } - - fromAddr, err := fromAddrOrDefault(req, env) - if err != nil { - return err - } - - newPid, err := peer.Decode(req.Arguments[1]) - if err != nil { - return err - } - - gasPrice, gasLimit, preview, err := parseGasOptions(req) - if err != nil { - return err - } - - if preview { - usedGas, err := GetPorcelainAPI(env).MessagePreview( - req.Context, - fromAddr, - minerAddr, - builtin.MethodsMiner.ChangePeerID, - newPid, - ) - if err != nil { - return err - } - - return re.Emit(&MinerUpdatePeerIDResult{ - Cid: cid.Cid{}, - GasUsed: usedGas, - Preview: true, - }) - } - - params := miner.ChangePeerIDParams{NewID: newPid} - - c, _, err := GetPorcelainAPI(env).MessageSend( - req.Context, - fromAddr, - minerAddr, - types.ZeroAttoFIL, - gasPrice, - gasLimit, - builtin.MethodsMiner.ChangePeerID, - ¶ms, - ) - if err != nil { - return err - } - - return re.Emit(&MinerUpdatePeerIDResult{ - Cid: c, - GasUsed: gas.NewGas(0), - Preview: false, - }) - }, - Type: &MinerUpdatePeerIDResult{}, -} - -var minerStatusCommand = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Get the status of a miner", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - minerAddr, err := optionalAddr(req.Arguments[0]) - if err != nil { - return err - } - - porcelainAPI := GetPorcelainAPI(env) - status, err := porcelainAPI.MinerGetStatus(req.Context, minerAddr, porcelainAPI.ChainHeadKey()) - if err != nil { - return err - } - return re.Emit(status) - }, - Type: porcelain.MinerStatus{}, - 
Arguments: []cmdkit.Argument{ - cmdkit.StringArg("miner", true, false, "A miner actor address"), - }, -} - -var minerSetWorkerAddressCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Set the address of the miner worker. Returns a message CID", - ShortDescription: "Set the address of the miner worker to the provided address. When a miner is created, this address defaults to the miner owner. Use this command to change the default. Returns a message CID to wait for the message to appear on chain.", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("new-address", true, false, "The address of the new miner worker."), - }, - Options: []cmdkit.Option{ - priceOption, - limitOption, - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - newWorker, err := address.NewFromString(req.Arguments[0]) - if err != nil { - return err - } - - gasPrice, gasLimit, _, err := parseGasOptions(req) - if err != nil { - return err - } - - msgCid, err := GetPorcelainAPI(env).MinerSetWorkerAddress(req.Context, newWorker, gasPrice, gasLimit) - if err != nil { - return err - } - - return re.Emit(msgCid) - }, - Type: cid.Cid{}, -} diff --git a/cmd/go-filecoin/miner_daemon_test.go b/cmd/go-filecoin/miner_daemon_test.go deleted file mode 100644 index 55dcb159bf..0000000000 --- a/cmd/go-filecoin/miner_daemon_test.go +++ /dev/null @@ -1,423 +0,0 @@ -package commands_test - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "math/big" - "strings" - "sync" - "testing" - "time" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - specsbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - th 
"github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/fastesting" - "github.com/filecoin-project/go-filecoin/tools/fast/series" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -func TestMinerHelp(t *testing.T) { - t.Skip("Long term solution: #3642") - tf.IntegrationTest(t) - - t.Run("--help shows general miner help", func(t *testing.T) { - - expected := []string{ - "miner create - Create a new file miner with FIL", - "miner owner - Show the actor address of ", - "miner power - Get the power of a miner versus the total storage market power", - "miner set-price - Set the minimum price for storage", - "miner update-peerid
- Change the libp2p identity that a miner is operating", - } - - result := runHelpSuccess(t, "miner", "--help") - for _, elem := range expected { - assert.Contains(t, result, elem) - } - }) - - t.Run("update-peerid --help shows update-peerid help", func(t *testing.T) { - - result := runHelpSuccess(t, "miner", "update-peerid", "--help") - assert.Contains(t, result, "Issues a new message to the network to update the miner's libp2p identity.") - }) - - t.Run("owner --help shows owner help", func(t *testing.T) { - - result := runHelpSuccess(t, "miner", "owner", "--help") - assert.Contains(t, result, "Given miner address, output the address of the actor that owns the miner.") - }) - - t.Run("create --help shows create help", func(t *testing.T) { - - expected := []string{ - "Issues a new message to the network to create the miner, then waits for the", - "message to be mined as this is required to return the address of the new miner.", - "Collateral will be committed at the rate of 0.001FIL per sector. When the", - "miner's collateral drops below 0.001FIL, the miner will not be able to commit", - "additional sectors.", - } - - result := runHelpSuccess(t, "miner", "create", "--help") - for _, elem := range expected { - assert.Contains(t, result, elem) - } - }) - t.Run("set-worker --help shows set-worker help", func(t *testing.T) { - expected := []string{ - "go-filecoin miner set-worker - Set the address of the miner worker", - "go-filecoin miner set-worker [--gas-price=] [--gas-limit=] [--] ", - " - The address of the new miner worker.", - "--gas-price string - Price (FIL e.g. 0.00013) to pay for each GasUnit consumed mining this message.", - "--gas-limit uint64 - Maximum GasUnits this message is allowed to consume.", - "Set the address of the miner worker to the provided address. When a miner is created, this address defaults to the miner owner. 
Use this command to change the default.", - } - result := runHelpSuccess(t, "miner", "set-worker", "--help") - for _, elem := range expected { - assert.Contains(t, result, elem) - } - }) - t.Run("worker --help shows worker help", func(t *testing.T) { - result := runHelpSuccess(t, "miner", "worker", "--help") - assert.Contains(t, result, "go-filecoin miner worker - Show the address of the miner worker") - }) -} - -func runHelpSuccess(t *testing.T, args ...string) string { - fi, err := ioutil.TempFile("", "gengentest") - if err != nil { - t.Fatal(err) - } - - if _, err = gengen.GenGenesisCar(minerDaemonTestConfig(t), fi); err != nil { - t.Fatal(err) - } - - _ = fi.Close() - d := th.NewDaemon(t, th.GenesisFile(fi.Name())).Start() - defer d.ShutdownSuccess() - - op := d.RunSuccess(args...) - return op.ReadStdoutTrimNewlines() -} - -func TestMinerCreate(t *testing.T) { - t.Skip("Long term solution: #3642") - tf.IntegrationTest(t) - - testAddr := fortest.TestAddresses[2] - t.Run("success", func(t *testing.T) { - - var err error - var addr address.Address - - tf := func(fromAddress address.Address, pid peer.ID) { - d1 := makeTestDaemonWithMinerAndStart(t) - defer d1.ShutdownSuccess() - - d := th.NewDaemon(t, th.KeyFile(fortest.KeyFilePaths()[2])).Start() - defer d.ShutdownSuccess() - - d1.ConnectSuccess(d) - - args := []string{"miner", "create", "--from", fromAddress.String(), "--gas-price", "1", "--gas-limit", "300"} - - if pid.Pretty() != peer.ID("").Pretty() { - args = append(args, "--peerid", pid.Pretty()) - } - - collateral := specsbig.Mul(specsbig.NewInt(int64(1000000*constants.DevSectorSize)), abi.NewTokenAmount(10)) - args = append(args, collateral.String()) - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - miner := d.RunSuccess(args...) 
- addr, err = address.NewFromString(strings.Trim(miner.ReadStdout(), "\n")) - assert.NoError(t, err) - assert.NotEqual(t, addr, address.Undef) - wg.Done() - }() - - // ensure mining runs after the command in our goroutine - d1.MineAndPropagate(time.Second, d) - wg.Wait() - - // expect address to have been written in config - config := d.RunSuccess("config mining.minerAddress") - assert.Contains(t, config.ReadStdout(), addr.String()) - } - - tf(testAddr, peer.ID("")) - - // Will accept a peer ID if one is provided - tf(testAddr, th.RequireRandomPeerID(t)) - }) - - t.Run("unsupported sector size", func(t *testing.T) { - d := th.NewDaemon(t).Start() - defer d.ShutdownSuccess() - - d.CreateAddress() - - d.RunFail("unsupported sector size", - "miner", "create", "20", - "--sectorsize", "42", - ) - }) - - t.Run("validation failure", func(t *testing.T) { - - d := th.NewDaemon(t).Start() - defer d.ShutdownSuccess() - - d.CreateAddress() - - d.RunFail("invalid sector size", - "miner", "create", "20", - "--sectorsize", "ninetybillion", - ) - d.RunFail("invalid peer id", - "miner", "create", - "--from", testAddr.String(), "--gas-price", "1", "--gas-limit", "300", "--peerid", "flarp", "20", - ) - d.RunFail("invalid from address", - "miner", "create", - "--from", "hello", "--gas-price", "1", "--gas-limit", "1000000", "20", - ) - d.RunFail("invalid collateral", - "miner", "create", - "--from", testAddr.String(), "--gas-price", "1", "--gas-limit", "100", "2f", - ) - }) -} - -func TestMinerSetPrice(t *testing.T) { - t.Skip("Long term solution: #3642") - tf.IntegrationTest(t) - - d1 := th.NewDaemon(t, - th.WithMiner(fortest.TestMiners[0]), - th.KeyFile(fortest.KeyFilePaths()[0]), - th.DefaultAddress(fortest.TestAddresses[0])).Start() - defer d1.ShutdownSuccess() - - d1.RunSuccess("mining", "start") - - setPrice := d1.RunSuccess("miner", "set-price", "62", "6", "--gas-price", "1", "--gas-limit", "300") - assert.Contains(t, setPrice.ReadStdoutTrimNewlines(), fmt.Sprintf("Set price for 
miner %s to 62.", fortest.TestMiners[0])) - - configuredPrice := d1.RunSuccess("config", "mining.storagePrice") - - assert.Equal(t, `"62"`, configuredPrice.ReadStdoutTrimNewlines()) -} - -func TestMinerCreateSuccess(t *testing.T) { - t.Skip("Long term solution: #3642") - tf.IntegrationTest(t) - - ctx, env := fastesting.NewTestEnvironment(context.Background(), t, fast.FilecoinOpts{}) - defer func() { - require.NoError(t, env.Teardown(ctx)) - }() - - minerNode := env.RequireNewNodeWithFunds(1000) - - series.CtxMiningNext(ctx, 1) - minerAddress := requireMinerCreate(ctx, t, env, minerNode) - - assert.NotEqual(t, address.Undef, minerAddress) -} - -func requireMinerCreate(ctx context.Context, t *testing.T, env *fastesting.TestEnvironment, minerNode *fast.Filecoin) address.Address { - - pparams, err := minerNode.Protocol(ctx) - require.NoError(t, err) - - sinfo := pparams.SupportedSectors[0] - - minerAddress, err := minerNode.MinerCreate(ctx, big.NewInt(1), fast.AOSectorSize(sinfo.Size), fast.AOPrice(big.NewFloat(1.0)), fast.AOLimit(300)) - require.NoError(t, err) - - return minerAddress -} - -func TestMinerCreateChargesGas(t *testing.T) { - t.Skip("Long term solution: #3642") - tf.IntegrationTest(t) - t.Skip("new runtime") - - // miningMinerOwnerAddr := fixtures.TestAddresses[0] - - // d1 := makeTestDaemonWithMinerAndStart(t) - // defer d1.ShutdownSuccess() - // d := th.NewDaemon(t, th.KeyFile(fixtures.KeyFilePaths()[2])).Start() - // defer d.ShutdownSuccess() - // d1.ConnectSuccess(d) - // var wg sync.WaitGroup - - // // make sure the FIL shows up in the MinerOwnerAccount - // startingBalance := queryBalance(t, d, miningMinerOwnerAddr) - - // wg.Add(1) - // go func() { - // testMiner := d.RunSuccess("miner", "create", "--from", fixtures.TestAddresses[2], "--gas-price", "333", "--gas-limit", "100", "200") - // addr, err := address.NewFromString(strings.Trim(testMiner.ReadStdout(), "\n")) - // assert.NoError(t, err) - // assert.NotEqual(t, addr, address.Undef) - // 
wg.Done() - // }() - // // ensure mining runs after the command in our goroutine - // d1.MineAndPropagate(time.Second, d) - // wg.Wait() - - // expectedBlockReward := consensus.NewDefaultBlockRewarder().BlockRewardAmount() - // expectedPrice := types.NewAttoFILFromFIL(333) - // expectedGasCost := big.NewInt(100) - // expectedBalance := expectedBlockReward.Add(expectedPrice.MulBigInt(expectedGasCost)) - // newBalance := queryBalance(t, d, miningMinerOwnerAddr) - // assert.Equal(t, expectedBalance.String(), newBalance.Sub(startingBalance).String()) -} - -// func queryBalance(t *testing.T, d *th.TestDaemon, actorAddr address.Address) types.AttoFIL { -// output := d.RunSuccess("actor", "ls", "--enc", "json") -// result := output.ReadStdoutTrimNewlines() -// for _, line := range bytes.Split([]byte(result), []byte{'\n'}) { -// var a commands.ActorView -// err := json.Unmarshal(line, &a) -// require.NoError(t, err) -// if a.Address == actorAddr.String() { -// return a.Balance -// } -// } -// t.Fail() -// return types.ZeroAttoFIL -// } - -func TestMinerStatus(t *testing.T) { - t.Skip("Long term solution: #3642") - tf.IntegrationTest(t) - - fi, err := ioutil.TempFile("", "gengentest") - if err != nil { - t.Fatal(err) - } - - if _, err = gengen.GenGenesisCar(minerDaemonTestConfig(t), fi); err != nil { - t.Fatal(err) - } - - _ = fi.Close() - - d := th.NewDaemon(t, th.GenesisFile(fi.Name())).Start() - defer d.ShutdownSuccess() - - actorLsOutput := d.RunSuccess("actor", "ls") - - scanner := bufio.NewScanner(strings.NewReader(actorLsOutput.ReadStdout())) - var addressStruct struct{ Address string } - - for scanner.Scan() { - line := scanner.Text() - if strings.Contains(line, "MinerActor") { - err = json.Unmarshal([]byte(line), &addressStruct) - assert.NoError(t, err) - break - } - } - - powerOutput := d.RunSuccess("miner", "status", addressStruct.Address) - - power := powerOutput.ReadStdoutTrimNewlines() - - assert.NoError(t, err) - assert.Equal(t, "3072 / 6144", power) -} - 
-func minerDaemonTestConfig(t *testing.T) *gengen.GenesisCfg { - - commCfgs, err := gengen.MakeCommitCfgs(3) - require.NoError(t, err) - return &gengen.GenesisCfg{ - Seed: 0, - KeysToGen: 4, - PreallocatedFunds: []string{ - "0", - "0", - "10", - "50", - }, - Miners: []*gengen.CreateStorageMinerConfig{ - { - Owner: 0, - CommittedSectors: commCfgs, - SealProofType: constants.DevSealProofType, - }, - { - Owner: 1, - CommittedSectors: commCfgs, - SealProofType: constants.DevSealProofType, - }, - }, - Network: "gfctest", - Time: 123456789, - } -} - -func TestMinerSetWorker(t *testing.T) { - t.Skip("Long term solution: #3642") - - tf.IntegrationTest(t) - ctx, env := fastesting.NewTestEnvironment(context.Background(), t, fast.FilecoinOpts{}) - defer func() { - require.NoError(t, env.Teardown(ctx)) - }() - - minerNode := env.RequireNewNodeWithFunds(1000) - newAddr := vmaddr.NewForTestGetter()() - - t.Run("fails if there is no miner worker", func(t *testing.T) { - _, err := minerNode.MinerSetWorker(ctx, newAddr, fast.AOPrice(big.NewFloat(1.0)), fast.AOLimit(300)) - require.NotNil(t, err) - - series.CtxMiningOnce(ctx) - - lastErr, err := minerNode.LastCmdStdErrStr() - require.NoError(t, err) - assert.Contains(t, lastErr, "actor not found") - }) - - t.Run("succceeds if there is a miner", func(t *testing.T) { - series.CtxMiningNext(ctx, 1) - minerAddr := requireMinerCreate(ctx, t, env, minerNode) - - msgCid, err := minerNode.MinerSetWorker(ctx, newAddr, fast.AOPrice(big.NewFloat(1.0)), fast.AOLimit(300)) - require.NoError(t, err) - - series.CtxMiningOnce(ctx) - - resp, err := minerNode.MessageWait(ctx, msgCid) - require.NoError(t, err) - require.Equal(t, 0, int(resp.Receipt.ExitCode)) - - res2, err := minerNode.MinerStatus(ctx, minerAddr) - require.NoError(t, err) - - assert.Equal(t, newAddr, res2.WorkerAddress) - }) -} diff --git a/cmd/go-filecoin/miner_integration_test.go b/cmd/go-filecoin/miner_integration_test.go deleted file mode 100644 index 61c64e93c5..0000000000 --- 
a/cmd/go-filecoin/miner_integration_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestMinerCreateIntegration(t *testing.T) { - tf.IntegrationTest(t) - - ctx, cancel1 := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel1() - - nodes, cancel2 := test.MustCreateNodesWithBootstrap(ctx, t, 1) - defer cancel2() - - newMiner := nodes[1] - - env := commands.CreateServerEnv(ctx, newMiner) - porcelainAPI := commands.GetPorcelainAPI(env) - - defaultAddr := newMiner.Repo.Config().Wallet.DefaultAddress - peer := newMiner.Network().Network.GetPeerID() - - minerAddr, err := porcelainAPI.MinerCreate(ctx, defaultAddr, types.NewAttoFILFromFIL(1), 10000, abi.RegisteredProof_StackedDRG2KiBSeal, peer, types.NewAttoFILFromFIL(1)) - require.NoError(t, err) - - // inspect results on chain - tsk := newMiner.Chain().ChainReader.GetHead() - view, err := newMiner.Chain().ActorState.StateView(tsk) - require.NoError(t, err) - owner, _, err := view.MinerControlAddresses(ctx, minerAddr) - require.NoError(t, err) - - resolvedDefaultAddress, err := view.InitResolveAddress(ctx, defaultAddr) - require.NoError(t, err) - - assert.Equal(t, resolvedDefaultAddress, owner) -} - -func TestSetPrice(t *testing.T) { - tf.IntegrationTest(t) - - ctx, cancel1 := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel1() - - nodes, cancel2 := test.MustCreateNodesWithBootstrap(ctx, t, 0) - defer cancel2() - - env := commands.CreateServerEnv(ctx, nodes[0]) - - err := 
commands.GetStorageAPI(env).AddAsk(abi.NewTokenAmount(1000), abi.ChainEpoch(400)) - require.NoError(t, err) - - minerAddr, err := commands.GetBlockAPI(env).MinerAddress() - require.NoError(t, err) - - asks, err := commands.GetStorageAPI(env).ListAsks(minerAddr) - require.NoError(t, err) - require.Len(t, asks, 1) - assert.Equal(t, abi.NewTokenAmount(1000), asks[0].Ask.Price) - assert.Equal(t, abi.ChainEpoch(400), asks[0].Ask.Expiry) -} diff --git a/cmd/go-filecoin/mining.go b/cmd/go-filecoin/mining.go deleted file mode 100644 index 07889549fd..0000000000 --- a/cmd/go-filecoin/mining.go +++ /dev/null @@ -1,128 +0,0 @@ -package commands - -import ( - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" -) - -var miningCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Manage all mining operations for a node", - }, - Subcommands: map[string]*cmds.Command{ - "address": miningAddrCmd, - "once": miningOnceCmd, - "start": miningStartCmd, - "status": miningStatusCmd, - "stop": miningStopCmd, - "setup": miningSetupCmd, - "pledge-sector": miningPledgeSectorCmd, - }, -} - -var miningAddrCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Retrieve address of miner actor associated with this node", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - minerAddress, err := GetBlockAPI(env).MinerAddress() - if err != nil { - return err - } - return re.Emit(minerAddress.String()) - }, - Type: address.Address{}, -} - -var miningOnceCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Mine a single block", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - blk, err := GetBlockAPI(env).MiningOnce(req.Context) - if err != nil { - return err - } - return re.Emit(blk.Cid()) - }, - Type: cid.Cid{}, -} - -var miningSetupCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: 
"Prepare node to receive storage deals without starting the mining scheduler", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - if err := GetBlockAPI(env).MiningSetup(req.Context); err != nil { - return err - } - return re.Emit("mining ready") - }, - Type: "", -} - -var miningStartCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Start mining blocks and other mining related operations", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - if err := GetBlockAPI(env).MiningStart(req.Context); err != nil { - return err - } - return re.Emit("Started mining") - }, - Type: "", -} - -// MiningStatusResult is the type returned when get mining status. -type MiningStatusResult struct { - Miner address.Address `json:"minerAddress"` - Active bool `json:"active"` -} - -var miningStatusCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Report on mining status", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - isMining := GetBlockAPI(env).MiningIsActive() - - // Get the Miner Address - minerAddress, err := GetBlockAPI(env).MinerAddress() - if err != nil { - return err - } - - return re.Emit(&MiningStatusResult{ - Miner: minerAddress, - Active: isMining, - }) - }, - Type: &MiningStatusResult{}, -} - -var miningStopCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Stop block mining", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - GetBlockAPI(env).MiningStop(req.Context) - return re.Emit("Stopped mining") - }, -} - -var miningPledgeSectorCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Pledge an empty sector immediately", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - err := GetStorageAPI(env).PledgeSector(req.Context) - if err != nil { - return err - } - return re.Emit("Sector pledged") - }, - Type: "", -} diff --git 
a/cmd/go-filecoin/mining_integration_test.go b/cmd/go-filecoin/mining_integration_test.go deleted file mode 100644 index 491b47b6ac..0000000000 --- a/cmd/go-filecoin/mining_integration_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package commands_test - -import ( - "context" - "math/big" - "testing" - "time" - - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - specsbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/fastesting" - "github.com/filecoin-project/go-filecoin/tools/fast/series" -) - -func TestMiningGenBlock(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("Unskip with fake proofs") - ctx := context.Background() - builder := test.NewNodeBuilder(t) - buildWithMiner(t, builder) - - n := builder.BuildAndStart(ctx) - defer n.Stop(ctx) - - addr := fortest.TestAddresses[0] - - attoFILBefore, err := n.PorcelainAPI.WalletBalance(ctx, addr) - require.NoError(t, err) - - _, err = n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - attoFILAfter, err := n.PorcelainAPI.WalletBalance(ctx, addr) - require.NoError(t, err) - - assert.Equal(t, specsbig.Add(attoFILBefore, types.NewAttoTokenFromToken(1000)), attoFILAfter) -} - -func TestMiningAddPieceAndSealNow(t *testing.T) { - t.Skip("Long term solution: #3642") - tf.FunctionalTest(t) - - ctx, env := fastesting.NewTestEnvironment(context.Background(), t, fast.FilecoinOpts{ - InitOpts: []fast.ProcessInitOption{fast.POAutoSealIntervalSeconds(1)}, - DaemonOpts: []fast.ProcessDaemonOption{fast.POBlockTime(50 * 
time.Millisecond)}, - }) - defer func() { - require.NoError(t, env.Teardown(ctx)) - }() - - genesisNode := env.GenesisMiner - - minerNode := env.RequireNewNodeWithFunds(1000) - - // Connect the clientNode and the minerNode - require.NoError(t, series.Connect(ctx, genesisNode, minerNode)) - - pparams, err := minerNode.Protocol(ctx) - require.NoError(t, err) - - sinfo := pparams.SupportedSectors[0] - - // start mining so we get to a block height that - require.NoError(t, genesisNode.MiningStart(ctx)) - defer func() { - require.NoError(t, genesisNode.MiningStop(ctx)) - }() - - _, err = series.CreateStorageMinerWithAsk(ctx, minerNode, big.NewInt(500), big.NewFloat(0.0001), big.NewInt(3000), sinfo.Size) - require.NoError(t, err) - - // get address of miner so we can check power - miningAddress, err := minerNode.MiningAddress(ctx) - require.NoError(t, err) - - // start mining for miner node to seal and schedule PoSting - require.NoError(t, minerNode.MiningStart(ctx)) - defer func() { - require.NoError(t, minerNode.MiningStop(ctx)) - }() - - // add a piece - //_, err = minerNode.AddPiece(ctx, files.NewBytesFile([]byte("HODL"))) - //require.NoError(t, err) - - // start sealing - err = minerNode.SealNow(ctx) - require.NoError(t, err) - - // We know the miner has sealed and committed a sector if their power increases on chain. - // Wait up to 300 seconds for that to happen. 
- for i := 0; i < 300; i++ { - power, err := minerNode.MinerStatus(ctx, miningAddress) - require.NoError(t, err) - - if power.QualityAdjustedPower.GreaterThan(fbig.Zero()) { - // miner has gained power, so seal was successful - return - } - time.Sleep(time.Second) - } - assert.Fail(t, "timed out waiting for miner to gain power from sealing") -} diff --git a/cmd/go-filecoin/mpool.go b/cmd/go-filecoin/mpool.go deleted file mode 100644 index 4d539b77dc..0000000000 --- a/cmd/go-filecoin/mpool.go +++ /dev/null @@ -1,84 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -var mpoolCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Manage the message pool", - }, - Subcommands: map[string]*cmds.Command{ - "ls": mpoolLsCmd, - "show": mpoolShowCmd, - "rm": mpoolRemoveCmd, - }, -} - -var mpoolLsCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "View the pool of outstanding messages", - }, - Options: []cmdkit.Option{ - cmdkit.UintOption("wait-for-count", "Block until this number of messages are in the pool").WithDefault(0), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - messageCount, _ := req.Options["wait-for-count"].(uint) - - pending, err := GetPorcelainAPI(env).MessagePoolWait(req.Context, messageCount) - if err != nil { - return err - } - - return re.Emit(pending) - }, - Type: []*types.SignedMessage{}, -} - -var mpoolShowCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show content of an outstanding message", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "The CID of the message to show"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - msgCid, err := cid.Parse(req.Arguments[0]) - if err != nil { - return errors.Wrap(err, 
"invalid message cid") - } - - msg, ok := GetPorcelainAPI(env).MessagePoolGet(msgCid) - if !ok { - return fmt.Errorf("message %s not found in pool (already mined?)", msgCid) - } - return re.Emit(msg) - }, - Type: &types.SignedMessage{}, -} - -var mpoolRemoveCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Delete a message from the message pool", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "The CID of the message to delete"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - msgCid, err := cid.Parse(req.Arguments[0]) - if err != nil { - return errors.Wrap(err, "invalid message cid") - } - - GetPorcelainAPI(env).MessagePoolRemove(msgCid) - - return nil - }, -} diff --git a/cmd/go-filecoin/mpool_integration_test.go b/cmd/go-filecoin/mpool_integration_test.go deleted file mode 100644 index 0f6e3befb1..0000000000 --- a/cmd/go-filecoin/mpool_integration_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package commands_test - -import ( - "context" - "sync" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestMpoolLs(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("hangs") - - sendMessage := func(ctx context.Context, cmdClient *test.Client, from address.Address, to address.Address) *th.CmdOutput { - return cmdClient.RunSuccess(ctx, "message", "send", - "--from", from.String(), - "--gas-price", "1", "--gas-limit", "300", - "--value=10", to.String(), - ) - } - cs := node.FixtureChainSeed(t) - - t.Run("return 
all messages", func(t *testing.T) { - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithInitOpt(cs.KeyInitOpt(0)) - builder.WithGenesisInit(cs.GenesisInitFunc) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - sendMessage(ctx, cmdClient, fortest.TestAddresses[0], fortest.TestAddresses[2]) - sendMessage(ctx, cmdClient, fortest.TestAddresses[0], fortest.TestAddresses[2]) - - cids := cmdClient.RunSuccessLines(ctx, "mpool", "ls") - - assert.Equal(t, 2, len(cids)) - - for _, c := range cids { - ci, err := cid.Decode(c) - assert.NoError(t, err) - assert.True(t, ci.Defined()) - } - - // Should return immediately with --wait-for-count equal to message count - cids = cmdClient.RunSuccessLines(ctx, "mpool", "ls", "--wait-for-count=2") - assert.Equal(t, 2, len(cids)) - }) - - t.Run("wait for enough messages", func(t *testing.T) { - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithInitOpt(cs.KeyInitOpt(0)) - builder.WithGenesisInit(cs.GenesisInitFunc) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - wg := sync.WaitGroup{} - wg.Add(1) - - complete := false - go func() { - c := cmdClient.RunSuccessLines(ctx, "mpool", "ls", "--wait-for-count=3") - complete = true - assert.Equal(t, 3, len(c)) - wg.Done() - }() - - sendMessage(ctx, cmdClient, fortest.TestAddresses[0], fortest.TestAddresses[1]) - assert.False(t, complete) - sendMessage(ctx, cmdClient, fortest.TestAddresses[0], fortest.TestAddresses[1]) - assert.False(t, complete) - sendMessage(ctx, cmdClient, fortest.TestAddresses[0], fortest.TestAddresses[1]) - - wg.Wait() - - assert.True(t, complete) - }) -} - -func TestMpoolShow(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("hangs") - cs := node.FixtureChainSeed(t) - - t.Run("shows message", func(t *testing.T) { - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithInitOpt(cs.KeyInitOpt(0)) - builder.WithGenesisInit(cs.GenesisInitFunc) - - _, 
cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - msgCid := cmdClient.RunSuccess(ctx, "message", "send", - "--from", fortest.TestAddresses[0].String(), - "--gas-price", "1", "--gas-limit", "300", - "--value=10", fortest.TestAddresses[2].String(), - ).ReadStdoutTrimNewlines() - - out := cmdClient.RunSuccess(ctx, "mpool", "show", msgCid).ReadStdoutTrimNewlines() - - assert.Contains(t, out, "From: "+fortest.TestAddresses[0].String()) - assert.Contains(t, out, "To: "+fortest.TestAddresses[2].String()) - assert.Contains(t, out, "Value: 10") - }) - - t.Run("fails missing message", func(t *testing.T) { - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithInitOpt(cs.KeyInitOpt(0)) - builder.WithGenesisInit(cs.GenesisInitFunc) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - const c = "QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw" - - out := cmdClient.RunFail(ctx, "not found", "mpool", "show", c).ReadStderr() - assert.Contains(t, out, c) - }) -} - -func TestMpoolRm(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("hangs") - - t.Run("remove a message", func(t *testing.T) { - cs := node.FixtureChainSeed(t) - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithInitOpt(cs.KeyInitOpt(0)) - builder.WithGenesisInit(cs.GenesisInitFunc) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - msgCid := cmdClient.RunSuccess(ctx, "message", "send", - "--from", fortest.TestAddresses[0].String(), - "--gas-price", "1", "--gas-limit", "300", - "--value=10", fortest.TestAddresses[2].String(), - ).ReadStdoutTrimNewlines() - - // wait for the pool to have the message - _, err := n.PorcelainAPI.MessagePoolWait(ctx, 1) - require.NoError(t, err) - - // remove message in process so the following ls cannot race on lock - // acquire - c, err := cid.Parse(msgCid) - require.NoError(t, err) - n.PorcelainAPI.MessagePoolRemove(c) - - out := cmdClient.RunSuccess(ctx, "mpool", 
"ls").ReadStdoutTrimNewlines() - - assert.Equal(t, "", out) - }) -} diff --git a/cmd/go-filecoin/outbox.go b/cmd/go-filecoin/outbox.go deleted file mode 100644 index 16aa88c224..0000000000 --- a/cmd/go-filecoin/outbox.go +++ /dev/null @@ -1,85 +0,0 @@ -package commands - -import ( - "github.com/filecoin-project/go-address" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - - "github.com/filecoin-project/go-filecoin/internal/pkg/message" -) - -var outboxCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "View and manipulate the outbound message queue", - }, - Subcommands: map[string]*cmds.Command{ - "clear": outboxClearCmd, - "ls": outboxLsCmd, - }, -} - -// OutboxLsResult is a listing of the outbox for a single address. -type OutboxLsResult struct { - Address address.Address - Messages []*message.Queued -} - -var outboxLsCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "List the queue(s) of sent but un-mined messages", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("address", false, false, "Address of the queue to list (otherwise lists all)"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - addresses, err := queueAddressesFromArg(req, env, 0) - if err != nil { - return err - } - - for _, addr := range addresses { - msgs := GetPorcelainAPI(env).OutboxQueueLs(addr) - err := re.Emit(OutboxLsResult{addr, msgs}) - if err != nil { - return err - } - } - return nil - }, - Type: OutboxLsResult{}, -} - -var outboxClearCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Clear the queue(s) of sent messages", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("address", false, false, "Address of the queue to clear (otherwise clears all)"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - addresses, err := queueAddressesFromArg(req, env, 0) - if err != nil { - return err - } - - for _, addr := range addresses { - 
GetPorcelainAPI(env).OutboxQueueClear(req.Context, addr) - } - return nil - }, -} - -// Reads an address from an argument, or lists addresses of all outbox queues if no arg is given. -func queueAddressesFromArg(req *cmds.Request, env cmds.Environment, argIndex int) ([]address.Address, error) { - var addresses []address.Address - if len(req.Arguments) > argIndex { - addr, e := address.NewFromString(req.Arguments[argIndex]) - if e != nil { - return nil, e - } - addresses = []address.Address{addr} - } else { - addresses = GetPorcelainAPI(env).OutboxQueues() - } - return addresses, nil -} diff --git a/cmd/go-filecoin/outbox_integration_test.go b/cmd/go-filecoin/outbox_integration_test.go deleted file mode 100644 index 6066079f36..0000000000 --- a/cmd/go-filecoin/outbox_integration_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestOutbox(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("not working") - - sendMessage := func(ctx context.Context, cmdClient *test.Client, from address.Address, to address.Address) *th.CmdOutput { - return cmdClient.RunSuccess(ctx, "message", "send", - "--from", from.String(), - "--gas-price", "1", "--gas-limit", "300", - "--value=10", to.String(), - ) - } - cs := node.FixtureChainSeed(t) - - t.Run("list queued messages", func(t *testing.T) { - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithInitOpt(cs.KeyInitOpt(0)) - builder.WithInitOpt(cs.KeyInitOpt(1)) - builder.WithGenesisInit(cs.GenesisInitFunc) - - 
_, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - c1 := sendMessage(ctx, cmdClient, fortest.TestAddresses[0], fortest.TestAddresses[2]).ReadStdoutTrimNewlines() - c2 := sendMessage(ctx, cmdClient, fortest.TestAddresses[0], fortest.TestAddresses[2]).ReadStdoutTrimNewlines() - c3 := sendMessage(ctx, cmdClient, fortest.TestAddresses[1], fortest.TestAddresses[2]).ReadStdoutTrimNewlines() - - out := cmdClient.RunSuccess(ctx, "outbox", "ls").ReadStdout() - assert.Contains(t, out, fortest.TestAddresses[0]) - assert.Contains(t, out, fortest.TestAddresses[1]) - assert.Contains(t, out, c1) - assert.Contains(t, out, c2) - assert.Contains(t, out, c3) - - // With address filter - out = cmdClient.RunSuccess(ctx, "outbox", "ls", fortest.TestAddresses[1].String()).ReadStdout() - assert.NotContains(t, out, fortest.TestAddresses[0]) - assert.Contains(t, out, fortest.TestAddresses[1]) - assert.NotContains(t, out, c1) - assert.NotContains(t, out, c2) - assert.Contains(t, out, c3) - }) - - t.Run("clear queue", func(t *testing.T) { - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithInitOpt(cs.KeyInitOpt(0)) - builder.WithInitOpt(cs.KeyInitOpt(1)) - builder.WithGenesisInit(cs.GenesisInitFunc) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - c1 := sendMessage(ctx, cmdClient, fortest.TestAddresses[0], fortest.TestAddresses[2]).ReadStdoutTrimNewlines() - c2 := sendMessage(ctx, cmdClient, fortest.TestAddresses[0], fortest.TestAddresses[2]).ReadStdoutTrimNewlines() - c3 := sendMessage(ctx, cmdClient, fortest.TestAddresses[1], fortest.TestAddresses[2]).ReadStdoutTrimNewlines() - - // With address filter - cmdClient.RunSuccess(ctx, "outbox", "clear", fortest.TestAddresses[1].String()) - out := cmdClient.RunSuccess(ctx, "outbox", "ls").ReadStdout() - assert.Contains(t, out, fortest.TestAddresses[0]) - assert.NotContains(t, out, fortest.TestAddresses[1]) // Cleared - assert.Contains(t, out, c1) - assert.Contains(t, out, 
c2) - assert.NotContains(t, out, c3) // cleared - - // Repopulate - sendMessage(ctx, cmdClient, fortest.TestAddresses[1], fortest.TestAddresses[2]).ReadStdoutTrimNewlines() - - // #nofilter - cmdClient.RunSuccess(ctx, "outbox", "clear") - out = cmdClient.RunSuccess(ctx, "outbox", "ls").ReadStdoutTrimNewlines() - assert.Empty(t, out) - - // Clearing empty queue - cmdClient.RunSuccess(ctx, "outbox", "clear") - out = cmdClient.RunSuccess(ctx, "outbox", "ls").ReadStdoutTrimNewlines() - assert.Empty(t, out) - }) -} diff --git a/cmd/go-filecoin/ping.go b/cmd/go-filecoin/ping.go deleted file mode 100644 index 82bb5a2ac1..0000000000 --- a/cmd/go-filecoin/ping.go +++ /dev/null @@ -1,74 +0,0 @@ -package commands - -import ( - "fmt" - "time" - - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - peer "github.com/libp2p/go-libp2p-core/peer" - - //"github.com/libp2p/go-libp2p/p2p/protocol/ping" - "github.com/pkg/errors" -) - -// PingResult is a wrapper for the pongs returned from the ping channel. It -// contains 2 fields: Count and Time. -// -// * Count is a uint representing the index of pongs returned so far. -// * Time is the amount of time elapsed from ping to pong in seconds. -// -type PingResult struct { - Count uint - RTT time.Duration -} - -var pingCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Send echo request packets to p2p network members", - ShortDescription: ` -'ping' is a tool to test sending data to other nodes. It finds nodes -via the routing system, sends pings, waits for pongs, and prints out round- -trip latency information. 
- `, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("peer ID", true, false, "ID of peer to be pinged").EnableStdin(), - }, - Options: []cmdkit.Option{ - cmdkit.UintOption("count", "c", "Number of ping messages to send").WithDefault(0), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - peerID, err := peer.Decode(req.Arguments[0]) - if err != nil { - return fmt.Errorf("failed to parse peer address '%s': %s", req.Arguments[0], err) - } - - numPings, _ := req.Options["count"].(uint) - - pingCh, err := GetPorcelainAPI(env).NetworkPing(req.Context, peerID) - if err != nil { - return err - } - - for i := uint(0); numPings == 0 || i < numPings; i++ { - pong, pingChOpen := <-pingCh - if pong.Error != nil { - return pong.Error - } - result := &PingResult{ - Count: i, - RTT: pong.RTT, - } - if err := re.Emit(result); err != nil { - return err - } - if !pingChOpen { - return errors.New("Ping channel closed by sender") - } - } - - return nil - }, - Type: PingResult{}, -} diff --git a/cmd/go-filecoin/ping_integration_test.go b/cmd/go-filecoin/ping_integration_test.go deleted file mode 100644 index 2410efedc4..0000000000 --- a/cmd/go-filecoin/ping_integration_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestPing2Nodes(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - - n1 := builder.BuildAndStart(ctx) - n2 := builder.BuildAndStart(ctx) - defer n1.Stop(ctx) - defer n2.Stop(ctx) - - // The node are not initially connected, so ping should fail. 
- res0, err := n1.PorcelainAPI.NetworkPing(ctx, n2.Network().Host.ID()) - assert.NoError(t, err) - assert.Error(t, (<-res0).Error) // No peers in table - - // Connect nodes and check each can ping the other. - node.ConnectNodes(t, n1, n2) - - res1, err := n1.PorcelainAPI.NetworkPing(ctx, n2.Network().Host.ID()) - assert.NoError(t, err) - assert.NoError(t, (<-res1).Error) - - res2, err := n2.PorcelainAPI.NetworkPing(ctx, n1.Network().Host.ID()) - assert.NoError(t, err) - assert.NoError(t, (<-res2).Error) -} diff --git a/cmd/go-filecoin/protocol.go b/cmd/go-filecoin/protocol.go deleted file mode 100644 index 6f95f8759c..0000000000 --- a/cmd/go-filecoin/protocol.go +++ /dev/null @@ -1,22 +0,0 @@ -package commands - -import ( - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" -) - -var protocolCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show protocol parameter details", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - params, err := GetPorcelainAPI(env).ProtocolParameters(env.Context()) - if err != nil { - return err - } - return re.Emit(params) - }, - Type: porcelain.ProtocolParams{}, -} diff --git a/cmd/go-filecoin/protocol_cmd_test.go b/cmd/go-filecoin/protocol_cmd_test.go deleted file mode 100644 index cf66bd7bf3..0000000000 --- a/cmd/go-filecoin/protocol_cmd_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestProtocol(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - // Create node (it's not necessary to start 
it). - b := test.NewNodeBuilder(t) - node := b. - WithConfig(func(c *config.Config) { - c.Mining.AutoSealIntervalSeconds = 120 - }). - Build(ctx) - require.NoError(t, node.Chain().ChainReader.Load(ctx)) - - // Run the command API. - cmd, stop := test.RunNodeAPI(ctx, node, t) - defer stop() - - out := cmd.RunSuccess(ctx, "protocol").ReadStdout() - assert.Contains(t, out, "\"Network\": \"gfctest\"") - assert.Contains(t, out, "\"AutoSealInterval\": 120") -} diff --git a/cmd/go-filecoin/retrieval_client.go b/cmd/go-filecoin/retrieval_client.go deleted file mode 100644 index 5790919fb4..0000000000 --- a/cmd/go-filecoin/retrieval_client.go +++ /dev/null @@ -1,50 +0,0 @@ -package commands - -import ( - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" -) - -var retrievalClientCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Manage retrieval client operations", - }, - Subcommands: map[string]*cmds.Command{ - "retrieve-piece": clientRetrievePieceCmd, - }, -} - -var clientRetrievePieceCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Read out piece data stored by a miner on the network", - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("miner", true, false, "Retrieval miner actor address"), - cmdkit.StringArg("cid", true, false, "Content identifier of piece to read"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - panic("TODO: go-fil-markets integration") - - //minerAddr, err := address.NewFromString(req.Arguments[0]) - //if err != nil { - // return err - //} - // - //pieceCID, err := cid.Decode(req.Arguments[1]) - //if err != nil { - // return err - //} - // - //mpid, err := GetPorcelainAPI(env).MinerGetPeerID(req.Context, minerAddr) - //if err != nil { - // return err - //} - // - //readCloser, err := GetRetrievalAPI(env).RetrievePiece(req.Context, pieceCID, mpid, minerAddr) - //if err != nil { - // return err - //} - // - //return re.Emit(readCloser) - }, -} diff 
--git a/cmd/go-filecoin/retrieval_client_daemon_test.go b/cmd/go-filecoin/retrieval_client_daemon_test.go deleted file mode 100644 index 7158410c9e..0000000000 --- a/cmd/go-filecoin/retrieval_client_daemon_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package commands_test - -import ( - "context" - "math/big" - "testing" - - "github.com/filecoin-project/go-address" - files "github.com/ipfs/go-ipfs-files" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/fastesting" - "github.com/filecoin-project/go-filecoin/tools/fast/series" -) - -func TestSelfDialRetrievalGoodError(t *testing.T) { - t.Skip("Long term solution: #3642") - tf.IntegrationTest(t) - - ctx, env := fastesting.NewTestEnvironment(context.Background(), t, fast.FilecoinOpts{}) - // Teardown after test ends. - defer func() { - err := env.Teardown(ctx) - require.NoError(t, err) - }() - - // Update genesis miner's peerid - var minerAddr address.Address - err := env.GenesisMiner.ConfigGet(ctx, "mining.minerAddress", &minerAddr) - require.NoError(t, err) - details, err := env.GenesisMiner.ID(ctx) - require.NoError(t, err) - msgCid, err := env.GenesisMiner.MinerUpdatePeerid(ctx, minerAddr, details.ID, fast.AOPrice(big.NewFloat(1.0)), fast.AOLimit(300)) - require.NoError(t, err) - - series.CtxMiningOnce(ctx) - _, err = env.GenesisMiner.MessageWait(ctx, msgCid) - require.NoError(t, err) - - // Add data to Genesis Miner. - f := files.NewBytesFile([]byte("satyamevajayate")) - cid, err := env.GenesisMiner.ClientImport(ctx, f) - require.NoError(t, err) - - // Genesis Miner fails on self dial when retrieving from itself. 
- _, err = env.GenesisMiner.RetrievalClientRetrievePiece(ctx, cid, minerAddr) - assert.Error(t, err) - fastesting.AssertStdErrContains(t, env.GenesisMiner, "attempting to retrieve piece from self") -} diff --git a/cmd/go-filecoin/show.go b/cmd/go-filecoin/show.go deleted file mode 100644 index ede00177a0..0000000000 --- a/cmd/go-filecoin/show.go +++ /dev/null @@ -1,135 +0,0 @@ -package commands - -import ( - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - - "github.com/ipfs/go-cid" - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" -) - -var showCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Get human-readable representations of filecoin objects", - }, - Subcommands: map[string]*cmds.Command{ - "block": showBlockCmd, - "header": showHeaderCmd, - "messages": showMessagesCmd, - "receipts": showReceiptsCmd, - }, -} - -var showBlockCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show a full filecoin block by its header CID", - ShortDescription: `Prints the miner, parent weight, height, -and nonce of a given block. 
If JSON encoding is specified with the --enc flag, -all other block properties will be included as well.`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "CID of block to show"), - }, - Options: []cmdkit.Option{ - cmdkit.BoolOption("messages", "m", "show messages in block"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - cid, err := cid.Decode(req.Arguments[0]) - if err != nil { - return err - } - - block, err := GetPorcelainAPI(env).ChainGetFullBlock(req.Context, cid) - if err != nil { - return err - } - - return re.Emit(block) - }, - Type: block.FullBlock{}, -} - -var showHeaderCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show a filecoin block header by its CID", - ShortDescription: `Prints the miner, parent weight, height, -and nonce of a given block. If JSON encoding is specified with the --enc flag, -all other block properties will be included as well.`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "CID of block to show"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - cid, err := cid.Decode(req.Arguments[0]) - if err != nil { - return err - } - - block, err := GetPorcelainAPI(env).ChainGetBlock(req.Context, cid) - if err != nil { - return err - } - - return re.Emit(block) - }, - Type: block.Block{}, -} - -type allMessages struct { - BLS []*types.UnsignedMessage - SECP []*types.SignedMessage -} - -var showMessagesCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show a filecoin message collection by txmeta CID", - ShortDescription: `Prints info for all messages in a collection, -at the given CID. 
This CID is found in the "Messages" field of -the filecoin block header.`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "CID of message collection to show"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - cid, err := cid.Decode(req.Arguments[0]) - if err != nil { - return err - } - - bls, secp, err := GetPorcelainAPI(env).ChainGetMessages(req.Context, cid) - if err != nil { - return err - } - - return re.Emit(&allMessages{BLS: bls, SECP: secp}) - }, - Type: &allMessages{}, -} - -var showReceiptsCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show a filecoin receipt collection by its CID", - ShortDescription: `Prints info for all receipts in a collection, -at the given CID. Receipt collection CIDs are found in the "MessageReceipts" -field of the filecoin block header.`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("cid", true, false, "CID of receipt collection to show"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - cid, err := cid.Decode(req.Arguments[0]) - if err != nil { - return err - } - - receipts, err := GetPorcelainAPI(env).ChainGetReceipts(req.Context, cid) - if err != nil { - return err - } - - return re.Emit(receipts) - }, - Type: []vm.MessageReceipt{}, -} diff --git a/cmd/go-filecoin/show_test.go b/cmd/go-filecoin/show_test.go deleted file mode 100644 index 06bcfd7db2..0000000000 --- a/cmd/go-filecoin/show_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package commands_test - -import ( - "context" - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestBlockDaemon(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("Unskip with fake proofs") - - t.Run("show block returns human readable output for the filecoin block", func(t *testing.T) { - ctx := context.Background() - builder := test.NewNodeBuilder(t) - buildWithMiner(t, builder) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - // mine a block and get its CID - minedBlock, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - minedBlockCid := minedBlock.Cid() - - // get the mined block by its CID - output := cmdClient.RunSuccess(ctx, "show", "block", minedBlockCid.String()).ReadStdoutTrimNewlines() - - assert.Contains(t, output, "Block Details") - assert.Contains(t, output, "Weight: 0") - assert.Contains(t, output, "Height: 1") - assert.Contains(t, output, "Timestamp: ") - }) - - t.Run("show block --messages returns human readable output for the filecoin block including messages", func(t *testing.T) { - ctx := context.Background() - builder := test.NewNodeBuilder(t) - buildWithMiner(t, builder) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - // mine a block and get its CID - minedBlock, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - // get the mined block by its CID - output := cmdClient.RunSuccess(ctx, "show", "block", "--messages", minedBlock.Cid().String()).ReadStdoutTrimNewlines() - - assert.Contains(t, output, "Block Details") - assert.Contains(t, output, "Weight: 0") - assert.Contains(t, output, "Height: 1") - assert.Contains(t, output, "Timestamp: ") - assert.Contains(t, output, "Messages: ") - }) - - t.Run("show block --enc json returns JSON for a filecoin block", func(t *testing.T) { - ctx := 
context.Background() - builder := test.NewNodeBuilder(t) - buildWithMiner(t, builder) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - // mine a block and get its CID - minedBlock, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - // get the mined block by its CID - blockGetLine := cmdClient.RunSuccessFirstLine(ctx, "show", "block", minedBlock.Cid().String(), "--enc", "json") - var blockGetBlock block.FullBlock - require.NoError(t, json.Unmarshal([]byte(blockGetLine), &blockGetBlock)) - - // ensure that we were returned the correct block - - require.Equal(t, minedBlock.Cid().String(), blockGetBlock.Header.Cid().String()) - }) - - t.Run("show header --enc json returns JSON for a filecoin block header", func(t *testing.T) { - ctx := context.Background() - builder := test.NewNodeBuilder(t) - buildWithMiner(t, builder) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - // mine a block and get its CID - minedBlock, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - // get the mined block by its CID - headerGetLine := cmdClient.RunSuccessFirstLine(ctx, "show", "header", minedBlock.Cid().String(), "--enc", "json") - - var headerGetBlock block.Block - require.NoError(t, json.Unmarshal([]byte(headerGetLine), &headerGetBlock)) - - // ensure that we were returned the correct block - - require.Equal(t, minedBlock.Cid().String(), headerGetBlock.Cid().String()) - }) - - t.Run("show messages returns empty message collection", func(t *testing.T) { - ctx := context.Background() - builder := test.NewNodeBuilder(t) - buildWithMiner(t, builder) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - _, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - emptyMessagesLine := cmdClient.RunSuccessFirstLine(ctx, "show", "messages", types.EmptyMessagesCID.String(), "--enc", "json") - - var messageCollection []*types.SignedMessage 
- require.NoError(t, json.Unmarshal([]byte(emptyMessagesLine), &messageCollection)) - - assert.Equal(t, 0, len(messageCollection)) - }) - - t.Run("show receipts returns empty receipt collection", func(t *testing.T) { - ctx := context.Background() - builder := test.NewNodeBuilder(t) - buildWithMiner(t, builder) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - _, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - emptyReceiptsLine := cmdClient.RunSuccessFirstLine(ctx, "show", "receipts", types.EmptyReceiptsCID.String(), "--enc", "json") - - var receipts []vm.MessageReceipt - require.NoError(t, json.Unmarshal([]byte(emptyReceiptsLine), &receipts)) - - assert.Equal(t, 0, len(receipts)) - }) - - t.Run("show messages", func(t *testing.T) { - cs := node.FixtureChainSeed(t) - defaultAddr := fortest.TestAddresses[0] - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithGenesisInit(cs.GenesisInitFunc) - builder.WithConfig(cs.MinerConfigOpt(0)) - builder.WithConfig(node.DefaultAddressConfigOpt(defaultAddr)) - builder.WithInitOpt(cs.KeyInitOpt(1)) - builder.WithInitOpt(cs.KeyInitOpt(0)) - - n, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - _, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - from, err := n.PorcelainAPI.WalletDefaultAddress() // this should = fixtures.TestAddresses[0] - require.NoError(t, err) - cmdClient.RunSuccess(ctx, "message", "send", - "--from", from.String(), - "--gas-price", "1", - "--gas-limit", "300", - fortest.TestAddresses[3].String(), - ) - - cmdClient.RunSuccess(ctx, "message", "send", - "--from", from.String(), - "--gas-price", "1", - "--gas-limit", "300", - "--value", "10", - fortest.TestAddresses[3].String(), - ) - - cmdClient.RunSuccess(ctx, "message", "send", - "--from", from.String(), - "--gas-price", "1", - "--gas-limit", "300", - "--value", "5.5", - fortest.TestAddresses[3].String(), - ) - - blk, err := 
n.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - - // Full block checks out - blockGetLine := cmdClient.RunSuccessFirstLine(ctx, "show", "block", blk.Cid().String(), "--enc", "json") - var blockGetBlock block.FullBlock - require.NoError(t, json.Unmarshal([]byte(blockGetLine), &blockGetBlock)) - - assert.Equal(t, 3, len(blockGetBlock.SECPMessages)) - - assert.Equal(t, from, blockGetBlock.SECPMessages[0].Message.From) - - // Full block matches show messages - messagesGetLine := cmdClient.RunSuccessFirstLine(ctx, "show", "messages", blockGetBlock.Header.Messages.String(), "--enc", "json") - var messages []*types.SignedMessage - require.NoError(t, json.Unmarshal([]byte(messagesGetLine), &messages)) - assert.Equal(t, blockGetBlock.SECPMessages, messages) - }) -} diff --git a/cmd/go-filecoin/stats.go b/cmd/go-filecoin/stats.go deleted file mode 100644 index 99dd71b8a2..0000000000 --- a/cmd/go-filecoin/stats.go +++ /dev/null @@ -1,28 +0,0 @@ -package commands - -import ( - "github.com/ipfs/go-ipfs-cmdkit" - "github.com/ipfs/go-ipfs-cmds" - "github.com/libp2p/go-libp2p-core/metrics" -) - -var statsCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "View various filecoin node statistics", - }, - Subcommands: map[string]*cmds.Command{ - "bandwidth": statsBandwidthCmd, - }, -} - -var statsBandwidthCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "View bandwidth usage metrics", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - bandwidthStats := GetPorcelainAPI(env).NetworkGetBandwidthStats() - - return re.Emit(bandwidthStats) - }, - Type: metrics.Stats{}, -} diff --git a/cmd/go-filecoin/stats_integration_test.go b/cmd/go-filecoin/stats_integration_test.go deleted file mode 100644 index 66e43b89e1..0000000000 --- a/cmd/go-filecoin/stats_integration_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestStatsBandwidth(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - builder := test.NewNodeBuilder(t) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - stats := cmdClient.RunSuccess(ctx, "stats", "bandwidth").ReadStdoutTrimNewlines() - - assert.Equal(t, "{\n\t\"TotalIn\": 0,\n\t\"TotalOut\": 0,\n\t\"RateIn\": 0,\n\t\"RateOut\": 0\n}", stats) -} diff --git a/cmd/go-filecoin/swarm.go b/cmd/go-filecoin/swarm.go deleted file mode 100644 index 7ad3ffef88..0000000000 --- a/cmd/go-filecoin/swarm.go +++ /dev/null @@ -1,86 +0,0 @@ -package commands - -import ( - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-filecoin/internal/pkg/net" -) - -// swarmCmd contains swarm commands. -var swarmCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Interact with the swarm", - ShortDescription: ` -'go-filecoin swarm' is a tool to manipulate the libp2p swarm. The swarm is the -component that opens, listens for, and maintains connections to other -libp2p peers on the internet. -`, - }, - Subcommands: map[string]*cmds.Command{ - "connect": swarmConnectCmd, - "peers": swarmPeersCmd, - }, -} - -var swarmPeersCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "List peers with open connections.", - ShortDescription: ` -'go-filecoin swarm peers' lists the set of peers this node is connected to. 
-`, - }, - Options: []cmdkit.Option{ - cmdkit.BoolOption("verbose", "v", "Display all extra information"), - cmdkit.BoolOption("streams", "Also list information about open streams for each peer"), - cmdkit.BoolOption("latency", "Also list information about latency to each peer"), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - verbose, _ := req.Options["verbose"].(bool) - latency, _ := req.Options["latency"].(bool) - streams, _ := req.Options["streams"].(bool) - - out, err := GetPorcelainAPI(env).NetworkPeers(req.Context, verbose, latency, streams) - if err != nil { - return err - } - - return re.Emit(&out) - }, - Type: net.SwarmConnInfos{}, -} - -var swarmConnectCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Open connection to a given address.", - ShortDescription: ` -'go-filecoin swarm connect' opens a new direct connection to a peer address. - -The address format is a multiaddr: - -go-filecoin swarm connect /ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ -`, - }, - Arguments: []cmdkit.Argument{ - cmdkit.StringArg("address", true, true, "Address of peer to connect to.").EnableStdin(), - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - results, err := GetPorcelainAPI(env).NetworkConnect(req.Context, req.Arguments) - if err != nil { - return err - } - - for result := range results { - if result.Err != nil { - return result.Err - } - if err := re.Emit(result.PeerID); err != nil { - return err - } - } - - return nil - }, - Type: peer.ID(""), -} diff --git a/cmd/go-filecoin/swarm_integration_test.go b/cmd/go-filecoin/swarm_integration_test.go deleted file mode 100644 index 3c6a43da38..0000000000 --- a/cmd/go-filecoin/swarm_integration_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package commands_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestSwarmConnectPeersValid(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - - n1 := builder.BuildAndStart(ctx) - defer n1.Stop(ctx) - n2 := builder.BuildAndStart(ctx) - defer n2.Stop(ctx) - - node.ConnectNodes(t, n1, n2) -} - -func TestSwarmConnectPeersInvalid(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - - _, cmdClient, done := builder.BuildAndStartAPI(ctx) - defer done() - - cmdClient.RunFail(ctx, "failed to parse ip4 addr", - "swarm", "connect", "/ip4/hello", - ) -} diff --git a/cmd/go-filecoin/utils.go b/cmd/go-filecoin/utils.go deleted file mode 100644 index 08d327549c..0000000000 --- a/cmd/go-filecoin/utils.go +++ /dev/null @@ -1,137 +0,0 @@ -package commands - -import ( - "fmt" - "io" - "strconv" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/pkg/errors" -) - -// SilentWriter writes to a stream, stopping after the first error and discarding output until -// the error is cleared. -// No printing methods return an error (to avoid warnings about ignoring it), but they do return -// a boolean indicating whether an error is waiting to be cleared. -// Example usage: -// sw := NewSilentWriter(w) -// sw.Println("A line") -// sw.Println("Another line") -// return sw.Error() -type SilentWriter struct { - w io.Writer - err error -} - -// NewSilentWriter returns a new writer backed by `w`. -func NewSilentWriter(w io.Writer) *SilentWriter { - return &SilentWriter{w: w} -} - -// Error returns any error encountered while writing. 
-func (sw *SilentWriter) Error() error { - return sw.err -} - -// ClearError clears and returns any error encountered while writing. -// Subsequent writes will attempt to write to the underlying writer again. -func (sw *SilentWriter) ClearError() error { - err := sw.err - sw.err = nil - return err -} - -// Write writes with io.Writer.Write and returns true if there was no error. -func (sw *SilentWriter) Write(p []byte) bool { - if sw.err == nil { - _, sw.err = sw.w.Write(p) - } - return sw.err == nil -} - -// WriteString writes with io.WriteString and returns true if there was no error. -func (sw *SilentWriter) WriteString(str string) bool { - if sw.err == nil { - _, sw.err = io.WriteString(sw.w, str) - } - return sw.err == nil -} - -// Print writes with fmt.Fprint and returns true if there was no error. -func (sw *SilentWriter) Print(a ...interface{}) bool { - if sw.err == nil { - _, sw.err = fmt.Fprint(sw.w, a...) - } - return sw.err == nil -} - -// Println writes with fmt.Fprintln and returns true if there was no error. -func (sw *SilentWriter) Println(a ...interface{}) bool { - if sw.err == nil { - _, sw.err = fmt.Fprintln(sw.w, a...) - } - return sw.err == nil -} - -// Printf writes with fmt.Fprintf and returns true if there was no error. -func (sw *SilentWriter) Printf(format string, a ...interface{}) bool { - if sw.err == nil { - _, sw.err = fmt.Fprintf(sw.w, format, a...) - } - return sw.err == nil -} - -// PrintString prints a given Stringer to the writer. 
-func PrintString(w io.Writer, s fmt.Stringer) error { - _, err := fmt.Fprintln(w, s.String()) - return err -} - -func optionalAddr(o interface{}) (ret address.Address, err error) { - if o != nil { - ret, err = address.NewFromString(o.(string)) - if err != nil { - err = errors.Wrap(err, "invalid from address") - } - } - return -} - -func optionalSectorSizeWithDefault(o interface{}, def abi.SectorSize) (abi.SectorSize, error) { - if o != nil { - n, err := strconv.ParseUint(o.(string), 10, 64) - if err != nil || n == 0 { - return abi.SectorSize(0), fmt.Errorf("invalid sector size: %s", o.(string)) - } - - return abi.SectorSize(n), nil - } - - return def, nil -} - -func fromAddrOrDefault(req *cmds.Request, env cmds.Environment) (address.Address, error) { - addr, err := optionalAddr(req.Options["from"]) - if err != nil { - return address.Undef, err - } - if addr.Empty() { - return GetPorcelainAPI(env).WalletDefaultAddress() - } - return addr, nil -} - -func cidsFromSlice(args []string) ([]cid.Cid, error) { - out := make([]cid.Cid, len(args)) - for i, arg := range args { - c, err := cid.Decode(arg) - if err != nil { - return nil, err - } - out[i] = c - } - return out, nil -} diff --git a/cmd/go-filecoin/version.go b/cmd/go-filecoin/version.go deleted file mode 100644 index 981cbdf298..0000000000 --- a/cmd/go-filecoin/version.go +++ /dev/null @@ -1,25 +0,0 @@ -package commands - -import ( - cmdkit "github.com/ipfs/go-ipfs-cmdkit" - cmds "github.com/ipfs/go-ipfs-cmds" - - "github.com/filecoin-project/go-filecoin/build/flags" -) - -type versionInfo struct { - // Commit, is the git sha that was used to build this version of go-filecoin. 
- Commit string -} - -var versionCmd = &cmds.Command{ - Helptext: cmdkit.HelpText{ - Tagline: "Show go-filecoin version information", - }, - Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { - return re.Emit(&versionInfo{ - Commit: flags.GitCommit, - }) - }, - Type: versionInfo{}, -} diff --git a/cmd/go-filecoin/version_daemon_test.go b/cmd/go-filecoin/version_daemon_test.go deleted file mode 100644 index e686d56e02..0000000000 --- a/cmd/go-filecoin/version_daemon_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package commands_test - -import ( - "fmt" - "io/ioutil" - "net/http" - "os/exec" - "strings" - "testing" - - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - manet "github.com/multiformats/go-multiaddr-net" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestVersion(t *testing.T) { - tf.IntegrationTest(t) - - commit := getCodeCommit(t) - - verOut, err := exec.Command(th.MustGetFilecoinBinary(), "version").Output() - require.NoError(t, err) - - version := string(verOut) - assert.Exactly(t, fmt.Sprintf("{\n\t\"Commit\": \"%s\"\n}\n", commit), version) -} - -func TestVersionOverHttp(t *testing.T) { - tf.IntegrationTest(t) - - td := th.NewDaemon(t).Start() - defer td.ShutdownSuccess() - - maddr, err := td.CmdAddr() - require.NoError(t, err) - - _, host, err := manet.DialArgs(maddr) - require.NoError(t, err) - - url := fmt.Sprintf("http://%s/api/version", host) - req, err := http.NewRequest("POST", url, nil) - require.NoError(t, err) - res, err := http.DefaultClient.Do(req) - require.NoError(t, err) - require.Equal(t, http.StatusOK, res.StatusCode) - - commit := strings.Trim(getCodeCommit(t), "\n ") - expected := fmt.Sprintf("{\"Commit\":\"%s\"}\n", commit) - - defer res.Body.Close() // nolint: errcheck - body, err := ioutil.ReadAll(res.Body) - require.NoError(t, err) - require.Equal(t, 
expected, string(body)) -} - -func getCodeCommit(t *testing.T) string { - var gitOut []byte - var err error - gitArgs := []string{"rev-parse", "--verify", "HEAD"} - if gitOut, err = exec.Command("git", gitArgs...).Output(); err != nil { - assert.NoError(t, err) - } - return strings.TrimSpace(string(gitOut)) -} diff --git a/cmd/import.go b/cmd/import.go new file mode 100644 index 0000000000..188b61e89a --- /dev/null +++ b/cmd/import.go @@ -0,0 +1,114 @@ +package cmd + +import ( + "bufio" + "context" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/DataDog/zstd" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/mitchellh/go-homedir" + "gopkg.in/cheggaaa/pb.v1" +) + +var logImport = logging.Logger("commands/import") + +// Import cache tipset cids to store. +// The value of the cached tipset CIDS is used as the check-point when running `venus daemon` +func Import(ctx context.Context, r repo.Repo, fileName string) error { + return importChain(ctx, r, fileName) +} + +func importChain(ctx context.Context, r repo.Repo, fname string) error { + var rd io.Reader + var l int64 + if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") { + resp, err := http.Get(fname) //nolint:gosec + if err != nil { + return err + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("non-200 response: %d", resp.StatusCode) + } + + rd = resp.Body + l = resp.ContentLength + } else { + fname, err := homedir.Expand(fname) + if err != nil { + return err + } + + fi, err := os.Open(fname) + if err != nil { + return err + } + defer fi.Close() //nolint:errcheck + + st, err := os.Stat(fname) + if err != nil { + return err + } + + rd = fi + l = st.Size() + } + + bs := r.Datastore() + // setup a ipldCbor on top of the local store + chainStore := chain.NewStore(r.ChainDatastore(), bs, cid.Undef, 
chain.NewMockCirculatingSupplyCalculator()) + + bufr := bufio.NewReaderSize(rd, 1<<20) + + header, err := bufr.Peek(4) + if err != nil { + return fmt.Errorf("peek header: %w", err) + } + + bar := pb.New64(l) + br := bar.NewProxyReader(bufr) + bar.ShowTimeLeft = true + bar.ShowPercent = true + bar.ShowSpeed = true + bar.Units = pb.U_BYTES + + var ir io.Reader = br + if string(header[1:]) == "\xB5\x2F\xFD" { // zstd + zr := zstd.NewReader(br) + defer func() { + if err := zr.Close(); err != nil { + log.Errorw("closing zstd reader", "error", err) + } + }() + ir = zr + } + + bar.Start() + tip, err := chainStore.Import(ctx, ir) + if err != nil { + return fmt.Errorf("importing chain failed: %s", err) + } + bar.Finish() + + err = chainStore.SetHead(context.TODO(), tip) + if err != nil { + return fmt.Errorf("importing chain failed: %s", err) + } + logImport.Infof("accepting %s as new head", tip.Key().String()) + + err = chainStore.WriteCheckPoint(context.TODO(), tip.Key()) + if err != nil { + logImport.Errorf("set check point error: %s", err.Error()) + } + + return err +} diff --git a/cmd/info.go b/cmd/info.go new file mode 100644 index 0000000000..e3b5764f03 --- /dev/null +++ b/cmd/info.go @@ -0,0 +1,147 @@ +package cmd + +import ( + "bytes" + "context" + "fmt" + "strings" + "text/tabwriter" + "time" + + "github.com/dustin/go-humanize" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/app/node" + v1 "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" + cmds "github.com/ipfs/go-ipfs-cmds" +) + +var infoCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print node info", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + chainAPI := env.(*node.Env).ChainAPI + commonAPI := env.(*node.Env).CommonAPI + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + netParams, err := 
chainAPI.StateGetNetworkParams(ctx) + if err != nil { + return err + } + writer.Printf("Network: %s\n", netParams.NetworkName) + + start, err := commonAPI.StartTime(ctx) + if err != nil { + return err + } + writer.Printf("StartTime: %s (started at %s)\n", time.Since(start).Truncate(time.Second), start.Truncate(time.Second)) + + if err := SyncBasefeeCheck(ctx, chainAPI, int64(netParams.BlockDelaySecs), writer); err != nil { + return err + } + status, err := commonAPI.NodeStatus(ctx, true) + if err != nil { + return err + } + writer.Printf("Peers to: [publish messages %d] [publish blocks %d]\n", status.PeerStatus.PeersToPublishMsgs, + status.PeerStatus.PeersToPublishBlocks) + + //Chain health calculated as percentage: amount of blocks in last finality / very healthy amount of blocks in a finality (900 epochs * 5 blocks per tipset) + health := (100 * (900 * status.ChainStatus.BlocksPerTipsetLastFinality) / (900 * 5)) + switch { + case health > 85: + writer.Printf("Chain health: %.f%% [healthy]\n", health) + case health < 85: + writer.Printf("Chain health: %.f%% [unhealthy]\n", health) + } + writer.Println() + + addr, err := env.(*node.Env).WalletAPI.WalletDefaultAddress(ctx) + if err == nil && !addr.Empty() { + fmt.Printf("Default address: \n") + balance, err := env.(*node.Env).WalletAPI.WalletBalance(ctx, addr) + if err != nil { + return err + } + writer.Printf(" %s [%s]\n", addr.String(), types.FIL(balance).Short()) + } else { + writer.Printf("Default address: address not set\n") + } + writer.Println() + + addrs := env.(*node.Env).WalletAPI.WalletAddresses(ctx) + totalBalance := big.Zero() + for _, addr := range addrs { + totbal, err := env.(*node.Env).WalletAPI.WalletBalance(ctx, addr) + if err != nil { + return err + } + totalBalance = big.Add(totalBalance, totbal) + } + writer.Printf("Wallet: %v address\n", len(addrs)) + writer.Printf(" Total balance: %s\n", types.FIL(totalBalance).Short()) + + mbLockedSum := big.Zero() + mbAvailableSum := big.Zero() + for _, 
addr := range addrs { + mbal, err := env.(*node.Env).ChainAPI.StateMarketBalance(ctx, addr, types.EmptyTSK) + if err != nil { + if strings.Contains(err.Error(), "actor not found") { + continue + } + return err + } + mbLockedSum = big.Add(mbLockedSum, mbal.Locked) + mbAvailableSum = big.Add(mbAvailableSum, mbal.Escrow) + } + writer.Printf(" Market locked: %s\n", types.FIL(mbLockedSum).Short()) + writer.Printf(" Market available: %s\n", types.FIL(mbAvailableSum).Short()) + writer.Println() + + chs, err := env.(*node.Env).PaychAPI.PaychList(ctx) + if err != nil { + return err + } + writer.Printf("Payment Channels: %v channels\n", len(chs)) + writer.Println() + + s, err := env.(*node.Env).NetworkAPI.NetBandwidthStats(ctx) + if err != nil { + return err + } + tw := tabwriter.NewWriter(writer.w, 6, 6, 2, ' ', 0) + writer.Printf("Bandwidth:\n") + fmt.Fprintf(tw, "\tTotalIn\tTotalOut\tRateIn\tRateOut\n") + fmt.Fprintf(tw, "\t%s\t%s\t%s/s\t%s/s\n", humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut))) + if err := tw.Flush(); err != nil { + return err + } + + return re.Emit(buf) + }, +} + +func SyncBasefeeCheck(ctx context.Context, chainAPI v1.IChain, blockDelaySecs int64, writer *SilentWriter) error { + head, err := chainAPI.ChainHead(ctx) + if err != nil { + return err + } + + var syncStatus string + switch { + case time.Now().Unix()-int64(head.MinTimestamp()) < blockDelaySecs*3/2: // within 1.5 epochs + syncStatus = "[sync ok]" + case time.Now().Unix()-int64(head.MinTimestamp()) < blockDelaySecs*5: // within 5 epochs + syncStatus = fmt.Sprintf("[sync slow (%s behind)]", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) + default: + syncStatus = fmt.Sprintf("[sync behind! 
(%s behind)]", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) + } + basefee := head.MinTicketBlock().ParentBaseFee + + writer.Printf("Chain: %s [basefee %s] [epoch %v]\n", syncStatus, types.FIL(basefee).Short(), head.Height()) + + return nil +} diff --git a/cmd/inspector.go b/cmd/inspector.go new file mode 100644 index 0000000000..aa1e1df34f --- /dev/null +++ b/cmd/inspector.go @@ -0,0 +1,131 @@ +package cmd + +import ( + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" + cmds "github.com/ipfs/go-ipfs-cmds" +) + +var inspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show info about the venus node", + }, + Subcommands: map[string]*cmds.Command{ + "all": allInspectCmd, + "runtime": runtimeInspectCmd, + "disk": diskInspectCmd, + "memory": memoryInspectCmd, + "config": configInspectCmd, + "environment": envInspectCmd, + "protocol": protocolInspectCmd, + }, +} + +var allInspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print all diagnostic information.", + ShortDescription: "Prints out information about filecoin process and its environment.", + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + var allInfo node.AllInspectorInfo + allInfo.Runtime = env.(*node.Env).InspectorAPI.Runtime() + + dsk, err := env.(*node.Env).InspectorAPI.Disk() + if err != nil { + return err + } + allInfo.Disk = dsk + + mem, err := env.(*node.Env).InspectorAPI.Memory() + if err != nil { + return err + } + allInfo.Memory = mem + allInfo.Config = env.(*node.Env).InspectorAPI.Config() + allInfo.Environment = env.(*node.Env).InspectorAPI.Environment() + allInfo.FilecoinVersion = env.(*node.Env).InspectorAPI.FilecoinVersion() + return cmds.EmitOnce(res, allInfo) + }, + Type: node.AllInspectorInfo{}, +} + +var runtimeInspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print runtime 
diagnostic information.", + ShortDescription: "Prints out information about the golang runtime.", + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + out := env.(*node.Env).InspectorAPI.Runtime() + return cmds.EmitOnce(res, out) + }, + Type: node.RuntimeInfo{}, +} + +var diskInspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print filesystem usage information.", + ShortDescription: "Prints out information about the filesystem.", + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + out, err := env.(*node.Env).InspectorAPI.Disk() + if err != nil { + return err + } + return cmds.EmitOnce(res, out) + }, + Type: node.DiskInfo{}, +} + +var memoryInspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print memory usage information.", + ShortDescription: "Prints out information about memory usage.", + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + out, err := env.(*node.Env).InspectorAPI.Memory() + if err != nil { + return err + } + return cmds.EmitOnce(res, out) + }, + Type: node.MemoryInfo{}, +} + +var configInspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print in-memory config information.", + ShortDescription: "Prints out information about your filecoin nodes config.", + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + out := env.(*node.Env).InspectorAPI.Config() + return cmds.EmitOnce(res, out) + }, + Type: config.Config{}, +} + +var envInspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print filecoin environment information.", + ShortDescription: "Prints out information about your filecoin nodes environment.", + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + out := env.(*node.Env).InspectorAPI.Environment() + return cmds.EmitOnce(res, out) + }, + Type: node.EnvironmentInfo{}, +} + +var 
protocolInspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show protocol parameter details", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + params, err := env.(*node.Env).ChainAPI.ProtocolParameters(req.Context) + if err != nil { + return err + } + return re.Emit(params) + }, + Type: types.ProtocolParams{}, +} diff --git a/cmd/log.go b/cmd/log.go new file mode 100644 index 0000000000..4f4e50269c --- /dev/null +++ b/cmd/log.go @@ -0,0 +1,112 @@ +package cmd + +import ( + "fmt" + "strings" + + cmds "github.com/ipfs/go-ipfs-cmds" + logging "github.com/ipfs/go-log/v2" +) + +var logCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Interact with the daemon subsystems log output.", + ShortDescription: ` +'venus log' contains utility commands to affect the subsystems logging +output of a running daemon. +`, + }, + + Subcommands: map[string]*cmds.Command{ + "set-level": logLevelCmd, + "list": logLsCmd, + "tail": logTailCmd, + }, +} + +var logTailCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Read subsystems log output.", + ShortDescription: ` +Outputs subsystems log output as it is generated. +`, + }, + + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + r := logging.NewPipeReader() + go func() { + defer r.Close() // nolint: errcheck + <-req.Context.Done() + }() + + return re.Emit(r) + }, +} + +var logLevelCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Set the logging level.", + ShortDescription: `Set the log level for logging systems: + + The system flag can be specified multiple times. + + eg) log set-level --system chain --system pubsub debug + + Available Levels: + debug + info + warn + error + fatal + panic +`, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("set-level", true, false, `The log level, with 'debug' the most verbose and 'panic' the least verbose. + One of: debug, info, warning, error, fatal, panic. 
+ `), + }, + + Options: []cmds.Option{ + cmds.StringsOption("system", "The system logging identifier"), + }, + + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + level := strings.ToLower(req.Arguments[0]) + + var s string + if system, ok := req.Options["system"].([]string); ok { + for _, v := range system { + if err := logging.SetLogLevel(v, level); err != nil { + return err + } + } + s = fmt.Sprintf("Set log level of '%s' to '%s'", strings.Join(system, ","), level) + } else { + lvl, err := logging.LevelFromString(level) + if err != nil { + return err + } + logging.SetAllLoggers(lvl) + s = fmt.Sprintf("Set log level of all subsystems to: %s", level) + } + + return cmds.EmitOnce(res, s) + }, + Type: string(""), +} + +var logLsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List the logging subsystems.", + ShortDescription: ` +'venus log list' is a utility command used to list the logging +subsystems of a running daemon. +`, + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + return cmds.EmitOnce(res, logging.GetSubsystems()) + }, + Type: []string{}, +} diff --git a/cmd/main.go b/cmd/main.go new file mode 100644 index 0000000000..7d5aae1352 --- /dev/null +++ b/cmd/main.go @@ -0,0 +1,319 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "strings" + + cmds "github.com/ipfs/go-ipfs-cmds" + "github.com/ipfs/go-ipfs-cmds/cli" + cmdhttp "github.com/ipfs/go-ipfs-cmds/http" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/app/paths" + "github.com/filecoin-project/venus/pkg/repo" +) + +const ( + // OptionAPI is the name of the option for specifying the api port. + OptionAPI = "cmdapiaddr" + + OptionToken = "token" + // OptionRepoDir is the name of the option for specifying the directory of the repo. 
+ OptionRepoDir = "repo" + + OptionLegacyRepoDir = "repodir" + + // OptionSectorDir is the name of the option for specifying the directory into which staged and sealed sectors will be written. + // OptionSectorDir = "sectordir" + + // OptionPresealedSectorDir is the name of the option for specifying the directory from which presealed sectors should be pulled when initializing. + // OptionPresealedSectorDir = "presealed-sectordir" + + // OptionDrandConfigAddr is the init option for configuring drand to a given network address at init time + OptionDrandConfigAddr = "drand-config-addr" + + // offlineMode tells us if we should try to connect this Filecoin node to the network + OfflineMode = "offline" + + // ELStdout tells the daemon to write event logs to stdout. + ELStdout = "elstdout" + + ULimit = "manage-fdlimit" + + // AutoSealIntervalSeconds configures the daemon to check for and seal any staged sectors on an interval. + // AutoSealIntervalSeconds = "auto-seal-interval-seconds" + + // SwarmAddress is the multiaddr for this Filecoin node + SwarmAddress = "swarmlisten" + + // SwarmPublicRelayAddress is a public address that the venus node + // will listen on if it is operating as a relay. We use this to specify + // the public ip:port of a relay node that is sitting behind a static + // NAT mapping. + SwarmPublicRelayAddress = "swarmrelaypublic" + + // GenesisFile is the path of file containing archive of genesis block DAG data + GenesisFile = "genesisfile" + + // Network populates config with network-specific parameters for a known network (e.g. testnet2) + Network = "network" + + // IsRelay when set causes the the daemon to provide libp2p relay + // services allowing other filecoin nodes behind NATs to talk directly. 
+ IsRelay = "is-relay" + + Size = "size" + + ImportSnapshot = "import-snapshot" + + // wallet password + Password = "password" + + AuthServiceURL = "auth-url" +) + +func init() { + // add pretty json as an encoding type + cmds.Encoders["pretty-json"] = func(req *cmds.Request) func(io.Writer) cmds.Encoder { + return func(w io.Writer) cmds.Encoder { + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + return enc + } + } +} + +// command object for the local cli +var RootCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Filecoin decentralized storage network client", + Subcommands: ` +START RUNNING VENUS + daemon - Start a venus daemon process + wallet - Manage wallet + msig - Interact with a multisig wallet + info - Print node info + +VIEW DATA STRUCTURES + chain - Inspect the filecoin blockchain + sync - Inspect the filecoin Sync + +NETWORK COMMANDS + swarm - Interact with the swarm + drand - Retrieve randomness from drand server + +MESSAGE COMMANDS + send - Send message + mpool - Manage the message pool + +MINER COMMANDS + miner - Interact with actors + +State COMMANDS + state - query states of the filecoin network + +Paych COMMANDS + paych - Manage payment channels + +Cid COMMANDS + manifest-cid-from-car - Get the manifest CID from a car file + +TOOL COMMANDS + inspect - Show info about the venus node + log - Interact with the daemon event log output + version - Show venus version information + seed - Seal sectors for genesis miner + fetch - Fetch proving parameters +`, + }, + Options: []cmds.Option{ + cmds.StringsOption(OptionToken, "set the auth token to use"), + cmds.StringOption(OptionAPI, "set the api port to use"), + cmds.StringOption(OptionRepoDir, OptionLegacyRepoDir, "set the repo directory, defaults to ~/.venus"), + cmds.StringOption(cmds.EncLong, cmds.EncShort, "The encoding type the output should be encoded with (pretty-json or json)").WithDefault("pretty-json"), + cmds.BoolOption("help", "Show the full command help text."), + 
cmds.BoolOption("h", "Show a short version of the command help text."), + }, + Subcommands: make(map[string]*cmds.Command), +} + +// command object for the daemon +var RootCmdDaemon = &cmds.Command{ + Subcommands: make(map[string]*cmds.Command), +} + +// all top level commands, not available to daemon +var rootSubcmdsLocal = map[string]*cmds.Command{ + "daemon": daemonCmd, + "fetch": fetchCmd, + "version": versionCmd, + "seed": seedCmd, + "cid": cidCmd, +} + +// all top level commands, available on daemon. set during init() to avoid configuration loops. +var rootSubcmdsDaemon = map[string]*cmds.Command{ + "chain": chainCmd, + "sync": syncCmd, + "drand": drandCmd, + "inspect": inspectCmd, + "log": logCmd, + "send": msgSendCmd, + "mpool": mpoolCmd, + "swarm": swarmCmd, + "wallet": walletCmd, + "version": versionCmd, + "state": stateCmd, + "miner": minerCmd, + "paych": paychCmd, + "msig": multisigCmd, + "info": infoCmd, +} + +func init() { + for k, v := range rootSubcmdsLocal { + RootCmd.Subcommands[k] = v + } + + for k, v := range rootSubcmdsDaemon { + RootCmd.Subcommands[k] = v + RootCmdDaemon.Subcommands[k] = v + } +} + +// Run processes the arguments and stdin +func Run(ctx context.Context, args []string, stdin, stdout, stderr *os.File) (int, error) { + err := cli.Run(ctx, RootCmd, args, stdin, stdout, stderr, buildEnv, makeExecutor) + if err == nil { + return 0, nil + } + if exerr, ok := err.(cli.ExitError); ok { + return int(exerr), nil + } + return 1, err +} + +func buildEnv(ctx context.Context, _ *cmds.Request) (cmds.Environment, error) { + return node.NewClientEnv(ctx), nil +} + +type executor struct { + api string + token string + exec cmds.Executor +} + +func (e *executor) Execute(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if e.api == "" { + return e.exec.Execute(req, re, env) + } + + client := cmdhttp.NewClient(e.api, cmdhttp.ClientWithAPIPrefix(node.APIPrefix), cmdhttp.ClientWithHeader("Authorization", "Bearer "+e.token)) 
+ + return client.Execute(req, re, env) +} + +func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) { + isDaemonRequired := requiresDaemon(req) + var ( + apiInfo *APIInfo + err error + ) + + if isDaemonRequired { + apiInfo, err = getAPIInfo(req) + if err != nil { + return nil, err + } + } + if apiInfo == nil && isDaemonRequired { + return nil, fmt.Errorf("daemon must be started before using this command") + } + if apiInfo == nil { + apiInfo = &APIInfo{} + } + + return &executor{ + api: apiInfo.Addr, + token: apiInfo.Token, + exec: cmds.NewExecutor(RootCmd), + }, nil +} + +type APIInfo struct { + Addr string + Token string +} + +func getAPIInfo(req *cmds.Request) (*APIInfo, error) { + repoDir, _ := req.Options[OptionRepoDir].(string) + repoDir, err := paths.GetRepoPath(repoDir) + if err != nil { + return nil, err + } + var rawAddr string + // second highest precedence is env vars. + if envapi := os.Getenv("FIL_API"); envapi != "" { + rawAddr = envapi + } + + // first highest precedence is cmd flag. + if apiAddress, ok := req.Options[OptionAPI].(string); ok && apiAddress != "" { + rawAddr = apiAddress + } + // we will read the api file if no other option is given. 
+ if len(rawAddr) == 0 { + rpcAPI, err := repo.APIAddrFromRepoPath(repoDir) + if err != nil { + return nil, errors.Wrap(err, "can't find API endpoint address in environment, command-line, or local repo (is the daemon running?)") + } + rawAddr = rpcAPI // NOTICE command only use api + } + + rawAddr = strings.Trim(rawAddr, " \n\t") + maddr, err := ma.NewMultiaddr(rawAddr) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("unable to convert API endpoint address %s to a multiaddr", rawAddr)) + } + + _, host, err := manet.DialArgs(maddr) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("unable to dial API endpoint address %s", maddr)) + } + + token := "" + if tk, ok := req.Options[OptionToken]; ok { + tkArr := tk.([]string) + if len(tkArr) > 0 { + token = tkArr[0] + } + } + if len(token) == 0 { + tk, err := repo.APITokenFromRepoPath(repoDir) + if err != nil { + return nil, errors.Wrap(err, "can't find token in environment") + } + token = tk + } + + return &APIInfo{ + Addr: host, + Token: token, + }, nil +} + +func requiresDaemon(req *cmds.Request) bool { + for cmd := range rootSubcmdsLocal { + if len(req.Path) > 0 && req.Path[0] == cmd { + return false + } + } + return true +} diff --git a/cmd/go-filecoin/main_daemon_test.go b/cmd/main_daemon_test.go similarity index 77% rename from cmd/go-filecoin/main_daemon_test.go rename to cmd/main_daemon_test.go index 0c02deb0ea..0c0222969a 100644 --- a/cmd/go-filecoin/main_daemon_test.go +++ b/cmd/main_daemon_test.go @@ -1,24 +1,25 @@ -package commands_test +package cmd_test import ( "os" "os/exec" "path" "testing" + "time" + "github.com/filecoin-project/venus/pkg/testhelpers" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" ) func TestNoDaemonNoHang(t *testing.T) { 
tf.IntegrationTest(t) - // Start the daemon to initialize a new repo d := testhelpers.NewDaemon(t).Start() + // todo should readline for stdout for confirming rpc status + time.Sleep(time.Second * 5) // rename the lock files to a safe place repoDir := d.RepoDir() require.NoError(t, os.Rename(path.Join(repoDir, "api"), path.Join(repoDir, "api.backup"))) @@ -32,7 +33,7 @@ func TestNoDaemonNoHang(t *testing.T) { require.NoError(t, os.Rename(path.Join(repoDir, "repo.lock.backup"), path.Join(repoDir, "repo.lock"))) // run actor ls with the old repo that still has the lock file, but no running daemon - out, _ := exec.Command(testhelpers.MustGetFilecoinBinary(), "--repodir", d.RepoDir(), "actor", "ls").CombinedOutput() + out, _ := exec.Command(testhelpers.MustGetFilecoinBinary(), "--repo", d.RepoDir(), "state", "list-actor").CombinedOutput() assert.Contains(t, string(out), "Is the daemon running?") } diff --git a/cmd/main_test.go b/cmd/main_test.go new file mode 100644 index 0000000000..588307533a --- /dev/null +++ b/cmd/main_test.go @@ -0,0 +1,26 @@ +package cmd + +import ( + "context" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + cmds "github.com/ipfs/go-ipfs-cmds" + "github.com/stretchr/testify/assert" +) + +func TestRequiresDaemon(t *testing.T) { + tf.UnitTest(t) + + reqWithDaemon, err := cmds.NewRequest(context.Background(), []string{"chain", "head"}, nil, []string{}, nil, RootCmdDaemon) + assert.NoError(t, err) + assert.True(t, requiresDaemon(reqWithDaemon)) + + reqWithoutDaemon, err := cmds.NewRequest(context.Background(), []string{"daemon"}, nil, []string{}, nil, RootCmd) + assert.NoError(t, err) + assert.False(t, requiresDaemon(reqWithoutDaemon)) + + reqSubcmdDaemon, err := cmds.NewRequest(context.Background(), []string{"version"}, nil, []string{}, nil, RootCmd) + assert.NoError(t, err) + assert.False(t, requiresDaemon(reqSubcmdDaemon)) +} diff --git a/cmd/message.go b/cmd/message.go new file mode 100644 index 
0000000000..a665bff257 --- /dev/null +++ b/cmd/message.go @@ -0,0 +1,223 @@ +package cmd + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + "fmt" + "reflect" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + fbig "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + cmds "github.com/ipfs/go-ipfs-cmds" + "github.com/pkg/errors" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/utils" +) + +var ( + feecapOption = cmds.StringOption("gas-feecap", "Price (FIL e.g. 0.00013) to pay for each GasUnit consumed mining this message") + premiumOption = cmds.StringOption("gas-premium", "Price (FIL e.g. 0.00013) to pay for each GasUnit consumed mining this message") + limitOption = cmds.Int64Option("gas-limit", "Maximum GasUnits this message is allowed to consume") +) + +func parseGasOptions(req *cmds.Request) (fbig.Int, fbig.Int, int64, error) { + var ( + feecap = types.FIL{Int: types.NewInt(0).Int} + premium = types.FIL{Int: types.NewInt(0).Int} + ok = false + gasLimitInt = int64(0) + ) + + var err error + feecapOption := req.Options["gas-feecap"] + if feecapOption != nil { + feecap, err = types.ParseFIL(feecapOption.(string)) + if err != nil { + return types.ZeroFIL, types.ZeroFIL, 0, errors.New("invalid gas price (specify FIL as a decimal number)") + } + } + + premiumOption := req.Options["gas-premium"] + if premiumOption != nil { + premium, err = types.ParseFIL(premiumOption.(string)) + if err != nil { + return types.ZeroFIL, types.ZeroFIL, 0, errors.New("invalid gas price (specify FIL as a decimal number)") + } + } + + limitOption := req.Options["gas-limit"] + if limitOption != nil { + gasLimitInt, ok = limitOption.(int64) + if 
!ok { + msg := fmt.Sprintf("invalid gas limit: %s", limitOption) + return types.ZeroFIL, types.ZeroFIL, 0, errors.New(msg) + } + } + + return fbig.Int{Int: feecap.Int}, fbig.Int{Int: premium.Int}, gasLimitInt, nil +} + +// MessageSendResult is the return type for message send command +type MessageSendResult struct { + Cid cid.Cid + GasUsed int64 + Preview bool +} + +var msgSendCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Send a message", // This feels too generic... + }, + Arguments: []cmds.Argument{ + cmds.StringArg("target", true, false, "address of the actor to send the message to"), + cmds.StringArg("value", true, false, "amount of FIL"), + }, + Options: []cmds.Option{ + cmds.StringOption("value", "Value to send with message in FIL"), + cmds.StringOption("from", "address to send message from"), + feecapOption, + premiumOption, + limitOption, + cmds.Uint64Option("nonce", "specify the nonce to use"), + cmds.StringOption("params-json", "specify invocation parameters in json"), + cmds.StringOption("params-hex", "specify invocation parameters in hex"), + cmds.Uint64Option("method", "The method to invoke on the target actor"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + toAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + v := req.Arguments[1] + val, err := types.ParseFIL(v) + if err != nil { + return fmt.Errorf("mal-formed value: %v", err) + } + + methodID := builtin.MethodSend + method, ok := req.Options["method"] + if ok { + methodID = abi.MethodNum(method.(uint64)) + } + + fromAddr, err := fromAddrOrDefault(req, env) + if err != nil { + return err + } + + if methodID == builtin.MethodSend && fromAddr.String() == toAddr.String() { + return errors.New("self-transfer is not allowed") + } + + feecap, premium, gasLimit, err := parseGasOptions(req) + if err != nil { + return err + } + + if err := utils.LoadBuiltinActors(req.Context, env.(*node.Env).ChainAPI); err != 
nil { + return err + } + + var params []byte + rawPJ := req.Options["params-json"] + if rawPJ != nil { + decparams, err := decodeTypedParams(req.Context, env.(*node.Env), toAddr, methodID, rawPJ.(string)) + if err != nil { + return fmt.Errorf("failed to decode json params: %s", err) + } + params = decparams + } + + rawPH := req.Options["params-hex"] + if rawPH != nil { + if params != nil { + return fmt.Errorf("can only specify one of 'params-json' and 'params-hex'") + } + decparams, err := hex.DecodeString(rawPH.(string)) + if err != nil { + return fmt.Errorf("failed to decode hex params: %s", err) + } + params = decparams + } + + msg := &types.Message{ + From: fromAddr, + To: toAddr, + Value: abi.TokenAmount{Int: val.Int}, + GasPremium: premium, + GasFeeCap: feecap, + GasLimit: gasLimit, + Method: methodID, + Params: params, + } + + nonceOption := req.Options["nonce"] + var c cid.Cid + if nonceOption != nil { + nonce, ok := nonceOption.(uint64) + if !ok { + return fmt.Errorf("invalid nonce option: %v", nonceOption) + } + msg.Nonce = nonce + + sm, err := env.(*node.Env).WalletAPI.WalletSignMessage(req.Context, msg.From, msg) + if err != nil { + return err + } + + _, err = env.(*node.Env).MessagePoolAPI.MpoolPush(req.Context, sm) + if err != nil { + return err + } + c = sm.Cid() + } else { + sm, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(req.Context, msg, nil) + if err != nil { + return err + } + c = sm.Cid() + } + + return re.Emit(c.String()) + }, +} + +func decodeTypedParams(ctx context.Context, fapi *node.Env, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) { + act, err := fapi.ChainAPI.StateGetActor(ctx, to, types.EmptyTSK) + if err != nil { + return nil, err + } + + methodMeta, found := utils.MethodsMap[act.Code][method] + if !found { + return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code) + } + + p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler) + + if err := 
json.Unmarshal([]byte(paramstr), p); err != nil { + return nil, fmt.Errorf("unmarshaling input into params type: %s", err) + } + + buf := new(bytes.Buffer) + if err := p.MarshalCBOR(buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// WaitResult is the result of a message wait call. +type WaitResult struct { + Message *types.Message + Receipt *types.MessageReceipt + Signature vm.ActorMethodSignature +} diff --git a/cmd/message_integration_test.go b/cmd/message_integration_test.go new file mode 100644 index 0000000000..310cf2770c --- /dev/null +++ b/cmd/message_integration_test.go @@ -0,0 +1,105 @@ +package cmd_test + +import ( + "context" + "strconv" + "testing" + + "github.com/filecoin-project/venus/pkg/constants" + + "github.com/filecoin-project/go-address" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/app/node/test" + "github.com/filecoin-project/venus/fixtures/fortest" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestMessageSend(t *testing.T) { + t.Skip("This can be unskipped with fake proofs") + tf.IntegrationTest(t) + ctx := context.Background() + builder := test.NewNodeBuilder(t) + defaultAddr := fortest.TestAddresses[0] + + cs := test.FixtureChainSeed(t) + builder.WithGenesisInit(cs.GenesisInitFunc) + builder.WithConfig(test.DefaultAddressConfigOpt(defaultAddr)) + builder.WithInitOpt(cs.KeyInitOpt(1)) + builder.WithInitOpt(cs.KeyInitOpt(0)) + + n, cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + from, err := n.Wallet().API().WalletDefaultAddress(ctx) // this should = fixtures.TestAddresses[0] + require.NoError(t, err) + + t.Log("[failure] invalid target") + cmdClient.RunFail( + ctx, + address.ErrUnknownNetwork.Error(), + "send", + "--from", from.String(), + "--gas-price", "0", "--gas-limit", "300", + "--value=10", "xyz", + ) + + t.Log("[success] with from") + cmdClient.RunSuccess( + ctx, + "send", + "--from", from.String(), + "--gas-price", "1", + 
"--gas-limit", "300", + fortest.TestAddresses[3].String(), + ) + + t.Log("[success] with from and int value") + cmdClient.RunSuccess( + ctx, + "send", + "--from", from.String(), + "--gas-price", "1", + "--gas-limit", "300", + "--value", "10", + fortest.TestAddresses[3].String(), + ) + + t.Log("[success] with from and decimal value") + cmdClient.RunSuccess( + ctx, + "send", + "--from", from.String(), + "--gas-price", "1", + "--gas-limit", "300", + "--value", "5.5", + fortest.TestAddresses[3].String(), + ) +} + +func TestMessageSendBlockGasLimit(t *testing.T) { + tf.IntegrationTest(t) + t.Skip("Unskip using fake proofs") + + ctx := context.Background() + builder := test.NewNodeBuilder(t) + defaultAddr := fortest.TestAddresses[0] + + cs := test.FixtureChainSeed(t) + builder.WithGenesisInit(cs.GenesisInitFunc) + builder.WithConfig(test.DefaultAddressConfigOpt(defaultAddr)) + _, cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + doubleTheBlockGasLimit := strconv.Itoa(int(constants.BlockGasLimit) * 2) + + t.Run("when the gas limit is above the block limit, the message fails", func(t *testing.T) { + cmdClient.RunFail( + ctx, + "block gas limit", + "send", + "--gas-price", "1", "--gas-limit", doubleTheBlockGasLimit, + "--value=10", fortest.TestAddresses[1].String(), + ) + }) +} diff --git a/cmd/miner.go b/cmd/miner.go new file mode 100644 index 0000000000..f6e0fbabc9 --- /dev/null +++ b/cmd/miner.go @@ -0,0 +1,407 @@ +package cmd + +import ( + "bytes" + "fmt" + "time" + + "github.com/docker/go-units" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" + cmds "github.com/ipfs/go-ipfs-cmds" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/constants" + 
"github.com/filecoin-project/venus/pkg/wallet" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/power" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/params" +) + +var minerCmdLog = logging.Logger("miner.cmd") + +var minerCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Interact with actors. Actors are built-in smart contracts.", + }, + Subcommands: map[string]*cmds.Command{ + "new": newMinerCmd, + "info": minerInfoCmd, + "actor": minerActorCmd, + "proving": minerProvingCmd, + }, +} + +var newMinerCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Create a new miner.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("owner", true, false, "owner key to use"), + }, + Options: []cmds.Option{ + cmds.StringOption("worker", "worker key to use (overrides --create-worker-key)"), + cmds.BoolOption("create-worker-key", "Create separate worker key"), + cmds.StringOption("from", "Select which address to send actor creation message from"), + cmds.StringOption("gas-premium", "Set gas premium for initialization messages in AttoFIL").WithDefault("0"), + cmds.StringOption("sector-size", "specify sector size to use"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + + owner, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + ssize, err := abi.RegisteredSealProof_StackedDrg32GiBV1.SectorSize() + if err != nil { + return fmt.Errorf("failed to calculate default sector size: %w", err) + } + + sectorSize, ok := req.Options["sector-size"].(string) + if ok { + sectorSizeInt, err := units.RAMInBytes(sectorSize) + if err != 
nil { + return err + } + ssize = abi.SectorSize(sectorSizeInt) + } + + gp, _ := req.Options["gas-premium"].(string) + gasPrice, err := types.ParseFIL(gp) + if err != nil { + return fmt.Errorf("failed to parse gas-price flag: %s", err) + } + + worker := owner + workerAddr, _ := req.Options["worker-address"].(string) + createWorkerKey, _ := req.Options["create-worker-key"].(bool) + if workerAddr != "" { + worker, err = address.NewFromString(workerAddr) + } else if createWorkerKey { // TODO: Do we need to force this if owner is Secpk? + if !env.(*node.Env).WalletAPI.HasPassword(ctx) { + return errMissPassword + } + if env.(*node.Env).WalletAPI.WalletState(req.Context) == wallet.Lock { + return errWalletLocked + } + if worker, err = env.(*node.Env).WalletAPI.WalletNewAddress(req.Context, address.BLS); err != nil { + return err + } + } + if err != nil { + return err + } + + // make sure the worker account exists on chain + _, err = env.(*node.Env).ChainAPI.StateLookupID(ctx, worker, types.EmptyTSK) + if err != nil { + signed, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, &types.Message{ + From: owner, + To: worker, + Value: big.NewInt(0), + }, nil) + if err != nil { + return fmt.Errorf("push worker init: %v", err) + } + + cid := signed.Cid() + + minerCmdLog.Infof("Initializing worker account %s, message: %s", worker, cid) + minerCmdLog.Infof("Waiting for confirmation") + _ = re.Emit("Initializing worker account " + worker.String() + ", message: " + cid.String()) + + mw, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, cid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return fmt.Errorf("waiting for worker init: %v", err) + } + if mw.Receipt.ExitCode != 0 { + return fmt.Errorf("initializing worker account failed: exit code %d", mw.Receipt.ExitCode) + } + } + + nv, err := env.(*node.Env).ChainAPI.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting network version: %v", err) + } + + spt, 
err := miner.SealProofTypeFromSectorSize(ssize, nv) + if err != nil { + return fmt.Errorf("getting seal proof type: %v", err) + } + + peerID, err := env.(*node.Env).NetworkAPI.ID(ctx) + if err != nil { + return err + } + params, err := actors.SerializeParams(&power2.CreateMinerParams{ + Owner: owner, + Worker: worker, + SealProofType: spt, + Peer: abi.PeerID(peerID), + }) + if err != nil { + return err + } + + minerCmdLog.Info("peer id: ", peerID.Pretty()) + + sender := owner + fromstr, _ := req.Options["from"].(string) + if len(fromstr) != 0 { + faddr, err := address.NewFromString(fromstr) + if err != nil { + return fmt.Errorf("could not parse from address: %v", err) + } + sender = faddr + } + + createStorageMinerMsg := &types.Message{ + To: power.Address, + From: sender, + Value: big.Zero(), + + Method: power.Methods.CreateMiner, + Params: params, + + GasLimit: 0, + GasPremium: abi.TokenAmount{Int: gasPrice.Int}, + } + + signed, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, createStorageMinerMsg, nil) + if err != nil { + return fmt.Errorf("pushing createMiner message: %w", err) + } + + cid := signed.Cid() + minerCmdLog.Infof("Pushed CreateMiner message: %s", cid) + minerCmdLog.Infof("Waiting for confirmation") + _ = re.Emit("Pushed CreateMiner message: " + cid.String()) + + mw, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, cid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return fmt.Errorf("waiting for createMiner message: %v", err) + } + + if mw.Receipt.ExitCode != 0 { + return fmt.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode) + } + + var retval power2.CreateMinerReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil { + return err + } + + s := fmt.Sprintf("New miners address is: %s (%s)", retval.IDAddress, retval.RobustAddress) + minerCmdLog.Info(s) + + return re.Emit(s) + }, + Type: "", +} + +var minerInfoCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + 
Tagline: "Print miner info.", + }, + Options: []cmds.Option{ + cmds.BoolOption("hide-sectors-info", "hide-sectors-info"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + ctx := req.Context + blockstoreAPI := env.(*node.Env).BlockStoreAPI + api := env.(*node.Env).ChainAPI + + blockDelay, err := blockDelay(req) + if err != nil { + return err + } + + head, err := api.ChainHead(ctx) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + var chainSyncStr string + switch { + case time.Now().Unix()-int64(head.MinTimestamp()) < int64(blockDelay*3/2): // within 1.5 epochs + chainSyncStr = "[Chain: sync ok]" + case time.Now().Unix()-int64(head.MinTimestamp()) < int64(blockDelay*5): // within 5 epochs + chainSyncStr = fmt.Sprintf("[Chain: sync slow (%s behind)]", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) + default: + chainSyncStr = fmt.Sprintf("[Chain: sync behind! 
(%s behind)]", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) + } + + basefee := head.MinTicketBlock().ParentBaseFee + writer.Printf("%s [basefee %s]\n", chainSyncStr, types.FIL(basefee).Short()) + + mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + tbs := blockstoreutil.NewTieredBstore(blockstoreutil.NewAPIBlockstore(blockstoreAPI), blockstoreutil.NewTemporary()) + mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) + if err != nil { + return err + } + + // Sector size + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + ssize := types.SizeStr(big.NewInt(int64(mi.SectorSize))) + writer.Printf("Miner: %s (%s sectors)\n", maddr, ssize) + + pow, err := api.StateMinerPower(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + rpercI := big.Div(big.Mul(pow.MinerPower.RawBytePower, big.NewInt(1000000)), pow.TotalPower.RawBytePower) + qpercI := big.Div(big.Mul(pow.MinerPower.QualityAdjPower, big.NewInt(1000000)), pow.TotalPower.QualityAdjPower) + + writer.Printf("Power: %s / %s (%0.4f%%)\n", + types.DeciStr(pow.MinerPower.QualityAdjPower), + types.DeciStr(pow.TotalPower.QualityAdjPower), + float64(qpercI.Int64())/10000) + + writer.Printf("Raw: %s / %s (%0.4f%%)\n", + types.SizeStr(pow.MinerPower.RawBytePower), + types.SizeStr(pow.TotalPower.RawBytePower), + float64(rpercI.Int64())/10000) + + secCounts, err := api.StateMinerSectorCount(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + proving := secCounts.Active + secCounts.Faulty + nfaults := secCounts.Faulty + writer.Printf("\tCommitted: %s\n", types.SizeStr(big.Mul(big.NewInt(int64(secCounts.Live)), big.NewInt(int64(mi.SectorSize))))) + if nfaults == 0 { + writer.Printf("\tProving: %s\n", types.SizeStr(big.Mul(big.NewInt(int64(proving)), big.NewInt(int64(mi.SectorSize))))) + } else { + var faultyPercentage float64 + if secCounts.Live != 0 { + 
faultyPercentage = float64(10000*nfaults/secCounts.Live) / 100. + } + writer.Printf("Proving: %s (%s Faulty, %.2f%%)\n", + types.SizeStr(big.Mul(big.NewInt(int64(proving)), big.NewInt(int64(mi.SectorSize)))), + types.SizeStr(big.Mul(big.NewInt(int64(nfaults)), big.NewInt(int64(mi.SectorSize)))), + faultyPercentage) + } + + if !pow.HasMinPower { + writer.Println("Below minimum power threshold, no blocks will be won") + } else { + expWinChance := float64(big.Mul(qpercI, big.NewInt(int64(params.BlocksPerEpoch))).Int64()) / 1000000 + if expWinChance > 0 { + if expWinChance > 1 { + expWinChance = 1 + } + winRate := time.Duration(float64(time.Second*time.Duration(blockDelay)) / expWinChance) + winPerDay := float64(time.Hour*24) / float64(winRate) + + writer.Printf("Expected block win rate: %.4f/day (every %s)\n", winPerDay, winRate.Truncate(time.Second)) + } + } + + writer.Println() + + spendable := big.Zero() + + // NOTE: there's no need to unlock anything here. Funds only + // vest on deadline boundaries, and they're unlocked by cron. 
+ lockedFunds, err := mas.LockedFunds() + if err != nil { + return fmt.Errorf("getting locked funds: %w", err) + } + availBalance, err := mas.AvailableBalance(mact.Balance) + if err != nil { + return fmt.Errorf("getting available balance: %w", err) + } + spendable = big.Add(spendable, availBalance) + + writer.Printf("Miner Balance: %s\n", types.FIL(mact.Balance).Short()) + writer.Printf(" PreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits).Short()) + writer.Printf(" Pledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement).Short()) + writer.Printf(" Vesting: %s\n", types.FIL(lockedFunds.VestingFunds).Short()) + writer.Printf(" Available: %s\n", types.FIL(availBalance).Short()) + + mb, err := api.StateMarketBalance(ctx, maddr, types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting market balance: %w", err) + } + spendable = big.Add(spendable, big.Sub(mb.Escrow, mb.Locked)) + + writer.Printf("Market Balance: %s\n", types.FIL(mb.Escrow).Short()) + writer.Printf(" Locked: %s\n", types.FIL(mb.Locked).Short()) + writer.Printf(" Available: %s\n", types.FIL(big.Sub(mb.Escrow, mb.Locked)).Short()) + + wb, err := env.(*node.Env).WalletAPI.WalletBalance(ctx, mi.Worker) + if err != nil { + return fmt.Errorf("getting worker balance: %w", err) + } + spendable = big.Add(spendable, wb) + writer.Printf("Worker Balance: %s\n", types.FIL(wb).Short()) + if len(mi.ControlAddresses) > 0 { + cbsum := big.Zero() + for _, ca := range mi.ControlAddresses { + b, err := env.(*node.Env).WalletAPI.WalletBalance(ctx, ca) + if err != nil { + return fmt.Errorf("getting control address balance: %w", err) + } + cbsum = big.Add(cbsum, b) + } + spendable = big.Add(spendable, cbsum) + + writer.Printf(" Control: %s\n", types.FIL(cbsum).Short()) + } + writer.Printf("Total Spendable: %s\n", types.FIL(spendable).Short()) + + if mi.Beneficiary != address.Undef { + writer.Printf("Beneficiary:\t%s\n", mi.Beneficiary) + if mi.Beneficiary != mi.Owner { + writer.Printf("Beneficiary 
Quota:\t%s\n", mi.BeneficiaryTerm.Quota) + writer.Printf("Beneficiary Used Quota:\t%s\n", mi.BeneficiaryTerm.UsedQuota) + writer.Printf("Beneficiary Expiration:\t%s\n", mi.BeneficiaryTerm.Expiration) + } + } + if mi.PendingBeneficiaryTerm != nil { + writer.Printf("Pending Beneficiary Term:\n") + writer.Printf("New Beneficiary:\t%s\n", mi.PendingBeneficiaryTerm.NewBeneficiary) + writer.Printf("New Quota:\t%s\n", mi.PendingBeneficiaryTerm.NewQuota) + writer.Printf("New Expiration:\t%s\n", mi.PendingBeneficiaryTerm.NewExpiration) + writer.Printf("Approved By Beneficiary:\t%t\n", mi.PendingBeneficiaryTerm.ApprovedByBeneficiary) + writer.Printf("Approved By Nominee:\t%t\n", mi.PendingBeneficiaryTerm.ApprovedByNominee) + } + + // TODO: grab actr state / info + // * Sealed sectors (count / bytes) + // * Power + + return re.Emit(buf) + }, +} diff --git a/cmd/miner_actor.go b/cmd/miner_actor.go new file mode 100644 index 0000000000..4fec8e942c --- /dev/null +++ b/cmd/miner_actor.go @@ -0,0 +1,883 @@ +package cmd + +import ( + "bytes" + "encoding/hex" + "fmt" + "strings" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/blockstore" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + cmds "github.com/ipfs/go-ipfs-cmds" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/cmd/tablewriter" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + 
"github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var minerActorCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "manipulate the miner actor.", + }, + Subcommands: map[string]*cmds.Command{ + "set-addrs": actorSetAddrsCmd, + "set-peer-id": actorSetPeeridCmd, + "withdraw": actorWithdrawCmd, + "repay-debt": actorRepayDebtCmd, + "set-owner": actorSetOwnerCmd, + "control": actorControl, + "propose-change-worker": actorProposeChangeWorker, + "confirm-change-worker": actorConfirmChangeWorker, + }, +} + +var actorSetAddrsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "set addresses that your miner can be publicly dialed on.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Options: []cmds.Option{ + cmds.Int64Option("gas-limit", "set gas limit").WithDefault(int64(0)), + cmds.StringsOption("addrs", "set addresses"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + var addrs []abi.Multiaddrs + addresses, _ := req.Options["addrs"].([]string) + for _, addr := range addresses { + maddr, err := ma.NewMultiaddr(addr) + if err != nil { + return fmt.Errorf("failed to parse %q as a multiaddr: %v", addr, err) + } + + maddrNop2p, strip := ma.SplitFunc(maddr, func(c ma.Component) bool { + return c.Protocol().Code == ma.P_P2P + }) + + if strip != nil { + _ = re.Emit(fmt.Sprint("Stripping peerid ", strip, " from ", maddr)) + } + addrs = append(addrs, maddrNop2p.Bytes()) + } + + mi, err := env.(*node.Env).ChainAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + params, err := actors.SerializeParams(&miner2.ChangeMultiaddrsParams{NewMultiaddrs: addrs}) + if err != nil { + return err + } + + gasLimit, _ := 
req.Options["gas-limit"].(int64) + + smsg, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: mi.Worker, + Value: big.NewInt(0), + GasLimit: gasLimit, + Method: builtintypes.MethodsMiner.ChangeMultiaddrs, + Params: params, + }, nil) + if err != nil { + return err + } + + return re.Emit(fmt.Sprintf("Requested multiaddrs change in message %s", smsg.Cid())) + }, + Type: "", +} + +var actorSetPeeridCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "set the peer id of your miner.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + cmds.StringArg("peer-id", true, false, "set peer id"), + }, + Options: []cmds.Option{ + cmds.Int64Option("gas-limit", "set gas limit"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + pid, err := peer.Decode(req.Arguments[1]) + if err != nil { + return fmt.Errorf("failed to parse input as a peerId: %w", err) + } + + mi, err := env.(*node.Env).ChainAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + params, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(pid)}) + if err != nil { + return err + } + + gasLimit, _ := req.Options["gas-limit"].(int64) + + smsg, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: mi.Worker, + Value: big.NewInt(0), + GasLimit: gasLimit, + Method: builtintypes.MethodsMiner.ChangePeerID, + Params: params, + }, nil) + if err != nil { + return err + } + return re.Emit(fmt.Sprintf("Requested peerid change in message %s", smsg.Cid())) + }, + Type: "", +} + +var actorWithdrawCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "withdraw available balance to beneficiary.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, 
"Address of miner to show"), + cmds.StringArg("amount", true, false, "[amount (FIL)]"), + }, + Options: []cmds.Option{ + cmds.Uint64Option("confidence", "number of block confirmations to wait for").WithDefault(constants.MessageConfidence), + cmds.BoolOption("beneficiary", "send withdraw message from the beneficiary address"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + mi, err := env.(*node.Env).ChainAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + available, err := env.(*node.Env).ChainAPI.StateMinerAvailableBalance(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + amount := available + f, err := types.ParseFIL(req.Arguments[1]) + if err != nil { + return fmt.Errorf("parsing 'amount' argument: %v", err) + } + + amount = abi.TokenAmount(f) + + if amount.GreaterThan(available) { + return fmt.Errorf("can't withdraw more funds than available; requested: %s; available: %s", amount, available) + } + + params, err := actors.SerializeParams(&miner2.WithdrawBalanceParams{ + AmountRequested: amount, // Default to attempting to withdraw all the extra funds in the miner actor + }) + if err != nil { + return err + } + + sender := mi.Owner + if beneficiary, _ := req.Options["beneficiary"].(bool); beneficiary { + sender = mi.Beneficiary + } + + smsg, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: sender, + Value: big.NewInt(0), + Method: builtintypes.MethodsMiner.WithdrawBalance, + Params: params, + }, nil) + if err != nil { + return err + } + _ = re.Emit(fmt.Sprintf("Requested rewards withdrawal in message %s", smsg.Cid())) + + confidence, _ := req.Options["confidence"].(uint64) + // wait for it to get mined into a block + _ = re.Emit(fmt.Sprintf("waiting for %d epochs for confirmation..", confidence)) + wait, err 
:= env.(*node.Env).ChainAPI.StateWaitMsg(ctx, smsg.Cid(), confidence, -1, true) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + return err + } + + nv, err := env.(*node.Env).ChainAPI.StateNetworkVersion(ctx, wait.TipSet) + if err != nil { + return err + } + + if nv >= network.Version14 { + var withdrawn abi.TokenAmount + if err := withdrawn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return err + } + + _ = re.Emit(fmt.Sprintf("Successfully withdrew %s", types.MustParseFIL(withdrawn.String()+"attofil"))) + if withdrawn.LessThan(amount) { + _ = re.Emit(fmt.Sprintf("Note that this is less than the requested amount of %s\n", amount.String()+"attofil")) + } + } + + return nil + }, + Type: "", +} + +var actorRepayDebtCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "pay down a miner's debt.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Options: []cmds.Option{ + cmds.StringsOption("amount", "[amount (FIL)]"), + cmds.StringsOption("from", "optionally specify the account to send funds from"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + mi, err := env.(*node.Env).ChainAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + var amount abi.TokenAmount + fil, _ := req.Options["amount"].(string) + if len(fil) != 0 { + f, err := types.ParseFIL(fil) + if err != nil { + return fmt.Errorf("parsing 'amount' argument: %w", err) + } + + amount = abi.TokenAmount(f) + } else { + mact, err := env.(*node.Env).ChainAPI.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(env.(*node.Env).BlockStoreAPI))) + + mst, err := miner.Load(store, 
mact) + if err != nil { + return err + } + + amount, err = mst.FeeDebt() + if err != nil { + return err + } + + } + + fromAddr := mi.Worker + from, _ := req.Options["from"].(string) + if from != "" { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + fromAddr = addr + } + + fromID, err := env.(*node.Env).ChainAPI.StateLookupID(ctx, fromAddr, types.EmptyTSK) + if err != nil { + return err + } + + if !isController(mi, fromID) { + return fmt.Errorf("sender isn't a controller of miner: %s", fromID) + } + + smsg, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: fromID, + Value: amount, + Method: builtintypes.MethodsMiner.RepayDebt, + Params: nil, + }, nil) + if err != nil { + return err + } + + return re.Emit(fmt.Sprintf("Sent repay debt message %s", smsg.Cid())) + }, + Type: "", +} + +var actorSetOwnerCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "set-owner.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("miner-address", true, false, "Current miner address"), + cmds.StringArg("owner-address", true, false, "Owner address"), + }, + Options: []cmds.Option{ + cmds.BoolOption("really-do-it", "Actually send transaction performing the action").WithDefault(false), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if !req.Options["really-do-it"].(bool) { + return re.Emit("Pass --really-do-it to actually execute this action") + } + + ctx := req.Context + api := env.(*node.Env).ChainAPI + + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + na, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + sp, err := actors.SerializeParams(&newAddr) + if err != nil { + return 
fmt.Errorf("serializing params: %w", err) + } + + smsg, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: builtintypes.MethodsMiner.ChangeOwnerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return fmt.Errorf("mpool push: %w", err) + } + + cid := smsg.Cid() + _ = re.Emit("Propose Message CID: " + cid.String()) + + // wait for it to get mined into a block + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, cid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + _ = re.Emit(fmt.Sprintf("Propose owner change failed, exitcode: %d", wait.Receipt.ExitCode)) + return err + } + + smsg, err = env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, &types.Message{ + From: newAddr, + To: maddr, + Method: builtintypes.MethodsMiner.ChangeOwnerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return fmt.Errorf("mpool push: %w", err) + } + + cid = smsg.Cid() + _ = re.Emit("Approve Message CID: " + cid.String()) + + // wait for it to get mined into a block + wait, err = env.(*node.Env).ChainAPI.StateWaitMsg(ctx, cid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + _ = re.Emit(fmt.Sprintf("Approve owner change failed, exitcode: %d", wait.Receipt.ExitCode)) + return err + } + return re.Emit(fmt.Sprintf("Requested rewards withdrawal in message %s", smsg.Cid())) + }, + Type: "", +} + +var actorControl = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Manage control addresses.", + }, + Subcommands: map[string]*cmds.Command{ + "list": actorControlList, + "set": actorControlSet, + }, +} + +var actorControlList = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Get currently set control addresses.", + }, + Arguments: 
[]cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Options: []cmds.Option{ + cmds.BoolOption("verbose", "verbose").WithDefault(false), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + ctx := req.Context + api := env.(*node.Env).ChainAPI + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + tw := tablewriter.New( + tablewriter.Col("name"), + tablewriter.Col("ID"), + tablewriter.Col("key"), + tablewriter.Col("use"), + tablewriter.Col("balance"), + ) + + commit := map[address.Address]struct{}{} + precommit := map[address.Address]struct{}{} + post := map[address.Address]struct{}{} + + for _, ca := range mi.ControlAddresses { + post[ca] = struct{}{} + } + + printKey := func(name string, a address.Address) { + api := env.(*node.Env).ChainAPI + actor, err := api.StateGetActor(ctx, a, types.EmptyTSK) + if err != nil { + _ = re.Emit(fmt.Sprintf("get actor(%s) failed: %s", a, err)) + return + } + b := actor.Balance + + var k address.Address + // param 'a` maybe a 'robust', in that case, 'StateAccountKey' returns an error. + if builtin.IsAccountActor(actor.Code) { + if k, err = api.StateAccountKey(ctx, a, types.EmptyTSK); err != nil { + _ = re.Emit(fmt.Sprintf("%s %s: error getting account key: %s", name, a, err)) + return + } + } else { // if builtin.IsMultisigActor(actor.Code) + if k, err = api.StateLookupRobustAddress(ctx, a, types.EmptyTSK); err != nil { + _ = re.Emit(fmt.Sprintf("%s %s: error getting robust address: %s", name, a, err)) + return + } + } + kstr := k.String() + if !req.Options["verbose"].(bool) { + kstr = kstr[:9] + "..." 
+ } + + var uses []string + if a == mi.Worker { + uses = append(uses, "other") + } + if _, ok := post[a]; ok { + uses = append(uses, "post") + } + if _, ok := precommit[a]; ok { + uses = append(uses, "precommit") + } + if _, ok := commit[a]; ok { + uses = append(uses, "commit") + } + + tw.Write(map[string]interface{}{ + "name": name, + "ID": a, + "key": kstr, + "use": strings.Join(uses, " "), + "balance": types.FIL(b).String(), + }) + } + + printKey("owner", mi.Owner) + printKey("worker", mi.Worker) + for i, ca := range mi.ControlAddresses { + printKey(fmt.Sprintf("control-%d", i), ca) + } + + buf := new(bytes.Buffer) + if err := tw.Flush(buf); err != nil { + return err + } + + return re.Emit(buf) + }, +} + +var actorControlSet = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Set control address(-es).", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("miner-address", true, false, "Address of miner to show"), + }, + Options: []cmds.Option{ + cmds.BoolOption("really-do-it", "Actually send transaction performing the action").WithDefault(false), + cmds.StringsOption("addrs", "Control addresses"), + cmds.BoolOption("dump-bytes", "Dumps the bytes of the message that would propose this change").WithDefault(false), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + ctx := req.Context + api := env.(*node.Env).ChainAPI + + mi, err := env.(*node.Env).ChainAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + del := map[address.Address]struct{}{} + existing := map[address.Address]struct{}{} + for _, controlAddress := range mi.ControlAddresses { + ka, err := api.StateAccountKey(ctx, controlAddress, types.EmptyTSK) + if err != nil { + return err + } + + del[ka] = struct{}{} + existing[ka] = struct{}{} + } + + var toSet []address.Address + addrs, _ := req.Options["addrs"].([]string) + + for i, as := range 
addrs { + a, err := address.NewFromString(as) + if err != nil { + return fmt.Errorf("parsing address %d: %w", i, err) + } + + ka, err := api.StateAccountKey(ctx, a, types.EmptyTSK) + if err != nil { + return err + } + + // make sure the address exists on chain + _, err = api.StateLookupID(ctx, ka, types.EmptyTSK) + if err != nil { + return fmt.Errorf("looking up %s: %w", ka, err) + } + + delete(del, ka) + toSet = append(toSet, ka) + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + for a := range del { + writer.Println("Remove " + a.String()) + } + for _, a := range toSet { + if _, exists := existing[a]; !exists { + writer.Println("Add " + a.String()) + } + } + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: mi.Worker, + NewControlAddrs: toSet, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return fmt.Errorf("serializing params: %w", err) + } + + msg := &types.Message{ + From: mi.Owner, + To: maddr, + Method: builtintypes.MethodsMiner.ChangeWorkerAddress, + + Value: big.Zero(), + Params: sp, + } + + if ok, _ := req.Options["dump-bytes"].(bool); ok { + msg, err = env.(*node.Env).MessagePoolAPI.GasEstimateMessageGas(ctx, msg, nil, types.EmptyTSK) + if err != nil { + return err + } + + msgBytes, err := msg.Serialize() + if err != nil { + return err + } + + writer.Println("dump message bytes: ", hex.EncodeToString(msgBytes)) + return nil + } + + if !req.Options["really-do-it"].(bool) { + return re.Emit("Pass --really-do-it to actually execute this action") + } + + smsg, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return fmt.Errorf("mpool push: %w", err) + } + + writer.Println("Message CID: " + smsg.Cid().String()) + + return re.Emit(buf) + }, +} + +var actorProposeChangeWorker = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Propose a worker address change.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("miner-address", true, false, "Address of miner to show"), + 
cmds.StringArg("work-address", true, false, "Propose a worker address change"), + }, + Options: []cmds.Option{ + cmds.BoolOption("really-do-it", "Actually send transaction performing the action").WithDefault(false), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + na, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + ctx := req.Context + api := env.(*node.Env).ChainAPI + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + if mi.Worker == newAddr { + return fmt.Errorf("worker address already set to %s", na) + } + } else { + if mi.NewWorker == newAddr { + return fmt.Errorf("change to worker address %s already pending", na) + } + } + + if !req.Options["really-do-it"].(bool) { + return re.Emit("Pass --really-do-it to actually execute this action") + } + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: newAddr, + NewControlAddrs: mi.ControlAddresses, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return fmt.Errorf("serializing params: %w", err) + } + + smsg, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: builtintypes.MethodsMiner.ChangeWorkerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return fmt.Errorf("mpool push: %w", err) + } + + cid := smsg.Cid() + _ = re.Emit("Propose Message CID: " + cid.String()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, cid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + _ = re.Emit("Propose worker change failed!") + 
return err + } + + mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return err + } + if mi.NewWorker != newAddr { + return fmt.Errorf("proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker) + } + + _ = re.Emit(fmt.Sprintf("Worker key change to %s successfully proposed.", na)) + return re.Emit(fmt.Sprintf("Call 'confirm-change-worker' at or after height %d to complete.", mi.WorkerChangeEpoch)) + }, + Type: "", +} + +var actorConfirmChangeWorker = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Confirm a worker address change.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("miner-address", true, false, "Address of miner to show"), + cmds.StringArg("work-address", true, false, "Address of worker to show"), + }, + Options: []cmds.Option{ + cmds.BoolOption("really-do-it", "Actually send transaction performing the action").WithDefault(false), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + na, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + ctx := req.Context + api := env.(*node.Env).ChainAPI + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + if mi.Worker == newAddr { + return fmt.Errorf("worker address already set to %s", na) + } + } else { + if mi.NewWorker == newAddr { + return fmt.Errorf("change to worker address %s already pending", na) + } + } + + head, err := api.ChainHead(ctx) + if err != nil { + return fmt.Errorf("failed to get the chain head: %w", err) + } + + height := head.Height() + if height < mi.WorkerChangeEpoch { + return fmt.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, 
height) + } + + if !req.Options["really-do-it"].(bool) { + return re.Emit("Pass --really-do-it to actually execute this action") + } + + smsg, err := env.(*node.Env).MessagePoolAPI.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: builtintypes.MethodsMiner.ConfirmUpdateWorkerKey, + Value: big.Zero(), + }, nil) + if err != nil { + return fmt.Errorf("mpool push: %w", err) + } + + cid := smsg.Cid() + _ = re.Emit("Confirm Message CID: " + cid.String()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, cid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + _ = re.Emit("Worker change failed!") + return err + } + + mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return err + } + if mi.Worker != newAddr { + return fmt.Errorf("confirmed worker address change not reflected on chain: expected '%s', found '%s'", newAddr, mi.Worker) + } + + return re.Emit(fmt.Sprintf("Requested peerid change in message %s", smsg.Cid())) + }, + Type: "", +} diff --git a/cmd/miner_proving.go b/cmd/miner_proving.go new file mode 100644 index 0000000000..69dc70b57e --- /dev/null +++ b/cmd/miner_proving.go @@ -0,0 +1,357 @@ +package cmd + +import ( + "bytes" + "fmt" + "strconv" + "text/tabwriter" + + "github.com/filecoin-project/go-address" + cmds "github.com/ipfs/go-ipfs-cmds" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var minerProvingCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "View proving information.", + }, + Subcommands: map[string]*cmds.Command{ + "info": provingInfoCmd, + 
"deadlines": provingDeadlinesCmd, + "deadline": provingDeadlineInfoCmd, + "faults": provingFaultsCmd, + }, +} + +var provingInfoCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "View current state information.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + ctx := req.Context + + blockDelay, err := blockDelay(req) + if err != nil { + return err + } + + chainAPI := env.(*node.Env).ChainAPI + head, err := chainAPI.ChainHead(ctx) + if err != nil { + return fmt.Errorf("getting chain head: %v", err) + } + + mact, err := chainAPI.StateGetActor(ctx, maddr, head.Key()) + if err != nil { + return err + } + + stor := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(env.(*node.Env).BlockStoreAPI))) + + mas, err := miner.Load(stor, mact) + if err != nil { + return err + } + + cd, err := chainAPI.StateMinerProvingDeadline(ctx, maddr, head.Key()) + if err != nil { + return fmt.Errorf("getting miner info: %v", err) + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + writer.Printf("Miner: %s\n", maddr) + + proving := uint64(0) + faults := uint64(0) + recovering := uint64(0) + curDeadlineSectors := uint64(0) + + if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { + if bf, err := part.LiveSectors(); err != nil { + return err + } else if count, err := bf.Count(); err != nil { + return err + } else { + proving += count + if dlIdx == cd.Index { + curDeadlineSectors += count + } + } + + if bf, err := part.FaultySectors(); err != nil { + return err + } else if count, err := bf.Count(); err != nil { + return err + } else { + faults += count + } + + if bf, err := part.RecoveringSectors(); err != nil { + 
return err + } else if count, err := bf.Count(); err != nil { + return err + } else { + recovering += count + } + + return nil + }) + }); err != nil { + return fmt.Errorf("walking miner deadlines and partitions: %v", err) + } + + var faultPerc float64 + if proving > 0 { + faultPerc = float64(faults*10000/proving) / 100 + } + + writer.Printf("Current Epoch: %d\n", cd.CurrentEpoch) + + writer.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%cd.WPoStProvingPeriod) + writer.Printf("Proving Period Start: %s\n", EpochTime(cd.CurrentEpoch, cd.PeriodStart, blockDelay)) + writer.Printf("Next Period Start: %s\n", EpochTime(cd.CurrentEpoch, cd.PeriodStart+cd.WPoStProvingPeriod, blockDelay)) + + writer.Println() + writer.Printf("Faults: %d (%.2f%%)\n", faults, faultPerc) + writer.Printf("Recovering: %d\n", recovering) + + writer.Printf("Deadline Index: %d\n", cd.Index) + writer.Printf("Deadline Sectors: %d\n", curDeadlineSectors) + writer.Printf("Deadline Open: %s\n", EpochTime(cd.CurrentEpoch, cd.Open, blockDelay)) + writer.Printf("Deadline Close: %s\n", EpochTime(cd.CurrentEpoch, cd.Close, blockDelay)) + writer.Printf("Deadline Challenge: %s\n", EpochTime(cd.CurrentEpoch, cd.Challenge, blockDelay)) + writer.Printf("Deadline FaultCutoff: %s\n", EpochTime(cd.CurrentEpoch, cd.FaultCutoff, blockDelay)) + + return re.Emit(buf) + }, +} + +var provingDeadlinesCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "View the current proving period deadlines information.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + ctx := req.Context + api := env.(*node.Env).ChainAPI + + deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting deadlines: %w", err) + } + + di, err := 
api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting deadlines: %w", err) + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + writer.Printf("Miner: %s\n", maddr) + tw := tabwriter.NewWriter(buf, 2, 4, 2, ' ', 0) + _, _ = fmt.Fprintln(tw, "deadline\tpartitions\tsectors (faults)\tproven partitions") + + for dlIdx, deadline := range deadlines { + partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting partitions for deadline %d: %w", dlIdx, err) + } + + provenPartitions, err := deadline.PostSubmissions.Count() + if err != nil { + return err + } + + sectors := uint64(0) + faults := uint64(0) + + for _, partition := range partitions { + sc, err := partition.AllSectors.Count() + if err != nil { + return err + } + + sectors += sc + + fc, err := partition.FaultySectors.Count() + if err != nil { + return err + } + + faults += fc + } + + var cur string + if di.Index == uint64(dlIdx) { + cur += "\t(current)" + } + _, _ = fmt.Fprintf(tw, "%d\t%d\t%d (%d)\t%d%s\n", dlIdx, len(partitions), sectors, faults, provenPartitions, cur) + } + if err := tw.Flush(); err != nil { + return err + } + + return re.Emit(buf) + }, +} + +var provingDeadlineInfoCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "View the current proving period deadlines information.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + cmds.StringArg("index", true, false, "Index of deadline to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 2 { + return fmt.Errorf("must pass two parameters") + } + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + ctx := req.Context + api := env.(*node.Env).ChainAPI + + dlIdx, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return 
fmt.Errorf("could not parse deadline index: %w", err) + } + + deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting deadlines: %w", err) + } + + di, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting deadlines: %w", err) + } + + partitions, err := api.StateMinerPartitions(ctx, maddr, dlIdx, types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting partitions for deadline %d: %w", dlIdx, err) + } + + provenPartitions, err := deadlines[dlIdx].PostSubmissions.Count() + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + writer.Printf("Deadline Index: %d\n", dlIdx) + writer.Printf("Partitions: %d\n", len(partitions)) + writer.Printf("Proven Partitions: %d\n", provenPartitions) + writer.Printf("Current: %t\n\n", di.Index == dlIdx) + + for pIdx, partition := range partitions { + sectorCount, err := partition.AllSectors.Count() + if err != nil { + return err + } + + sectorNumbers, err := partition.AllSectors.All(sectorCount) + if err != nil { + return err + } + + faultsCount, err := partition.FaultySectors.Count() + if err != nil { + return err + } + + fn, err := partition.FaultySectors.All(faultsCount) + if err != nil { + return err + } + + writer.Printf("Partition Index: %d\n", pIdx) + writer.Printf("Sectors: %d\n", sectorCount) + writer.Printf("Sector Numbers: %v\n", sectorNumbers) + writer.Printf("Faults: %d\n", faultsCount) + writer.Printf("Faulty Sectors: %d\n", fn) + } + return re.Emit(buf) + }, +} + +var provingFaultsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "View the currently known proving faulty sectors information.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if 
err != nil { + return err + } + + ctx := req.Context + api := env.(*node.Env).ChainAPI + bstoreAPI := env.(*node.Env).BlockStoreAPI + stor := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(bstoreAPI))) + + mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + mas, err := miner.Load(stor, mact) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + writer.Printf("Miner: %s\n", maddr) + + tw := tabwriter.NewWriter(buf, 2, 4, 2, ' ', 0) + _, _ = fmt.Fprintln(tw, "deadline\tpartition\tsectors") + err = mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { + faults, err := part.FaultySectors() + if err != nil { + return err + } + return faults.ForEach(func(num uint64) error { + _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\n", dlIdx, partIdx, num) + return nil + }) + }) + }) + if err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + + return re.Emit(buf) + }, +} diff --git a/cmd/mpool.go b/cmd/mpool.go new file mode 100644 index 0000000000..f9ebb1a003 --- /dev/null +++ b/cmd/mpool.go @@ -0,0 +1,743 @@ +package cmd + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sort" + "strconv" + + stdbig "math/big" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + cmds "github.com/ipfs/go-ipfs-cmds" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/messagepool" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var mpoolCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Manage message pool", + }, + Subcommands: map[string]*cmds.Command{ + "pending": mpoolPending, + 
"clear": mpoolClear, + "sub": mpoolSub, + "stat": mpoolStat, + "replace": mpoolReplaceCmd, + "find": mpoolFindCmd, + "config": mpoolConfig, + "gas-perf": mpoolGasPerfCmd, + "publish": mpoolPublish, + "delete": mpoolDeleteAddress, + "select": mpoolSelect, + }, +} + +var mpoolDeleteAddress = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "delete", + ShortDescription: "delete message by address", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "optionally specify the wallet for publish message"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := context.TODO() + + from, _ := req.Options["from"].(string) + if from == "" { + return fmt.Errorf("address can`t be null") + } + + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + err = env.(*node.Env).MessagePoolAPI.MpoolDeleteByAdress(ctx, addr) + if err != nil { + return err + } + + return nil + }, +} + +var mpoolSelect = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "select", + ShortDescription: "select message from mpool", + }, + Options: []cmds.Option{ + cmds.FloatOption("quality", "optionally specify the wallet for publish message").WithDefault(float64(0.5)), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := context.TODO() + + quality, _ := req.Options["quality"].(float64) + head, err := env.(*node.Env).ChainAPI.ChainHead(ctx) + if err != nil { + return err + } + msgs, err := env.(*node.Env).MessagePoolAPI.MpoolSelect(ctx, head.Key(), quality) + if err != nil { + return err + } + selectMsg, err := json.MarshalIndent(msgs, " ", "\t") + if err != nil { + return err + } + + return printOneString(re, string(selectMsg)) + }, +} + +var mpoolPublish = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "publish", + ShortDescription: "publish pending messages", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "optionally specify the wallet for publish message"), + 
}, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + from, _ := req.Options["from"].(string) + + ctx := context.TODO() + + var fromAddr address.Address + if from == "" { + defaddr, err := env.(*node.Env).WalletAPI.WalletDefaultAddress(req.Context) + if err != nil { + return err + } + + fromAddr = defaddr + } else { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + fromAddr = addr + } + + err := env.(*node.Env).MessagePoolAPI.MpoolPublishByAddr(ctx, fromAddr) + if err != nil { + return err + } + + return nil + }, +} + +var mpoolFindCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "find", + ShortDescription: "find a message in the mempool", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "search for messages with given 'from' address"), + cmds.StringOption("to", "search for messages with given 'to' address"), + cmds.Int64Option("method", "search for messages with given method"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + from, _ := req.Options["from"].(string) + to, _ := req.Options["to"].(string) + method, _ := req.Options["method"].(int64) + + ctx := context.TODO() + pending, err := env.(*node.Env).MessagePoolAPI.MpoolPending(ctx, types.TipSetKey{}) + if err != nil { + return err + } + + var toFilter, fromFilter address.Address + if len(to) > 0 { + a, err := address.NewFromString(to) + if err != nil { + return fmt.Errorf("'to' address was invalid: %w", err) + } + + toFilter = a + } + + if len(from) > 0 { + a, err := address.NewFromString(from) + if err != nil { + return fmt.Errorf("'from' address was invalid: %w", err) + } + + fromFilter = a + } + + var methodFilter *abi.MethodNum + if method > 0 { + m := abi.MethodNum(method) + methodFilter = &m + } + + var out []*types.SignedMessage + for _, m := range pending { + if toFilter != address.Undef && m.Message.To != toFilter { + continue + } + + if fromFilter != address.Undef && 
m.Message.From != fromFilter { + continue + } + + if methodFilter != nil && *methodFilter != m.Message.Method { + continue + } + + out = append(out, m) + } + + _ = re.Emit(out) + return nil + }, +} + +var mpoolReplaceCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "replace", + ShortDescription: "replace a message in the mempool", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("from", false, true, "from"), + cmds.StringArg("nonce", false, true, "nonce"), + cmds.StringArg("message-cid", false, true, "message-cid"), + }, + Options: []cmds.Option{ + feecapOption, + premiumOption, + limitOption, + cmds.BoolOption("auto", "automatically reprice the specified message"), + cmds.StringOption("max-fee", "Spend up to X FIL for this message (applicable for auto mode)"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + feecap, premium, gasLimit, err := parseGasOptions(req) + if err != nil { + return err + } + + auto, _ := req.Options["auto"].(bool) + maxFee, _ := req.Options["max-fee"].(string) + + var from address.Address + var nonce uint64 + switch len(req.Arguments) { + case 1: + mcid, err := cid.Decode(req.Arguments[0]) + if err != nil { + return err + } + + msg, err := env.(*node.Env).ChainAPI.ChainGetMessage(req.Context, mcid) + if err != nil { + return fmt.Errorf("could not find referenced message: %w", err) + } + + from = msg.From + nonce = msg.Nonce + case 2: + f, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + n, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + from = f + nonce = n + default: + return errors.New("command syntax error") + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return fmt.Errorf("getting chain head: %w", err) + } + + pending, err := env.(*node.Env).MessagePoolAPI.MpoolPending(req.Context, ts.Key()) + if err != nil { + return err + } + + var found *types.SignedMessage + 
for _, p := range pending { + if p.Message.From == from && p.Message.Nonce == nonce { + found = p + break + } + } + + if found == nil { + return fmt.Errorf("no pending message found from %s with nonce %d", from, nonce) + } + + msg := found.Message + + if auto { + minRBF := messagepool.ComputeMinRBF(msg.GasPremium) + + var mss *types.MessageSendSpec + if len(maxFee) > 0 { + maxFee, err := big.FromString(maxFee) + if err != nil { + return fmt.Errorf("parsing max-spend: %w", err) + } + mss = &types.MessageSendSpec{ + MaxFee: maxFee, + } + } + + // msg.GasLimit = 0 // TODO: need to fix the way we estimate gas limits to account for the messages already being in the mempool + msg.GasFeeCap = abi.NewTokenAmount(0) + msg.GasPremium = abi.NewTokenAmount(0) + retm, err := env.(*node.Env).MessagePoolAPI.GasEstimateMessageGas(req.Context, &msg, mss, types.TipSetKey{}) + if err != nil { + return fmt.Errorf("failed to estimate gas values: %w", err) + } + + msg.GasPremium = big.Max(retm.GasPremium, minRBF) + msg.GasFeeCap = big.Max(retm.GasFeeCap, msg.GasPremium) + + mff := func() (abi.TokenAmount, error) { + return abi.TokenAmount{Int: config.DefaultDefaultMaxFee.Int}, nil + } + + messagepool.CapGasFee(mff, &msg, mss) + } else { + msg.GasFeeCap = abi.NewTokenAmount(0) + msg.GasPremium = abi.NewTokenAmount(0) + newMsg, err := env.(*node.Env).MessagePoolAPI.GasEstimateMessageGas(req.Context, &msg, nil, types.TipSetKey{}) + if err != nil { + return fmt.Errorf("failed to estimate gas values: %w", err) + } + + msg = *newMsg + if gasLimit > 0 { + msg.GasLimit = gasLimit + } + + if err == nil && premium.Int64() != 0 { + msg.GasPremium = premium + } + + // TODO: estimate fee cap here + msg.GasFeeCap = feecap + } + + smsg, err := env.(*node.Env).WalletAPI.WalletSignMessage(req.Context, msg.From, &msg) + if err != nil { + return fmt.Errorf("failed to sign message: %w", err) + } + + cid, err := env.(*node.Env).MessagePoolAPI.MpoolPush(req.Context, smsg) + if err != nil { + return 
fmt.Errorf("failed to push new message to mempool: %w", err) + } + + _ = re.Emit(fmt.Sprintf("new message cid: %s", cid)) + return nil + }, +} + +var mpoolStat = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "print mpool state messages", + ShortDescription: ` +Get pending messages. +`, + }, + Options: []cmds.Option{ + cmds.BoolOption("local", "print stats for addresses in local wallet only"), + cmds.Int64Option("basefee-lookback", "number of blocks to look back for minimum basefee"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + local, _ := req.Options["local"].(bool) + basefee, _ := req.Options["basefee-lookback"].(int) + + ctx := context.TODO() + ts, err := env.(*node.Env).ChainAPI.ChainHead(ctx) + if err != nil { + return fmt.Errorf("getting chain head: %w", err) + } + currBF := ts.Blocks()[0].ParentBaseFee + minBF := currBF + { + currTS := ts + for i := 0; i < basefee; i++ { + key := currTS.Parents() + currTS, err = env.(*node.Env).ChainAPI.ChainGetTipSet(req.Context, key) + if err != nil { + return fmt.Errorf("walking chain: %w", err) + } + if newBF := currTS.Blocks()[0].ParentBaseFee; newBF.LessThan(minBF) { + minBF = newBF + } + } + } + + var filter map[address.Address]struct{} + if local { + filter = map[address.Address]struct{}{} + + addrss := env.(*node.Env).WalletAPI.WalletAddresses(req.Context) + + for _, a := range addrss { + filter[a] = struct{}{} + } + } + + msgs, err := env.(*node.Env).MessagePoolAPI.MpoolPending(ctx, types.TipSetKey{}) + if err != nil { + return err + } + + type statBucket struct { + msgs map[uint64]*types.SignedMessage + } + type mpStat struct { + addr string + past, cur, future uint64 + belowCurr, belowPast uint64 + gasLimit big.Int + } + + buckets := map[address.Address]*statBucket{} + for _, v := range msgs { + if filter != nil { + if _, has := filter[v.Message.From]; !has { + continue + } + } + + bkt, ok := buckets[v.Message.From] + if !ok { + bkt = &statBucket{ + msgs: 
map[uint64]*types.SignedMessage{}, + } + buckets[v.Message.From] = bkt + } + + bkt.msgs[v.Message.Nonce] = v + } + + var out []mpStat + + for a, bkt := range buckets { + act, err := env.(*node.Env).ChainAPI.StateGetActor(ctx, a, ts.Key()) + if err != nil { + fmt.Printf("%s, err: %s\n", a, err) + continue + } + + cur := act.Nonce + for { + _, ok := bkt.msgs[cur] + if !ok { + break + } + cur++ + } + + var s mpStat + s.addr = a.String() + s.gasLimit = big.Zero() + + for _, m := range bkt.msgs { + if m.Message.Nonce < act.Nonce { + s.past++ + } else if m.Message.Nonce > cur { + s.future++ + } else { + s.cur++ + } + + if m.Message.GasFeeCap.LessThan(currBF) { + s.belowCurr++ + } + if m.Message.GasFeeCap.LessThan(minBF) { + s.belowPast++ + } + + s.gasLimit = big.Add(s.gasLimit, big.NewInt(m.Message.GasLimit)) + } + + out = append(out, s) + } + + sort.Slice(out, func(i, j int) bool { + return out[i].addr < out[j].addr + }) + + var total mpStat + total.gasLimit = big.Zero() + + for _, stat := range out { + total.past += stat.past + total.cur += stat.cur + total.future += stat.future + total.belowCurr += stat.belowCurr + total.belowPast += stat.belowPast + total.gasLimit = big.Add(total.gasLimit, stat.gasLimit) + + _ = re.Emit(fmt.Sprintf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, basefee, stat.belowPast, stat.gasLimit)) + } + + _ = re.Emit("-----") + _ = re.Emit(fmt.Sprintf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s", total.past, total.cur, total.future, total.belowCurr, basefee, total.belowPast, total.gasLimit)) + + return nil + }, +} + +var mpoolPending = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Get pending messages", + ShortDescription: ` +Get pending messages. 
+`, + }, + Options: []cmds.Option{ + cmds.BoolOption("local", "print pending messages for addresses in local wallet only"), + cmds.BoolOption("cids", "only print cids of messages in output"), + cmds.StringOption("to", "return messages to a given address"), + cmds.StringOption("from", "return messages from a given address"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + local, _ := req.Options["local"].(bool) + cids, _ := req.Options["cids"].(bool) + to, _ := req.Options["to"].(string) + from, _ := req.Options["from"].(string) + + var toa, froma address.Address + if to != "" { + a, err := address.NewFromString(to) + if err != nil { + return fmt.Errorf("given 'to' address %q was invalid: %w", to, err) + } + toa = a + } + + if from != "" { + a, err := address.NewFromString(from) + if err != nil { + return fmt.Errorf("given 'to' address %q was invalid: %w", from, err) + } + froma = a + } + + var filter map[address.Address]struct{} + if local { + filter = map[address.Address]struct{}{} + + addrss := env.(*node.Env).WalletAPI.WalletAddresses(req.Context) + for _, a := range addrss { + filter[a] = struct{}{} + } + } + + msgs, err := env.(*node.Env).MessagePoolAPI.MpoolPending(req.Context, types.TipSetKey{}) + if err != nil { + return err + } + for _, msg := range msgs { + if filter != nil { + if _, has := filter[msg.Message.From]; !has { + continue + } + } + + if toa != address.Undef && msg.Message.To != toa { + continue + } + if froma != address.Undef && msg.Message.From != froma { + continue + } + + if cids { + _ = re.Emit(msg.Cid()) + _ = re.Emit(err) + } else { + _ = re.Emit(msg) + } + } + + return nil + }, +} + +var mpoolClear = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "clear", + ShortDescription: ` +Clear all pending messages from the mpool (USE WITH CARE) +`, + }, + Options: []cmds.Option{ + cmds.BoolOption("local", "also clear local messages"), + cmds.BoolOption("really-do-it", "must be specified for the 
action to take effect"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + local, _ := req.Options["local"].(bool) + really, _ := req.Options["really-do-it"].(bool) + + if !really { + //nolint:golint + return fmt.Errorf("--really-do-it must be specified for this action to have an effect; you have been warned") + } + + return env.(*node.Env).MessagePoolAPI.MpoolClear(context.TODO(), local) + }, +} + +var mpoolSub = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "sub", + ShortDescription: ` +Subscribe to mpool changes +`, + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := context.TODO() + sub, err := env.(*node.Env).MessagePoolAPI.MpoolSub(ctx) + if err != nil { + return err + } + + for { + select { + case update := <-sub: + _ = re.Emit(update) + case <-ctx.Done(): + return nil + } + } + }, +} + +var mpoolConfig = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "config", + ShortDescription: "get or set current mpool configuration", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cfg", false, false, "config"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := context.TODO() + + if len(req.Arguments) > 0 { + cfg := new(types.MpoolConfig) + + paras := req.Arguments[0] + err := json.Unmarshal([]byte(paras), cfg) + if err != nil { + return err + } + + return env.(*node.Env).MessagePoolAPI.MpoolSetConfig(ctx, cfg) + } + + cfg, err := env.(*node.Env).MessagePoolAPI.MpoolGetConfig(ctx) + if err != nil { + return err + } + _ = re.Emit(cfg) + + return nil + }, +} + +var mpoolGasPerfCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "gas-perf", + ShortDescription: ` +Check gas performance of messages in mempool +`, + }, + Options: []cmds.Option{ + cmds.BoolOption("all", "print gas performance for all mempool messages (default only prints for local)"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env 
cmds.Environment) error { + all, _ := req.Options["all"].(bool) + + ctx := context.TODO() + + msgs, err := env.(*node.Env).MessagePoolAPI.MpoolPending(ctx, types.TipSetKey{}) + if err != nil { + return err + } + + var filter map[address.Address]struct{} + if !all { + filter = map[address.Address]struct{}{} + + addrss := env.(*node.Env).WalletAPI.WalletAddresses(req.Context) + + for _, a := range addrss { + filter[a] = struct{}{} + } + + var filtered []*types.SignedMessage + for _, msg := range msgs { + if _, has := filter[msg.Message.From]; !has { + continue + } + filtered = append(filtered, msg) + } + msgs = filtered + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(ctx) + if err != nil { + return fmt.Errorf("failed to get chain head: %w", err) + } + + baseFee := ts.Blocks()[0].ParentBaseFee + + bigBlockGasLimit := big.NewInt(constants.BlockGasLimit) + + getGasReward := func(msg *types.SignedMessage) big.Int { + maxPremium := big.Sub(msg.Message.GasFeeCap, baseFee) + if big.Cmp(maxPremium, msg.Message.GasPremium) < 0 { + maxPremium = msg.Message.GasPremium + } + return big.Mul(maxPremium, big.NewInt(msg.Message.GasLimit)) + } + + getGasPerf := func(gasReward big.Int, gasLimit int64) float64 { + // gasPerf = gasReward * constants.BlockGasLimit / gasLimit + a := new(stdbig.Rat).SetInt(new(stdbig.Int).Mul(gasReward.Int, bigBlockGasLimit.Int)) + b := stdbig.NewRat(1, gasLimit) + c := new(stdbig.Rat).Mul(a, b) + r, _ := c.Float64() + return r + } + + for _, m := range msgs { + gasReward := getGasReward(m) + gasPerf := getGasPerf(gasReward, m.Message.GasLimit) + + _ = re.Emit(fmt.Sprintf("%s %d %s %f", m.Message.From, m.Message.Nonce, gasReward, gasPerf)) + } + + return nil + }, +} diff --git a/cmd/multisig.go b/cmd/multisig.go new file mode 100644 index 0000000000..f7754d5e13 --- /dev/null +++ b/cmd/multisig.go @@ -0,0 +1,1431 @@ +package cmd + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + 
"text/tabwriter" + + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + msig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/multisig" + "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/utils" + "github.com/ipfs/go-cid" + cmds "github.com/ipfs/go-ipfs-cmds" + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" +) + +var multisigCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Interact with a multisig wallet", + }, + Options: []cmds.Option{ + cmds.Uint64Option("number of block confirmations to wait for").WithDefault(constants.MessageConfidence), + }, + Subcommands: map[string]*cmds.Command{ + "create": msigCreateCmd, + "inspect": msigInspectCmd, + "propose": msigProposeCmd, + "propose-remove": msigRemoveProposeCmd, + "approve": msigApproveCmd, + "add-propose": msigAddProposeCmd, + "add-approve": msigAddApproveCmd, + "cancel": msigCancelCmd, + "add-cancel": msigAddCancelCmd, + "swap-propose": msigSwapProposeCmd, + "swap-approve": msigSwapApproveCmd, + "swap-cancel": msigSwapCancelCmd, + "lock-propose": msigLockProposeCmd, + "lock-approve": msigLockApproveCmd, + "lock-cancel": msigLockCancelCmd, + "vested": msigVestedCmd, + "propose-threshold": msigProposeThresholdCmd, + }, +} + +var msigCreateCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Create a new multisig wallet", 
+ Usage: "[address1 address2 ...]", + }, + Options: []cmds.Option{ + cmds.Uint64Option("required", "number of required approvals (uses number of signers provided if omitted)").WithDefault(uint64(0)), + cmds.StringOption("value", "initial funds to give to multisig").WithDefault("0"), + cmds.Uint64Option("duration", "length of the period over which funds unlock").WithDefault(uint64(0)), + cmds.StringOption("from", "account to send the create message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("addresses", true, false, "approving addresses,Ps:'addr1 addr2 ...'"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) < 1 { + return fmt.Errorf("multisigs must have at least one signer") + } + addrStr := req.Arguments[0] + addrArr := strings.Split(addrStr, ",") + var addrs []address.Address + for _, a := range addrArr { + addr, err := address.NewFromString(a) + if err != nil { + return err + } + addrs = append(addrs, addr) + } + + // get the address we're going to use to create the multisig (can be one of the above, as long as they have funds) + var sendAddr address.Address + send := reqStringOption(req, "from") + if send == "" { + defaddr, err := env.(*node.Env).WalletAPI.WalletDefaultAddress(req.Context) + if err != nil { + return err + } + sendAddr = defaddr + } else { + addr, err := address.NewFromString(send) + if err != nil { + return err + } + sendAddr = addr + } + val := reqStringOption(req, "value") + filval, err := types.ParseFIL(val) + if err != nil { + return err + } + intVal := types.BigInt(filval) + + required := reqUint64Option(req, "required") + + duration := reqUint64Option(req, "duration") + d := abi.ChainEpoch(duration) + gp := types.NewInt(1) + + msgCid, err := env.(*node.Env).MultiSigAPI.MsigCreate(req.Context, required, addrs, d, intVal, sendAddr, gp) + if err != nil { + return err + } + // wait for it to get mined into a block + wait, err := 
env.(*node.Env).ChainAPI.StateWaitMsg(req.Context, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + return err + } + // get address of newly created miner + var execreturn init2.ExecReturn + if err := execreturn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return err + } + // TODO: maybe register this somewhere + return re.Emit(fmt.Sprintf("Created new multisig: %s %s", execreturn.IDAddress, execreturn.RobustAddress)) + }, +} + +var msigInspectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Inspect a multisig wallet", + Usage: "[address]", + }, + Options: []cmds.Option{ + cmds.BoolOption("vesting", "Include vesting details)"), + cmds.BoolOption("decode-params", "Decode parameters of transaction proposals"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "a multiSig wallet address"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) == 0 { + return fmt.Errorf("must specify address of multisig to inspect") + } + ctx := req.Context + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(env.(*node.Env).BlockStoreAPI))) + // store := env.(*node.Env).ChainAPI.ChainReader.Store(req.Context) + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + if err := utils.LoadBuiltinActors(ctx, env.(*node.Env).ChainAPI); err != nil { + return err + } + + head, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + act, err := env.(*node.Env).ChainAPI.StateGetActor(req.Context, maddr, head.Key()) + if err != nil { + return err + } + + ownId, err := env.(*node.Env).ChainAPI.StateLookupID(req.Context, maddr, types.EmptyTSK) //nolint + if err != nil { + return err + } + mstate, err := multisig.Load(store, act) + if err != nil { + return err + } + 
locked, err := mstate.LockedBalance(head.Height()) + if err != nil { + return err + } + cliw := new(bytes.Buffer) + fmt.Fprintf(cliw, "Balance: %s\n", types.FIL(act.Balance)) + fmt.Fprintf(cliw, "Spendable: %s\n", types.FIL(types.BigSub(act.Balance, locked))) + + vesting := reqBoolOption(req, "vesting") + if vesting { + ib, err := mstate.InitialBalance() + if err != nil { + return err + } + fmt.Fprintf(cliw, "InitialBalance: %s\n", types.FIL(ib)) + se, err := mstate.StartEpoch() + if err != nil { + return err + } + fmt.Fprintf(cliw, "StartEpoch: %d\n", se) + ud, err := mstate.UnlockDuration() + if err != nil { + return err + } + fmt.Fprintf(cliw, "UnlockDuration: %d\n", ud) + } + + signers, err := mstate.Signers() + if err != nil { + return err + } + threshold, err := mstate.Threshold() + if err != nil { + return err + } + fmt.Fprintf(cliw, "Threshold: %d / %d\n", threshold, len(signers)) + fmt.Fprintln(cliw, "Signers:") + + signerTable := tabwriter.NewWriter(cliw, 8, 4, 2, ' ', 0) + fmt.Fprintf(signerTable, "ID\tAddress\n") + for _, s := range signers { + signerActor, err := env.(*node.Env).ChainAPI.StateAccountKey(req.Context, s, types.EmptyTSK) + if err != nil { + fmt.Fprintf(signerTable, "%s\t%s\n", s, "N/A") + } else { + fmt.Fprintf(signerTable, "%s\t%s\n", s, signerActor) + } + } + if err := signerTable.Flush(); err != nil { + return fmt.Errorf("flushing output: %+v", err) + } + + pending := make(map[int64]multisig.Transaction) + if err := mstate.ForEachPendingTxn(func(id int64, txn multisig.Transaction) error { + pending[id] = txn + return nil + }); err != nil { + return fmt.Errorf("reading pending transactions: %w", err) + } + + decParams := reqBoolOption(req, "decode-params") + fmt.Fprintln(cliw, "Transactions: ", len(pending)) + if len(pending) > 0 { + var txids []int64 + for txid := range pending { + txids = append(txids, txid) + } + sort.Slice(txids, func(i, j int) bool { + return txids[i] < txids[j] + }) + + w := tabwriter.NewWriter(cliw, 8, 4, 2, ' ', 
0) + fmt.Fprintf(w, "ID\tState\tApprovals\tTo\tValue\tMethod\tParams\n") + for _, txid := range txids { + tx := pending[txid] + target := tx.To.String() + if tx.To == ownId { + target += " (self)" + } + targAct, err := env.(*node.Env).ChainAPI.StateGetActor(req.Context, tx.To, types.EmptyTSK) + paramStr := fmt.Sprintf("%x", tx.Params) + + if err != nil { + if tx.Method == 0 { + fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "Send", tx.Method, paramStr) + } else { + fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "new account, unknown method", tx.Method, paramStr) + } + } else { + method := utils.MethodsMap[targAct.Code][tx.Method] + + if decParams && tx.Method != 0 { + ptyp := reflect.New(method.Params.Elem()).Interface().(cbg.CBORUnmarshaler) + if err := ptyp.UnmarshalCBOR(bytes.NewReader(tx.Params)); err != nil { + return fmt.Errorf("failed to decode parameters of transaction %d: %w", txid, err) + } + + b, err := json.Marshal(ptyp) + if err != nil { + return fmt.Errorf("could not json marshal parameter type: %w", err) + } + paramStr = string(b) + } + fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), method.Name, tx.Method, paramStr) + } + } + if err := w.Flush(); err != nil { + return fmt.Errorf("flushing output: %+v", err) + } + } + return re.Emit(cliw) + }, +} + +var msigProposeCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Propose a multisig transaction", + Usage: "[multisigAddress destinationAddress value (optional)]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the propose message from)"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "a multisig address which contains from"), + cmds.StringArg("destinationAddress", true, false, "recipient address"), + cmds.StringArg("value", true, 
false, "value to transfer"), + cmds.StringArg("methodId", false, false, "method to call in the proposed message"), + cmds.StringArg("methodParams", false, false, "params to include in the proposed message"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + reqLen := len(req.Arguments) + if reqLen < 3 { + return fmt.Errorf("must pass at least multisig address, destination, and value") + } + if reqLen > 3 && reqLen != 5 { + return fmt.Errorf("must either pass three or five arguments") + } + + ctx := req.Context + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + dest, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + value, err := types.ParseFIL(req.Arguments[2]) + if err != nil { + return err + } + + var method uint64 + var params []byte + if reqLen == 5 { + m, err := strconv.ParseUint(req.Arguments[3], 10, 64) + if err != nil { + return err + } + method = m + + p, err := hex.DecodeString(req.Arguments[4]) + if err != nil { + return err + } + params = p + } + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + + act, err := env.(*node.Env).ChainAPI.StateGetActor(ctx, msig, types.EmptyTSK) + if err != nil { + return fmt.Errorf("failed to look up multisig %s: %w", msig, err) + } + + if !builtin.IsMultisigActor(act.Code) { + return fmt.Errorf("actor %s is not a multisig actor", msig) + } + + msgCid, err := env.(*node.Env).MultiSigAPI.MsigPropose(ctx, msig, dest, types.BigInt(value), from, method, params) + if err != nil { + return err + } + buf := new(bytes.Buffer) + + fmt.Fprintln(buf, "send proposal in message: ", msgCid) + confidence := reqConfidence(req) + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, confidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("proposal returned exit %d", wait.Receipt.ExitCode) + } + + var 
retval msig2.ProposeReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal propose return value: %w", err) + } + fmt.Fprintf(buf, "Transaction ID: %d\n", retval.TxnID) + + if retval.Applied { + fmt.Fprintf(buf, "Transaction was executed during propose\n") + fmt.Fprintf(buf, "Exit Code: %d\n", retval.Code) + fmt.Fprintf(buf, "Return Value: %x\n", retval.Ret) + } + return re.Emit(buf) + }, +} + +var msigRemoveProposeCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Propose to remove a signer", + Usage: "[multisigAddress signer]", + }, + Options: []cmds.Option{ + cmds.BoolOption("decrease-threshold", "whether the number of required signers should be decreased").WithDefault(false), + cmds.StringOption("from", "account to send the propose message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("signer", true, false, "a wallet address of the multisig"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 2 { + return fmt.Errorf("must pass multisig address and signer address") + } + ctx := ReqContext(req.Context) + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + addr, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + dt := reqBoolOption(req, "decrease-threshold") + msgCid, err := env.(*node.Env).MultiSigAPI.MsigRemoveSigner(ctx, msig, from, addr, dt) + if err != nil { + return err + } + + fmt.Println("sent remove proposal in message: ", msgCid) + confidence := reqConfidence(req) + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, confidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("add 
proposal returned exit %d", wait.Receipt.ExitCode) + } + + var ret multisig.ProposeReturn + err = ret.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)) + if err != nil { + return fmt.Errorf("decoding proposal return: %w", err) + } + cliw := new(bytes.Buffer) + fmt.Fprintf(cliw, "sent remove singer proposal in message: %s\n", msgCid) + fmt.Fprintf(cliw, "TxnID: %d\n", ret.TxnID) + return re.Emit(cliw) + }, +} + +var msigApproveCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Approve a multisig message", + Usage: " [proposerAddress destination value [methodId methodParams]]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the approve message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("messageId", true, false, "proposed transaction ID"), + cmds.StringArg("proposerAddress", false, false, "proposer address"), + cmds.StringArg("destination", false, false, "recipient address"), + cmds.StringArg("value", false, false, "value to transfer"), + cmds.StringArg("methodId", false, false, "method to call in the proposed message"), + cmds.StringArg("methodParams", false, false, "params to include in the proposed message"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + argLen := len(req.Arguments) + if argLen < 2 { + return fmt.Errorf("must pass at least multisig address and message ID") + } + + if argLen > 2 && argLen < 5 { + return fmt.Errorf("usage: msig approve ") + } + + if argLen > 5 && argLen != 7 { + return fmt.Errorf("usage: msig approve [ ]") + } + + ctx := ReqContext(req.Context) + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + var msgCid cid.Cid + if argLen == 2 { + msgCid, err = 
env.(*node.Env).MultiSigAPI.MsigApprove(ctx, msig, txid, from) + if err != nil { + return err + } + } else { + proposer, err := address.NewFromString(req.Arguments[2]) + if err != nil { + return err + } + + if proposer.Protocol() != address.ID { + proposer, err = env.(*node.Env).ChainAPI.StateLookupID(ctx, proposer, types.EmptyTSK) + if err != nil { + return err + } + } + + dest, err := address.NewFromString(req.Arguments[3]) + if err != nil { + return err + } + + value, err := types.ParseFIL(req.Arguments[4]) + if err != nil { + return err + } + + var method uint64 + var params []byte + if argLen == 7 { + m, err := strconv.ParseUint(req.Arguments[5], 10, 64) + if err != nil { + return err + } + method = m + + p, err := hex.DecodeString(req.Arguments[6]) + if err != nil { + return err + } + params = p + } + + msgCid, err = env.(*node.Env).MultiSigAPI.MsigApproveTxnHash(ctx, msig, txid, proposer, dest, types.BigInt(value), from, method, params) + if err != nil { + return err + } + } + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("approve returned exit %d", wait.Receipt.ExitCode) + } + return re.Emit(fmt.Sprintf("sent approval in message: %s", msgCid)) + }, +} + +var msigAddProposeCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Propose to add a signer", + Usage: "[multisigAddress signer]", + }, + Options: []cmds.Option{ + cmds.BoolOption("increase-threshold", "whether the number of required signers should be increased").WithDefault(false), + cmds.StringOption("from", "account to send the propose message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("signer", true, false, "a wallet address of the multisig"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if 
len(req.Arguments) != 2 { + return fmt.Errorf("must pass multisig address and signer address") + } + ctx := ReqContext(req.Context) + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + addr, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(env.(*node.Env).BlockStoreAPI))) + + head, err := env.(*node.Env).ChainAPI.ChainHead(ctx) + if err != nil { + return err + } + + act, err := env.(*node.Env).ChainAPI.StateGetActor(ctx, msig, head.Key()) + if err != nil { + return err + } + + mstate, err := multisig.Load(store, act) + if err != nil { + return err + } + + signers, err := mstate.Signers() + if err != nil { + return err + } + + addrID, err := env.(*node.Env).ChainAPI.StateLookupID(ctx, addr, types.EmptyTSK) + if err != nil { + return err + } + + for _, s := range signers { + if s == addrID { + return fmt.Errorf("%s is already a signer", addr.String()) + } + } + + msgCid, err := env.(*node.Env).MultiSigAPI.MsigAddPropose(ctx, msig, from, addr, reqBoolOption(req, "increase-threshold")) + if err != nil { + return err + } + + confidence := reqConfidence(req) + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, confidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("add proposal returned exit %d", wait.Receipt.ExitCode) + } + + var ret multisig.ProposeReturn + err = ret.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)) + if err != nil { + return fmt.Errorf("decoding proposal return: %w", err) + } + cliw := new(bytes.Buffer) + fmt.Fprintf(cliw, "sent add singer proposal in message: %s\n", msgCid) + fmt.Fprintf(cliw, "TxnID: %d\n", ret.TxnID) + return re.Emit(cliw) + }, +} + +var msigAddApproveCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: 
"Approve a message to add a signer", + Usage: "[multisigAddress proposerAddress txId newAddress increaseThreshold]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the approve message from)"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("proposerAddress", true, false, "sender address of the approve msg"), + cmds.StringArg("txId", true, false, "proposed message ID"), + cmds.StringArg("newAddress", true, false, "new signer"), + cmds.StringArg("increaseThreshold", true, false, "whether the number of required signers should be increased"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 5 { + return fmt.Errorf("must pass multisig address, proposer address, transaction id, new signer address, whether to increase threshold") + } + ctx := ReqContext(req.Context) + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + prop, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(req.Arguments[2], 10, 64) + if err != nil { + return err + } + + newAdd, err := address.NewFromString(req.Arguments[3]) + if err != nil { + return err + } + + inc, err := strconv.ParseBool(req.Arguments[4]) + if err != nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + msgCid, err := env.(*node.Env).MultiSigAPI.MsigAddApprove(ctx, msig, from, txid, prop, newAdd, inc) + if err != nil { + return err + } + + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("add approval returned exit %d", wait.Receipt.ExitCode) + } + + return re.Emit(fmt.Sprintf("sent add approval in message: %s", msgCid)) + }, +} + +var msigAddCancelCmd 
= &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Cancel a message to add a signer", + Usage: "[multisigAddress txId newAddress increaseThreshold]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the approve message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("txId", true, false, "proposed message ID"), + cmds.StringArg("newAddress", true, false, "new signer"), + cmds.StringArg("increaseThreshold", true, false, "whether the number of required signers should be increased"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 4 { + return fmt.Errorf("must pass multisig address, transaction id, new signer address, whether to increase threshold") + } + ctx := ReqContext(req.Context) + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + newAdd, err := address.NewFromString(req.Arguments[2]) + if err != nil { + return err + } + + inc, err := strconv.ParseBool(req.Arguments[3]) + if err != nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + + msgCid, err := env.(*node.Env).MultiSigAPI.MsigAddCancel(ctx, msig, from, txid, newAdd, inc) + if err != nil { + return err + } + + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("add cancellation returned exit %d", wait.Receipt.ExitCode) + } + return re.Emit(fmt.Sprintf("sent add cancellation in message: %s", msgCid)) + }, +} + +var msigCancelCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Cancel a multisig message", + Usage: " [destination value [methodId methodParams]]", + }, + Options: 
[]cmds.Option{ + cmds.StringOption("from", "account to send the propose message from)"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", false, false, "a multisig address which contains from"), + cmds.StringArg("txId", false, false, "msig transaction id"), + cmds.StringArg("destinationAddress", false, false, "recipient address"), + cmds.StringArg("value", false, false, "value to transfer"), + cmds.StringArg("methodId", false, false, "method to call in the proposed message"), + cmds.StringArg("methodParams", false, false, "params to include in the proposed message"), + }, + Run: func(req *cmds.Request, emitter cmds.ResponseEmitter, env cmds.Environment) error { + argLen := len(req.Arguments) + if argLen < 2 { + return fmt.Errorf("must pass at least multisig address and message ID") + } + + if argLen > 2 && argLen < 4 { + return fmt.Errorf("usage: msig cancel ") + } + + if argLen > 4 && argLen < 6 { + return fmt.Errorf("usage: msig cancel [ ]") + } + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + + api := env.(*node.Env) + ctx := ReqContext(req.Context) + var msgCid cid.Cid + if argLen == 2 { + if msgCid, err = api.MultiSigAPI.MsigCancel(ctx, msig, txid, from); err != nil { + return err + } + } else { + dest, err := address.NewFromString(req.Arguments[2]) + if err != nil { + return err + } + + value, err := types.ParseFIL(req.Arguments[3]) + if err != nil { + return err + } + + var method uint64 + var params []byte + if argLen == 6 { + if method, err = strconv.ParseUint(req.Arguments[4], 10, 64); err != nil { + return err + } + if params, err = hex.DecodeString(req.Arguments[5]); err != nil { + return err + } + } + if msgCid, err = api.MultiSigAPI.MsigCancelTxnHash(ctx, msig, txid, dest, types.BigInt(value), + from, method, 
params); err != nil { + return err + } + } + fmt.Println("sent cancel in message: ", msgCid.String()) + wait, err := api.ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.Finality, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("cancel returned exit %d:%s", wait.Receipt.ExitCode, + wait.Receipt.ExitCode.String()) + } + return nil + }, +} + +var msigSwapProposeCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Propose to swap signers", + Usage: "[multisigAddress oldAddress newAddress]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the propose message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("oldAddress", true, false, "sender address of the cancel msg"), + cmds.StringArg("newAddress", true, false, "new signer"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 3 { + return fmt.Errorf("must pass multisig address, old signer address, new signer address") + } + ctx := ReqContext(req.Context) + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + oldAdd, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + newAdd, err := address.NewFromString(req.Arguments[2]) + if err != nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + msgCid, err := env.(*node.Env).MultiSigAPI.MsigSwapPropose(ctx, msig, from, oldAdd, newAdd) + if err != nil { + return err + } + + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("swap proposal returned exit %d", wait.Receipt.ExitCode) + } + var ret multisig.ProposeReturn + err = 
ret.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)) + if err != nil { + return fmt.Errorf("decoding proposal return: %w", err) + } + cliw := new(bytes.Buffer) + fmt.Fprintf(cliw, "sent swap singer proposal in message: %s\n", msgCid) + fmt.Fprintf(cliw, "TxnID: %d\n", ret.TxnID) + return re.Emit(cliw) + }, +} + +var msigSwapApproveCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Approve a message to swap signers", + Usage: "[multisigAddress proposerAddress txId oldAddress newAddress]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the approve message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("proposerAddress", true, false, "sender address of the approve msg"), + cmds.StringArg("txId", true, false, "proposed message ID"), + cmds.StringArg("oldAddress", true, false, "old signer"), + cmds.StringArg("newAddress", true, false, "new signer"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 5 { + return fmt.Errorf("must pass multisig address, proposer address, transaction id, old signer address, new signer address") + } + ctx := ReqContext(req.Context) + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + prop, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(req.Arguments[2], 10, 64) + if err != nil { + return err + } + + oldAdd, err := address.NewFromString(req.Arguments[3]) + if err != nil { + return err + } + + newAdd, err := address.NewFromString(req.Arguments[4]) + if err != nil { + return err + } + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + msgCid, err := env.(*node.Env).MultiSigAPI.MsigSwapApprove(ctx, msig, from, txid, prop, oldAdd, newAdd) + if err != nil { + return err + } + + wait, err := 
env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("swap approval returned exit %d", wait.Receipt.ExitCode) + } + + return re.Emit(fmt.Sprintf("sent swap approval in message: %s", msgCid)) + }, +} + +var msigSwapCancelCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Cancel a message to swap signers", + Usage: "[multisigAddress txId oldAddress newAddress]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the approve message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("txId", true, false, "proposed message ID"), + cmds.StringArg("oldAddress", true, false, "old signer"), + cmds.StringArg("newAddress", true, false, "new signer"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 4 { + return fmt.Errorf("must pass multisig address, transaction id, old signer address, new signer address") + } + ctx := ReqContext(req.Context) + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + oldAdd, err := address.NewFromString(req.Arguments[2]) + if err != nil { + return err + } + + newAdd, err := address.NewFromString(req.Arguments[3]) + if err != nil { + return err + } + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + msgCid, err := env.(*node.Env).MultiSigAPI.MsigSwapCancel(ctx, msig, from, txid, oldAdd, newAdd) + if err != nil { + return err + } + + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("swap cancellation returned 
exit %d", wait.Receipt.ExitCode) + } + + return re.Emit(fmt.Sprintf("sent swap cancellation in message: %s", msgCid)) + }, +} + +var msigLockProposeCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Propose to lock up some balance", + Usage: "[multisigAddress startEpoch unlockDuration amount]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the propose message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("startEpoch", true, false, "start epoch"), + cmds.StringArg("unlockDuration", true, false, "the locked block period"), + cmds.StringArg("amount", true, false, "amount of FIL"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 4 { + return fmt.Errorf("must pass multisig address, start epoch, unlock duration, and amount") + } + ctx := ReqContext(req.Context) + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + start, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + duration, err := strconv.ParseUint(req.Arguments[2], 10, 64) + if err != nil { + return err + } + + amount, err := types.ParseFIL(req.Arguments[3]) + if err != nil { + return err + } + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ + StartEpoch: abi.ChainEpoch(start), + UnlockDuration: abi.ChainEpoch(duration), + Amount: big.Int(amount), + }) + + if actErr != nil { + return actErr + } + msgCid, err := env.(*node.Env).MultiSigAPI.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + if err != nil { + return err + } + + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 
0 { + return fmt.Errorf("lock proposal returned exit %d", wait.Receipt.ExitCode) + } + var ret multisig.ProposeReturn + err = ret.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)) + if err != nil { + return fmt.Errorf("decoding proposal return: %w", err) + } + cliw := new(bytes.Buffer) + fmt.Fprintf(cliw, "sent lock balance proposal in message: %s\n", msgCid) + fmt.Fprintf(cliw, "TxnID: %d\n", ret.TxnID) + return re.Emit(cliw) + }, +} + +var msigLockApproveCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Approve a message to lock up some balance", + Usage: "[multisigAddress proposerAddress txId startEpoch unlockDuration amount]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the propose message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("proposerAddress", true, false, "proposed address"), + cmds.StringArg("txId", true, false, "proposed message ID"), + cmds.StringArg("startEpoch", true, false, "start epoch"), + cmds.StringArg("unlockDuration", true, false, "the locked block period"), + cmds.StringArg("amount", true, false, "amount of FIL"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 6 { + return fmt.Errorf("must pass multisig address, proposer address, tx id, start epoch, unlock duration, and amount") + } + ctx := ReqContext(req.Context) + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + prop, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(req.Arguments[2], 10, 64) + if err != nil { + return err + } + + start, err := strconv.ParseUint(req.Arguments[3], 10, 64) + if err != nil { + return err + } + + duration, err := strconv.ParseUint(req.Arguments[4], 10, 64) + if err != nil { + return err + } + + amount, err := types.ParseFIL(req.Arguments[5]) + if err 
!= nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + + params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ + StartEpoch: abi.ChainEpoch(start), + UnlockDuration: abi.ChainEpoch(duration), + Amount: big.Int(amount), + }) + + if actErr != nil { + return actErr + } + + msgCid, err := env.(*node.Env).MultiSigAPI.MsigApproveTxnHash(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + if err != nil { + return err + } + + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("lock approval returned exit %d", wait.Receipt.ExitCode) + } + return re.Emit(fmt.Sprintf("sent lock approval in message: %s", msgCid)) + }, +} + +var msigLockCancelCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Cancel a message to lock up some balance", + Usage: "[multisigAddress txId startEpoch unlockDuration amount]", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the propose message from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("txId", true, false, "proposed transaction ID"), + cmds.StringArg("startEpoch", true, false, "start epoch"), + cmds.StringArg("unlockDuration", true, false, "the locked block period"), + cmds.StringArg("amount", true, false, "amount of FIL"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 5 { + return fmt.Errorf("must pass multisig address, tx id, start epoch, unlock duration, and amount") + } + ctx := ReqContext(req.Context) + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + 
start, err := strconv.ParseUint(req.Arguments[2], 10, 64) + if err != nil { + return err + } + + duration, err := strconv.ParseUint(req.Arguments[3], 10, 64) + if err != nil { + return err + } + + amount, err := types.ParseFIL(req.Arguments[4]) + if err != nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + + params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ + StartEpoch: abi.ChainEpoch(start), + UnlockDuration: abi.ChainEpoch(duration), + Amount: big.Int(amount), + }) + if actErr != nil { + return actErr + } + + msgCid, err := env.(*node.Env).MultiSigAPI.MsigCancelTxnHash(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + if err != nil { + return err + } + + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("lock cancellation returned exit %d", wait.Receipt.ExitCode) + } + + return re.Emit(fmt.Sprintf("sent lock cancellation in message: %s", msgCid)) + }, +} + +var msigVestedCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Gets the amount vested in an msig between two epochs", + Usage: "[multisigAddress]", + }, + Options: []cmds.Option{ + cmds.Int64Option("start-epoch", "start epoch to measure vesting from").WithDefault(int64(0)), + cmds.Int64Option("end-epoch", "end epoch to measure vesting at").WithDefault(int64(-1)), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + /*defer func() { + if err := recover(); err != nil { + re.Emit(err) + } + }()*/ + if len(req.Arguments) != 1 { + return fmt.Errorf("must pass multisig address") + } + ctx := ReqContext(req.Context) + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return 
err + } + start, err := env.(*node.Env).ChainAPI.ChainGetTipSetByHeight(ctx, reqChainEpochOption(req, "start-epoch"), types.EmptyTSK) + if err != nil { + return err + } + var end *types.TipSet + endEpoch := reqChainEpochOption(req, "end-epoch") + if endEpoch < 0 { + end, err = env.(*node.Env).ChainAPI.ChainHead(ctx) + if err != nil { + return err + } + } else { + end, err = env.(*node.Env).ChainAPI.ChainGetTipSetByHeight(ctx, endEpoch, types.EmptyTSK) + if err != nil { + return err + } + } + + ret, err := env.(*node.Env).MultiSigAPI.MsigGetVested(ctx, msig, start.Key(), end.Key()) + if err != nil { + return err + } + return re.Emit(fmt.Sprintf("Vested: %s between %d and %d", types.FIL(ret), start.Height(), end.Height())) + }, +} + +var msigProposeThresholdCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Propose setting a different signing threshold on the account", + Usage: "", + }, + Options: []cmds.Option{ + cmds.StringOption("from", "account to send the proposal from"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("multisigAddress", true, false, "multisig address"), + cmds.StringArg("newM", true, false, "number of signature required"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 2 { + return fmt.Errorf("must pass multisig address and new threshold value") + } + ctx := ReqContext(req.Context) + + msig, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + newM, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + from, err := reqFromWithDefault(req, env) + if err != nil { + return err + } + + params, actErr := actors.SerializeParams(&msig2.ChangeNumApprovalsThresholdParams{ + NewThreshold: newM, + }) + + if actErr != nil { + return actErr + } + + msgCid, err := env.(*node.Env).MultiSigAPI.MsigPropose(ctx, msig, msig, types.NewInt(0), from, uint64(multisig.Methods.ChangeNumApprovalsThreshold), params) + if 
err != nil { + return fmt.Errorf("failed to propose change of threshold: %w", err) + } + wait, err := env.(*node.Env).ChainAPI.StateWaitMsg(ctx, msgCid, reqConfidence(req), constants.LookbackNoLimit, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("change threshold proposal returned exit %d", wait.Receipt.ExitCode) + } + var ret multisig.ProposeReturn + err = ret.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)) + if err != nil { + return fmt.Errorf("decoding proposal return: %w", err) + } + cliw := new(bytes.Buffer) + fmt.Fprintf(cliw, "sent change threshold proposal in message: %s\n", msgCid) + fmt.Fprintf(cliw, "TxnID: %d\n", ret.TxnID) + return re.Emit(cliw) + }, +} diff --git a/cmd/multisig_helper.go b/cmd/multisig_helper.go new file mode 100644 index 0000000000..57785a0caf --- /dev/null +++ b/cmd/multisig_helper.go @@ -0,0 +1,65 @@ +package cmd + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/constants" + cmds "github.com/ipfs/go-ipfs-cmds" +) + +func reqConfidence(req *cmds.Request) uint64 { + confidence, ok := req.Options["confidence"] + if ok { + return confidence.(uint64) + } + return 0 +} + +func reqFromWithDefault(req *cmds.Request, env cmds.Environment) (address.Address, error) { + f, ok := req.Options["from"] + if ok { + from, err := address.NewFromString(f.(string)) + if err != nil { + return address.Undef, err + } + return from, nil + } + defaddr, err := env.(*node.Env).WalletAPI.WalletDefaultAddress(req.Context) + if err != nil { + return address.Undef, err + } + return defaddr, nil +} + +func reqBoolOption(req *cmds.Request, cmd string) bool { + tmp, ok := req.Options[cmd] + if ok { + return tmp.(bool) + } + return false +} + +func reqUint64Option(req *cmds.Request, cmd string) uint64 { + tmp, ok := req.Options[cmd] + if ok { + return tmp.(uint64) + } + 
return 0 +} + +func reqStringOption(req *cmds.Request, cmd string) string { + tmp, ok := req.Options[cmd] + if ok { + return tmp.(string) + } + return constants.StringEmpty +} + +func reqChainEpochOption(req *cmds.Request, cmd string) abi.ChainEpoch { + v, ok := req.Options[cmd] + if ok { + return abi.ChainEpoch(v.(int64)) + } + return 0 +} diff --git a/cmd/paych.go b/cmd/paych.go new file mode 100644 index 0000000000..50f3f5acbe --- /dev/null +++ b/cmd/paych.go @@ -0,0 +1,475 @@ +package cmd + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "sort" + "strconv" + "strings" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/paychmgr" + lpaych "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" + "github.com/filecoin-project/venus/venus-shared/types" + cmds "github.com/ipfs/go-ipfs-cmds" +) + +var paychCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Manage payment channels", + }, + Subcommands: map[string]*cmds.Command{ + "add-funds": addFundsCmd, + "list": listCmd, + "voucher": voucherCmd, + "settle": settleCmd, + "status": statusCmd, + "status-by-from-to": sbftCmd, + "collect": collectCmd, + }, +} + +var addFundsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Add funds to the payment channel between fromAddress and toAddress. 
Creates the payment channel if it doesn't already exist.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("from_addr", true, false, "From Address is the payment channel sender"), + cmds.StringArg("to_addr", true, false, "To Address is the payment channel recipient"), + cmds.StringArg("amount", true, false, "Amount is the deposits funds in the payment channel"), + }, + Options: []cmds.Option{ + cmds.BoolOption("restart-retrievals", "restart stalled retrieval deals on this payment channel").WithDefault(true), + cmds.BoolOption("reserve", "mark funds as reserved"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + fromAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + toAddr, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + amt, err := types.ParseFIL(req.Arguments[2]) + if err != nil { + return err + } + var chanInfo *types.ChannelInfo + if reserve, _ := req.Options["reserve"].(bool); reserve { + chanInfo, err = env.(*node.Env).PaychAPI.PaychGet(req.Context, fromAddr, toAddr, types.BigInt(amt), types.PaychGetOpts{ + OffChain: false, + }) + } else { + chanInfo, err = env.(*node.Env).PaychAPI.PaychFund(req.Context, fromAddr, toAddr, types.BigInt(amt)) + } + if err != nil { + return err + } + + chAddr, err := env.(*node.Env).PaychAPI.PaychGetWaitReady(req.Context, chanInfo.WaitSentinel) + if err != nil { + return err + } + return re.Emit(chAddr) + }, +} + +var listCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List all locally registered payment channels", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + addrs, err := env.(*node.Env).PaychAPI.PaychList(req.Context) + if err != nil { + return err + } + return re.Emit(addrs) + }, +} + +var voucherCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Interact with payment channel vouchers", + }, + Subcommands: map[string]*cmds.Command{ + 
"create": voucherCreateCmd, + "check": voucherCheckCmd, + "add": voucherAddCmd, + "list": voucherListCmd, + "best-spendable": voucherBestSpendableCmd, + "submit": voucherSubmitCmd, + }, +} + +var settleCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Settle a payment channel", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("channel_addr", true, false, "The given payment channel address"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + chanAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + mcid, err := env.(*node.Env).PaychAPI.PaychSettle(req.Context, chanAddr) + if err != nil { + return err + } + if err != nil { + return err + } + mwait, err := env.(*node.Env).ChainAPI.StateWaitMsg(req.Context, mcid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + if mwait.Receipt.ExitCode != 0 { + return fmt.Errorf("settle message execution failed (exit code %d)", mwait.Receipt.ExitCode) + } + return re.Emit(fmt.Sprintf("Settled channel %s", chanAddr)) + }, +} + +var statusCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show the status of an outbound payment channel", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("channel_addr", true, false, "The given payment channel address"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + chanAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + av, err := env.(*node.Env).PaychAPI.PaychAvailableFunds(req.Context, chanAddr) + if err != nil { + return err + } + // re.Emit(av) + w := bytes.NewBuffer(nil) + paychStatus(w, av) + return re.Emit(w) + }, +} + +var sbftCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show the status of an active outbound payment channel by from/to addresses", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("from_addr", true, false, "Gets a channel 
accessor for a given from / to pair"), + cmds.StringArg("to_addr", true, false, "Gets a channel accessor for a given from / to pair"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + fromAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + toAddr, err := address.NewFromString(req.Arguments[1]) + if err != nil { + return err + } + av, err := env.(*node.Env).PaychAPI.PaychAvailableFundsByFromTo(req.Context, fromAddr, toAddr) + if err != nil { + return err + } + w := bytes.NewBuffer(nil) + paychStatus(w, av) + return re.Emit(w) + }, +} + +var collectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Create a signed payment channel voucher", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("channel_addr", true, false, "The given payment channel address"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + chanAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + mcid, err := env.(*node.Env).PaychAPI.PaychCollect(req.Context, chanAddr) + if err != nil { + return err + } + mwait, err := env.(*node.Env).ChainAPI.StateWaitMsg(req.Context, mcid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + if mwait.Receipt.ExitCode != 0 { + return fmt.Errorf("collect message execution failed (exit code %d)", mwait.Receipt.ExitCode) + } + + return re.Emit(fmt.Sprintf("Collected funds for channel %s", chanAddr)) + }, +} + +var voucherCreateCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Create a signed payment channel voucher", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("channel_addr", true, false, "The given payment channel address"), + cmds.StringArg("amount", true, false, "The value that will be used to create the voucher"), + cmds.StringArg("lane", true, false, "Specify payment channel lane to use"), + }, + Run: func(req *cmds.Request, re 
cmds.ResponseEmitter, env cmds.Environment) error { + chanAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + amtFil, err := types.ParseFIL(req.Arguments[1]) + if err != nil { + return err + } + lane, err := strconv.ParseUint(req.Arguments[2], 10, 64) + if err != nil { + return err + } + res, err := env.(*node.Env).PaychAPI.PaychVoucherCreate(req.Context, chanAddr, big.NewFromGo(amtFil.Int), lane) + if err != nil { + return err + } + if res.Voucher == nil { + return fmt.Errorf("could not create voucher: insufficient funds in channel, shortfall: %d", res.Shortfall) + } + enc, err := encodedString(res.Voucher) + if err != nil { + return err + } + + return re.Emit(enc) + }, +} + +var voucherCheckCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Check validity of payment channel voucher", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("channel_addr", true, false, "The given payment channel address"), + cmds.StringArg("voucher", true, false, "The voucher in the payment channel"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + chanAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + voucher, err := lpaych.DecodeSignedVoucher(req.Arguments[1]) + if err != nil { + return err + } + err = env.(*node.Env).PaychAPI.PaychVoucherCheckValid(req.Context, chanAddr, voucher) + if err != nil { + return err + } + return re.Emit("voucher is valid") + }, +} + +var voucherAddCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Add payment channel voucher to local datastore", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("channel_addr", true, false, "The given payment channel address"), + cmds.StringArg("voucher", true, false, "The voucher in the payment channel"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + chanAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err 
+ } + voucher, err := lpaych.DecodeSignedVoucher(req.Arguments[1]) + if err != nil { + return err + } + _, err = env.(*node.Env).PaychAPI.PaychVoucherAdd(req.Context, chanAddr, voucher, nil, big.NewInt(0)) + if err != nil { + return err + } + return re.Emit("add voucher successfully") + }, +} + +var voucherListCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List stored vouchers for a given payment channel", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("channel_addr", true, false, "The given payment channel address"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + chanAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + vs, err := env.(*node.Env).PaychAPI.PaychVoucherList(req.Context, chanAddr) + if err != nil { + return err + } + buff := bytes.NewBuffer(nil) + for _, v := range sortVouchers(vs) { + str, err := encodedString(v) + if err != nil { + return err + } + fmt.Fprintf(buff, "Lane %d, Nonce %d: %s, voucher: %s\n", v.Lane, v.Nonce, v.Amount.String(), str) + } + + return re.Emit(buff) + }, +} + +var voucherBestSpendableCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print vouchers with highest value that is currently spendable for each lane", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("channel_addr", true, false, "The given payment channel address"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + chanAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + vouchersByLane, err := paychmgr.BestSpendableByLane(req.Context, env.(*node.Env).PaychAPI, chanAddr) + if err != nil { + return err + } + + var vouchers []*paych.SignedVoucher + for _, vchr := range vouchersByLane { + vouchers = append(vouchers, vchr) + } + buff := bytes.NewBuffer(nil) + for _, v := range sortVouchers(vouchers) { + str, err := encodedString(v) + if err != nil { + return err + } + 
fmt.Fprintf(buff, "Lane %d, Nonce %d: %s, voucher: %s\n", v.Lane, v.Nonce, v.Amount.String(), str) + } + return re.Emit(buff) + }, +} + +var voucherSubmitCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Submit voucher to chain to update payment channel state", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("channel_addr", true, false, "The given payment channel address"), + cmds.StringArg("voucher", true, false, "The voucher in the payment channel"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + chanAddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + voucher, err := lpaych.DecodeSignedVoucher(req.Arguments[1]) + if err != nil { + return err + } + mcid, err := env.(*node.Env).PaychAPI.PaychVoucherSubmit(req.Context, chanAddr, voucher, nil, nil) + if err != nil { + return err + } + mwait, err := env.(*node.Env).ChainAPI.StateWaitMsg(req.Context, mcid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + if mwait.Receipt.ExitCode != 0 { + return fmt.Errorf("message execution failed (exit code %d)", mwait.Receipt.ExitCode) + } + return re.Emit("channel updated successfully") + }, +} + +func encodedString(sv *paych.SignedVoucher) (string, error) { + buf := new(bytes.Buffer) + if err := sv.MarshalCBOR(buf); err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(buf.Bytes()), nil +} + +func sortVouchers(vouchers []*paych.SignedVoucher) []*paych.SignedVoucher { + sort.Slice(vouchers, func(i, j int) bool { + if vouchers[i].Lane == vouchers[j].Lane { + return vouchers[i].Nonce < vouchers[j].Nonce + } + return vouchers[i].Lane < vouchers[j].Lane + }) + return vouchers +} + +func paychStatus(writer io.Writer, avail *types.ChannelAvailableFunds) { + if avail.Channel == nil { + if avail.PendingWaitSentinel != nil { + fmt.Fprint(writer, "Creating channel\n") + fmt.Fprintf(writer, " From: %s\n", avail.From) + 
fmt.Fprintf(writer, " To: %s\n", avail.To) + fmt.Fprintf(writer, " Pending Amt: %s\n", types.FIL(avail.PendingAmt)) + fmt.Fprintf(writer, " Wait Sentinel: %s\n", avail.PendingWaitSentinel) + return + } + fmt.Fprint(writer, "Channel does not exist\n") + fmt.Fprintf(writer, " From: %s\n", avail.From) + fmt.Fprintf(writer, " To: %s\n", avail.To) + return + } + + if avail.PendingWaitSentinel != nil { + fmt.Fprint(writer, "Adding Funds to channel\n") + } else { + fmt.Fprint(writer, "Channel exists\n") + } + nameValues := [][]string{ + {"Channel", avail.Channel.String()}, + {"From", avail.From.String()}, + {"To", avail.To.String()}, + {"Confirmed Amt", fmt.Sprintf("%s", types.FIL(avail.ConfirmedAmt))}, + {"Available Amt", fmt.Sprintf("%s", types.FIL(avail.NonReservedAmt))}, + {"Voucher Redeemed Amt", fmt.Sprintf("%s", types.FIL(avail.VoucherReedeemedAmt))}, + {"Pending Amt", fmt.Sprintf("%s", types.FIL(avail.PendingAmt))}, + {"Pending Available Amt", fmt.Sprintf("%s", types.FIL(avail.PendingAvailableAmt))}, + {"Queued Amt", fmt.Sprintf("%s", types.FIL(avail.QueuedAmt))}, + } + if avail.PendingWaitSentinel != nil { + nameValues = append(nameValues, []string{ + "Add Funds Wait Sentinel", + avail.PendingWaitSentinel.String(), + }) + } + fmt.Fprint(writer, formatNameValues(nameValues)) +} + +func formatNameValues(nameValues [][]string) string { + maxLen := 0 + for _, nv := range nameValues { + if len(nv[0]) > maxLen { + maxLen = len(nv[0]) + } + } + out := make([]string, len(nameValues)) + for i, nv := range nameValues { + namePad := strings.Repeat(" ", maxLen-len(nv[0])) + out[i] = " " + nv[0] + ": " + namePad + nv[1] + } + return strings.Join(out, "\n") + "\n" +} diff --git a/cmd/paych_test.go b/cmd/paych_test.go new file mode 100644 index 0000000000..4d7a4ad5ef --- /dev/null +++ b/cmd/paych_test.go @@ -0,0 +1,42 @@ +package cmd + +import ( + "testing" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + 
"github.com/filecoin-project/go-state-types/builtin/v8/paych" + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/runtime" + tutil "github.com/filecoin-project/specs-actors/support/testing" + "github.com/stretchr/testify/assert" +) + +func TestEncodedString(t *testing.T) { + mnum := builtin.MethodsPaych.UpdateChannelState + fakeParams := runtime.CBORBytes([]byte{1, 2, 3, 4}) + otherAddr := tutil.NewIDAddr(t, 104) + ex := &paych.ModVerifyParams{ + Actor: otherAddr, + Method: mnum, + Data: fakeParams, + } + chanAddr, _ := addr.NewFromString("t15ihq5ibzwki2b4ep2f46avlkrqzhpqgtga7pdrq") + sv := &paych.SignedVoucher{ + ChannelAddr: chanAddr, + TimeLockMin: 1, + TimeLockMax: 100, + SecretHash: []byte("ProfesrXXXXXXXXXXXXXXXXXXXXXXXXX"), + Extra: ex, + Lane: 1, + Nonce: 1, + Amount: big.NewInt(10), + MinSettleHeight: 1000, + Merges: nil, + } + str, err := encodedString(sv) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, str, "i1UB6g8OoDmykaDwj9F54FVqjDJ3wNMBGGRYIFByb2Zlc3JYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYg0IAaAJEAQIDBAEBQgAKGQPogPY") +} diff --git a/cmd/seed.go b/cmd/seed.go new file mode 100644 index 0000000000..84ddb8a970 --- /dev/null +++ b/cmd/seed.go @@ -0,0 +1,684 @@ +package cmd + +import ( + "encoding/csv" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" + "github.com/google/uuid" + cmds "github.com/ipfs/go-ipfs-cmds" + "github.com/mitchellh/go-homedir" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/venus/fixtures/networks" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/gen" + "github.com/filecoin-project/venus/pkg/gen/genesis" + 
"github.com/filecoin-project/venus/tools/seed" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var seedCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Seal sectors for genesis miner.", + }, + Subcommands: map[string]*cmds.Command{ + "genesis": genesisCmd, + + "pre-seal": preSealCmd, + "aggregate-manifests": aggregateManifestsCmd, + }, +} + +var genesisCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "manipulate genesis template", + }, + Subcommands: map[string]*cmds.Command{ + "new": genesisNewCmd, + "add-miner": genesisAddMinerCmd, + "add-msis": genesisAddMsigsCmd, + "set-vrk": genesisSetVRKCmd, + "set-remainder": genesisSetRemainderCmd, + "set-network-version": genesisSetActorVersionCmd, + }, +} + +var genesisNewCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "create new genesis template", + }, + Options: []cmds.Option{ + cmds.StringOption("network-name", "network name"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("file", true, true, "The file to write genesis info"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + fileName := req.Arguments[0] + if fileName == "" { + return errors.New("seed genesis new [genesis.json]") + } + networkName, _ := req.Options["network-name"].(string) + out := genesis.Template{ + NetworkVersion: networks.Net2k().Network.GenesisNetworkVersion, + Accounts: []genesis.Actor{}, + Miners: []genesis.Miner{}, + VerifregRootKey: gen.DefaultVerifregRootkeyActor, + RemainderAccount: gen.DefaultRemainderAccountActor, + NetworkName: networkName, + } + if out.NetworkName == "" { + out.NetworkName = "localnet-" + uuid.New().String() + } + + genb, err := json.MarshalIndent(&out, "", " ") + if err != nil { + return err + } + + genf, err := homedir.Expand(fileName) + if err != nil { + return err + } + + return os.WriteFile(genf, genb, 0o644) + }, +} + +var genesisAddMinerCmd = 
&cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "add genesis miner", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("genesis-file", true, true, "genesis file"), + cmds.StringArg("preseal-file", true, true, "preseal file"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 2 { + return errors.New("seed genesis add-miner [genesis.json] [preseal.json]") + } + + genf, err := homedir.Expand(req.Arguments[0]) + if err != nil { + return err + } + + var template genesis.Template + genb, err := os.ReadFile(genf) + if err != nil { + return fmt.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(genb, &template); err != nil { + return fmt.Errorf("unmarshal genesis template: %w", err) + } + + minf, err := homedir.Expand(req.Arguments[1]) + if err != nil { + return fmt.Errorf("expand preseal file path: %w", err) + } + miners := map[string]genesis.Miner{} + minb, err := os.ReadFile(minf) + if err != nil { + return fmt.Errorf("read preseal file: %w", err) + } + if err := json.Unmarshal(minb, &miners); err != nil { + return fmt.Errorf("unmarshal miner info: %w", err) + } + + for mn, miner := range miners { + log.Infof("Adding miner %s to genesis template", mn) + { + id := uint64(genesis.MinerStart) + uint64(len(template.Miners)) + maddr, err := address.NewFromString(mn) + if err != nil { + return fmt.Errorf("parsing miner address: %w", err) + } + mid, err := address.IDFromAddress(maddr) + if err != nil { + return fmt.Errorf("getting miner id from address: %w", err) + } + if mid != id { + return fmt.Errorf("tried to set miner t0%d as t0%d", mid, id) + } + } + + template.Miners = append(template.Miners, miner) + log.Infof("Giving %s some initial balance", miner.Owner) + template.Accounts = append(template.Accounts, genesis.Actor{ + Type: genesis.TAccount, + Balance: big.Mul(big.NewInt(50_000_000), big.NewInt(int64(constants.FilecoinPrecision))), + Meta: (&genesis.AccountMeta{Owner: 
miner.Owner}).ActorMeta(), + }) + } + + genb, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + return os.WriteFile(genf, genb, 0o644) + }, +} + +var genesisAddMsigsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("templateFile", true, true, ""), + cmds.StringArg("csvFile", true, true, ""), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) < 2 { + return fmt.Errorf("must specify template file and csv file with accounts") + } + + genf, err := homedir.Expand(req.Arguments[0]) + if err != nil { + return err + } + + csvf, err := homedir.Expand(req.Arguments[1]) + if err != nil { + return err + } + + var template genesis.Template + b, err := os.ReadFile(genf) + if err != nil { + return fmt.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return fmt.Errorf("unmarshal genesis template: %w", err) + } + + entries, err := seed.ParseMultisigCsv(csvf) + if err != nil { + return fmt.Errorf("parsing multisig csv file: %w", err) + } + + for i, e := range entries { + if len(e.Addresses) != e.N { + return fmt.Errorf("entry %d had mismatch between 'N' and number of addresses", i) + } + + msig := &genesis.MultisigMeta{ + Signers: e.Addresses, + Threshold: e.M, + VestingDuration: monthsToBlocks(e.VestingMonths), + VestingStart: 0, + } + + template.Accounts = append(template.Accounts, genesis.Actor{ + Type: genesis.TMultisig, + Balance: abi.TokenAmount(e.Amount), + Meta: msig.ActorMeta(), + }) + } + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + return os.WriteFile(genf, b, 0o644) + }, +} + +func monthsToBlocks(nmonths int) int { + days := uint64((365 * nmonths) / 12) + return int(days * 24 * 60 * 60 / constants.MainNetBlockDelaySecs) +} + +func parseMultisigCsv(csvf string) ([]seed.GenAccountEntry, error) { + fileReader, err := 
os.Open(csvf) + if err != nil { + return nil, fmt.Errorf("read multisig csv: %w", err) + } + defer fileReader.Close() //nolint:errcheck + r := csv.NewReader(fileReader) + records, err := r.ReadAll() + if err != nil { + return nil, fmt.Errorf("read multisig csv: %w", err) + } + var entries []seed.GenAccountEntry + for i, e := range records[1:] { + var addrs []address.Address + addrStrs := strings.Split(strings.TrimSpace(e[7]), ":") + for j, a := range addrStrs { + addr, err := address.NewFromString(a) + if err != nil { + return nil, fmt.Errorf("failed to parse address %d in row %d (%q): %w", j, i, a, err) + } + addrs = append(addrs, addr) + } + + balance, err := types.ParseFIL(strings.TrimSpace(e[2])) + if err != nil { + return nil, fmt.Errorf("failed to parse account balance: %w", err) + } + + vesting, err := strconv.Atoi(strings.TrimSpace(e[3])) + if err != nil { + return nil, fmt.Errorf("failed to parse vesting duration for record %d: %w", i, err) + } + + custodianID, err := strconv.Atoi(strings.TrimSpace(e[4])) + if err != nil { + return nil, fmt.Errorf("failed to parse custodianID in record %d: %w", i, err) + } + threshold, err := strconv.Atoi(strings.TrimSpace(e[5])) + if err != nil { + return nil, fmt.Errorf("failed to parse multisigM in record %d: %w", i, err) + } + num, err := strconv.Atoi(strings.TrimSpace(e[6])) + if err != nil { + return nil, fmt.Errorf("number of addresses be integer: %w", err) + } + if e[0] != "1" { + return nil, fmt.Errorf("record version must be 1") + } + entries = append(entries, seed.GenAccountEntry{ + Version: 1, + ID: e[1], + Amount: balance, + CustodianID: custodianID, + VestingMonths: vesting, + M: threshold, + N: num, + Type: e[8], + Sig1: e[9], + Sig2: e[10], + Addresses: addrs, + }) + } + + return entries, nil +} + +var genesisSetVRKCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Set the verified registry's root key", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("templateFile", true, true, ""), + }, + 
Options: []cmds.Option{ + cmds.StringOption("multisig", "CSV file to parse the multisig that will be set as the root key"), + cmds.StringOption("account", "pubkey address that will be set as the root key (must NOT be declared anywhere else, since it must be given ID 80)"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) < 1 { + return fmt.Errorf("must specify template file and csv file with accounts") + } + + genf, err := homedir.Expand(req.Arguments[0]) + if err != nil { + return err + } + + var template genesis.Template + b, err := os.ReadFile(genf) + if err != nil { + return fmt.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return fmt.Errorf("unmarshal genesis template: %w", err) + } + + account, _ := req.Options["account"].(string) + multisig, _ := req.Options["multisig"].(string) + if len(account) > 0 { + addr, err := address.NewFromString(account) + if err != nil { + return err + } + + am := genesis.AccountMeta{Owner: addr} + + template.VerifregRootKey = genesis.Actor{ + Type: genesis.TAccount, + Balance: big.Zero(), + Meta: am.ActorMeta(), + } + } else if len(multisig) > 0 { + csvf, err := homedir.Expand(multisig) + if err != nil { + return err + } + + entries, err := parseMultisigCsv(csvf) + if err != nil { + return fmt.Errorf("parsing multisig csv file: %w", err) + } + + if len(entries) == 0 { + return fmt.Errorf("no msig entries in csv file: %w", err) + } + + e := entries[0] + if len(e.Addresses) != e.N { + return fmt.Errorf("entry had mismatch between 'N' and number of addresses") + } + + msig := &genesis.MultisigMeta{ + Signers: e.Addresses, + Threshold: e.M, + VestingDuration: monthsToBlocks(e.VestingMonths), + VestingStart: 0, + } + + act := genesis.Actor{ + Type: genesis.TMultisig, + Balance: abi.TokenAmount(e.Amount), + Meta: msig.ActorMeta(), + } + + template.VerifregRootKey = act + } else { + return fmt.Errorf("must include 
either --account or --multisig flag") + } + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + return os.WriteFile(genf, b, 0o644) + }, +} + +var genesisSetRemainderCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Set the remainder actor", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("templateFile", true, true, ""), + }, + Options: []cmds.Option{ + cmds.StringOption("multisig", "CSV file to parse the multisig that will be set as the root key"), + cmds.StringOption("account", "pubkey address that will be set as the root key (must NOT be declared anywhere else, since it must be given ID 80)"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) < 1 { + return fmt.Errorf("must specify template file and csv file with accounts") + } + + genf, err := homedir.Expand(req.Arguments[0]) + if err != nil { + return err + } + + var template genesis.Template + b, err := os.ReadFile(genf) + if err != nil { + return fmt.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return fmt.Errorf("unmarshal genesis template: %w", err) + } + + account, _ := req.Options["account"].(string) + multisig, _ := req.Options["multisig"].(string) + if account != "" { + addr, err := address.NewFromString(account) + if err != nil { + return err + } + + am := genesis.AccountMeta{Owner: addr} + + template.RemainderAccount = genesis.Actor{ + Type: genesis.TAccount, + Balance: big.Zero(), + Meta: am.ActorMeta(), + } + } else if multisig != "" { + csvf, err := homedir.Expand(multisig) + if err != nil { + return err + } + + entries, err := parseMultisigCsv(csvf) + if err != nil { + return fmt.Errorf("parsing multisig csv file: %w", err) + } + + if len(entries) == 0 { + return fmt.Errorf("no msig entries in csv file: %w", err) + } + + e := entries[0] + if len(e.Addresses) != e.N { + return fmt.Errorf("entry had mismatch between 'N' and 
number of addresses") + } + + msig := &genesis.MultisigMeta{ + Signers: e.Addresses, + Threshold: e.M, + VestingDuration: monthsToBlocks(e.VestingMonths), + VestingStart: 0, + } + + act := genesis.Actor{ + Type: genesis.TMultisig, + Balance: abi.TokenAmount(e.Amount), + Meta: msig.ActorMeta(), + } + + template.RemainderAccount = act + } else { + return fmt.Errorf("must include either --account or --multisig flag") + } + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + return os.WriteFile(genf, b, 0o644) + }, +} + +var genesisSetActorVersionCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Set the version that this network will start from", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("genesisFile", true, true, ""), + cmds.StringArg("actorVersion", true, true, ""), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) < 2 { + return fmt.Errorf("must specify genesis file and network version (e.g. 
'0'") + } + + genf, err := homedir.Expand(req.Arguments[0]) + if err != nil { + return err + } + + var template genesis.Template + b, err := os.ReadFile(genf) + if err != nil { + return fmt.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return fmt.Errorf("unmarshal genesis template: %w", err) + } + + nv, err := strconv.ParseUint(req.Arguments[1], 10, 64) + if err != nil { + return fmt.Errorf("parsing network version: %w", err) + } + + if nv > uint64(constants.TestNetworkVersion) { + return fmt.Errorf("invalid network version: %d", nv) + } + + template.NetworkVersion = network.Version(nv) + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + return os.WriteFile(genf, b, 0o644) + }, +} + +var preSealCmd = &cmds.Command{ + Options: []cmds.Option{ + cmds.StringOption("sector-dir", "sector directory").WithDefault("~/.genesis-sectors"), + cmds.StringOption("miner-addr", "specify the future address of your miner").WithDefault("t01000"), + cmds.StringOption("sector-size", "specify size of sectors to pre-seal").WithDefault("2KiB"), + cmds.StringOption("ticket-preimage", "set the ticket preimage for sealing randomness").WithDefault("venus is fire"), + cmds.IntOption("num-sectors", "select number of sectors to pre-seal").WithDefault(int(1)), + cmds.IntOption("sector-offset", "how many sector ids to skip when starting to seal").WithDefault(int(0)), + cmds.StringOption("key", "(optional) Key to use for signing / owner/worker addresses").WithDefault(""), + cmds.BoolOption("fake-sectors", "").WithDefault(false), + cmds.IntOption("network-version", "specify network version").WithDefault(int(networks.Net2k().Network.GenesisNetworkVersion)), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + sdir, _ := req.Options["sector-dir"].(string) + sbroot, err := homedir.Expand(sdir) + if err != nil { + return err + } + + addr, _ := 
req.Options["miner-addr"].(string) + maddr, err := address.NewFromString(addr) + if err != nil { + return err + } + + var ki *crypto.KeyInfo + if key, _ := req.Options["key"].(string); key != "" { + ki = new(crypto.KeyInfo) + kh, err := os.ReadFile(key) + if err != nil { + return err + } + kb, err := hex.DecodeString(string(kh)) + if err != nil { + return err + } + if err := json.Unmarshal(kb, ki); err != nil { + return err + } + } + + ssize, _ := req.Options["sector-size"].(string) + sectorSizeInt, err := units.RAMInBytes(ssize) + if err != nil { + return err + } + sectorSize := abi.SectorSize(sectorSizeInt) + + nv := networks.Net2k().Network.GenesisNetworkVersion + ver, _ := req.Options["network-version"].(int) + if ver >= 0 { + nv = network.Version(ver) + } + + spt, err := miner.SealProofTypeFromSectorSize(sectorSize, nv) + if err != nil { + return err + } + + sectorOffset, _ := req.Options["sector-offset"].(int) + numSectors, _ := req.Options["num-sectors"].(int) + ticketPreimage, _ := req.Options["ticket-preimage"].(string) + fakeSectors, _ := req.Options["fake-sectors"].(bool) + gm, key, err := seed.PreSeal(maddr, spt, abi.SectorNumber(uint64(sectorOffset)), numSectors, sbroot, []byte(ticketPreimage), ki, fakeSectors) + if err != nil { + return err + } + + return seed.WriteGenesisMiner(maddr, sbroot, gm, key) + }, +} + +var aggregateManifestsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "aggregate a set of preseal manifests into a single file", + }, + Options: []cmds.Option{ + cmds.StringsOption("file", "file path"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + var inputs []map[string]genesis.Miner + files, _ := req.Options["file"].([]string) + for _, infi := range files { + fi, err := os.Open(infi) + if err != nil { + return err + } + var val map[string]genesis.Miner + if err := json.NewDecoder(fi).Decode(&val); err != nil { + return err + } + + inputs = append(inputs, val) + if err := fi.Close(); 
err != nil { + return err + } + } + + output := make(map[string]genesis.Miner) + for _, in := range inputs { + for maddr, val := range in { + if gm, ok := output[maddr]; ok { + tmp, err := mergeGenMiners(gm, val) + if err != nil { + return err + } + output[maddr] = tmp + } else { + output[maddr] = val + } + } + } + + blob, err := json.MarshalIndent(output, "", " ") + if err != nil { + return err + } + + return re.Emit(string(blob)) + }, +} + +func mergeGenMiners(a, b genesis.Miner) (genesis.Miner, error) { + if a.SectorSize != b.SectorSize { + return genesis.Miner{}, fmt.Errorf("sector sizes mismatch, %d != %d", a.SectorSize, b.SectorSize) + } + + return genesis.Miner{ + Owner: a.Owner, + Worker: a.Worker, + PeerID: a.PeerID, + MarketBalance: big.Zero(), + PowerBalance: big.Zero(), + SectorSize: a.SectorSize, + Sectors: append(a.Sectors, b.Sectors...), + }, nil +} diff --git a/cmd/state.go b/cmd/state.go new file mode 100644 index 0000000000..b40bb1d335 --- /dev/null +++ b/cmd/state.go @@ -0,0 +1,669 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "path/filepath" + "strconv" + + "github.com/filecoin-project/venus/app/paths" + "github.com/filecoin-project/venus/cmd/tablewriter" + + "github.com/filecoin-project/venus/pkg/config" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + cmds "github.com/ipfs/go-ipfs-cmds" + "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// ActorView represents a generic way to represent details about any actor to the user. 
+type ActorView struct { + Address string `json:"address"` + Code cid.Cid `json:"code,omitempty"` + Nonce uint64 `json:"nonce"` + Balance abi.TokenAmount `json:"balance"` + Head cid.Cid `json:"head,omitempty"` +} + +var stateCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Interact with and query venus chain state", + }, + Subcommands: map[string]*cmds.Command{ + "wait-msg": stateWaitMsgCmd, + "search-msg": stateSearchMsgCmd, + "power": statePowerCmd, + "sectors": stateSectorsCmd, + "active-sectors": stateActiveSectorsCmd, + "sector": stateSectorCmd, + "get-actor": stateGetActorCmd, + "lookup": stateLookupIDCmd, + "sector-size": stateSectorSizeCmd, + "get-deal": stateGetDealSetCmd, + "miner-info": stateMinerInfo, + "network-version": stateNtwkVersionCmd, + "list-actor": stateListActorCmd, + "actor-cids": stateSysActorCIDsCmd, + }, +} + +var stateWaitMsgCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Wait for a message to appear on chain", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, false, "CID of message to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + cid, err := cid.Decode(req.Arguments[0]) + if err != nil { + return err + } + + mw, err := env.(*node.Env).ChainAPI.StateWaitMsg(req.Context, cid, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + if mw != nil { + writer.Printf("message was executed in tipset: %s\n", mw.TipSet.Cids()) + writer.Printf("Exit Code: %d\n", mw.Receipt.ExitCode) + writer.Printf("Gas Used: %d\n", mw.Receipt.GasUsed) + writer.Printf("Return: %x\n", mw.Receipt.Return) + } else { + writer.Printf("Unable to find message recepit of %s", cid) + } + + return re.Emit(buf) + }, +} + +var stateSearchMsgCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Search to see whether a message has appeared on chain", + }, + Arguments: 
[]cmds.Argument{ + cmds.StringArg("cid", true, false, "CID of message to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + cid, err := cid.Decode(req.Arguments[0]) + if err != nil { + return err + } + + mw, err := env.(*node.Env).ChainAPI.StateSearchMsg(req.Context, types.EmptyTSK, cid, constants.LookbackNoLimit, true) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + if mw != nil { + writer.Printf("message was executed in tipset: %s", mw.TipSet.Cids()) + writer.Printf("\nExit Code: %d", mw.Receipt.ExitCode) + writer.Printf("\nGas Used: %d", mw.Receipt.GasUsed) + writer.Printf("\nReturn: %x", mw.Receipt.Return) + } else { + writer.Print("message was not found on chain") + } + + return re.Emit(buf) + }, +} + +var statePowerCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Query network or miner power", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", false, false, "Address of miner to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + var maddr address.Address + var err error + + if len(req.Arguments) == 1 { + maddr, err = address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + power, err := env.(*node.Env).ChainAPI.StateMinerPower(req.Context, maddr, ts.Key()) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + tp := power.TotalPower + if len(req.Arguments) == 1 { + mp := power.MinerPower + if !power.HasMinPower { + mp.QualityAdjPower = big.NewInt(0) + } + percI := big.Div(big.Mul(mp.QualityAdjPower, big.NewInt(1000000)), tp.QualityAdjPower) + writer.Printf("%s(%s) / %s(%s) ~= %0.4f%"+ + "%\n", mp.QualityAdjPower.String(), types.SizeStr(mp.QualityAdjPower), tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower), 
float64(percI.Int64())/10000) + } else { + writer.Printf("%s(%s)\n", tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower)) + } + + return re.Emit(buf) + }, +} + +var stateSectorsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Query the sector set of a miner", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + sectors, err := env.(*node.Env).ChainAPI.StateMinerSectors(req.Context, maddr, nil, ts.Key()) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + for _, s := range sectors { + writer.Printf("%d: %x\n", s.SectorNumber, s.SealedCID) + } + + return re.Emit(buf) + }, +} + +var stateActiveSectorsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Query the active sector set of a miner", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + sectors, err := env.(*node.Env).ChainAPI.StateMinerActiveSectors(req.Context, maddr, ts.Key()) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + for _, s := range sectors { + writer.Printf("%d: %x\n", s.SectorNumber, s.SealedCID) + } + + return re.Emit(buf) + }, +} + +var stateSectorCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Get miner sector info", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, 
"Address of miner to show"), + cmds.StringArg("sector-id", true, false, "Number of actor to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) != 2 { + return fmt.Errorf("expected 2 params") + } + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + sid, err := strconv.ParseInt(req.Arguments[1], 10, 64) + if err != nil { + return err + } + + blockDelay, err := blockDelay(req) + if err != nil { + return err + } + + si, err := env.(*node.Env).ChainAPI.StateSectorGetInfo(req.Context, maddr, abi.SectorNumber(sid), ts.Key()) + if err != nil { + return err + } + if si == nil { + return fmt.Errorf("sector %d for miner %s not found", sid, maddr) + } + + height := ts.Height() + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + writer.Println("SectorNumber: ", si.SectorNumber) + writer.Println("SealProof: ", si.SealProof) + writer.Println("SealedCID: ", si.SealedCID) + writer.Println("DealIDs: ", si.DealIDs) + writer.Println() + writer.Println("Activation: ", EpochTime(height, si.Activation, blockDelay)) + writer.Println("Expiration: ", EpochTime(height, si.Expiration, blockDelay)) + writer.Println() + writer.Println("DealWeight: ", si.DealWeight) + writer.Println("VerifiedDealWeight: ", si.VerifiedDealWeight) + writer.Println("InitialPledge: ", types.FIL(si.InitialPledge)) + writer.Println("ExpectedDayReward: ", types.FIL(si.ExpectedDayReward)) + writer.Println("ExpectedStoragePledge: ", types.FIL(si.ExpectedStoragePledge)) + writer.Println() + + sp, err := env.(*node.Env).ChainAPI.StateSectorPartition(req.Context, maddr, abi.SectorNumber(sid), ts.Key()) + if err != nil { + return err + } + + writer.Println("Deadline: ", sp.Deadline) + writer.Println("Partition: ", sp.Partition) + + return re.Emit(buf) + }, +} + +func blockDelay(req *cmds.Request) (uint64, 
error) { + var err error + repoDir, _ := req.Options[OptionRepoDir].(string) + repoDir, err = paths.GetRepoPath(repoDir) + if err != nil { + return 0, err + } + cfgPath := filepath.Join(repoDir, "config.json") + cfg, err := config.ReadFile(cfgPath) + if err != nil { + return 0, err + } + + return cfg.NetworkParams.BlockDelay, nil +} + +type ActorInfo struct { + Address string + Balance string + Nonce uint64 + Code string + Head string +} + +var stateGetActorCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print actor information", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of actor to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + addr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + a, err := env.(*node.Env).ChainAPI.StateGetActor(req.Context, addr, ts.Key()) + if err != nil { + return err + } + + strtype := builtin.ActorNameByCode(a.Code) + + return re.Emit(ActorInfo{ + Address: addr.String(), + Balance: fmt.Sprintf("%s", types.FIL(a.Balance)), + Nonce: a.Nonce, + Code: fmt.Sprintf("%s (%s)", a.Code, strtype), + Head: a.Head.String(), + }) + }, + Type: ActorInfo{}, +} + +var stateLookupIDCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Find corresponding ID address", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of actor to show"), + }, + Options: []cmds.Option{ + cmds.BoolOption("r", "Perform reverse lookup"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + addr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + var a address.Address + if ok, _ := req.Options["r"].(bool); ok { + a, err = 
env.(*node.Env).ChainAPI.StateAccountKey(req.Context, addr, ts.Key()) + } else { + a, err = env.(*node.Env).ChainAPI.StateLookupID(req.Context, addr, ts.Key()) + } + + if err != nil { + return err + } + + return re.Emit(a.String()) + }, + Type: "", +} + +var stateSectorSizeCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Look up miners sector size", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + maddr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + mi, err := env.(*node.Env).ChainAPI.StateMinerInfo(req.Context, maddr, ts.Key()) + if err != nil { + return err + } + + return re.Emit(fmt.Sprintf("%s (%d)", types.SizeStr(big.NewInt(int64(mi.SectorSize))), mi.SectorSize)) + }, + Type: "", +} + +var stateGetDealSetCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "View on-chain deal info", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("dealID", true, false, "Deal id to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + dealid, err := strconv.ParseUint(req.Arguments[0], 10, 64) + if err != nil { + return fmt.Errorf("parsing deal ID: %w", err) + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + deal, err := env.(*node.Env).ChainAPI.StateMarketStorageDeal(req.Context, abi.DealID(dealid), ts.Key()) + if err != nil { + return err + } + + return re.Emit(deal) + }, + Type: types.MarketDeal{}, +} + +var stateMinerInfo = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Retrieve miner information", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, false, "Address of miner to show"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env 
cmds.Environment) error { + addr, err := address.NewFromString(req.Arguments[0]) + if err != nil { + return err + } + + blockDelay, err := blockDelay(req) + if err != nil { + return err + } + + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + mi, err := env.(*node.Env).ChainAPI.StateMinerInfo(req.Context, addr, ts.Key()) + if err != nil { + return err + } + + availableBalance, err := env.(*node.Env).ChainAPI.StateMinerAvailableBalance(req.Context, addr, ts.Key()) + if err != nil { + return fmt.Errorf("getting miner available balance: %w", err) + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + writer.Printf("Available Balance: %s\n", types.FIL(availableBalance)) + writer.Printf("Owner:\t%s\n", mi.Owner) + writer.Printf("Worker:\t%s\n", mi.Worker) + for i, controlAddress := range mi.ControlAddresses { + writer.Printf("Control %d: \t%s\n", i, controlAddress) + } + + writer.Printf("PeerID:\t%s\n", mi.PeerId) + writer.Printf("Multiaddrs:\t") + + for _, addr := range mi.Multiaddrs { + a, err := multiaddr.NewMultiaddrBytes(addr) + if err != nil { + return fmt.Errorf("undecodable listen address: %v", err) + } + writer.Printf("%s ", a) + } + writer.Println() + writer.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed) + + writer.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(big.NewInt(int64(mi.SectorSize))), mi.SectorSize) + pow, err := env.(*node.Env).ChainAPI.StateMinerPower(req.Context, addr, ts.Key()) + if err != nil { + return err + } + + rpercI := big.Div(big.Mul(pow.MinerPower.RawBytePower, big.NewInt(1000000)), pow.TotalPower.RawBytePower) + qpercI := big.Div(big.Mul(pow.MinerPower.QualityAdjPower, big.NewInt(1000000)), pow.TotalPower.QualityAdjPower) + + writer.Printf("Byte Power: %s / %s (%0.4f%%)\n", + types.SizeStr(pow.MinerPower.RawBytePower), + types.SizeStr(pow.TotalPower.RawBytePower), + float64(rpercI.Int64())/10000) + + writer.Printf("Actual Power: %s / %s (%0.4f%%)\n", + 
types.DeciStr(pow.MinerPower.QualityAdjPower), + types.DeciStr(pow.TotalPower.QualityAdjPower), + float64(qpercI.Int64())/10000) + + writer.Println() + + cd, err := env.(*node.Env).ChainAPI.StateMinerProvingDeadline(req.Context, addr, ts.Key()) + if err != nil { + return fmt.Errorf("getting miner info: %w", err) + } + + writer.Printf("Proving Period Start:\t%s\n", EpochTime(cd.CurrentEpoch, cd.PeriodStart, blockDelay)) + + return re.Emit(buf) + }, +} + +var stateNtwkVersionCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Returns the network version", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ts, err := env.(*node.Env).ChainAPI.ChainHead(req.Context) + if err != nil { + return err + } + + nv, err := env.(*node.Env).ChainAPI.StateNetworkVersion(req.Context, ts.Key()) + if err != nil { + return err + } + + return re.Emit(fmt.Sprintf("Network Version: %d", nv)) + }, + Type: "", +} + +var stateListActorCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "list all actors", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + results, err := env.(*node.Env).ChainAPI.ListActor(req.Context) + if err != nil { + return err + } + + for addr, actor := range results { + output := makeActorView(actor, addr) + if err := re.Emit(output); err != nil { + return err + } + } + return nil + }, + Type: &ActorView{}, + Encoders: cmds.EncoderMap{ + cmds.JSON: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, a *ActorView) error { + marshaled, err := json.Marshal(a) + if err != nil { + return err + } + _, err = w.Write(marshaled) + if err != nil { + return err + } + _, err = w.Write([]byte("\n")) + return err + }), + }, +} + +var stateSysActorCIDsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Returns the built-in actor bundle manifest ID & system actor cids", + }, + Options: []cmds.Option{ + cmds.UintOption("network-version", "specify network version"), + }, + 
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + + var nv network.Version + var err error + targetNV, ok := req.Options["network-version"].(uint) + if ok { + nv = network.Version(targetNV) + } else { + nv, err = env.(*node.Env).ChainAPI.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return err + } + } + + buf := new(bytes.Buffer) + buf.WriteString(fmt.Sprintf("Network Version: %d\n", nv)) + + actorVersion, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return err + } + buf.WriteString(fmt.Sprintf("Actor Version: %d\n", actorVersion)) + + tw := tablewriter.New(tablewriter.Col("Actor"), tablewriter.Col("CID")) + + actorsCids, err := env.(*node.Env).ChainAPI.StateActorCodeCIDs(ctx, nv) + if err != nil { + return err + } + for name, cid := range actorsCids { + tw.Write(map[string]interface{}{ + "Actor": name, + "CID": cid.String(), + }) + } + + if err := tw.Flush(buf); err != nil { + return err + } + + return re.Emit(buf) + }, +} + +func makeActorView(act *types.Actor, addr address.Address) *ActorView { + return &ActorView{ + Address: addr.String(), + Code: act.Code, + Nonce: act.Nonce, + Balance: act.Balance, + Head: act.Head, + } +} diff --git a/cmd/state_test.go b/cmd/state_test.go new file mode 100644 index 0000000000..87ac002f08 --- /dev/null +++ b/cmd/state_test.go @@ -0,0 +1,41 @@ +package cmd_test + +import ( + "bytes" + "context" + "encoding/json" + "testing" + + "github.com/filecoin-project/venus/cmd" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/app/node/test" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestActorDaemon(t *testing.T) { + tf.IntegrationTest(t) + ctx := context.Background() + t.Run("state ls --enc json returns NDJSON containing all actors in the state tree", func(t *testing.T) { + builder := test.NewNodeBuilder(t) + + _, cmdClient, done := 
builder.BuildAndStartAPI(ctx) + defer done() + + op1 := cmdClient.RunSuccess(ctx, "state", "list-actor", "--enc", "json") + result1 := op1.ReadStdoutTrimNewlines() + + var avs []cmd.ActorView + for _, line := range bytes.Split([]byte(result1), []byte{'\n'}) { + // unmarshall JSON to actor view an add to slice + var av cmd.ActorView + err := json.Unmarshal(line, &av) + require.NoError(t, err) + avs = append(avs, av) + } + + assert.NotZero(t, len(avs)) + }) +} diff --git a/cmd/swarm.go b/cmd/swarm.go new file mode 100644 index 0000000000..8e54dd24c7 --- /dev/null +++ b/cmd/swarm.go @@ -0,0 +1,768 @@ +package cmd + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "sort" + "strings" + "text/tabwriter" + "time" + + "github.com/dustin/go-humanize" + "github.com/ipfs/go-cid" + cmds "github.com/ipfs/go-ipfs-cmds" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/core/routing" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/pkg/net" +) + +const ( + dhtVerboseOptionName = "verbose" + numProvidersOptionName = "num-providers" +) + +var netCmdLog = logging.Logger("net-cmd") + +// swarmCmd contains swarm commands. +var swarmCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Interact with the swarm", + ShortDescription: ` +'venus swarm' is a tool to manipulate the libp2p swarm. The swarm is the +component that opens, listens for, and maintains connections to other +libp2p peers on the internet. 
+`, + }, + Subcommands: map[string]*cmds.Command{ + "id": idCmd, + "query": queryDhtCmd, + "peers": swarmPeersCmd, + "connect": swarmConnectCmd, + "findpeer": findPeerDhtCmd, + "findprovs": findProvidersDhtCmd, + "bandwidth": statsBandwidthCmd, + "ping": swarmPingCmd, + "disconnect": disconnectCmd, + "reachability": reachabilityCmd, + "protect": protectAddCmd, + "unprotect": protectRemoveCmd, + "list-protected": protectListCmd, + "scores": swarmScoresCmd, + }, +} + +var swarmPeersCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List peers with open connections.", + ShortDescription: ` +'venus swarm peers' lists the set of peers this node is connected to. +`, + }, + Options: []cmds.Option{ + cmds.BoolOption("agent", "a", "Print agent name"), + cmds.BoolOption("extended", "x", "Print extended peer information in json"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + needAgent, _ := req.Options["agent"].(bool) + extended, _ := req.Options["extended"].(bool) + + peers, err := env.(*node.Env).NetworkAPI.NetPeers(ctx) + if err != nil { + return err + } + + sort.Slice(peers, func(i, j int) bool { + return strings.Compare(string(peers[i].ID), string(peers[j].ID)) > 0 + }) + + buf := &bytes.Buffer{} + writer := NewSilentWriter(buf) + + if extended { + // deduplicate + seen := make(map[peer.ID]struct{}) + + for _, peer := range peers { + _, dup := seen[peer.ID] + if dup { + continue + } + seen[peer.ID] = struct{}{} + + info, err := env.(*node.Env).NetworkAPI.NetPeerInfo(ctx, peer.ID) + if err != nil { + netCmdLog.Warnf("error getting extended peer info: %s", err) + } else { + bytes, err := json.Marshal(&info) + if err != nil { + netCmdLog.Warnf("error marshalling extended peer info: %s", err) + } else { + writer.Println(string(bytes)) + } + } + } + } else { + for _, peer := range peers { + var agent string + if needAgent { + agent, err = env.(*node.Env).NetworkAPI.NetAgentVersion(ctx, peer.ID) + if 
err != nil { + netCmdLog.Warnf("getting agent version: %s", err) + } else { + agent = ", " + agent + } + } + writer.Printf("%s, %s%s\n", peer.ID, peer.Addrs, agent) + } + } + + return re.Emit(buf) + }, +} + +var swarmPingCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Ping peers", + ShortDescription: ` +'venus swarm ping' ping peers. +`, + }, + Options: []cmds.Option{ + cmds.IntOption("count", "c", "specify the number of times it should ping").WithDefault(10), + cmds.IntOption("internal", "minimum time between pings").WithDefault(1), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("peerMultiaddr", true, true, "peer multiaddr"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) < 1 { + return re.Emit("please provide a peerID") + } + ctx := req.Context + count, _ := req.Options["count"].(int) + interval, _ := req.Options["internal"].(int) + + pis, err := net.ParseAddresses(ctx, req.Arguments) + if err != nil { + return err + } + + for _, pi := range pis { + err := env.(*node.Env).NetworkAPI.NetConnect(ctx, pi) + if err != nil { + return fmt.Errorf("connect: %w", err) + } + + var avg time.Duration + var successful int + + for i := 0; i < count && ctx.Err() == nil; i++ { + start := time.Now() + + rtt, err := env.(*node.Env).NetworkAPI.NetPing(ctx, pi.ID) + if err != nil { + if ctx.Err() != nil { + break + } + log.Errorf("Ping failed: error=%v", err) + continue + } + if err := re.Emit(fmt.Sprintf("Pong received: time=%v", rtt)); err != nil { + return err + } + avg = avg + rtt + successful++ + + wctx, cancel := context.WithTimeout(ctx, time.Until(start.Add(time.Duration(interval)*time.Second))) + <-wctx.Done() + cancel() + } + + if successful > 0 { + if err := re.Emit(fmt.Sprintf("Average latency: %v", avg/time.Duration(successful))); err != nil { + return err + } + } + } + + return nil + }, +} + +var swarmConnectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Open connection to 
a given address.", + ShortDescription: ` +'venus swarm connect' opens a new direct connection to a peer address. + +The address format is a multiaddr: + +venus swarm connect /ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("address", true, true, "address of peer to connect to.").EnableStdin(), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + pis, err := net.ParseAddresses(req.Context, req.Arguments) + if err != nil { + return err + } + + for _, pi := range pis { + err := env.(*node.Env).NetworkAPI.NetConnect(req.Context, pi) + if err != nil { + return err + } + if err := re.Emit(pi.ID.Pretty()); err != nil { + return err + } + } + + return nil + }, +} + +var queryDhtCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Find the closest Peer IDs to a given Peer ID by querying the DHT.", + ShortDescription: "Outputs a list of newline-delimited Peer IDs.", + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("peerID", true, false, "The peerID to run the query against."), + }, + Options: []cmds.Option{ + cmds.BoolOption(dhtVerboseOptionName, "v", "Print extra information."), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + id, err := peer.Decode(req.Arguments[0]) + if err != nil { + return cmds.ClientError("invalid peer ID") + } + + ctx, cancel := context.WithCancel(req.Context) + ctx, events := routing.RegisterForQueryEvents(ctx) + + closestPeers, err := env.(*node.Env).NetworkAPI.NetGetClosestPeers(ctx, string(id)) + if err != nil { + cancel() + return err + } + + go func() { + defer cancel() + for _, p := range closestPeers { + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + ID: p, + Type: routing.FinalPeer, + }) + } + }() + + for e := range events { + if err := res.Emit(e); err != nil { + return err + } + } + + return nil + }, + Type: routing.QueryEvent{}, +} + +var 
findProvidersDhtCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Find peers that can provide a given key's value.", + ShortDescription: "Outputs a list of newline-delimited provider Peer IDs for a given key.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("key", true, false, "The key whose provider Peer IDs are output.").EnableStdin(), + }, + Options: []cmds.Option{ + cmds.BoolOption(dhtVerboseOptionName, "v", "Print extra information."), + cmds.IntOption(numProvidersOptionName, "n", "The max number of providers to find.").WithDefault(20), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + numProviders, _ := req.Options[numProvidersOptionName].(int) + if numProviders < 1 { + return fmt.Errorf("number of providers must be greater than 0") + } + + c, err := cid.Parse(req.Arguments[0]) + if err != nil { + return err + } + + ctx, cancel := context.WithTimeout(req.Context, time.Minute) + ctx, events := routing.RegisterForQueryEvents(ctx) + + pchan := env.(*node.Env).NetworkAPI.NetFindProvidersAsync(ctx, c, numProviders) + + go func() { + defer cancel() + for p := range pchan { + np := p + // Note that the peer IDs in these Provider + // events are the main output of this command. + // These results are piped back into the event + // system so that they can be read alongside + // other routing events which are output in + // verbose mode but otherwise filtered. 
+ routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.Provider, + Responses: []*peer.AddrInfo{&np}, + }) + } + }() + for e := range events { + if err := res.Emit(e); err != nil { + return err + } + } + + return nil + }, + Type: routing.QueryEvent{}, +} + +var findPeerDhtCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Find the multiaddresses associated with a Peer ID.", + ShortDescription: "Outputs a list of newline-delimited multiaddresses.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("peerID", true, false, "The ID of the peer to search for."), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + peerID, err := peer.Decode(req.Arguments[0]) + if err != nil { + return err + } + + out, err := env.(*node.Env).NetworkAPI.NetFindPeer(req.Context, peerID) + if err != nil { + return err + } + + for _, addr := range out.Addrs { + if err := res.Emit(addr.String()); err != nil { + return err + } + } + return nil + }, +} + +var statsBandwidthCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "View bandwidth usage metrics", + }, + Options: []cmds.Option{ + cmds.BoolOption("by-peer", "list bandwidth usage by peer"), + cmds.BoolOption("by-protocol", "list bandwidth usage by protocol"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + netAPI := env.(*node.Env).NetworkAPI + + bypeer, _ := req.Options["by-peer"].(bool) + byproto, _ := req.Options["by-protocol"].(bool) + + buf := &bytes.Buffer{} + + tw := tabwriter.NewWriter(buf, 4, 4, 2, ' ', 0) + + fmt.Fprintf(tw, "Segment\tTotalIn\tTotalOut\tRateIn\tRateOut\n") + + if bypeer { + bw, err := netAPI.NetBandwidthStatsByPeer(ctx) + if err != nil { + return err + } + + var peers []string + for p := range bw { + peers = append(peers, p) + } + + sort.Slice(peers, func(i, j int) bool { + return peers[i] < peers[j] + }) + + for _, p := range peers { + s := bw[p] + fmt.Fprintf(tw, 
"%s\t%s\t%s\t%s/s\t%s/s\n", p, humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut))) + } + } else if byproto { + bw, err := netAPI.NetBandwidthStatsByProtocol(ctx) + if err != nil { + return err + } + + var protos []protocol.ID + for p := range bw { + protos = append(protos, p) + } + + sort.Slice(protos, func(i, j int) bool { + return protos[i] < protos[j] + }) + + for _, p := range protos { + s := bw[p] + if p == "" { + p = "" + } + fmt.Fprintf(tw, "%s\t%s\t%s\t%s/s\t%s/s\n", p, humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut))) + } + } else { + + s, err := netAPI.NetBandwidthStats(ctx) + if err != nil { + return err + } + + fmt.Fprintf(tw, "Total\t%s\t%s\t%s/s\t%s/s\n", humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut))) + } + + if err := tw.Flush(); err != nil { + return err + } + + return re.Emit(buf) + }, +} + +var idCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show info about the network peers", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + addrs, err := env.(*node.Env).NetworkAPI.NetAddrsListen(req.Context) + if err != nil { + return err + } + + hostID := addrs.ID + details := IDDetails{ + Addresses: make([]ma.Multiaddr, len(addrs.Addrs)), + ID: hostID, + } + + for i, addr := range addrs.Addrs { + subAddr, err := ma.NewMultiaddr(fmt.Sprintf("/ipfs/%s", hostID.Pretty())) + if err != nil { + return err + } + details.Addresses[i] = addr.Encapsulate(subAddr) + } + + return re.Emit(&details) + }, + Type: &IDDetails{}, +} + +var disconnectCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Disconnect from a peer", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("peers", true, true, "The peers to disconnect for"), + }, + Run: func(req 
*cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + if len(req.Arguments) == 0 { + return fmt.Errorf("must pass peer id") + } + ctx := req.Context + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + ids := req.Arguments + for _, id := range ids { + pid, err := peer.Decode(id) + if err != nil { + return err + } + writer.Printf("disconnect %s", pid.Pretty()) + err = env.(*node.Env).NetworkAPI.NetDisconnect(ctx, pid) + if err != nil { + return err + } + writer.Println(" success") + } + return re.Emit(buf) + }, +} + +var reachabilityCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print information about reachability from the internet", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + + i, err := env.(*node.Env).NetworkAPI.NetAutoNatStatus(ctx) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + writer.Println("AutoNAT status: ", i.Reachability.String()) + if i.PublicAddr != "" { + writer.Println("Public address: ", i.PublicAddr) + } + + return re.Emit(buf) + }, +} + +var protectAddCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Add one or more peer IDs to the list of protected peer connections", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("peers", true, true, "need protect peers"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + netAPI := env.(*node.Env).NetworkAPI + + pids, err := decodePeerIDsFromArgs(req) + if err != nil { + return err + } + + err = netAPI.NetProtectAdd(ctx, pids) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + writer.Println("added to protected peers:") + for _, pid := range pids { + writer.Printf(" %s\n", pid) + } + return re.Emit(buf) + }, +} + +var protectRemoveCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Remove one or more peer IDs from 
the list of protected peer connections.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("peers", true, true, "need unprotect peers"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + netAPI := env.(*node.Env).NetworkAPI + + pids, err := decodePeerIDsFromArgs(req) + if err != nil { + return err + } + + err = netAPI.NetProtectRemove(ctx, pids) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + writer.Println("removed from protected peers:") + for _, pid := range pids { + writer.Printf(" %s\n", pid) + } + return re.Emit(buf) + }, +} + +// decodePeerIDsFromArgs decodes all the arguments present in cli.Context.Args as peer.ID. +// +// This function requires at least one argument to be present, and arguments must not be empty +// string. Otherwise, an error is returned. +func decodePeerIDsFromArgs(req *cmds.Request) ([]peer.ID, error) { + pidArgs := req.Arguments + if len(pidArgs) == 0 { + return nil, fmt.Errorf("must specify at least one peer ID as an argument") + } + var pids []peer.ID + for _, pidStr := range pidArgs { + if pidStr == "" { + return nil, fmt.Errorf("peer ID must not be empty") + } + pid, err := peer.Decode(pidStr) + if err != nil { + return nil, err + } + pids = append(pids, pid) + } + return pids, nil +} + +var protectListCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List the peer IDs with protected connection.", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + ctx := req.Context + netAPI := env.(*node.Env).NetworkAPI + + pids, err := netAPI.NetProtectList(ctx) + if err != nil { + return err + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + + for _, pid := range pids { + writer.Printf("%s\n", pid) + } + + return re.Emit(buf) + }, +} + +var swarmScoresCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Print peers' pubsub scores", + }, + Options: 
[]cmds.Option{ + cmds.BoolOption("extended", "x", "print extended peer scores in json"), + cmds.BoolOption("sort", "s", "sort by peer score"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + scores, err := env.(*node.Env).NetworkAPI.NetPubsubScores(req.Context) + if err != nil { + return err + } + + sorted, _ := req.Options["sort"].(bool) + extended, _ := req.Options["extended"].(bool) + + if sorted { + sort.Slice(scores, func(i, j int) bool { + return scores[i].Score.Score > scores[j].Score.Score + }) + } + + buf := new(bytes.Buffer) + writer := NewSilentWriter(buf) + if extended { + for _, peer := range scores { + data, err := json.Marshal(peer) + if err != nil { + return err + } + writer.Printf("%s\n", string(data)) + } + } else { + for _, peer := range scores { + writer.Printf("%s, %f\n", peer.ID, peer.Score.Score) + } + } + + return re.Emit(buf) + }, +} + +// IDDetails is a collection of information about a node. +type IDDetails struct { + Addresses []ma.Multiaddr + ID peer.ID + AgentVersion string + ProtocolVersion string + PublicKey []byte // raw bytes +} + +// MarshalJSON implements json.Marshaler +func (idd IDDetails) MarshalJSON() ([]byte, error) { + addressStrings := make([]string, len(idd.Addresses)) + for i, addr := range idd.Addresses { + addressStrings[i] = addr.String() + } + + v := map[string]interface{}{ + "Addresses": addressStrings, + } + + if idd.ID != "" { + v["ID"] = idd.ID.Pretty() + } + if idd.AgentVersion != "" { + v["AgentVersion"] = idd.AgentVersion + } + if idd.ProtocolVersion != "" { + v["ProtocolVersion"] = idd.ProtocolVersion + } + if idd.PublicKey != nil { + // Base64-encode the public key explicitly. + // This is what the built-in JSON encoder does to []byte too. 
+ v["PublicKey"] = base64.StdEncoding.EncodeToString(idd.PublicKey) + } + return json.Marshal(v) +} + +// UnmarshalJSON implements Unmarshaler +func (idd *IDDetails) UnmarshalJSON(data []byte) error { + var v map[string]*json.RawMessage + var err error + if err = json.Unmarshal(data, &v); err != nil { + return err + } + + var addresses []string + if err := decode(v, "Addresses", &addresses); err != nil { + return err + } + idd.Addresses = make([]ma.Multiaddr, len(addresses)) + for i, addr := range addresses { + a, err := ma.NewMultiaddr(addr) + if err != nil { + return err + } + idd.Addresses[i] = a + } + + var id string + if err := decode(v, "ID", &id); err != nil { + return err + } + if idd.ID, err = peer.Decode(id); err != nil { + return err + } + + if err := decode(v, "AgentVersion", &idd.AgentVersion); err != nil { + return err + } + if err := decode(v, "ProtocolVersion", &idd.ProtocolVersion); err != nil { + return err + } + return decode(v, "PublicKey", &idd.PublicKey) +} + +func decode(idd map[string]*json.RawMessage, key string, dest interface{}) error { + if raw := idd[key]; raw != nil { + if err := json.Unmarshal(*raw, &dest); err != nil { + return err + } + } + return nil +} diff --git a/cmd/swarm_integration_test.go b/cmd/swarm_integration_test.go new file mode 100644 index 0000000000..8f24be6539 --- /dev/null +++ b/cmd/swarm_integration_test.go @@ -0,0 +1,116 @@ +package cmd_test + +import ( + "context" + "testing" + + "github.com/filecoin-project/venus/app/node/test" + th "github.com/filecoin-project/venus/pkg/testhelpers" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/stretchr/testify/assert" +) + +func TestSwarmConnectPeersValid(t *testing.T) { + tf.IntegrationTest(t) + + ctx := context.Background() + builder := test.NewNodeBuilder(t) + + n1 := builder.BuildAndStart(ctx) + defer n1.Stop(ctx) + n2 := builder.BuildAndStart(ctx) + defer n2.Stop(ctx) + + test.ConnectNodes(t, n1, n2) +} + +func 
TestSwarmConnectPeersInvalid(t *testing.T) { + tf.IntegrationTest(t) + + ctx := context.Background() + builder := test.NewNodeBuilder(t) + + _, cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + cmdClient.RunFail(ctx, "failed to parse ip4 addr", + "swarm", "connect", "/ip4/hello", + ) +} + +func TestId(t *testing.T) { + tf.IntegrationTest(t) + + ctx := context.Background() + + builder := test.NewNodeBuilder(t) + _, cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + id := cmdClient.RunSuccess(ctx, "swarm", "id") + + idContent := id.ReadStdout() + assert.Containsf(t, idContent, "/ip4/127.0.0.1/tcp/", "default addr") + assert.Contains(t, idContent, "ID") +} + +func TestPersistId(t *testing.T) { + tf.IntegrationTest(t) + + // we need to control this + dir := t.TempDir() + + // Start a demon in dir + d1 := th.NewDaemon(t, th.ContainerDir(dir)).Start() + + // get the id and kill it + id1 := d1.GetID() + d1.Stop() + + // restart the daemon + d2 := th.NewDaemon(t, th.ContainerDir(dir)).Start() + + // get the id and compare to previous + id2 := d2.GetID() + d2.ShutdownSuccess() + t.Logf("d1: %s", d1.ReadStdout()) + t.Logf("d2: %s", d2.ReadStdout()) + assert.Equal(t, id1, id2) +} + +func TestDhtFindPeer(t *testing.T) { + tf.IntegrationTest(t) + ctx := context.Background() + + builder1 := test.NewNodeBuilder(t) + n1 := builder1.BuildAndStart(ctx) + defer n1.Stop(ctx) + cmdClient, done := test.RunNodeAPI(ctx, n1, t) + defer done() + + builder2 := test.NewNodeBuilder(t) + n2 := builder2.BuildAndStart(ctx) + defer n2.Stop(ctx) + + test.ConnectNodes(t, n1, n2) + + pi, err := n2.Network().API().NetAddrsListen(ctx) + assert.Nil(t, err) + findpeerOutput := cmdClient.RunSuccess(ctx, "swarm", "findpeer", pi.ID.Pretty()).ReadStdoutTrimNewlines() + + assert.Contains(t, findpeerOutput, pi.Addrs[0].String()) +} + +func TestStatsBandwidth(t *testing.T) { + tf.IntegrationTest(t) + ctx := context.Background() + builder := test.NewNodeBuilder(t) + + _, 
cmdClient, done := builder.BuildAndStartAPI(ctx) + defer done() + + stats := cmdClient.RunSuccess(ctx, "swarm", "bandwidth").ReadStdoutTrimNewlines() + + assert.Equal(t, "Segment TotalIn TotalOut RateIn RateOut\nTotal 0 B 0 B 0 B/s 0 B/s", stats) +} diff --git a/cmd/sync.go b/cmd/sync.go new file mode 100644 index 0000000000..f1ec9995a3 --- /dev/null +++ b/cmd/sync.go @@ -0,0 +1,162 @@ +// Package cmd implements the command to print the blockchain. +package cmd + +import ( + "bytes" + "strconv" + + "github.com/filecoin-project/venus/venus-shared/types" + + cmds "github.com/ipfs/go-ipfs-cmds" + + "github.com/filecoin-project/venus/app/node" + syncTypes "github.com/filecoin-project/venus/pkg/chainsync/types" +) + +var syncCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Inspect the sync", + }, + Subcommands: map[string]*cmds.Command{ + "status": storeStatusCmd, + "history": historyCmd, + "concurrent": getConcurrent, + "set-concurrent": setConcurrent, + }, +} + +var getConcurrent = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "get concurrent of sync thread", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + concurrent := env.(*node.Env).SyncerAPI.Concurrent(req.Context) + return printOneString(re, strconv.Itoa(int(concurrent))) + }, +} + +var setConcurrent = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "set concurrent of sync thread", + }, + Options: []cmds.Option{ + cmds.Int64Option("concurrent", "coucurrent of sync thread"), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("concurrent", true, false, "coucurrent of sync thread"), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + concurrent, err := strconv.Atoi(req.Arguments[0]) + if err != nil { + return cmds.ClientError("invalid number") + } + env.(*node.Env).SyncerAPI.SetConcurrent(req.Context, int64(concurrent)) //nolint + return nil + }, +} + +var storeStatusCmd = &cmds.Command{ + Helptext: 
cmds.HelpText{ + Tagline: "Show status of chain sync operation.", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + tracker := env.(*node.Env).SyncerAPI.SyncerTracker(req.Context) + targets := tracker.Buckets + + var inSyncing []*types.Target + var waitTarget []*types.Target + for _, t := range targets { + if t.State == types.SyncStateStage(syncTypes.StateInSyncing) { + inSyncing = append(inSyncing, t) + } else { + waitTarget = append(waitTarget, t) + } + } + + w := bytes.NewBufferString("") + writer := NewSilentWriter(w) + + if len(inSyncing) == 0 && len(waitTarget) == 0 { + lenTH := len(tracker.History) + if lenTH > 0 { + writer.Println(tracker.History[lenTH-1].String()) + } + + writer.Println("Done!") + return re.Emit(w) + } + + count := 1 + if len(inSyncing) > 0 { + writer.Println("Syncing:") + for _, t := range inSyncing { + writer.Println("SyncTarget:", strconv.Itoa(count)) + writer.Println("\tBase:", t.Base.Height(), t.Base.Key().String()) + writer.Println("\tTarget:", t.Head.Height(), t.Head.Key().String()) + writer.Println("\tCurrent:", t.Current.Height(), t.Current.Key().String()) + + HeightDiff := t.Head.Height() - t.Current.Height() + writer.Println("\tHeightDiff:", HeightDiff) + + writer.Println("\tStatus:", t.State.String()) + writer.Println("\tErr:", t.Err) + writer.Println() + count++ + } + } + + if len(waitTarget) > 0 { + writer.Println("Waiting:") + for _, t := range waitTarget { + writer.Println("SyncTarget:", strconv.Itoa(count)) + writer.Println("\tBase:", t.Base.Height(), t.Base.Key().String()) + writer.Println("\tTarget:", t.Head.Height(), t.Head.Key().String()) + writer.Println("\tCurrent:", t.Current.Height(), t.Current.Key().String()) + + HeightDiff := t.Head.Height() - t.Current.Height() + writer.Println("\tHeightDiff:", HeightDiff) + + writer.Println("\tStatus:", t.State.String()) + writer.Println("\tErr:", t.Err) + writer.Println() + count++ + } + } + + return re.Emit(w) + }, +} + +var 
historyCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show history of chain sync.", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + tracker := env.(*node.Env).SyncerAPI.SyncerTracker(req.Context) + w := bytes.NewBufferString("") + writer := NewSilentWriter(w) + + writer.Println("History:") + history := tracker.History + count := 1 + for _, t := range history { + writer.Println("SyncTarget:", strconv.Itoa(count)) + writer.Println("\tBase:", t.Base.Height(), t.Base.Key().String()) + + writer.Println("\tTarget:", t.Head.Height(), t.Head.Key().String()) + + if t.Current != nil { + writer.Println("\tCurrent:", t.Current.Height(), t.Current.Key().String()) + } else { + writer.Println("\tCurrent:") + } + writer.Println("\tTime:", t.End.Sub(t.Start).Milliseconds()) + writer.Println("\tStatus:", t.State.String()) + writer.Println("\tErr:", t.Err) + writer.Println() + count++ + } + + return re.Emit(w) + }, +} diff --git a/cmd/tablewriter/tablewriter.go b/cmd/tablewriter/tablewriter.go new file mode 100755 index 0000000000..a84d8ddb60 --- /dev/null +++ b/cmd/tablewriter/tablewriter.go @@ -0,0 +1,141 @@ +package tablewriter + +import ( + "fmt" + "io" + "strings" + "unicode/utf8" + + "github.com/acarl005/stripansi" +) + +type Column struct { + Name string + SeparateLine bool + Lines int +} + +type TableWriter struct { + cols []Column + rows []map[int]string +} + +func Col(name string) Column { + return Column{ + Name: name, + SeparateLine: false, + } +} + +func NewLineCol(name string) Column { + return Column{ + Name: name, + SeparateLine: true, + } +} + +// Unlike text/tabwriter, this works with CLI escape codes, and allows for info +// +// in separate lines +func New(cols ...Column) *TableWriter { + return &TableWriter{ + cols: cols, + } +} + +func (w *TableWriter) Write(r map[string]interface{}) { + // this can cause columns to be out of order, but will at least work + byColID := map[int]string{} + +cloop: + for col, 
val := range r { + for i, column := range w.cols { + if column.Name == col { + byColID[i] = fmt.Sprint(val) + w.cols[i].Lines++ + continue cloop + } + } + + byColID[len(w.cols)] = fmt.Sprint(val) + w.cols = append(w.cols, Column{ + Name: col, + SeparateLine: false, + Lines: 1, + }) + } + + w.rows = append(w.rows, byColID) +} + +func (w *TableWriter) Flush(out io.Writer) error { + colLengths := make([]int, len(w.cols)) + + header := map[int]string{} + for i, col := range w.cols { + if col.SeparateLine { + continue + } + header[i] = col.Name + } + + w.rows = append([]map[int]string{header}, w.rows...) + + for col, c := range w.cols { + if c.Lines == 0 { + continue + } + + for _, row := range w.rows { + val, found := row[col] + if !found { + continue + } + + if cliStringLength(val) > colLengths[col] { + colLengths[col] = cliStringLength(val) + } + } + } + + for _, row := range w.rows { + cols := make([]string, len(w.cols)) + + for ci, col := range w.cols { + if col.Lines == 0 { + continue + } + + e := row[ci] + pad := colLengths[ci] - cliStringLength(e) + 2 + if !col.SeparateLine && col.Lines > 0 { + e = e + strings.Repeat(" ", pad) + if _, err := fmt.Fprint(out, e); err != nil { + return err + } + } + + cols[ci] = e + } + + if _, err := fmt.Fprintln(out); err != nil { + return err + } + + for ci, col := range w.cols { + if !col.SeparateLine || len(cols[ci]) == 0 { + continue + } + + if _, err := fmt.Fprintf(out, " %s: %s\n", col.Name, cols[ci]); err != nil { + return err + } + } + } + + return nil +} + +func cliStringLength(s string) (n int) { + return utf8.RuneCountInString(stripansi.Strip(s)) +} diff --git a/cmd/tablewriter/tablewriter_test.go b/cmd/tablewriter/tablewriter_test.go new file mode 100755 index 0000000000..c6bf7be17f --- /dev/null +++ b/cmd/tablewriter/tablewriter_test.go @@ -0,0 +1,37 @@ +package tablewriter + +import ( + "os" + "testing" + + "github.com/fatih/color" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func 
TestTableWriter(t *testing.T) { + tf.UnitTest(t) + tw := New(Col("C1"), Col("X"), Col("C333"), NewLineCol("Thing")) + tw.Write(map[string]interface{}{ + "C1": "234", + "C333": "ou", + }) + tw.Write(map[string]interface{}{ + "C1": "23uieui4", + "C333": "ou", + "X": color.GreenString("#"), + "Thing": "a very long thing, annoyingly so", + }) + tw.Write(map[string]interface{}{ + "C1": "ttttttttt", + "C333": "eui", + }) + tw.Write(map[string]interface{}{ + "C1": "1", + "C333": "2", + "SurpriseColumn": "42", + }) + if err := tw.Flush(os.Stdout); err != nil { + t.Fatal(err) + } +} diff --git a/cmd/go-filecoin/util_test.go b/cmd/util_test.go similarity index 88% rename from cmd/go-filecoin/util_test.go rename to cmd/util_test.go index ce7eac1b0b..e9b293d0ef 100644 --- a/cmd/go-filecoin/util_test.go +++ b/cmd/util_test.go @@ -1,4 +1,4 @@ -package commands +package cmd import ( "testing" @@ -8,14 +8,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" ) func TestOptionalAddr(t *testing.T) { tf.UnitTest(t) t.Run("when option is specified", func(t *testing.T) { - opts := make(cmdkit.OptMap) specifiedAddr, err := address.NewSecp256k1Address([]byte("a new test address")) @@ -28,7 +27,6 @@ func TestOptionalAddr(t *testing.T) { }) t.Run("when no option specified return empty", func(t *testing.T) { - opts := make(cmdkit.OptMap) addr, err := optionalAddr(opts["from"]) diff --git a/cmd/utils.go b/cmd/utils.go new file mode 100644 index 0000000000..041149b799 --- /dev/null +++ b/cmd/utils.go @@ -0,0 +1,197 @@ +package cmd + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/signal" + "syscall" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/hako/durafmt" + "github.com/ipfs/go-cid" + cmds "github.com/ipfs/go-ipfs-cmds" + 
"github.com/pkg/errors" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// SilentWriter writes to a stream, stopping after the first error and discarding output until +// the error is cleared. +// No printing methods return an error (to avoid warnings about ignoring it), but they do return +// a boolean indicating whether an error is waiting to be cleared. +// Example usage: +// +// sw := NewSilentWriter(w) +// sw.Println("A line") +// sw.Println("Another line") +// return sw.Error() +type SilentWriter struct { + w io.Writer + err error +} + +// NewSilentWriter returns a new writer backed by `w`. +func NewSilentWriter(w io.Writer) *SilentWriter { + return &SilentWriter{w: w} +} + +// Error returns any error encountered while writing. +func (sw *SilentWriter) Error() error { + return sw.err +} + +// ClearError clears and returns any error encountered while writing. +// Subsequent writes will attempt to write to the underlying writer again. +func (sw *SilentWriter) ClearError() error { + err := sw.err + sw.err = nil + return err +} + +// Write writes with io.Writer.Write and returns true if there was no error. +func (sw *SilentWriter) Write(p []byte) bool { + if sw.err == nil { + _, sw.err = sw.w.Write(p) + } + return sw.err == nil +} + +// WriteString writes with io.WriteString and returns true if there was no error. +func (sw *SilentWriter) WriteString(str string) bool { + if sw.err == nil { + _, sw.err = io.WriteString(sw.w, str) + } + return sw.err == nil +} + +// WriteString writes with io.WriteString and returns true if there was no error. +func (sw *SilentWriter) WriteStringln(str string) bool { + if sw.err == nil { + _, sw.err = io.WriteString(sw.w, str+"\n") + } + return sw.err == nil +} + +// Print writes with fmt.Fprint and returns true if there was no error. +func (sw *SilentWriter) Print(a ...interface{}) bool { + if sw.err == nil { + _, sw.err = fmt.Fprint(sw.w, a...) 
+ } + return sw.err == nil +} + +// Println writes with fmt.Fprintln and returns true if there was no error. +func (sw *SilentWriter) Println(a ...interface{}) bool { + if sw.err == nil { + _, sw.err = fmt.Fprintln(sw.w, a...) + } + return sw.err == nil +} + +// Printf writes with fmt.Fprintf and returns true if there was no error. +func (sw *SilentWriter) Printf(format string, a ...interface{}) bool { + if sw.err == nil { + _, sw.err = fmt.Fprintf(sw.w, format, a...) + } + return sw.err == nil +} + +// PrintString prints a given Stringer to the writer. +func PrintString(w io.Writer, s fmt.Stringer) error { + _, err := fmt.Fprintln(w, s.String()) + return err +} + +func optionalAddr(o interface{}) (ret address.Address, err error) { + if o != nil { + ret, err = address.NewFromString(o.(string)) + if err != nil { + err = errors.Wrap(err, "invalid from address") + } + } + return +} + +func fromAddrOrDefault(req *cmds.Request, env cmds.Environment) (address.Address, error) { + addr, err := optionalAddr(req.Options["from"]) + if err != nil { + return address.Undef, err + } + if addr.Empty() { + return env.(*node.Env).WalletAPI.WalletDefaultAddress(req.Context) + } + return addr, nil +} + +func cidsFromSlice(args []string) ([]cid.Cid, error) { + out := make([]cid.Cid, len(args)) + for i, arg := range args { + c, err := cid.Decode(arg) + if err != nil { + return nil, err + } + out[i] = c + } + return out, nil +} + +func EpochTime(curr, e abi.ChainEpoch, blockDelay uint64) string { + switch { + case curr > e: + return fmt.Sprintf("%d (%s ago)", e, durafmt.Parse(time.Second*time.Duration(int64(blockDelay)*int64(curr-e))).LimitFirstN(2)) + case curr == e: + return fmt.Sprintf("%d (now)", e) + case curr < e: + return fmt.Sprintf("%d (in %s)", e, durafmt.Parse(time.Second*time.Duration(int64(blockDelay)*int64(e-curr))).LimitFirstN(2)) + } + + panic("math broke") +} + +func printOneString(re cmds.ResponseEmitter, str string) error { + buf := new(bytes.Buffer) + writer := 
NewSilentWriter(buf) + writer.Println(str) + + return re.Emit(buf) +} + +func ReqContext(cctx context.Context) context.Context { + var ( + ctx context.Context + done context.CancelFunc + ) + if cctx != nil { + ctx = cctx + } else { + ctx = context.Background() + } + ctx, done = context.WithCancel(ctx) + sigChan := make(chan os.Signal, 2) + go func() { + <-sigChan + done() + }() + signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP) + return ctx +} + +func isController(mi types.MinerInfo, addr address.Address) bool { + if addr == mi.Owner || addr == mi.Worker { + return true + } + + for _, ca := range mi.ControlAddresses { + if addr == ca { + return true + } + } + + return false +} diff --git a/cmd/version.go b/cmd/version.go new file mode 100644 index 0000000000..5e7c6366a7 --- /dev/null +++ b/cmd/version.go @@ -0,0 +1,23 @@ +package cmd + +import ( + "github.com/filecoin-project/venus/pkg/constants" + cmds "github.com/ipfs/go-ipfs-cmds" +) + +type versionInfo struct { + // Commit, is the git sha that was used to build this version of venus. 
+ Version string +} + +var versionCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Show venus version information", + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + return re.Emit(&versionInfo{ + Version: constants.UserVersion(), + }) + }, + Type: versionInfo{}, +} diff --git a/cmd/version_daemon_test.go b/cmd/version_daemon_test.go new file mode 100644 index 0000000000..0a1884d9cd --- /dev/null +++ b/cmd/version_daemon_test.go @@ -0,0 +1,71 @@ +package cmd_test + +import ( + "fmt" + "io" + "net/http" + "os/exec" + "strings" + "testing" + + "github.com/filecoin-project/venus/pkg/constants" + + th "github.com/filecoin-project/venus/pkg/testhelpers" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + manet "github.com/multiformats/go-multiaddr/net" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestVersion(t *testing.T) { + tf.IntegrationTest(t) + + commit := getCodeCommit(t) + verOut, err := exec.Command(th.MustGetFilecoinBinary(), "version").Output() + require.NoError(t, err) + + version := string(verOut) + assert.Contains(t, version, fmt.Sprintf("%s+git.%s", constants.BuildVersion, commit[0:7])) +} + +func TestVersionOverHttp(t *testing.T) { + tf.IntegrationTest(t) + + td := th.NewDaemon(t).Start() + defer td.ShutdownSuccess() + + maddr, err := td.CmdAddr() + require.NoError(t, err) + + _, host, err := manet.DialArgs(maddr) // nolint + require.NoError(t, err) + + url := fmt.Sprintf("http://%s/api/version", host) + req, err := http.NewRequest("POST", url, nil) + require.NoError(t, err) + + token, _ := td.CmdToken() + req.Header.Add("Authorization", "Bearer "+token) + + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusOK, res.StatusCode) + + commit := getCodeCommit(t)[0:7] + + defer res.Body.Close() // nolint: errcheck + body, err := io.ReadAll(res.Body) + require.NoError(t, err) + require.Contains(t, 
string(body), constants.BuildVersion+"+git."+commit) +} + +func getCodeCommit(t *testing.T) string { + var gitOut []byte + var err error + gitArgs := []string{"rev-parse", "--verify", "HEAD"} + if gitOut, err = exec.Command("git", gitArgs...).Output(); err != nil { + assert.NoError(t, err) + } + return strings.TrimSpace(string(gitOut)) +} diff --git a/designdocs.md b/designdocs.md index b1ad6a9197..7e0592bb97 100644 --- a/designdocs.md +++ b/designdocs.md @@ -2,20 +2,20 @@ ## Goal -Articulate the reasons why we write designdocs and propose that the definition of a plan is that it is written down. +Listed the reasons why we write designdocs and propose that the importance of designdocs. ## Problem Statement -If we don't capture, on paper, in the open, the design intent behind important parts of Filecoin we experience at least the following negative consequences: +If we don't write it down and shared it publicly, the purpose of a specific design within the Filecoin network would have the following negative consequences: - - **Wasted time**: when we don't take the time to formalize designs we often end up building the wrong thing or supporting a bunch of stuff that we don't want. We also waste time at a lower level, working within systems that are painful to use because we didn't take the time to fully think through how they should work up-front. - - **Missed opportunities to engage the community**: when we don't write designs we miss benefitting from the many points of view and specialized knowledge in the community. We also miss the opportunity to come together *as* a community. Engaging on design proposals brings us together. It’s exciting and energizing. - - **Inability for the community to engage.** This is less obvious flip side of the above. Designs have to exist on paper so that they are accessible to current and future collaborators. It can be frustrating to build, change, or work with something you don't understand the design of. 
You spend time trying to reverse engineer design intent instead of doing the thing you want to. The worst part is that unless there is a design on paper then nobody can run with or extend it. + - **Wasted time**: when we don't take the time to formalize designs we often end up building the wrong thing or supporting a bunch of stuff that we don't want. We would also waste time with minor issues, working within systems that are painful to use because we didn't take the time to fully thinking through how they should work up-front. + - **Missed opportunities to engage the community**: when we don't write designdocs we miss benefitting from the many points of view and specialized knowledge in the community. We also miss the opportunity to come together *as* a community. Engaging on design proposals brings us together. It’s exciting and energizing. + - **Inability for the community to engage.** This is less obvious flip side of the above. Designs have to exist on paper so that they are accessible to current and future collaborators. It can be frustrating to build, change, or work with something you don't understand the design of. You spend time trying to reverse engineering intent instead of doing the thing you want to. ## Proposal 1. Write designdocs for important components and features, not just ahead of time but in circumstances where there is uncertainty or confusion. -1. Adopt the attitude that there is no plan (design) unless it is written down and circulated in the open. The norm is: __show me the plan or it doesn’t exist__. +1. Adopt the attitude that there is no plan (design) unless it is written down and shared with the community. The spec is: __show me the plan or it doesn’t exist__. ## What is a designdoc? @@ -35,25 +35,25 @@ It is important to understand that the goal of a design doc is not to defend a p Capturing design intent on paper, in the open, in a designdoc: - **Subjects it to thoughtful criticism**. 
-- **Clearly articulates what is and is not important**. +- **Clearly articulates what is/not important**. - **Synchronizes the community on important questions**. - **Disseminates information in a scalable fashion**. -## What is a designdoc not? +## What is not a designdoc? A designdoc is not a spec: you use a designdoc to rationalize a plan ahead of implementation and it should have less detail and more discussion than a spec, and it should capture far more of the *why* than a spec would. A designdoc is not a github issue, though it may be captured in one: a designdoc typically ties together a number of open questions or problems into a coherent whole. A designdoc *does not* have to: -- be kept up to date. It's a tool for capturing knowledge, formalizing our thinking, and synchronizing the team. Once it has served those purposes we don't necessarily need to tend to it. -- be long. In fact it should not be long. Several pages, by six or eight it starts to feel bloated. -- solve every problem. It should contain sufficient detail to convince readers that it contains a good idea, but not so much as to obscure the big picture or risk bike shedding on details that can fall out later. +- be kept up to date. It's a way for capturing knowledge, formalizing our thinking, and synchronizing the team. Once it has served those purposes we don't necessarily need to tend to it. +- be long. In fact it should not be long. The importance here is always have a clearly description. +- solve all issues. It should contain sufficient detail to convince readers that it contains a good idea, but do not depend on a designdoc to solve all kinds of issues. ## Alternatives ##### Something like BIP or EIP. [Protocol-level changes](https://github.com/filecoin-project/specs/blob/master/process.md) go through a process -like that. Possibly we should adopt something similar for go-filecoin. But for now, we have designdocs. +like that. Possibly we should adopt something similar for venus. 
But for now, we have designdocs. ##### Use specifications. @@ -63,7 +63,7 @@ Specs do something different than designdocs—they enable interop. Compare the Specs are a contract between implementations and users. And like all contracts, they are dense and detail-oriented. This makes them a difficult way to understand the big picture of a design. It is tempting to use specs to serve both this contractual use case and also the proposal-evaluating use case we're talking about here. You could think of this doc as a proposal to use designdocs for the proposal-evaluating use case, as they seem to be a more appropriate tool for that task. -Obviously, we need specs too. But they often come later in the lifecycle, and are less useful for articulating system-level design than a designdoc. +Obviously, we need specs too. But specs often arrive late, and are less useful for articulating system-level design than a designdoc. ##### Github issues. @@ -81,7 +81,7 @@ A designdoc is a tool so like any apply it judiciously. If you’re building som With probability 1 it will be wrong. The point is to explicitly communicate what we are and are not trying to accomplish and to rationalize a proposal. It’s a planning tool and as soon as it is finished reality will start to diverge. But as they say: plans are worthless but planning is indispensable. ##### What if I’m not sure what the solution to this problem should be? -That’s OK. We are never sure. Try really hard to take a position. Nobody is going to hold it against you if we all discover together that the position was wrong. The hardest part of design is understanding the problem well enough to propose any solution. It facilitates collaboration and progress to write down a clear description of a problem and one hypothesized best solution. +That’s ok, keep working hard. Nobody is going to hold it against you if we find it wrong in the end. The hardest part of a design is be clear with the problem well enough to propose any solution. 
It facilitates collaboration and progress to write down a clear description of a problem and with a proposed solution.

##### I don’t like / want to write design docs. I want to write code.

-OK, but you’re just limiting yourself. Getting good at writing designs enables you to increase your leverage to accomplish bigger, better things. You’re also giving up influence over outcomes if you don’t get good at articulating your ideas. But if you really don’t want to, OK, find someone to work with who does, and collaborate. Or do the prototyping, dump state, and hand off.
+OK, but you’re just limiting yourself. Getting good at writing designdocs enables you to increase your leverage to accomplish bigger, better projects. You’re also giving up the practice of articulating your ideas. But if you really don’t want to, OK, find someone to work with who does, and collaborate.
diff --git a/documentation/en/venus-market-design-roadmap.md b/documentation/en/venus-market-design-roadmap.md
new file mode 100644
index 0000000000..402597d5c1
--- /dev/null
+++ b/documentation/en/venus-market-design-roadmap.md
@@ -0,0 +1,113 @@
+## venus-market module design & roadmap
+
+by Venus team
+
+Sep 2021
+
+## Background
+
+As the rebranding of filecoin terminology spearheaded by [FIP0018](https://github.com/filecoin-project/FIPs/blob/master/_fips/fip-0018.md) settled, consensus has been reached across communities (developers, providers, ecosystem partners and etc) to push for taking on more storage deals to improve the public perception on the fact that most of the network storages are still committed capacities (CCs). Given the above sentiment, design and implementation of venus-market module has been put into the spotlight. A clear long-term roadmap is due for Venus community to discuss and iterate on, also as a means for better communications with filecoin ecosystem in general.
+
+While Lotus is leading the way of implementing a full-fledged market module according to the [spec](https://spec.filecoin.io/#section-systems.filecoin_markets), Venus has been making much effort to catch up and closing the gap in regard to [markets](https://github.com/filecoin-project/venus/discussions/4532). Right now, Lotus supports a 1-to-1 (client-to-provider) storage and retrieval model where burdens of discoveries are mostly on storage clients and match making services like Estuary. Negotiation is a fairly manual process and does not support much flexibility. As venus-team is picking up the reminiscences of the [Filecoin Component Architecture](https://docs.google.com/document/d/1ukPD8j6plLEbbzUjxfo7eCauIrOeC_tqxqYK_ls9xbc/edit#), emergent ways of how market could facilitate the dynamics between storage providers and storage clients are constantly being integrated into the long-term vision of Venus filecoin.
+
+## Goals
+
+Current roadmap for venus-market is loosely broken into the following phases.
+
+### Phase 1: peer-to-peer model (short-term)
+
+For phase 1, venus-market will deliver a complete deal making experience as what lotus offers. This includes compatibility with lotus client where one can make deal with venus-market using lotus client, retrieve deal/data in the same way as lotus retrieves its data, setup storage ask and etc.
+ +![image-20210910170740850](https://i.loli.net/2021/09/10/seIgEWBiko6AKc2.png) + +- Implementation of the one-to-one model of lotus market like module and fully interoperable with lotus implementation, which means compatibility with lotus client and more +- venus-market deployed as independent module, like venus-sealer and venus-wallet +- Implementation of a reliable market module that runs a seperate process from the main storage process +- A clear module boundary that allows interoperability and user customizations +- Flexibilities of market module to interact with existing venus infrastructures using RPC APIs +- Supports for mainnet, calibration and Nerpa +- Lightweight client: compatibility with Lotus and support for venus-market unique features including client running seperately as a process and remove dependencies for node; great for bootstraping tests on deal making process + +### Phase 2: platform model + +For phase 2, venus-market is taking the following approach. + +**platform-to-peer**: venus-market as deal making backend for middle-man services like Estuary connecting client and provider. As deal market matures, instead of ineffectively advertising one's storage system in #fil-deal-market, storage middleman services like Estuary and Filswan are taking up the roles for distributing datacap more effectively to storage providers looking for deals. Given venus' unique architecture where multiple providers are sharing same infrastructure (chain services), venus-market is in a good position to provide before mentioned deal making backend for a storage middle-man service. + +**platform-to-platform**: venus-market as storage backend for a storage integrator (a storage provider who offers different kinds of storage products to its end user, for example, filecoin, S3, tape and etc). 
+ +![image-20210910160837732](https://i.loli.net/2021/09/10/sRY5u6Bw9aj713H.png) + +- Taking advantages of Venus' distributed architectural nature, a gateway service backend built on top of current infrastructure +- Compact API: seperation of node and venus-market data enabling local storage of some of the deal related meta data +- Data transfer support for different protocols in addition to `Graphsync` [*](https://docs.google.com/document/d/1XWcTp2MEOVtKLpcpiFeeDvc_gTwQ0Bc6yABCTzDmeP0/edit#heading=h.1oxn84bcd1n1) +- Meta data stored locally in HA database like mySQL by venus-market +- venus-market as deal gateway for storage providers using venus chain services (venus shared modules) +- Deal match making: multiple copies for store and faster retrieval + +### Phase 3: Decentrialized market (Dp2p) model (long-term vision) + +For phase 3, venus-market will look into ways to automate deal flow between client and provider using a peer-to-peer approach, giving up its role as a gateway in phase 2. Additionally, venus pool can be positioned as a retrieval node which is fully aware of deal meta that chain services helped to record. 
+ +![image-20210910171104881](https://i.loli.net/2021/09/10/VE6BLpaARrMck9x.png) + +- Goals for phase 3 is not as clear cut; require more iterations as filecoin develops smart contracts and others +- auto-match deal market: a service to provide algorithmically (as opposed to manually verifying data using current fil-plus framework) verified data storing/retrieval from peer to peer +- venus-market as gateway for IPFS: options for paid IPFS node +- In time, a platform model as an easy and quick way for matchmaking might fall out of favour and a faster layer 2 protocol could be built on top of Venus to make true p2p data storage with standardized storage services governed by blockchain +- New econ market on layer 2 + +## Design ideas + +Design draws inspirations from the original [filecoin component document](https://docs.google.com/document/d/1ukPD8j6plLEbbzUjxfo7eCauIrOeC_tqxqYK_ls9xbc/edit#) and [filecoin storage market module](https://docs.google.com/document/d/1FfMUpW8vanR9FrXsybxBBbba7DzeyuCIN2uAXgE7J8U/edit#heading=h.uq51khvyisgr). + +### Terminology + +- module and component: "A **module** means a code library that may be compiled and linked against. A **component** means a distinct process presenting or consuming RPC API." In this document, the distinction is not as clear. Will need revamping of the Venus documentation to redefine all terms. +- **GraphSync**: "The default underlying transfer protocol used by the Scheduler. The full graphsync specification can be found at [here](https://github.com/ipld/specs/blob/master/block-layer/graphsync/graphsync.md)." + +### Modules and processes + +> In a multi-process architecture, the storage component would form the miner operator’s entry point for all mining and market operations (but not basic node operations) pertaining to a single storage miner actor. It depends on a node to mediate blockchain interactions. **The storage component drives these interactions**. 
If viewed as a system of services, the storage component is the consumer of a service provided by a node. Thus, the **storage component will depend on an RPC API be provided by a node**. This API is likely to include streaming operations in order to provide continually changing blockchain state to the component. The [mimblewimble/grin project](https://github.com/mimblewimble) is another example of this multi-process node/miner architecture.
+
+Similar to what is described for storage component above, venus-market will be dependent on a RPC API provided by a node i.e. chain services, venus shared modules. Blockchain interactions will be handled by venus chain services which can also be extended to handle authentications among others.
+
+### Deal flow
+
+In phase 1, lotus market deal flow will be mirrored in Venus. Maintenance of the market and evolution with the network.
+
+In phase 2, proposing and accepting a deal will work as follows.
+
+- [Provider] Add storage ask bidding policy along with other deal acceptance policy
+- [Client] Query asks from venus-market with filters like geo locations, redundancy, deal lifespan and etc
+- [venus-market] Aggregate requirements from both providers and clients, matchmaking on demand
+- [venus-market] Provider(s) and client go through rounds of real-time bidding to match-make
+- Once matched, provider proceeds to store data as in the one-to-one model
+
+### Meta data
+
+Platform model implementation of venus-market may store metadata on the deals it distributes to providers under its wings. Like Airbnb, it may include metrics that a reputation system of both client and provider can be built upon. Metrics like storage success rate, retrieval success rate, fast retrieval enabled? and etc.
+
+### Dependencies
+
+1. `venus` module to provide node services
+2. `venus-messager` module to provide data services
+3. `venus-gateway` to provide signature services
+4. `venus-sealer` to provide sealing and data lookup services
+5.
`go-fil-market` compatible with lotus one-to-one model (For compatbility with lotus only) +6. piece data from external deals +7. datastore using HA databases for deal meta data + +*Note that `go-fil-market` included as dependencies is sololy for the use of compatibility with lotus. venus-market will be bundling other unique features along with compatibility with lotus.* + +![模块图](https://i.loli.net/2021/09/08/7UxfVujcNPmszyR.jpg) + +### Interactions + + + +## Risks + +- Community's concerns on [infrastructure failure](https://filecoinproject.slack.com/archives/CEHHJNJS3/p1627872429033000?thread_ts=1627864468.030900&cid=CEHHJNJS3), node redundancy can be setup +- Data transfer complexities +- Some of the goals in later phases may be moving target as ecosystem evolve diff --git a/documentation/images/venus_logo_big2.jpg b/documentation/images/venus_logo_big2.jpg new file mode 100644 index 0000000000..8d8be38721 Binary files /dev/null and b/documentation/images/venus_logo_big2.jpg differ diff --git a/documentation/misc/release-issue-template.md b/documentation/misc/release-issue-template.md new file mode 100644 index 0000000000..6cb7ba5835 --- /dev/null +++ b/documentation/misc/release-issue-template.md @@ -0,0 +1,89 @@ +# Venus X.Y.Z Release + +## 🚢 预计发布时间 + + + +## 🤔 版本注意事项 + + + + + +## ✅ 常规检查项 + +### 准备: + + + + - [ ] 确保当前`master`功能与其他组件不冲突,不破坏兼容性 + - [ ] 从`master`拉出发布`release/vX.Y.Z`分支 + - [ ] 依照[发版规则](https://github.com/ipfs-force-community/dev-guidances/blob/master/%E9%A1%B9%E7%9B%AE%E7%AE%A1%E7%90%86/Venus/%E7%89%88%E6%9C%AC%E5%8F%91%E5%B8%83%E7%AE%A1%E7%90%86.md)递进`master`上的版本号,并更新发布分支中`version.go`的版本号 + - [ ] 发布分支中功能冻结;如有重大`bug`修复需要从`master`中并入分支,可以通过[backport](https://github.com/filecoin-project/lotus/pull/8847)的方式合并至`release/vX.Y.Z`分支 + + + +### 测试: + +- [ ] **阶段 0 - 自动化测试** + - 自动化测试 + - [ ] CI: 通过所有CI + +- [ ] **阶段 1 - 自测** + - 升级dev测试环境 + - [ ] 检查节点同步情况 + - 升级预生产环境 + - [ ] (可选)检查节点同步情况 + - 确认以下工作流 (如是Z版本,此项可选;如是X、Y版本,此项为必选) + - [ ] 封装一个扇区 + - [ ] 
发一个存储订单
+    - [ ] 提交一个PoSt
+    - [ ] 出块验证,出一个块
+    - [ ] Snapdeal验证
+    - [ ] (可选)让一个扇区变成faulty,观察是否恢复
+- [ ] **阶段 2 - 社区Beta测试**
+  - [ ] (可选)社区[Venus Master](https://filecoinproject.slack.com/archives/C03B30M20N7)测试
+  - [ ] 新功能特性,配置变化等等的文档撰写
+
+- [ ] **阶段 3 - 发版**
+  - [ ] 最终准备
+    - [ ] 确认`version.go`已更新新版本号
+    - [ ] 准备changelog
+  - [ ] `tag`版本(3选1)
+    - [ ] 正式版`vX.Y.Z`
+    - [ ] rc版`vX.Y.Z-rc[x]`,并标记为`pre-release`
+    - [ ] pre-rc版`vX.Y.Z-pre-rc[x]`,并标记为`pre-release`
+  - [ ] 版本发布至`releases`
+  - [ ] (可选)检查是否有`PR`单独提交至`release/vX.Y.Z`分支,并提交`Release back to master`的`PR`
+  - [ ] (可选)创建新版本的discussion讨论帖
+
+
+
+### 发版后:
+
+- [ ] (可选)按需更新[release-issue-template.md](https://github.com/filecoin-project/venus/blob/master/documentation/misc/release-issue-template.md)模版
+- [ ] (可选)使用[release-issue-template.md](https://github.com/filecoin-project/venus/blob/master/documentation/misc/release-issue-template.md)模版创建下一个发版issue
diff --git a/documentation/zh/refactor_notes.md b/documentation/zh/refactor_notes.md
new file mode 100644
index 0000000000..0a388efc1c
--- /dev/null
+++ b/documentation/zh/refactor_notes.md
@@ -0,0 +1,13 @@
+# 重构日记
+
+- [ ] 关于 venus-devtool 部分,也就是开发过程中需要用到的各类辅助工具放置位置:从现在来看,以一个单独的、内部的 module 模式比较好,可以避免在 venus/v2 中显式声明对新的 module 进行 replace,从而也就避免了出现依赖关系上的“难以厘清”。
+
+- [ ] 在抽取 `chain.BlockHeader` 的过程中,出现了同样的序列化过程反复手写, `cachedBytes` 与 `cachedCid` 用上了又没完全用上的情况,这个应该是在处理历史遗留问题时追求“快速实现”的结果。同时,在写好实现之后立刻进行基本的测试编写帮助找出了 `SignatureData` 方法中,由于结构体复制后没有清除 cache 导致的bug。
+
+  由此可见,测试用例,尤其是基础功能性的测试用例,应当在逻辑代码实现后立即着手编写,这样做一方面是可以尽快找出问题,避免系统复杂之后更为困难的 debug 过程;另一方面则是可以趁着记忆仍然“新鲜”,保障有一个较好的测试覆盖率。
+
+- [ ] 在处理 `chain.BlockHeader` 中的 `cachedBytes` 和 `cachedCid` 字段时,出现了反复。最开始认为可以抽象出一类 `cborCache` 类型,专门进行 data 和 cid 的缓存。接着发现,在 `Message` 这类对象中如果使用这样一个缓存,很有可能出现修改了 Message 的属性之后得不到正确的序列化结果和 cid 的问题。
+
+  因此,将 `chain.BlockHeader` 中的 cache 字段也做了移除处理。如果仔细去想,这里 cache 字段出现的意义到底是什么?我认为,这类对象提供了太多需要进行雷同的序列化过程的方法。如 `Cid` 这个方法,甚至是先序列化再计算数据的哈希值。这导致序列化被反复无序地使用,我想这才是加上 cache 字段的初衷。
+
+  
那么实际上,我们真正要做的是减少序列化被使用的位置,并尽可能让且有序,如不提供一个只返回 Cid 的方法,而同时返回序列化结果和 Cid diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi new file mode 160000 index 0000000000..20f104e880 --- /dev/null +++ b/extern/filecoin-ffi @@ -0,0 +1 @@ +Subproject commit 20f104e88065aae59fd212d64be5bed108604e78 diff --git a/extern/test-vectors b/extern/test-vectors new file mode 160000 index 0000000000..d9a75a7873 --- /dev/null +++ b/extern/test-vectors @@ -0,0 +1 @@ +Subproject commit d9a75a7873aee0db28b87e3970d2ea16a2f37c6a diff --git a/_assets/arch-diagram.monopic b/fixtures/assets/arch-diagram.monopic similarity index 100% rename from _assets/arch-diagram.monopic rename to fixtures/assets/arch-diagram.monopic diff --git a/fixtures/assets/asset.go b/fixtures/assets/asset.go new file mode 100644 index 0000000000..14ebc8d6e1 --- /dev/null +++ b/fixtures/assets/asset.go @@ -0,0 +1,42 @@ +package assets + +import ( + "embed" + "path/filepath" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +//go:embed genesis-car +var carFS embed.FS + +func GetGenesis(networkType types.NetworkType) ([]byte, error) { + fileName := "" + switch networkType { + case types.NetworkForce: + fileName = "forcenet.car" + case types.NetworkNerpa: + fileName = "nerpanet.car" + case types.NetworkInterop: + fileName = "interopnet.car" + case types.NetworkButterfly: + fileName = "butterflynet.car" + case types.NetworkCalibnet: + fileName = "calibnet.car" + default: + fileName = "mainnet.car" + } + + return carFS.ReadFile(filepath.Join("genesis-car", fileName)) +} + +//go:embed proof-params +var paramsFS embed.FS + +func GetProofParams() ([]byte, error) { + return paramsFS.ReadFile(filepath.Join("proof-params", "parameters.json")) +} + +func GetSrs() ([]byte, error) { + return paramsFS.ReadFile(filepath.Join("proof-params", "srs-inner-product.json")) +} diff --git a/fixtures/assets/genesis-car/butterflynet.car b/fixtures/assets/genesis-car/butterflynet.car new file mode 100644 index 0000000000..71a1c684e3 
Binary files /dev/null and b/fixtures/assets/genesis-car/butterflynet.car differ diff --git a/fixtures/assets/genesis-car/calibnet.car b/fixtures/assets/genesis-car/calibnet.car new file mode 100644 index 0000000000..775cdf7907 Binary files /dev/null and b/fixtures/assets/genesis-car/calibnet.car differ diff --git a/fixtures/assets/genesis-car/forcenet.car b/fixtures/assets/genesis-car/forcenet.car new file mode 100644 index 0000000000..cc0eb71763 Binary files /dev/null and b/fixtures/assets/genesis-car/forcenet.car differ diff --git a/fixtures/assets/genesis-car/interopnet.car b/fixtures/assets/genesis-car/interopnet.car new file mode 100644 index 0000000000..2dadae61eb Binary files /dev/null and b/fixtures/assets/genesis-car/interopnet.car differ diff --git a/fixtures/assets/genesis-car/mainnet.car b/fixtures/assets/genesis-car/mainnet.car new file mode 100644 index 0000000000..f1b3f342a6 Binary files /dev/null and b/fixtures/assets/genesis-car/mainnet.car differ diff --git a/fixtures/assets/genesis-car/nerpanet.car b/fixtures/assets/genesis-car/nerpanet.car new file mode 100644 index 0000000000..c32e0171bc Binary files /dev/null and b/fixtures/assets/genesis-car/nerpanet.car differ diff --git a/fixtures/assets/proof-params/parameters.json b/fixtures/assets/proof-params/parameters.json new file mode 100644 index 0000000000..88bb0bfa34 --- /dev/null +++ b/fixtures/assets/proof-params/parameters.json @@ -0,0 +1,202 @@ +{ + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.params": { + "cid": "Qma5WL6abSqYg9uUQAZ3EHS286bsNsha7oAGsJBD48Bq2q", + "digest": "c3ad7bb549470b82ad52ed070aebb4f4", + "sector_size": 536870912 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.vk": { + "cid": "QmUa7f9JtJMsqJJ3s3ZXk6WyF4xJLE8FiqYskZGgk8GCDv", + "digest": "994c5b7d450ca9da348c910689f2dc7f", + "sector_size": 536870912 + }, + 
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.params": { + "cid": "QmQiT4qBGodrVNEgVTDXxBNDdPbaD8Ag7Sx3ZTq1zHX79S", + "digest": "5aedd2cf3e5c0a15623d56a1b43110ad", + "sector_size": 8388608 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.vk": { + "cid": "QmdcpKUQvHM8RFRVKbk1yHfEqMcBzhtFWKRp9SNEmWq37i", + "digest": "abd80269054d391a734febdac0d2e687", + "sector_size": 8388608 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.params": { + "cid": "QmYM6Hg7mjmvA3ZHTsqkss1fkdyDju5dDmLiBZGJ5pz9y9", + "digest": "311f92a3e75036ced01b1c0025f1fa0c", + "sector_size": 2048 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.vk": { + "cid": "QmaQsTLL3nc5dw6wAvaioJSBfd1jhQrA2o6ucFf7XeV74P", + "digest": "eadad9784969890d30f2749708c79771", + "sector_size": 2048 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": { + "cid": "QmNPc75iEfcahCwNKdqnWLtxnjspUGGR4iscjiz3wP3RtS", + "digest": "1b3cfd761a961543f9eb273e435a06a2", + "sector_size": 34359738368 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": { + "cid": "QmdFFUe1gcz9MMHc6YW8aoV48w4ckvcERjt7PkydQAMfCN", + "digest": "3a6941983754737fde880d29c7094905", + "sector_size": 34359738368 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": { + "cid": "QmUB6xTVjzBQGuDNeyJMrrJ1byk58vhPm8eY2Lv9pgwanp", + "digest": "1a392e7b759fb18e036c7559b5ece816", + "sector_size": 68719476736 + }, + 
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": { + "cid": "Qmd794Jty7k26XJ8Eg4NDEks65Qk8G4GVfGkwqvymv8HAg", + "digest": "80e366df2f1011953c2d01c7b7c9ee8e", + "sector_size": 68719476736 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { + "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR", + "digest": "7610b9f82bfc88405b7a832b651ce2f6", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { + "cid": "QmcS5JZs8X3TdtkEBpHAdUYjdNDqcL7fWQFtQz69mpnu2X", + "digest": "0e0958009936b9d5e515ec97b8cb792d", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { + "cid": "QmUiRx71uxfmUE8V3H9sWAsAXoM88KR4eo1ByvvcFNeTLR", + "digest": "1a7d4a9c8a502a497ed92a54366af33f", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { + "cid": "QmfCeddjFpWtavzfEzZpJfzSajGNwfL4RjFXWAvA9TSnTV", + "digest": "4dae975de4f011f101f5a2f86d1daaba", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { + "cid": "QmcSTqDcFVLGGVYz1njhUZ7B6fkKtBumsLUwx4nkh22TzS", + "digest": "82c88066be968bb550a05e30ff6c2413", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { + "cid": "QmSTCXF2ipGA3f6muVo6kHc2URSx6PzZxGUqu7uykaH5KU", + "digest": "ffd79788d614d27919ae5bd2d94eacb6", + "sector_size": 2048 + }, + 
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { + "cid": "QmU9SBzJNrcjRFDiFc4GcApqdApN6z9X7MpUr66mJ2kAJP", + "digest": "700171ecf7334e3199437c930676af82", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { + "cid": "QmbmUMa3TbbW3X5kFhExs6WgC4KeWT18YivaVmXDkB6ANG", + "digest": "79ebb55f56fda427743e35053edad8fc", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { + "cid": "QmdNEL2RtqL52GQNuj8uz6mVj5Z34NVnbaJ1yMyh1oXtBx", + "digest": "c49499bb76a0762884896f9683403f55", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { + "cid": "QmUiVYCQUgr6Y13pZFr8acWpSM4xvTXUdcvGmxyuHbKhsc", + "digest": "34d4feeacd9abf788d69ef1bb4d8fd00", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { + "cid": "QmVgCsJFRXKLuuUhT3aMYwKVGNA9rDeR6DCrs7cAe8riBT", + "digest": "827359440349fe8f5a016e7598993b79", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { + "cid": "QmfA31fbCWojSmhSGvvfxmxaYCpMoXP95zEQ9sLvBGHNaN", + "digest": "bd2cd62f65c1ab84f19ca27e97b7c731", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { + "cid": "QmaUmfcJt6pozn8ndq1JVBzLRjRJdHMTPd4foa8iw5sjBZ", + "digest": "2cf49eb26f1fee94c85781a390ddb4c8", + "sector_size": 34359738368 + }, + 
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { + "cid": "QmR9i9KL3vhhAqTBGj1bPPC7LvkptxrH9RvxJxLN1vvsBE", + "digest": "0f8ec542485568fa3468c066e9fed82b", + "sector_size": 34359738368 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { + "cid": "Qmdtczp7p4wrbDofmHdGhiixn9irAcN77mV9AEHZBaTt1i", + "digest": "d84f79a16fe40e9e25a36e2107bb1ba0", + "sector_size": 34359738368 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { + "cid": "QmZCvxKcKP97vDAk8Nxs9R1fWtqpjQrAhhfXPoCi1nkDoF", + "digest": "fc02943678dd119e69e7fab8420e8819", + "sector_size": 34359738368 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { + "cid": "QmeAN4vuANhXsF8xP2Lx5j2L6yMSdogLzpcvqCJThRGK1V", + "digest": "3810b7780ac0e299b22ae70f1f94c9bc", + "sector_size": 68719476736 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { + "cid": "QmWV8rqZLxs1oQN9jxNWmnT1YdgLwCcscv94VARrhHf1T7", + "digest": "59d2bf1857adc59a4f08fcf2afaa916b", + "sector_size": 68719476736 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { + "cid": "QmVkrXc1SLcpgcudK5J25HH93QvR9tNsVhVTYHm5UymXAz", + "digest": "2170a91ad5bae22ea61f2ea766630322", + "sector_size": 68719476736 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { + "cid": "QmbfQjPD7EpzjhWGmvWAsyN2mAZ4PcYhsf3ujuhU9CSuBm", + "digest": "6d3789148fb6466d07ee1e24d6292fd6", + "sector_size": 68719476736 + }, + 
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { + "cid": "QmWceMgnWYLopMuM4AoGMvGEau7tNe5UK83XFjH5V9B17h", + "digest": "434fb1338ecfaf0f59256f30dde4968f", + "sector_size": 2048 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { + "cid": "QmamahpFCstMUqHi2qGtVoDnRrsXhid86qsfvoyCTKJqHr", + "digest": "dc1ade9929ade1708238f155343044ac", + "sector_size": 2048 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { + "cid": "QmYBpTt7LWNAWr1JXThV5VxX7wsQFLd1PHrGYVbrU1EZjC", + "digest": "6c77597eb91ab936c1cef4cf19eba1b3", + "sector_size": 536870912 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { + "cid": "QmWionkqH2B6TXivzBSQeSyBxojaiAFbzhjtwYRrfwd8nH", + "digest": "065179da19fbe515507267677f02823e", + "sector_size": 536870912 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { + "cid": "QmPXAPPuQtuQz7Zz3MHMAMEtsYwqM1o9H1csPLeiMUQwZH", + "digest": "09e612e4eeb7a0eb95679a88404f960c", + "sector_size": 8388608 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { + "cid": "QmYCuipFyvVW1GojdMrjK1JnMobXtT4zRCZs1CGxjizs99", + "digest": "b687beb9adbd9dabe265a7e3620813e4", + "sector_size": 8388608 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { + "cid": "QmengpM684XLQfG8754ToonszgEg2bQeAGUan5uXTHUQzJ", + "digest": 
"6a388072a518cf46ebd661f5cc46900a", + "sector_size": 34359738368 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { + "cid": "Qmf93EMrADXAK6CyiSfE8xx45fkMfR3uzKEPCvZC1n2kzb", + "digest": "0c7b4aac1c40fdb7eb82bc355b41addf", + "sector_size": 34359738368 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { + "cid": "QmS7ye6Ri2MfFzCkcUJ7FQ6zxDKuJ6J6B8k5PN7wzSR9sX", + "digest": "1801f8a6e1b00bceb00cc27314bb5ce3", + "sector_size": 68719476736 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { + "cid": "QmehSmC6BhrgRZakPDta2ewoH9nosNzdjCqQRXsNFNUkLN", + "digest": "a89884252c04c298d0b3c81bfd884164", + "sector_size": 68719476736 + } +} diff --git a/fixtures/assets/proof-params/srs-inner-product.json b/fixtures/assets/proof-params/srs-inner-product.json new file mode 100644 index 0000000000..8566bf5fd8 --- /dev/null +++ b/fixtures/assets/proof-params/srs-inner-product.json @@ -0,0 +1,7 @@ +{ + "v28-fil-inner-product-v1.srs": { + "cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g", + "digest": "ae20310138f5ba81451d723f858e3797", + "sector_size": 0 + } +} diff --git a/fixtures/fortest/constants.go b/fixtures/fortest/constants.go index 726250f466..065c1accdc 100644 --- a/fixtures/fortest/constants.go +++ b/fixtures/fortest/constants.go @@ -3,30 +3,30 @@ package fortest import ( "encoding/json" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "sort" "github.com/filecoin-project/go-address" + th "github.com/filecoin-project/venus/pkg/testhelpers" cid "github.com/ipfs/go-cid" - "github.com/filecoin-project/go-filecoin/build/project" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - gen 
"github.com/filecoin-project/go-filecoin/tools/gengen/util" + "github.com/filecoin-project/venus/pkg/crypto" + gen "github.com/filecoin-project/venus/tools/gengen/util" ) // The file used to build these addresses can be found in: -// $GOPATH/src/github.com/filecoin-project/go-filecoin/fixtures/setup.json +// $GOPATH/src/github.com/filecoin-project/venus/fixtures/setup.json // // If said file is modified these addresses will need to change as well // rebuild using // TODO: move to build script -// https://github.com/filecoin-project/go-filecoin/issues/921 +// https://github.com/filecoin-project/venus/issues/921 // cat ./fixtures/setup.json | ./tools/gengen/gengen --json --keypath fixtures > fixtures/genesis.car 2> fixtures/gen.json // // The fake cids used for commX in setup.json are generated using this tool: -// $GOPATH/src/github.com/filecoin-project/go-filecoin/tools/gengen/gencfg +// $GOPATH/src/github.com/filecoin-project/venus/tools/gengen/gencfg // TestAddresses is a list of pregenerated addresses. 
var TestAddresses []address.Address @@ -51,7 +51,7 @@ type detailsStruct struct { } func init() { - root := project.Root() + root := th.Root() genConfigPath := filepath.Join(root, "fixtures/setup.json") genConfigFile, err := os.Open(genConfigPath) @@ -63,7 +63,7 @@ func init() { panic(err) } }() - genConfigBytes, err := ioutil.ReadAll(genConfigFile) + genConfigBytes, err := io.ReadAll(genConfigFile) if err != nil { panic(err) } @@ -74,7 +74,7 @@ func init() { } func init() { - root := project.Root() + root := th.Root() detailspath := filepath.Join(root, "fixtures/test/gen.json") detailsFile, err := os.Open(detailspath) @@ -86,7 +86,7 @@ func init() { panic(err) } }() - detailsFileBytes, err := ioutil.ReadAll(detailsFile) + detailsFileBytes, err := io.ReadAll(detailsFile) if err != nil { panic(err) } @@ -119,7 +119,7 @@ func init() { // KeyFilePaths returns the paths to the wallets of the testaddresses func KeyFilePaths() []string { - root := project.Root() + root := th.Root() folder := filepath.Join(root, "fixtures/test") res := make([]string, len(testKeys)) diff --git a/fixtures/genesis-sectors/README.md b/fixtures/genesis-sectors/README.md deleted file mode 100644 index d06dde474a..0000000000 --- a/fixtures/genesis-sectors/README.md +++ /dev/null @@ -1,27 +0,0 @@ -## Generating genesis-sectors - -Genesis sectors are generated using the [Lotus](https://docs.lotu.sh/en+setup-local-dev-net) seed tool. -Follow the directions there to install lotus and then run: -``` -./lotus-seed pre-seal --sector-size 2048 --num-sectors 2 --miner-addr=t0106 -``` -The sector-size must be one of the sizes in use by the proof system. And the miner address should be the address -of the bootstrap miner to be created with gengen. By default in gengen this address starts at t0106 and the id address number is incremented for every additional miner. -This will create a `.genesis-sectors` directory in your home directory. 
- -### Configure setup.json for gengen - -The genesis setup.json file will need to be updated to match the data in the presealed directory. - -1. Unencode the preseal key using `xxd -r -p ~/.genesis-sectors/pre-seal-t0106.key` and copy the private key to the importKeys. -2. Use the information provided in `pre-seal-t0106.json` to update the sector size, commR, commD, sectorNum, commP, pieceSize, and endEpoch of the sectors in setup.json. - -### Initialize with presealed sectors - -To initialize a bootstrap miner using presealed sectors: -```bash -./go-filecoin init --genesisfile=genesis.car --wallet-keyfile=[miner key] --miner-actor-address=t0106 --presealed-sectordir=[preseal directory] -``` -Where `miner key` is the key file imported for the miner. This will be the keyfile whose name is the `minerOwner` specified in setup.json -(e.g. if setup.json has `keysToGen` set to 5, then the miner owner key file will be `fixtures/test/5.key).` -The presealed sector directory should be set to `[path to home directory]/.genesis-sectors` by default. 
\ No newline at end of file diff --git a/fixtures/genesis-sectors/badger/000000.vlog b/fixtures/genesis-sectors/badger/000000.vlog deleted file mode 100644 index a377411190..0000000000 Binary files a/fixtures/genesis-sectors/badger/000000.vlog and /dev/null differ diff --git a/fixtures/genesis-sectors/badger/000004.sst b/fixtures/genesis-sectors/badger/000004.sst deleted file mode 100644 index c81359c7ba..0000000000 Binary files a/fixtures/genesis-sectors/badger/000004.sst and /dev/null differ diff --git a/fixtures/genesis-sectors/badger/KEYREGISTRY b/fixtures/genesis-sectors/badger/KEYREGISTRY deleted file mode 100644 index a33c51867d..0000000000 --- a/fixtures/genesis-sectors/badger/KEYREGISTRY +++ /dev/null @@ -1 +0,0 @@ - J=WFHello Badger \ No newline at end of file diff --git a/fixtures/genesis-sectors/badger/MANIFEST b/fixtures/genesis-sectors/badger/MANIFEST deleted file mode 100644 index 35dfa8f217..0000000000 Binary files a/fixtures/genesis-sectors/badger/MANIFEST and /dev/null differ diff --git a/fixtures/genesis-sectors/cache/s-t0106-0/p_aux b/fixtures/genesis-sectors/cache/s-t0106-0/p_aux new file mode 100644 index 0000000000..22d45690a8 --- /dev/null +++ b/fixtures/genesis-sectors/cache/s-t0106-0/p_aux @@ -0,0 +1,2 @@ +5 +y ; #dRcҜz0t /lqmpbӫwK! \ No newline at end of file diff --git a/fixtures/genesis-sectors/cache/s-t0106-0/sc-02-data-tree-r-last.dat b/fixtures/genesis-sectors/cache/s-t0106-0/sc-02-data-tree-r-last.dat new file mode 100644 index 0000000000..22f3f0fc1a --- /dev/null +++ b/fixtures/genesis-sectors/cache/s-t0106-0/sc-02-data-tree-r-last.dat @@ -0,0 +1 @@ +lqmpbӫwK! 
\ No newline at end of file diff --git a/fixtures/genesis-sectors/cache/s-t0106-0/t_aux b/fixtures/genesis-sectors/cache/s-t0106-0/t_aux new file mode 100644 index 0000000000..d70c530b4e Binary files /dev/null and b/fixtures/genesis-sectors/cache/s-t0106-0/t_aux differ diff --git a/fixtures/genesis-sectors/cache/s-t0106-1/p_aux b/fixtures/genesis-sectors/cache/s-t0106-1/p_aux new file mode 100644 index 0000000000..e84e41204b --- /dev/null +++ b/fixtures/genesis-sectors/cache/s-t0106-1/p_aux @@ -0,0 +1 @@ +Ѫ7PKQ$z8PcGr8ZWl1fLFHES+0|Z{xI&%ց9 \ No newline at end of file diff --git a/fixtures/genesis-sectors/cache/s-t0106-1/sc-02-data-tree-r-last.dat b/fixtures/genesis-sectors/cache/s-t0106-1/sc-02-data-tree-r-last.dat new file mode 100644 index 0000000000..0f8e078693 --- /dev/null +++ b/fixtures/genesis-sectors/cache/s-t0106-1/sc-02-data-tree-r-last.dat @@ -0,0 +1 @@ +fLFHES+0|Z{xI&%ց9 \ No newline at end of file diff --git a/fixtures/genesis-sectors/cache/s-t0106-1/t_aux b/fixtures/genesis-sectors/cache/s-t0106-1/t_aux new file mode 100644 index 0000000000..c831762597 Binary files /dev/null and b/fixtures/genesis-sectors/cache/s-t0106-1/t_aux differ diff --git a/fixtures/genesis-sectors/cache/s-t0106-3/p_aux b/fixtures/genesis-sectors/cache/s-t0106-3/p_aux deleted file mode 100644 index 7d6fb5ad6c..0000000000 --- a/fixtures/genesis-sectors/cache/s-t0106-3/p_aux +++ /dev/null @@ -1,2 +0,0 @@ - 0Ȁc (3(_N8`YD˱2DքK -QA$-P \ No newline at end of file diff --git a/fixtures/genesis-sectors/cache/s-t0106-3/sc-02-data-tree-d.dat b/fixtures/genesis-sectors/cache/s-t0106-3/sc-02-data-tree-d.dat deleted file mode 100644 index 68c9e4bcf3..0000000000 Binary files a/fixtures/genesis-sectors/cache/s-t0106-3/sc-02-data-tree-d.dat and /dev/null differ diff --git a/fixtures/genesis-sectors/cache/s-t0106-3/sc-02-data-tree-r-last.dat b/fixtures/genesis-sectors/cache/s-t0106-3/sc-02-data-tree-r-last.dat deleted file mode 100644 index 5642960bbc..0000000000 --- 
a/fixtures/genesis-sectors/cache/s-t0106-3/sc-02-data-tree-r-last.dat +++ /dev/null @@ -1,2 +0,0 @@ -YD˱2DքK -QA$-P \ No newline at end of file diff --git a/fixtures/genesis-sectors/cache/s-t0106-3/t_aux b/fixtures/genesis-sectors/cache/s-t0106-3/t_aux deleted file mode 100644 index 4d1c4e94c6..0000000000 Binary files a/fixtures/genesis-sectors/cache/s-t0106-3/t_aux and /dev/null differ diff --git a/fixtures/genesis-sectors/cache/s-t0106-4/p_aux b/fixtures/genesis-sectors/cache/s-t0106-4/p_aux deleted file mode 100644 index e1cb959f5d..0000000000 --- a/fixtures/genesis-sectors/cache/s-t0106-4/p_aux +++ /dev/null @@ -1 +0,0 @@ -oI>>2 3)c㦺ESc2 =g!Wpy8쥣RAW_+ \ No newline at end of file diff --git a/fixtures/genesis-sectors/cache/s-t0106-4/sc-02-data-tree-d.dat b/fixtures/genesis-sectors/cache/s-t0106-4/sc-02-data-tree-d.dat deleted file mode 100644 index 1e23cbb0b4..0000000000 Binary files a/fixtures/genesis-sectors/cache/s-t0106-4/sc-02-data-tree-d.dat and /dev/null differ diff --git a/fixtures/genesis-sectors/cache/s-t0106-4/sc-02-data-tree-r-last.dat b/fixtures/genesis-sectors/cache/s-t0106-4/sc-02-data-tree-r-last.dat deleted file mode 100644 index 664f890539..0000000000 --- a/fixtures/genesis-sectors/cache/s-t0106-4/sc-02-data-tree-r-last.dat +++ /dev/null @@ -1 +0,0 @@ -g!Wpy8쥣RAW_+ \ No newline at end of file diff --git a/fixtures/genesis-sectors/cache/s-t0106-4/t_aux b/fixtures/genesis-sectors/cache/s-t0106-4/t_aux deleted file mode 100644 index e853ad25ec..0000000000 Binary files a/fixtures/genesis-sectors/cache/s-t0106-4/t_aux and /dev/null differ diff --git a/fixtures/genesis-sectors/pre-seal-t0106.json b/fixtures/genesis-sectors/pre-seal-t0106.json index a6bc74f649..0a24b9eb0c 100644 --- a/fixtures/genesis-sectors/pre-seal-t0106.json +++ b/fixtures/genesis-sectors/pre-seal-t0106.json @@ -1,56 +1,62 @@ { "t0106": { - "Owner": "t3qx4zrflimpjboxn3ivs6t4eel7frijxhcdvae6z6dtnocvz54yeop2kfdxggn7dfhl72cqqhsf6nf5fgkl3a", - "Worker": 
"t3qx4zrflimpjboxn3ivs6t4eel7frijxhcdvae6z6dtnocvz54yeop2kfdxggn7dfhl72cqqhsf6nf5fgkl3a", + "ID": "t0106", + "Owner": "t3s3g5ktor5jgqpwfhxufvpk5g7nfsoif5rhny33ibmxpkndalckm6gbknvyuaymfnc764s6mixu7z62xm7yza", + "Worker": "t3s3g5ktor5jgqpwfhxufvpk5g7nfsoif5rhny33ibmxpkndalckm6gbknvyuaymfnc764s6mixu7z62xm7yza", + "PeerId": "12D3KooWQrDvsWxU1dN5udhbJbxyNgj4wr4mFvrkjC9HJfhEFypj", "MarketBalance": "0", "PowerBalance": "0", "SectorSize": 2048, "Sectors": [ { "CommR": { - "/": "bafk4ehzaqugph7z7yajugw6wz6nn7x3akr3e32wecwexllfgqndnjt3nsaya" + "/": "bagboea4b5abcaziacbne3v3hh6bhgatc3dop72pnztj4xtvmppj4sq3jnieqoesx" }, "CommD": { - "/": "bafk4chzavlx2s3zsomp6beohyevpbql477qyr7qnvgkvfwbbcqzfijo62arq" + "/": "baga6ea4seaqc3dgscuewdd76uubtaki6usrdki7b4os7otho2downpjspp2ruii" }, - "SectorID": 3, + "SectorID": 0, "Deal": { "PieceCID": { - "/": "bafk4chzavlx2s3zsomp6beohyevpbql477qyr7qnvgkvfwbbcqzfijo62arq" + "/": "baga6ea4seaqc3dgscuewdd76uubtaki6usrdki7b4os7otho2downpjspp2ruii" }, "PieceSize": 2048, - "Client": "t3qx4zrflimpjboxn3ivs6t4eel7frijxhcdvae6z6dtnocvz54yeop2kfdxggn7dfhl72cqqhsf6nf5fgkl3a", + "VerifiedDeal": false, + "Client": "t3s3g5ktor5jgqpwfhxufvpk5g7nfsoif5rhny33ibmxpkndalckm6gbknvyuaymfnc764s6mixu7z62xm7yza", "Provider": "t0106", + "Label": "0", "StartEpoch": 0, "EndEpoch": 9001, "StoragePricePerEpoch": "0", "ProviderCollateral": "0", "ClientCollateral": "0" }, - "ProofType": 4 + "ProofType": 0 }, { "CommR": { - "/": "bafk4ehza24k4j256kulsu34e4szjvmsbytibrx7kjzajc6c6rlcn2jfosada" + "/": "bagboea4b5abcal4itszbnmkiad3iasaqrndnejqqooxiiidu5ntgkybviipf5lid" }, "CommD": { - "/": "bafk4chzaqfni3wpij32pzgv2vuf63utnufburrjzrcoud75oz3pxkfisligq" + "/": "baga6ea4seaqmxjiljjcnscjhp2rc45tfjx5itjhpszsnaims7rzsdo5zlafqspq" }, - "SectorID": 4, + "SectorID": 1, "Deal": { "PieceCID": { - "/": "bafk4chzaqfni3wpij32pzgv2vuf63utnufburrjzrcoud75oz3pxkfisligq" + "/": "baga6ea4seaqmxjiljjcnscjhp2rc45tfjx5itjhpszsnaims7rzsdo5zlafqspq" }, "PieceSize": 2048, - "Client": 
"t3qx4zrflimpjboxn3ivs6t4eel7frijxhcdvae6z6dtnocvz54yeop2kfdxggn7dfhl72cqqhsf6nf5fgkl3a", + "VerifiedDeal": false, + "Client": "t3s3g5ktor5jgqpwfhxufvpk5g7nfsoif5rhny33ibmxpkndalckm6gbknvyuaymfnc764s6mixu7z62xm7yza", "Provider": "t0106", + "Label": "1", "StartEpoch": 0, "EndEpoch": 9001, "StoragePricePerEpoch": "0", "ProviderCollateral": "0", "ClientCollateral": "0" }, - "ProofType": 4 + "ProofType": 0 } ] } diff --git a/fixtures/genesis-sectors/pre-seal-t0106.key b/fixtures/genesis-sectors/pre-seal-t0106.key index 921b8fa400..e001d3f563 100644 --- a/fixtures/genesis-sectors/pre-seal-t0106.key +++ b/fixtures/genesis-sectors/pre-seal-t0106.key @@ -1 +1 @@ -7b2254797065223a22626c73222c22507269766174654b6579223a222f38556f58767a47495532482f546754416d73777839366d32645643586b52716e49496c44684a7247436f3d227d \ No newline at end of file +7b2254797065223a22626c73222c22507269766174654b6579223a224536552b4f6d6f2b61746a73634e4f306f524a765a66646d4b336c466c39776f305963574456686e3654593d227d \ No newline at end of file diff --git a/fixtures/genesis-sectors/sealed/s-t0106-0 b/fixtures/genesis-sectors/sealed/s-t0106-0 new file mode 100644 index 0000000000..ed9ff3793c Binary files /dev/null and b/fixtures/genesis-sectors/sealed/s-t0106-0 differ diff --git a/fixtures/genesis-sectors/sealed/s-t0106-1 b/fixtures/genesis-sectors/sealed/s-t0106-1 new file mode 100644 index 0000000000..ac8ae86b1f Binary files /dev/null and b/fixtures/genesis-sectors/sealed/s-t0106-1 differ diff --git a/fixtures/genesis-sectors/sealed/s-t0106-3 b/fixtures/genesis-sectors/sealed/s-t0106-3 deleted file mode 100644 index 9b8d9bdaeb..0000000000 Binary files a/fixtures/genesis-sectors/sealed/s-t0106-3 and /dev/null differ diff --git a/fixtures/genesis-sectors/sealed/s-t0106-4 b/fixtures/genesis-sectors/sealed/s-t0106-4 deleted file mode 100644 index 5c3760ae76..0000000000 Binary files a/fixtures/genesis-sectors/sealed/s-t0106-4 and /dev/null differ diff --git a/fixtures/genesis-sectors/sectorstore.json 
b/fixtures/genesis-sectors/sectorstore.json old mode 100755 new mode 100644 index 46e3e33343..415dd92278 --- a/fixtures/genesis-sectors/sectorstore.json +++ b/fixtures/genesis-sectors/sectorstore.json @@ -1,5 +1,5 @@ { - "ID": "0baa0d34-115b-48c8-9f9e-4c03fc1e2896", + "ID": "25baacb2-e4eb-42a1-8e66-587910bcced5", "Weight": 0, "CanSeal": false, "CanStore": false diff --git a/fixtures/genesis-sectors/staging/s-t0106-3 b/fixtures/genesis-sectors/staging/s-t0106-3 deleted file mode 100644 index 412c078dfc..0000000000 Binary files a/fixtures/genesis-sectors/staging/s-t0106-3 and /dev/null differ diff --git a/fixtures/genesis-sectors/staging/s-t0106-4 b/fixtures/genesis-sectors/staging/s-t0106-4 deleted file mode 100644 index 162019d0c3..0000000000 Binary files a/fixtures/genesis-sectors/staging/s-t0106-4 and /dev/null differ diff --git a/fixtures/networks/butterfly.go b/fixtures/networks/butterfly.go new file mode 100644 index 0000000000..c9ee32ab22 --- /dev/null +++ b/fixtures/networks/butterfly.go @@ -0,0 +1,62 @@ +package networks + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func ButterflySnapNet() *NetworkConf { + return &NetworkConf{ + Bootstrap: config.BootstrapConfig{ + Addresses: []string{ + "/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWKeDMuJbouvypr1nL2qRruhNVXzv4QiLsZRh6gnvLkc7p", + "/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWSsACNHLGoJbPqeitNY7tom19Nxq8x5ag36eTwmgcAeLo", + }, + MinPeerThreshold: 0, + Period: "30s", + }, + Network: config.NetworkParamsConfig{ + DevNet: true, + NetworkType: types.NetworkButterfly, + GenesisNetworkVersion: network.Version16, + ReplaceProofTypes: []abi.RegisteredSealProof{ + abi.RegisteredSealProof_StackedDrg512MiBV1, + abi.RegisteredSealProof_StackedDrg32GiBV1, + 
abi.RegisteredSealProof_StackedDrg64GiBV1, + }, + BlockDelay: 30, + ConsensusMinerMinPower: 2 << 30, + MinVerifiedDealSize: 1 << 20, + PreCommitChallengeDelay: abi.ChainEpoch(150), + ForkUpgradeParam: &config.ForkUpgradeConfig{ + BreezeGasTampingDuration: 120, + UpgradeBreezeHeight: -1, + UpgradeSmokeHeight: -2, + UpgradeIgnitionHeight: -3, + UpgradeRefuelHeight: -4, + UpgradeAssemblyHeight: -5, + UpgradeTapeHeight: -6, + UpgradeLiftoffHeight: -7, + UpgradeKumquatHeight: -8, + UpgradeCalicoHeight: -9, + UpgradePersianHeight: -10, + UpgradeOrangeHeight: -11, + UpgradeClausHeight: -12, + UpgradeTrustHeight: -13, + UpgradeNorwegianHeight: -14, + UpgradeTurboHeight: -15, + UpgradeHyperdriveHeight: -16, + UpgradeChocolateHeight: -17, + UpgradeOhSnapHeight: -18, + UpgradeSkyrHeight: -19, + UpgradeSharkHeight: 600, + }, + DrandSchedule: map[abi.ChainEpoch]config.DrandEnum{0: 1}, + AddressNetwork: address.Testnet, + PropagationDelaySecs: 6, + }, + } +} diff --git a/fixtures/networks/calibration.go b/fixtures/networks/calibration.go new file mode 100644 index 0000000000..087c3a7968 --- /dev/null +++ b/fixtures/networks/calibration.go @@ -0,0 +1,68 @@ +package networks + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type NetworkConf struct { + Bootstrap config.BootstrapConfig + Network config.NetworkParamsConfig +} + +func Calibration() *NetworkConf { + return &NetworkConf{ + Bootstrap: config.BootstrapConfig{ + Addresses: []string{ + "/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWCi2w8U4DDB9xqrejb5KYHaQv2iA2AJJ6uzG3iQxNLBMy", + "/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWDTayrBojBn9jWNNUih4nNQQBGJD7Zo3gQCKgBkUsS6dp", + 
"/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWNRxTHUn8bf7jz1KEUPMc2dMgGfa4f8ZJTsquVSn3vHCG", + "/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWFWUqE9jgXvcKHWieYs9nhyp6NF4ftwLGAHm4sCv73jjK", + }, + MinPeerThreshold: 0, + Period: "30s", + }, + Network: config.NetworkParamsConfig{ + DevNet: true, + NetworkType: types.NetworkCalibnet, + GenesisNetworkVersion: network.Version0, + ReplaceProofTypes: []abi.RegisteredSealProof{ + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + }, + BlockDelay: 30, + ConsensusMinerMinPower: 32 << 30, + MinVerifiedDealSize: 1 << 20, + PreCommitChallengeDelay: abi.ChainEpoch(150), + ForkUpgradeParam: &config.ForkUpgradeConfig{ + BreezeGasTampingDuration: 120, + UpgradeBreezeHeight: -1, + UpgradeSmokeHeight: -2, + UpgradeIgnitionHeight: -3, + UpgradeRefuelHeight: -4, + UpgradeAssemblyHeight: 30, + UpgradeTapeHeight: 60, + UpgradeLiftoffHeight: -5, + UpgradeKumquatHeight: 90, + UpgradeCalicoHeight: 120, + UpgradePersianHeight: 100 + (120 * 1), + UpgradeClausHeight: 270, + UpgradeOrangeHeight: 300, + UpgradeTrustHeight: 330, + UpgradeNorwegianHeight: 360, + UpgradeTurboHeight: 390, + UpgradeHyperdriveHeight: 420, + UpgradeChocolateHeight: 450, + UpgradeOhSnapHeight: 480, + UpgradeSkyrHeight: 510, + UpgradeSharkHeight: 16800, + }, + DrandSchedule: map[abi.ChainEpoch]config.DrandEnum{0: 1}, + AddressNetwork: address.Testnet, + PropagationDelaySecs: 10, + }, + } +} diff --git a/fixtures/networks/forcenet.go b/fixtures/networks/forcenet.go new file mode 100644 index 0000000000..fecbf7e3f8 --- /dev/null +++ b/fixtures/networks/forcenet.go @@ -0,0 +1,64 @@ +package networks + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func ForceNet() *NetworkConf 
{ + return &NetworkConf{ + Bootstrap: config.BootstrapConfig{ + Addresses: []string{}, + + MinPeerThreshold: 0, + Period: "30s", + }, + Network: config.NetworkParamsConfig{ + DevNet: true, + NetworkType: types.NetworkForce, + GenesisNetworkVersion: network.Version16, + ReplaceProofTypes: []abi.RegisteredSealProof{ + abi.RegisteredSealProof_StackedDrg8MiBV1, + abi.RegisteredSealProof_StackedDrg512MiBV1, + abi.RegisteredSealProof_StackedDrg32GiBV1, + }, + BlockDelay: 30, + ConsensusMinerMinPower: 2048, + MinVerifiedDealSize: 256, + PreCommitChallengeDelay: abi.ChainEpoch(10), + ForkUpgradeParam: &config.ForkUpgradeConfig{ + UpgradeBreezeHeight: -1, + BreezeGasTampingDuration: 0, + UpgradeSmokeHeight: -2, + UpgradeIgnitionHeight: -3, + UpgradeRefuelHeight: -4, + UpgradeTapeHeight: -5, + UpgradeLiftoffHeight: -6, + // This signals our tentative epoch for mainnet launch. Can make it later, but not earlier. + // Miners, clients, developers, custodians all need time to prepare. + // We still have upgrades and state changes to do, but can happen after signaling timing here. 
+ + UpgradeAssemblyHeight: -7, // critical: the network can bootstrap from v1 only + UpgradeKumquatHeight: -8, + UpgradeCalicoHeight: -9, + UpgradePersianHeight: -10, + UpgradeOrangeHeight: -11, + UpgradeClausHeight: -12, + UpgradeTrustHeight: -13, + UpgradeNorwegianHeight: -14, + UpgradeTurboHeight: -15, + UpgradeHyperdriveHeight: -16, + UpgradeChocolateHeight: -17, + UpgradeOhSnapHeight: -18, + UpgradeSkyrHeight: -19, + UpgradeSharkHeight: 100, + }, + DrandSchedule: map[abi.ChainEpoch]config.DrandEnum{0: config.DrandMainnet}, + AddressNetwork: address.Testnet, + PropagationDelaySecs: 1, + }, + } +} diff --git a/fixtures/networks/integrationtestnet.go b/fixtures/networks/integrationtestnet.go new file mode 100644 index 0000000000..c3cc6756be --- /dev/null +++ b/fixtures/networks/integrationtestnet.go @@ -0,0 +1,57 @@ +package networks + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func IntegrationNet() *NetworkConf { + return &NetworkConf{ + Bootstrap: config.BootstrapConfig{ + Addresses: []string{}, + MinPeerThreshold: 0, + Period: "30s", + }, + Network: config.NetworkParamsConfig{ + NetworkType: types.Integrationnet, + GenesisNetworkVersion: network.Version0, + ReplaceProofTypes: []abi.RegisteredSealProof{ + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + }, + BlockDelay: 30, + ConsensusMinerMinPower: 10 << 40, + MinVerifiedDealSize: 1 << 20, + PreCommitChallengeDelay: abi.ChainEpoch(150), + ForkUpgradeParam: &config.ForkUpgradeConfig{ + BreezeGasTampingDuration: 120, + UpgradeBreezeHeight: 41280, + UpgradeSmokeHeight: 51000, + UpgradeIgnitionHeight: 94000, + UpgradeRefuelHeight: 130800, + UpgradeAssemblyHeight: 138720, + UpgradeTapeHeight: 140760, + UpgradeLiftoffHeight: 148888, + 
UpgradeKumquatHeight: 170000, + UpgradeCalicoHeight: 265200, + UpgradePersianHeight: 265200 + (120 * 60), + UpgradeOrangeHeight: 336458, + UpgradeClausHeight: 343200, + UpgradeTrustHeight: 550321, + UpgradeNorwegianHeight: 665280, + UpgradeTurboHeight: 712320, + UpgradeHyperdriveHeight: 892800, + UpgradeChocolateHeight: 1231620, + UpgradeOhSnapHeight: 1594680, + UpgradeSkyrHeight: 1960320, + UpgradeSharkHeight: 2383680, + }, + DrandSchedule: map[abi.ChainEpoch]config.DrandEnum{0: 5, 51000: 1}, + AddressNetwork: address.Testnet, + PropagationDelaySecs: 10, + }, + } +} diff --git a/fixtures/networks/interop.go b/fixtures/networks/interop.go deleted file mode 100644 index 2b884b134d..0000000000 --- a/fixtures/networks/interop.go +++ /dev/null @@ -1,80 +0,0 @@ -package networks - -import ( - "encoding/base64" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" -) - -type NetworkConf struct { - Bootstrap config.BootstrapConfig - Drand config.DrandConfig - Network config.NetworkParamsConfig -} - -func Interop() *NetworkConf { - const ( - interopBootstrap0 string = "/dns4/t01000.miner.interopnet.kittyhawk.wtf/tcp/1347/p2p/12D3KooWQfrGdBE8N2RzcnuHfyWZ4MBKMYZ6z1oPdhEbFxSNo1du" - interopBootstrap1 string = "/ip4/34.217.110.132/tcp/1347/p2p/12D3KooWQfrGdBE8N2RzcnuHfyWZ4MBKMYZ6z1oPdhEbFxSNo1du" - interopBootstrap2 string = "/dns4/peer0.interopnet.kittyhawk.wtf/tcp/1347/p2p/12D3KooWKmHh5mQofRhFr6f6qsT4ksL7qUtd2BWC24wPHVFL9gej" - interopBootstrap3 string = "/ip4/54.187.182.170/tcp/1347/p2p/12D3KooWKmHh5mQofRhFr6f6qsT4ksL7qUtd2BWC24wPHVFL9gej" - interopBootstrap4 string = "/dns4/peer1.interopnet.kittyhawk.wtf/tcp/1347/p2p/12D3KooWCWWtn3GMFVSn2PY7k9K7QkQTVA6p6wojUr5PgS5h1xtK" - interopBootstrap5 string = "/ip4/52.24.84.39/tcp/1347/p2p/12D3KooWCWWtn3GMFVSn2PY7k9K7QkQTVA6p6wojUr5PgS5h1xtK" - ) - - var interopDrandKeys = []string{ - "gsJ5zOdERQ5o3pjuCPlpigHdOPjjvjxT8rhA+50JrWKgtrh5geF54bFLyaLShMmF", - 
"gtUTCK00bGhvgbgJRVFZfXuWMpXL8xNAGpPfm69S1a6YqHdFvucIOaTW5lw0K9Fb", - "lO6/1T9LpqO4MEI2QAoS5ziF5aeBUJpcjUHS6LR2kj2OpgUmSbPBcoL1liF/lsXe", - "jcQjHkK07fOehu8VeUAWkkgGR5GCddp2fT5VjFINY3WtlTUwYQ/Sfa8RAYeHemXQ", - } - - var distKey [][]byte - for _, key := range interopDrandKeys { - bs, err := base64.StdEncoding.DecodeString(key) - if err != nil { - panic(err) - } - distKey = append(distKey, bs) - } - - return &NetworkConf{ - Bootstrap: config.BootstrapConfig{ - Addresses: []string{ - interopBootstrap0, - interopBootstrap1, - interopBootstrap2, - interopBootstrap3, - interopBootstrap4, - interopBootstrap5, - }, - MinPeerThreshold: 1, - Period: "10s", - }, - Drand: config.DrandConfig{ - Addresses: []string{ - "gabbi.drand.fil-test.net:443", - "linus.drand.fil-test.net:443", - "nicolas.drand.fil-test.net:443", - "mathilde.drand.fil-test.net:443", - "jeff.drand.fil-test.net:443", - "philipp.drand.fil-test.net:443", - "ludovic.drand.fil-test.net:443", - }, - Secure: true, - DistKey: distKey, - StartTimeUnix: 1588221360, - RoundSeconds: 30, - }, - Network: config.NetworkParamsConfig{ - ConsensusMinerMinPower: 2 << 30, - ReplaceProofTypes: []int64{ - int64(abi.RegisteredProof_StackedDRG512MiBSeal), - int64(abi.RegisteredProof_StackedDRG32GiBSeal), - int64(abi.RegisteredProof_StackedDRG64GiBSeal), - }, - }, - } -} diff --git a/fixtures/networks/interopnet.go b/fixtures/networks/interopnet.go new file mode 100644 index 0000000000..984ffa3c7a --- /dev/null +++ b/fixtures/networks/interopnet.go @@ -0,0 +1,62 @@ +package networks + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func InteropNet() *NetworkConf { + return &NetworkConf{ + Bootstrap: config.BootstrapConfig{ + Addresses: []string{ + 
"/dns4/bootstrap-0.interop.fildev.network/tcp/1347/p2p/12D3KooWDpppr8csCNvEPnD2Z83KTPdBTM7iJhL66qK8LK3bB5NU", + "/dns4/bootstrap-1.interop.fildev.network/tcp/1347/p2p/12D3KooWR3K1sXWoDYcXWqDF26mFEM1o1g7e7fcVR3NYE7rn24Gs", + }, + MinPeerThreshold: 0, + Period: "30s", + }, + Network: config.NetworkParamsConfig{ + DevNet: true, + NetworkType: types.NetworkInterop, + GenesisNetworkVersion: network.Version16, + ReplaceProofTypes: []abi.RegisteredSealProof{ + abi.RegisteredSealProof_StackedDrg2KiBV1, + abi.RegisteredSealProof_StackedDrg8MiBV1, + abi.RegisteredSealProof_StackedDrg512MiBV1, + }, + BlockDelay: 30, + ConsensusMinerMinPower: 2048, + MinVerifiedDealSize: 256, + PreCommitChallengeDelay: abi.ChainEpoch(10), + ForkUpgradeParam: &config.ForkUpgradeConfig{ + BreezeGasTampingDuration: 0, + UpgradeBreezeHeight: -1, + UpgradeSmokeHeight: -2, + UpgradeIgnitionHeight: -3, + UpgradeRefuelHeight: -4, + UpgradeAssemblyHeight: -5, + UpgradeTapeHeight: -6, + UpgradeLiftoffHeight: -7, + UpgradeKumquatHeight: -8, + UpgradeCalicoHeight: -9, + UpgradePersianHeight: -10, + UpgradeOrangeHeight: -11, + UpgradeClausHeight: -12, + UpgradeTrustHeight: -13, + UpgradeNorwegianHeight: -14, + UpgradeTurboHeight: -15, + UpgradeHyperdriveHeight: -16, + UpgradeChocolateHeight: -17, + UpgradeOhSnapHeight: -18, + UpgradeSkyrHeight: -19, + UpgradeSharkHeight: 99999999999999, + }, + DrandSchedule: map[abi.ChainEpoch]config.DrandEnum{0: 1}, + AddressNetwork: address.Testnet, + PropagationDelaySecs: 6, + }, + } +} diff --git a/fixtures/networks/mainnet.go b/fixtures/networks/mainnet.go new file mode 100644 index 0000000000..c42251d80d --- /dev/null +++ b/fixtures/networks/mainnet.go @@ -0,0 +1,79 @@ +package networks + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/filecoin-project/venus/pkg/config" + 
"github.com/filecoin-project/venus/venus-shared/types" +) + +func Mainnet() *NetworkConf { + return &NetworkConf{ + Bootstrap: config.BootstrapConfig{ + Addresses: []string{ + "/dns4/bootstrap-0.mainnet.filops.net/tcp/1347/p2p/12D3KooWCVe8MmsEMes2FzgTpt9fXtmCY7wrq91GRiaC8PHSCCBj", + "/dns4/bootstrap-1.mainnet.filops.net/tcp/1347/p2p/12D3KooWCwevHg1yLCvktf2nvLu7L9894mcrJR4MsBCcm4syShVc", + "/dns4/bootstrap-2.mainnet.filops.net/tcp/1347/p2p/12D3KooWEWVwHGn2yR36gKLozmb4YjDJGerotAPGxmdWZx2nxMC4", + "/dns4/bootstrap-3.mainnet.filops.net/tcp/1347/p2p/12D3KooWKhgq8c7NQ9iGjbyK7v7phXvG6492HQfiDaGHLHLQjk7R", + "/dns4/bootstrap-4.mainnet.filops.net/tcp/1347/p2p/12D3KooWL6PsFNPhYftrJzGgF5U18hFoaVhfGk7xwzD8yVrHJ3Uc", + "/dns4/bootstrap-5.mainnet.filops.net/tcp/1347/p2p/12D3KooWLFynvDQiUpXoHroV1YxKHhPJgysQGH2k3ZGwtWzR4dFH", + "/dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ", + "/dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf", + "/dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR", + "/dns4/lotus-bootstrap.forceup.cn/tcp/41778/p2p/12D3KooWFQsv3nRMUevZNWWsY1Wu6NUzUbawnWU5NcRhgKuJA37C", + "/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz", + "/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u", + "/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt", + "/dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d", + "/dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP", + "/dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt", + }, + MinPeerThreshold: 1, + Period: "30s", + }, + Network: config.NetworkParamsConfig{ + DevNet: false, + NetworkType: types.NetworkMainnet, + 
GenesisNetworkVersion: network.Version0, + ReplaceProofTypes: []abi.RegisteredSealProof{ + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + }, + BlockDelay: 30, + ConsensusMinerMinPower: 10 << 40, + MinVerifiedDealSize: 1 << 20, + PreCommitChallengeDelay: abi.ChainEpoch(150), + ForkUpgradeParam: &config.ForkUpgradeConfig{ + BreezeGasTampingDuration: 120, + UpgradeBreezeHeight: 41280, + UpgradeSmokeHeight: 51000, + UpgradeIgnitionHeight: 94000, + UpgradeRefuelHeight: 130800, + UpgradeAssemblyHeight: 138720, + UpgradeTapeHeight: 140760, + UpgradeLiftoffHeight: 148888, + // This signals our tentative epoch for mainnet launch. Can make it later, but not earlier. + // Miners, clients, developers, custodians all need time to prepare. + // We still have upgrades and state changes to do, but can happen after signaling timing here. + UpgradeKumquatHeight: 170000, + UpgradeCalicoHeight: 265200, + UpgradePersianHeight: 265200 + (builtin2.EpochsInHour * 60), + UpgradeOrangeHeight: 336458, + UpgradeClausHeight: 343200, // 2020-12-22T02:00:00Z + UpgradeTrustHeight: 550321, // 2021-03-04T00:00:30Z + UpgradeNorwegianHeight: 665280, // 2021-04-12T22:00:00Z + UpgradeTurboHeight: 712320, // 2021-04-29T06:00:00Z + UpgradeHyperdriveHeight: 892800, // 2021-06-30T22:00:00Z + UpgradeChocolateHeight: 1231620, + UpgradeOhSnapHeight: 1594680, // 2022-03-01T15:00:00Z + UpgradeSkyrHeight: 1960320, // 2022-07-06T14:00:00Z + UpgradeSharkHeight: 2383680, // 2022-11-30T14:00:00Z + }, + DrandSchedule: map[abi.ChainEpoch]config.DrandEnum{0: 5, 51000: 1}, + AddressNetwork: address.Mainnet, + PropagationDelaySecs: 10, + }, + } +} diff --git a/fixtures/networks/net_2k.go b/fixtures/networks/net_2k.go new file mode 100644 index 0000000000..b865d6b781 --- /dev/null +++ b/fixtures/networks/net_2k.go @@ -0,0 +1,58 @@ +package networks + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func Net2k() *NetworkConf { + return &NetworkConf{ + Bootstrap: config.BootstrapConfig{ + Addresses: []string{}, + MinPeerThreshold: 0, + Period: "30s", + }, + Network: config.NetworkParamsConfig{ + DevNet: true, + NetworkType: types.Network2k, + GenesisNetworkVersion: network.Version16, + ReplaceProofTypes: []abi.RegisteredSealProof{ + abi.RegisteredSealProof_StackedDrg2KiBV1, + abi.RegisteredSealProof_StackedDrg8MiBV1, + }, + BlockDelay: 4, + ConsensusMinerMinPower: 2048, + MinVerifiedDealSize: 256, + PreCommitChallengeDelay: abi.ChainEpoch(10), + ForkUpgradeParam: &config.ForkUpgradeConfig{ + BreezeGasTampingDuration: 0, + UpgradeBreezeHeight: -1, + UpgradeSmokeHeight: -2, + UpgradeIgnitionHeight: -3, + UpgradeRefuelHeight: -4, + UpgradeTapeHeight: -5, + UpgradeAssemblyHeight: -6, + UpgradeLiftoffHeight: -7, + UpgradeKumquatHeight: -8, + UpgradeCalicoHeight: -9, + UpgradePersianHeight: -10, + UpgradeOrangeHeight: -11, + UpgradeClausHeight: -12, + UpgradeTrustHeight: -13, + UpgradeNorwegianHeight: -14, + UpgradeTurboHeight: -15, + UpgradeHyperdriveHeight: -16, + UpgradeChocolateHeight: -17, + UpgradeOhSnapHeight: -18, + UpgradeSkyrHeight: -19, + UpgradeSharkHeight: 100, + }, + DrandSchedule: map[abi.ChainEpoch]config.DrandEnum{0: 1}, + AddressNetwork: address.Testnet, + PropagationDelaySecs: 1, + }, + } +} diff --git a/fixtures/networks/network_parse.go b/fixtures/networks/network_parse.go new file mode 100644 index 0000000000..142d5ca44c --- /dev/null +++ b/fixtures/networks/network_parse.go @@ -0,0 +1,77 @@ +package networks + +import ( + "fmt" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/utils" +) + +func GetNetworkFromName(name string) (types.NetworkType, error) { + if name == "2k" { + 
return types.Network2k, nil + } + if name == "force" { + return types.NetworkForce, nil + } + nt, ok := utils.NetworkNameWithNetworkType[types.NetworkName(name)] + if !ok { + return types.NetworkDefault, fmt.Errorf("unknown network name %s", name) + } + return nt, nil +} + +func SetConfigFromOptions(cfg *config.Config, networkName string) error { + netcfg, err := GetNetworkConfig(networkName) + if err != nil { + return err + } + cfg.Bootstrap = &netcfg.Bootstrap + cfg.NetworkParams = &netcfg.Network + return nil +} + +func SetConfigFromNetworkType(cfg *config.Config, networkType types.NetworkType) error { + netcfg, err := GetNetworkConfig(networkType) + if err != nil { + return err + } + cfg.NetworkParams = &netcfg.Network + return nil +} + +func GetNetworkConfig(network interface{}) (*NetworkConf, error) { + var networkType types.NetworkType + var err error + + switch val := network.(type) { + case types.NetworkType: + networkType = val + case string: + networkType, err = GetNetworkFromName(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("not expect type %T %v", network, network) + } + + switch networkType { + case types.NetworkMainnet: + return Mainnet(), nil + case types.NetworkForce: + return ForceNet(), nil + case types.Integrationnet: + return IntegrationNet(), nil + case types.Network2k: + return Net2k(), nil + case types.NetworkCalibnet: + return Calibration(), nil + case types.NetworkInterop: + return InteropNet(), nil + case types.NetworkButterfly: + return ButterflySnapNet(), nil + } + return nil, fmt.Errorf("unknown network name %s", network) +} diff --git a/fixtures/networks/network_parse_test.go b/fixtures/networks/network_parse_test.go new file mode 100644 index 0000000000..2acde85168 --- /dev/null +++ b/fixtures/networks/network_parse_test.go @@ -0,0 +1,124 @@ +package networks + +import ( + "fmt" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + 
"github.com/filecoin-project/venus/venus-shared/types" + "github.com/stretchr/testify/assert" +) + +func TestGetNetworkFromName(t *testing.T) { + tf.UnitTest(t) + + testCast := []struct { + name string + network types.NetworkType + err error + }{ + { + name: "mainnet", + network: types.NetworkMainnet, + err: nil, + }, + { + name: "force", + network: types.NetworkForce, + err: nil, + }, + { + name: "integrationnet", + network: types.Integrationnet, + err: nil, + }, + { + name: "2k", + network: types.Network2k, + err: nil, + }, + { + name: "calibrationnet", + network: types.NetworkCalibnet, + err: nil, + }, + { + name: "interopnet", + network: types.NetworkInterop, + err: nil, + }, + { + name: "butterflynet", + network: types.NetworkButterfly, + err: nil, + }, + { + name: "unknown", + network: 0, + err: fmt.Errorf("unknown network name %s", "unknown"), + }, + } + + for _, test := range testCast { + network, err := GetNetworkFromName(test.name) + assert.Equal(t, test.network, network) + assert.Equal(t, test.err, err) + } +} + +func TestGetNetworkConfig(t *testing.T) { + tf.UnitTest(t) + + testCast := []struct { + name string + network *NetworkConf + err error + }{ + { + name: "mainnet", + network: Mainnet(), + err: nil, + }, + { + name: "force", + network: ForceNet(), + err: nil, + }, + { + name: "integrationnet", + network: IntegrationNet(), + err: nil, + }, + { + name: "2k", + network: Net2k(), + err: nil, + }, + { + name: "calibrationnet", + network: Calibration(), + err: nil, + }, + { + name: "interopnet", + network: InteropNet(), + err: nil, + }, + { + name: "butterflynet", + network: ButterflySnapNet(), + err: nil, + }, + { + name: "unknown", + network: nil, + err: fmt.Errorf("unknown network name %s", "unknown"), + }, + } + + for _, test := range testCast { + network, err := GetNetworkConfig(test.name) + assert.Equal(t, test.network, network) + assert.Equal(t, test.err, err) + } +} diff --git a/fixtures/networks/testnet.go b/fixtures/networks/testnet.go deleted 
file mode 100644 index 67d3dd501c..0000000000 --- a/fixtures/networks/testnet.go +++ /dev/null @@ -1,85 +0,0 @@ -package networks - -import ( - "encoding/base64" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" -) - -func Testnet() *NetworkConf { - const ( - testnetBootstrap0 string = "/dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWKNF7vNFEhnvB45E9mw2B5z6t419W3ziZPLdUDVnLLKGs" - testnetBootstrap1 string = "/ip4/86.109.15.57/tcp/1347/p2p/12D3KooWKNF7vNFEhnvB45E9mw2B5z6t419W3ziZPLdUDVnLLKGs" - testnetBootstrap2 string = "/dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWECJTm7RUPyGfNbRwm6y2fK4wA7EB8rDJtWsq5AKi7iDr" - testnetBootstrap3 string = "/ip4/139.178.84.45/tcp/1347/p2p/12D3KooWECJTm7RUPyGfNbRwm6y2fK4wA7EB8rDJtWsq5AKi7iDr" - testnetBootstrap4 string = "/dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWC7MD6m7iNCuDsYtNr7xVtazihyVUizBbhmhEiyMAm9ym" - testnetBootstrap5 string = "/ip4/136.144.49.17/tcp/1347/p2p/12D3KooWC7MD6m7iNCuDsYtNr7xVtazihyVUizBbhmhEiyMAm9ym" - testnetBootstrap6 string = "/dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWD8eYqsKcEMFax6EbWN3rjA7qFsxCez2rmN8dWqkzgNaN" - testnetBootstrap7 string = "/ip4/86.109.15.55/tcp/1347/p2p/12D3KooWD8eYqsKcEMFax6EbWN3rjA7qFsxCez2rmN8dWqkzgNaN" - testnetBootstrap8 string = "/dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWLB3RR8frLAmaK4ntHC2dwrAjyGzQgyUzWxAum1FxyyqD" - testnetBootstrap9 string = "/ip4/139.178.84.41/tcp/1347/p2p/12D3KooWLB3RR8frLAmaK4ntHC2dwrAjyGzQgyUzWxAum1FxyyqD" - testnetBootstrap10 string = "/dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooWGPDJAw3HW4uVU3JEQBfFaZ1kdpg4HvvwRMVpUYbzhsLQ" - testnetBootstrap11 string = "/ip4/136.144.49.131/tcp/1347/p2p/12D3KooWGPDJAw3HW4uVU3JEQBfFaZ1kdpg4HvvwRMVpUYbzhsLQ" - ) - - var testnetDrandKeys = []string{ - "gsJ5zOdERQ5o3pjuCPlpigHdOPjjvjxT8rhA+50JrWKgtrh5geF54bFLyaLShMmF", - 
"gtUTCK00bGhvgbgJRVFZfXuWMpXL8xNAGpPfm69S1a6YqHdFvucIOaTW5lw0K9Fb", - "lO6/1T9LpqO4MEI2QAoS5ziF5aeBUJpcjUHS6LR2kj2OpgUmSbPBcoL1liF/lsXe", - "jcQjHkK07fOehu8VeUAWkkgGR5GCddp2fT5VjFINY3WtlTUwYQ/Sfa8RAYeHemXQ", - } - - var distKey [][]byte - for _, key := range testnetDrandKeys { - bs, err := base64.StdEncoding.DecodeString(key) - if err != nil { - panic(err) - } - distKey = append(distKey, bs) - } - - return &NetworkConf{ - Bootstrap: config.BootstrapConfig{ - Addresses: []string{ - testnetBootstrap0, - testnetBootstrap1, - testnetBootstrap2, - testnetBootstrap3, - testnetBootstrap4, - testnetBootstrap5, - testnetBootstrap6, - testnetBootstrap7, - testnetBootstrap8, - testnetBootstrap9, - testnetBootstrap10, - testnetBootstrap11, - }, - MinPeerThreshold: 1, - Period: "10s", - }, - Drand: config.DrandConfig{ - Addresses: []string{ - "gabbi.drand.fil-test.net:443", - "linus.drand.fil-test.net:443", - "nicolas.drand.fil-test.net:443", - "mathilde.drand.fil-test.net:443", - "jeff.drand.fil-test.net:443", - "philipp.drand.fil-test.net:443", - "ludovic.drand.fil-test.net:443", - }, - Secure: true, - DistKey: distKey, - StartTimeUnix: 1588221360, - RoundSeconds: 30, - }, - Network: config.NetworkParamsConfig{ - ConsensusMinerMinPower: 1024 << 30, - ReplaceProofTypes: []int64{ - int64(abi.RegisteredProof_StackedDRG32GiBSeal), - int64(abi.RegisteredProof_StackedDRG64GiBSeal), - }, - }, - } -} diff --git a/fixtures/setup.json b/fixtures/setup.json index 578a35c618..2f6d62ad9a 100644 --- a/fixtures/setup.json +++ b/fixtures/setup.json @@ -1,7 +1,7 @@ { "seed": 4, "keysToGen": 5, - "importKeys": [{"privateKey":"/8UoXvzGIU2H/TgTAmswx96m2dVCXkRqnIIlDhJrGCo=","sigType":2}], + "importKeys": [{"privateKey":"E6U+Omo+atjscNO0oRJvZfdmK3lFl9wo0YcWDVhn6TY=","type":2}], "preallocatedFunds": [ "1000000", "1000000", @@ -13,27 +13,28 @@ "miners": [{ "owner": 5, "sealProofType": 3, + "marketBalance": "0", "committedSectors": [{ - "commR": 
{"/":"bafk4ehzaqugph7z7yajugw6wz6nn7x3akr3e32wecwexllfgqndnjt3nsaya"}, - "commD": {"/":"bafk4chzavlx2s3zsomp6beohyevpbql477qyr7qnvgkvfwbbcqzfijo62arq"}, + "commR": {"/":"bagboea4b5abcaziacbne3v3hh6bhgatc3dop72pnztj4xtvmppj4sq3jnieqoesx"}, + "commD": {"/":"baga6ea4seaqc3dgscuewdd76uubtaki6usrdki7b4os7otho2downpjspp2ruii"}, "sectorNum": 3, "proofType": 3, "dealCfg": { - "commP": {"/":"bafk4chzavlx2s3zsomp6beohyevpbql477qyr7qnvgkvfwbbcqzfijo62arq"}, + "commP": {"/":"baga6ea4seaqc3dgscuewdd76uubtaki6usrdki7b4os7otho2downpjspp2ruii"}, "pieceSize": 2048, "verified": false, - "endEpoch": 9001 + "endEpoch": 1000000 } },{ - "commR": {"/":"bafk4ehza24k4j256kulsu34e4szjvmsbytibrx7kjzajc6c6rlcn2jfosada"}, - "commD": {"/":"bafk4chzaqfni3wpij32pzgv2vuf63utnufburrjzrcoud75oz3pxkfisligq"}, + "commR": {"/":"bagboea4b5abcal4itszbnmkiad3iasaqrndnejqqooxiiidu5ntgkybviipf5lid"}, + "commD": {"/":"baga6ea4seaqmxjiljjcnscjhp2rc45tfjx5itjhpszsnaims7rzsdo5zlafqspq"}, "sectorNum": 4, "proofType": 3, "dealCfg": { - "commP": {"/":"bafk4chzaqfni3wpij32pzgv2vuf63utnufburrjzrcoud75oz3pxkfisligq"}, + "commP": {"/":"baga6ea4seaqmxjiljjcnscjhp2rc45tfjx5itjhpszsnaims7rzsdo5zlafqspq"}, "pieceSize": 2048, "verified": false, - "endEpoch": 9001 + "endEpoch": 1000000 } }] diff --git a/functional-tests/common.go b/functional-tests/common.go deleted file mode 100644 index 4a7aac73ce..0000000000 --- a/functional-tests/common.go +++ /dev/null @@ -1,91 +0,0 @@ -package functional - -import ( - "context" - "encoding/json" - "os" - "path/filepath" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - gengen 
"github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -// setup presealed sectors and use these paths to run test against sectors with larger sector size -//genCfgPath := filepath.Join("./512", "setup.json") -//presealPath := "./512" -func fixtureGenCfg() string { - wd, _ := os.Getwd() - return filepath.Join(wd, "..", "fixtures/setup.json") -} - -func fixturePresealPath() string { - wd, _ := os.Getwd() - return filepath.Join(wd, "..", "fixtures/genesis-sectors") -} - -func loadGenesisConfig(t *testing.T, path string) *gengen.GenesisCfg { - configFile, err := os.Open(path) - if err != nil { - t.Errorf("failed to open config file %s: %s", path, err) - } - defer func() { _ = configFile.Close() }() - - var cfg gengen.GenesisCfg - if err := json.NewDecoder(configFile).Decode(&cfg); err != nil { - t.Errorf("failed to parse config: %s", err) - } - return &cfg -} - -func makeNode(ctx context.Context, t *testing.T, seed *node.ChainSeed, chainClock clock.ChainEpochClock, drand drand.IFace) *node.Node { - builder := test.NewNodeBuilder(t). - WithBuilderOpt(node.ChainClockConfigOption(chainClock)). - WithGenesisInit(seed.GenesisInitFunc). 
- WithBuilderOpt(node.MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)) - if drand != nil { - builder = builder.WithBuilderOpt(node.DrandConfigOption(drand)) - } - return builder.Build(ctx) -} - -func initNodeGenesisMiner(ctx context.Context, t *testing.T, nd *node.Node, seed *node.ChainSeed, minerIdx int, presealPath string) (address.Address, address.Address, error) { - seed.GiveKey(t, nd, minerIdx) - miner, owner := seed.GiveMiner(t, nd, 0) - - gen, err := nd.Chain().ChainReader.GetGenesisBlock(ctx) - require.NoError(t, err) - - c := nd.Repo.Config() - c.SectorBase.PreSealedSectorsDirPath = presealPath - err = nd.Repo.ReplaceConfig(c) - require.NoError(t, err) - - err = node.InitSectors(ctx, nd.Repo, gen) - require.NoError(t, err) - return miner, owner, err -} - -func simulateBlockMining(ctx context.Context, t *testing.T, fakeClock clock.Fake, blockTime time.Duration, node *node.Node) { - var err error - for { - select { - case <-ctx.Done(): - return - default: - // check error from previous loop (but only if not done) - require.NoError(t, err) - - fakeClock.Advance(blockTime) - _, err = node.BlockMining.BlockMiningAPI.MiningOnce(ctx) - } - } -} diff --git a/functional-tests/drand_test.go b/functional-tests/drand_test.go deleted file mode 100644 index d299105e24..0000000000 --- a/functional-tests/drand_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package functional - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestDrandPublic(t *testing.T) { - tf.FunctionalTest(t) - t.Skip(("requires local drand setup")) - - ctx := context.Background() - genTime := int64(1000000000) - blockTime := 30 * 
time.Second - propDelay := 6 * time.Second - // The clock is intentionally set some way ahead of the genesis time so the miner can produce - // catch-up blocks as quickly as possible. - fakeClock := clock.NewFake(time.Unix(genTime, 0).Add(4 * time.Hour)) - - // The fixture is needed in order to use the presealed genesis sectors fixture. - // Future code could decouple the whole setup.json from the presealed information. - genCfg := loadGenesisConfig(t, fixtureGenCfg()) - seed := node.MakeChainSeed(t, genCfg) - chainClock := clock.NewChainClockFromClock(uint64(genTime), blockTime, propDelay, fakeClock) - - nd := makeNode(ctx, t, seed, chainClock, nil) - - err := nd.Start(ctx) - require.NoError(t, err) - defer nd.Stop(ctx) - - err = nd.DrandAPI.Configure([]string{ - "drand-test3.nikkolasg.xyz:5003", - }, true, false) - require.NoError(t, err) - - entry1, err := nd.DrandAPI.GetEntry(ctx, 1) - require.NoError(t, err) - - assert.Equal(t, drand.Round(1), entry1.Round) - assert.NotNil(t, entry1.Data) - - entry2, err := nd.DrandAPI.GetEntry(ctx, 2) - require.NoError(t, err) - - valid, err := nd.DrandAPI.VerifyEntry(entry1, entry2) - require.NoError(t, err) - require.True(t, valid) -} diff --git a/functional-tests/faucet_test.go b/functional-tests/faucet_test.go deleted file mode 100644 index 32443e23b8..0000000000 --- a/functional-tests/faucet_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package functional - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "os/exec" - "strconv" - "strings" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/drand" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -var faucetBinary = "../tools/faucet/faucet" - -func TestFaucetSendFunds(t *testing.T) { - tf.FunctionalTest(t) - if _, err := os.Stat(faucetBinary); os.IsNotExist(err) { - panic("faucet not found, run `go run build/*.go build` to fix") - } - - ctx := context.Background() - genTime := int64(1000000000) - blockTime := 30 * time.Second - propDelay := 6 * time.Second - // Set clock ahead so the miner can produce catch-up blocks as quickly as possible. - fakeClock := clock.NewFake(time.Unix(genTime, 0).Add(1 * time.Hour)) - - genCfg := loadGenesisConfig(t, fixtureGenCfg()) - seed := node.MakeChainSeed(t, genCfg) - chainClock := clock.NewChainClockFromClock(uint64(genTime), blockTime, propDelay, fakeClock) - drandImpl := &drand.Fake{ - GenesisTime: time.Unix(genTime, 0).Add(-1 * blockTime), - FirstFilecoin: 0, - } - - nd := makeNode(ctx, t, seed, chainClock, drandImpl) - api, stopAPI := test.RunNodeAPI(ctx, nd, t) - defer stopAPI() - - _, owner, err := initNodeGenesisMiner(ctx, t, nd, seed, genCfg.Miners[0].Owner, fixturePresealPath()) - require.NoError(t, err) - err = nd.Start(ctx) - require.NoError(t, err) - defer nd.Stop(ctx) - - // Start faucet server - faucetctx, faucetcancel := context.WithCancel(context.Background()) - faucetDripFil := uint64(123) - MustStartFaucet(faucetctx, t, api.Address(), owner, faucetDripFil) - defer faucetcancel() - // Wait for faucet to be ready - time.Sleep(1 * time.Second) - - // Generate an address to receive funds. 
- targetKi, err := crypto.NewSecpKeyFromSeed(bytes.NewReader(bytes.Repeat([]byte{1, 2, 3, 4}, 16))) - require.NoError(t, err) - targetAddr, err := targetKi.Address() - require.NoError(t, err) - - // Make request for funds - msgcid := MustSendFundsFaucet(t, "localhost:9797", targetAddr) - assert.NotEmpty(t, msgcid) - - // Mine the block containing the message, and another one to evaluate that state. - for i := 0; i < 2; i++ { - _, err := nd.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - } - - // Check that funds have been transferred - expectedBalance := types.NewAttoFILFromFIL(faucetDripFil) - actr, err := nd.PorcelainAPI.ActorGet(ctx, targetAddr) - require.NoError(t, err) - assert.True(t, actr.Balance.Equals(expectedBalance)) -} - -// MustStartFaucet runs the faucet with a provided node API endpoint and wallet from which to source transfers. -func MustStartFaucet(ctx context.Context, t *testing.T, endpoint string, sourceWallet address.Address, faucetVal uint64) { - parts := strings.Split(endpoint, "/") - filAPI := fmt.Sprintf("%s:%s", parts[2], parts[4]) - - cmd := exec.CommandContext(ctx, - faucetBinary, - "-fil-api="+filAPI, - "-fil-wallet="+sourceWallet.String(), - "-faucet-val="+strconv.FormatUint(faucetVal, 10), - ) - if err := cmd.Start(); err != nil { - t.Fatal(err) - } -} - -// MustMustSendFundsFaucet sends funds to the given wallet address -func MustSendFundsFaucet(t *testing.T, host string, target address.Address) string { - data := url.Values{} - data.Set("target", target.String()) - - resp, err := http.PostForm("http://"+host+"/tap", data) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != 200 { - all, _ := ioutil.ReadAll(resp.Body) - t.Fatalf("faucet request failed: %d %s", resp.StatusCode, all) - } - - msgcid := resp.Header.Get("Message-Cid") - return msgcid -} diff --git a/functional-tests/lib/helpers.bash b/functional-tests/lib/helpers.bash deleted file mode 100755 index ccd03cad63..0000000000 --- 
a/functional-tests/lib/helpers.bash +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env bash - -set -eo pipefail - -function finish { - local -i MAX_WAIT=60 - - echo "" - echo "cleaning up..." - kill "$BOOTSTRAP_MN_PID" || true - kill "$STORAGE_MN_PID" || true - kill "$CL_PID" || true - - # Force KILL after MAX_WAIT seconds if the daemons don't exit - ( - sleep $MAX_WAIT && kill -9 "$BOOTSTRAP_MN_PID"; - echo "Sent SIGKILL to BOOTSTRAP_MN, daemon failed to stop within $MAX_WAIT second at end of test"; - ) & WAITER_BOOTSTRAP_MN=$! - - # Force KILL after MAX_WAIT seconds if the daemons don't exit - ( - sleep $MAX_WAIT && kill -9 "$STORAGE_MN_PID"; - echo "Sent SIGKILL to MN, daemon failed to stop within $MAX_WAIT second at end of test"; - ) & WAITER_MN=$! - - ( - sleep $MAX_WAIT && kill -9 "$CL_PID"; - echo "Sent SIGKILL to CL, daemon failed to stop within $MAX_WAIT second at end of test"; - ) & WAITER_CL=$! - - # Wait for daemons to exit - wait "$BOOTSTRAP_MN_PID" - wait "$STORAGE_MN_PID" - wait "$CL_PID" - - # Kill watchers - kill $WAITER_BOOTSTRAP_MN - kill $WAITER_MN - kill $WAITER_CL - - rm -f "${PIECE_1_PATH}" - rm -f "${PIECE_2_PATH}" - rm -f "${UNSEAL_PATH}" - rm -rf "${CL_REPO_DIR}" - rm -rf "${BOOTSTRAP_MN_REPO_DIR}" - rm -rf "${STORAGE_MN_REPO_DIR}" - rm -rf "${CL_SECTOR_DIR}" - rm -rf "${BOOTSTRAP_MN_SECTOR_DIR}" - rm -rf "${STORAGE_MN_SECTOR_DIR}" -} - -function free_port { - python -c "import socket; s = socket.socket(); s.bind(('', 0)); print(s.getsockname()[1])" -} - -function import_private_key { - ./go-filecoin wallet import "${FIXTURES_PATH}/$1".key \ - --repodir="$2" -} - -function init_local_daemon { - ./go-filecoin init \ - --auto-seal-interval-seconds="${AUTO_SEAL_INTERVAL_SECONDS}" \ - --repodir="$1" \ - --sectordir="$2" \ - --cmdapiaddr=/ip4/127.0.0.1/tcp/"$3" \ - --genesisfile="$4" -} - -function init_devnet_daemon { - if [[ "$CLUSTER" = "staging" ]]; then - ./go-filecoin init \ - --auto-seal-interval-seconds="${AUTO_SEAL_INTERVAL_SECONDS}" \ - 
--repodir="$1" \ - --cmdapiaddr=/ip4/127.0.0.1/tcp/"$2" \ - --devnet-staging \ - --genesisfile="http://test.kittyhawk.wtf:8020/genesis.car" - else - ./go-filecoin init \ - --auto-seal-interval-seconds="${AUTO_SEAL_INTERVAL_SECONDS}" \ - --repodir="$1" \ - --cmdapiaddr=/ip4/127.0.0.1/tcp/"$2" \ - --devnet-nightly \ - --genesisfile="http://nightly.kittyhawk.wtf:8020/genesis.car" - fi -} - -function start_daemon { - ./go-filecoin daemon \ - --repodir="$1" \ - --block-time="${BLOCK_TIME}" \ - --cmdapiaddr=/ip4/127.0.0.1/tcp/"$2" \ - --swarmlisten=/ip4/127.0.0.1/tcp/"$3" & -} - -function get_first_address { - ./go-filecoin id \ - --repodir="$1" \ - | jq -r ".Addresses[0]" -} - -function get_peer_id { - ./go-filecoin id \ - --repodir="$1" \ - | jq -r ".ID" -} - -function get_peers { - ./go-filecoin swarm peers \ - --repodir="$1" -} - -function wait_for_peers { - local __peers - - __peers=$(get_peers "$1") - until [[ ! -z "$__peers" ]]; do - __peers=$(get_peers "$1") - sleep 1 - done -} - -function swarm_connect { - ./go-filecoin swarm connect "$1" \ - --repodir="$2" - local __peers - - __peers=$(get_peers "$2") - until [[ "$__peers" = "$1" ]]; do - __peers=$(get_peers "$2") - sleep 1 - done -} - -function chain_ls { - ./go-filecoin chain ls --enc=json \ - --repodir="$1" -} - -function wait_for_message_in_chain_by_method_and_sender { - IFS=$'\n' # make newlines the only separator - - local __chain="" - local __hodl="" - - # set the maximum number of chain polls to FLOOR(seconds/10) - local __polls_remaining=$(($( printf "%.0f" "$4" )/10)) - - while [ -z $__hodl ]; do - # dump chain state to stdout if we time out - if [ $__polls_remaining -eq 0 ] - then - echo "timed out after waiting seconds=$4 for method=$1, sent from address=$2, to be included in repodir=$3 chain..." 
- chain_ls "$3" - unset IFS - exit 1 - fi - - __hodl=$(echo "$(chain_ls $3)" \ - | jq -r '.[].messages["/"]' | while read -r cid; do ./go-filecoin show messages $cid --enc=json --repodir=$3; done | jq -s 'add' \ - | jq ".[] | select(.meteredMessage != null) | .meteredMessage.message | select(.method == \"$1\").from | select(. == \"$2\")" 2>/dev/null | head -n 1 || true) - - __polls_remaining=$((__polls_remaining - 1)) - local seconds_remaining=$((__polls_remaining*10)) - echo "$(date "+%T") - sleeping for 10 seconds ($seconds_remaining seconds remaining - method=$1, sent from address=$2)" - echo "$__hodl" - sleep 10 - done - - unset IFS -} - -function create_miner { - ./go-filecoin miner create 100 \ - --gas-limit=10000 \ - --gas-price=1 \ - --repodir="$1" -} - -function send_fil { - ./go-filecoin message send \ - --from "$1" \ - --value $2 \ - --gas-limit=10000 \ - --gas-price=1 \ - "$3" \ - --repodir="$4" -} - -function set_wallet_default_address_in_config { - ./go-filecoin config wallet.defaultAddress \""$1"\" \ - --repodir="$2" -} - -function set_mining_address_in_config { - ./go-filecoin config mining.minerAddress \""$1"\" \ - --repodir="$2" -} - -function wait_mpool_size { - ./go-filecoin mpool \ - --wait-for-count="$1" \ - --repodir="$2" -} - -function set_price { - ./go-filecoin miner set-price --repodir="$3" --gas-price=1 --gas-limit=300 "$1" "$2" --enc=json | jq -r .MinerSetPriceResponse.AddAskCid.'"\/"' -} - -function miner_update_pid { - ./go-filecoin miner update-peerid "$1" "$2" \ - --gas-price=1 --gas-limit=300 \ - --repodir="$3" -} - -function message_wait { - ./go-filecoin message wait $1 --repodir=$2 -} - -function fork_message_wait { - eval "exec $1< <(./go-filecoin message wait $2 --repodir=$3)" -} - -function join { - cat <&"$1" -} diff --git a/functional-tests/mining_chain_test.go b/functional-tests/mining_chain_test.go deleted file mode 100644 index 34829a0a12..0000000000 --- a/functional-tests/mining_chain_test.go +++ /dev/null @@ -1,196 
+0,0 @@ -package functional - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/stretchr/testify/require" - "gotest.tools/assert" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/proofs" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -const defaultBlockTime = builtin.EpochDurationSeconds -const defaultPropDelay = 6 * time.Second - -func TestSingleMiner(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - genTime := int64(1000000000) - // The clock is intentionally set some way ahead of the genesis time so the miner can produce - // catch-up blocks as quickly as possible. - fakeClock := clock.NewFake(time.Unix(genTime, 0).Add(4 * time.Hour)) - - // The fixture is needed in order to use the presealed genesis sectors fixture. - // Future code could decouple the whole setup.json from the presealed information. 
- genCfg := loadGenesisConfig(t, fixtureGenCfg()) - seed := node.MakeChainSeed(t, genCfg) - chainClock := clock.NewChainClockFromClock(uint64(genTime), defaultBlockTime, defaultPropDelay, fakeClock) - - drandImpl := &drand.Fake{ - GenesisTime: time.Unix(genTime, 0).Add(-1 * defaultBlockTime), - FirstFilecoin: 0, - } - - nd := makeNode(ctx, t, seed, chainClock, drandImpl) - minerAddr, _, err := initNodeGenesisMiner(ctx, t, nd, seed, genCfg.Miners[0].Owner, fixturePresealPath()) - require.NoError(t, err) - - err = nd.Start(ctx) - require.NoError(t, err) - defer nd.Stop(ctx) - - // Inspect genesis state. - chainReader := nd.Chain().ChainReader - head := block.NewTipSetKey(chainReader.GenesisCid()) - assert.Assert(t, chainReader.GetHead().Equals(head)) - - // Mine a block. - blk, err := nd.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(1), blk.Height) - assert.Assert(t, head.Equals(blk.Parents)) - assert.Equal(t, minerAddr, blk.Miner) - assert.Assert(t, int64(blk.Timestamp) >= genTime) - assert.Assert(t, int64(blk.Timestamp) < genTime+defaultBlockTime) - head = block.NewTipSetKey(blk.Cid()) - - // Inspect chain state. - assert.Assert(t, chainReader.GetHead().Equals(head)) - - // Mine some more and expect a connected chain. 
- for i := 2; i <= 5; i++ { - blk, err = nd.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - assert.Assert(t, head.Equals(blk.Parents)) - assert.Equal(t, abi.ChainEpoch(i), blk.Height) - head = block.NewTipSetKey(blk.Cid()) - } -} - -func TestSyncFromSingleMiner(t *testing.T) { - tf.IntegrationTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) - defer cancel() - - genTime := int64(1000000000) - fakeClock := clock.NewFake(time.Unix(genTime, 0)) - - drandImpl := &drand.Fake{ - GenesisTime: time.Unix(genTime, 0).Add(-1 * defaultBlockTime), - FirstFilecoin: 0, - } - - genCfg := loadGenesisConfig(t, fixtureGenCfg()) - seed := node.MakeChainSeed(t, genCfg) - chainClock := clock.NewChainClockFromClock(uint64(genTime), defaultBlockTime, defaultPropDelay, fakeClock) - assert.Equal(t, fakeClock.Now(), chainClock.Now()) - - ndMiner := makeNode(ctx, t, seed, chainClock, drandImpl) - _, _, err := initNodeGenesisMiner(ctx, t, ndMiner, seed, genCfg.Miners[0].Owner, fixturePresealPath()) - require.NoError(t, err) - - ndValidator := makeNode(ctx, t, seed, chainClock, drandImpl) - - err = ndMiner.Start(ctx) - require.NoError(t, err) - err = ndValidator.Start(ctx) - require.NoError(t, err) - defer ndMiner.Stop(ctx) - defer ndValidator.Stop(ctx) - - node.ConnectNodes(t, ndValidator, ndMiner) - - // Check the nodes are starting in the same place. - head := ndMiner.Chain().ChainReader.GetHead() - assert.Assert(t, ndValidator.Chain().ChainReader.GetHead().Equals(head)) - - // Mine some blocks. - for i := 1; i <= 3; i++ { - fakeClock.Advance(defaultBlockTime) - blk, err := ndMiner.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(i), blk.Height) - head = block.NewTipSetKey(blk.Cid()) - } - - // Inspect validator node chain state. 
- require.NoError(t, th.WaitForIt(50, 20*time.Millisecond, func() (bool, error) { - return ndValidator.Chain().ChainReader.GetHead().Equals(head), nil - }), "validator failed to sync new head") -} - -func TestBootstrapWindowedPoSt(t *testing.T) { - // This test can require up to a whole proving period to elapse, which is slow even with fake proofs. - tf.FunctionalTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - genTime := int64(1000000000) - fakeClock := clock.NewFake(time.Unix(genTime, 0)) - - // Load genesis config fixture. - genCfg := loadGenesisConfig(t, fixtureGenCfg()) - // set proving period start to something soon - start := abi.ChainEpoch(0) - genCfg.Miners[0].ProvingPeriodStart = &start - - seed := node.MakeChainSeed(t, genCfg) - - // fake proofs so we can run through a proving period quickly - miner := test.NewNodeBuilder(t). - WithBuilderOpt(node.ChainClockConfigOption(clock.NewChainClockFromClock(uint64(genTime), defaultBlockTime, defaultPropDelay, fakeClock))). - WithGenesisInit(seed.GenesisInitFunc). - WithBuilderOpt(node.DrandConfigOption(&drand.Fake{ - GenesisTime: time.Unix(genTime, 0).Add(-1 * defaultBlockTime), - FirstFilecoin: 0, - })). - WithBuilderOpt(node.VerifierConfigOption(&proofs.FakeVerifier{})). - WithBuilderOpt(node.PoStGeneratorOption(&consensus.TestElectionPoster{})). 
- Build(ctx) - - _, _, err := initNodeGenesisMiner(ctx, t, miner, seed, genCfg.Miners[0].Owner, fixturePresealPath()) - require.NoError(t, err) - - err = miner.Start(ctx) - require.NoError(t, err) - - err = miner.StorageMining.Start(ctx) - require.NoError(t, err) - - // mine once to enter proving period - go simulateBlockMining(ctx, t, fakeClock, defaultBlockTime, miner) - - minerAddr := miner.Repo.Config().Mining.MinerAddress - - // Post should have been triggered, simulate mining while waiting for update to proving period start - for i := 0; i < 50; i++ { - head := miner.Chain().ChainReader.GetHead() - - view, err := miner.Chain().State.StateView(head) - require.NoError(t, err) - - poSts, err := view.MinerSuccessfulPoSts(ctx, minerAddr) - require.NoError(t, err) - - if poSts > 0 { - return - } - - // We need to mine enough blocks to get to get to the deadline that contains our sectors. Add some friction here. - time.Sleep(2 * time.Second) - } - t.Fatal("Timouut waiting for windowed PoSt") -} diff --git a/functional-tests/network-deployment/README.md b/functional-tests/network-deployment/README.md deleted file mode 100644 index d2af40b75a..0000000000 --- a/functional-tests/network-deployment/README.md +++ /dev/null @@ -1,68 +0,0 @@ -Network Deployment Tests -======================== - -These tests can be run against a deployed kittyhawk network to verify -expected behavior. - -All tests can be run by invoking the `test` command with the `-deployment ` -flag from the project root. A `go-filecoin` binary should be built and located in the -project root (`go run ./build build-filecoin`). - -``` -$ go run ./build test -deployment nightly -unit false -``` - -Their are currently four suported networks: `nightly`, `staging`, `users`, and `local`. - -The `local` network is primarly used to help during development as the network will run -with a `5s` block time and smaller 1KiB sectors. - -## Writing Tests - -A deployment test is any test with a call to `tf.Deployment(t)`. 
The call to `tf.Deployment` -returns the network name the test should be configured for. This value can be passed into -`fastesting.NewDeploymentEnvironment` so everything is configured correctly. - -Due to the large setup cost (chain syncing / processing) the same process can be used for -related tests as long as no state between tests is shared (creating a miner in one, and using it -in another). - -``` -package networkdeployment_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/fastesting" -) - -func TestFoo(t *testing.T) { - network := tf.DeploymentTest(t) - - ctx := context.Background() - ctx, env := fastesting.NewDeploymentEnvironment(ctx, t, network, fast.FilecoinOpts{}) - - // Teardown will shutdown all running processes the environment knows about - // and cleanup anything the environment setup. This includes the directory - // the environment was created to use. - defer func() { - err := env.Teardown(ctx) - require.NoError(t, err) - }() - - node := env.RequireNewNodeWithFunds() - - // Tests can reuse the same enviroment or even a shared process. 
Tests should not depend - // on output of any other test such as a created miner, etc - - t.Run("Do something with node", func(t *testing.T) { - }) - - t.Run("Do something else with node", func(t *testing.T) { - }) -``` diff --git a/functional-tests/network-deployment/bootstrap_test.go b/functional-tests/network-deployment/bootstrap_test.go deleted file mode 100644 index 6925085384..0000000000 --- a/functional-tests/network-deployment/bootstrap_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package networkdeployment_test - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/fastesting" - - pr "github.com/libp2p/go-libp2p-core/peer" - "github.com/multiformats/go-multiaddr" -) - -// TestBootstrap verifies information about the bootstrap peers -func TestBootstrap(t *testing.T) { - network := tf.DeploymentTest(t) - - ctx := context.Background() - ctx, env := fastesting.NewDeploymentEnvironment(ctx, t, network, fast.FilecoinOpts{}) - defer func() { - err := env.Teardown(ctx) - require.NoError(t, err) - }() - - client := env.RequireNewNodeStarted() - - t.Run("Check that we are connected to bootstrap peers", func(t *testing.T) { - maddrChan := make(chan multiaddr.Multiaddr, 16) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - go func() { - defer close(maddrChan) - protop2p := multiaddr.ProtocolWithCode(multiaddr.P_P2P) - for { - select { - case <-ctx.Done(): - return - case <-time.After(5 * time.Second): - peers, err := client.SwarmPeers(ctx) - assert.NoError(t, err) - - for _, peer := range peers { - transport, err := multiaddr.NewMultiaddr(peer.Addr) - require.NoError(t, err) - - // /ipfs/ - peercomp, err := multiaddr.NewComponent(protop2p.Name, peer.Peer) - require.NoError(t, err) - - fullmaddr := 
transport.Encapsulate(peercomp) - maddrChan <- fullmaddr - } - } - } - }() - - bootstrapAddrs := networkBootstrapPeers(network) - require.NotEmpty(t, bootstrapAddrs) - - bootstrapPeers, err := createResolvedPeerInfoMap(ctx, bootstrapAddrs) - require.NoError(t, err) - - for maddr := range maddrChan { - pinfo, err := pr.AddrInfoFromP2pAddr(maddr) - require.NoError(t, err) - - if _, ok := bootstrapPeers[pinfo.ID]; !ok { - continue - } - - // pinfo will have only a single address as it comes from a single multiaddr - require.NotEmpty(t, pinfo.Addrs) - addr := pinfo.Addrs[0] - - t.Logf("Looking at addr %s", addr) - for _, a := range bootstrapPeers[pinfo.ID].Addrs { - if addr.Equal(a) { - t.Logf("Found addr for peer %s", pinfo.ID) - delete(bootstrapPeers, pinfo.ID) - } - } - - if len(bootstrapPeers) == 0 { - cancel() - } - - for peerID := range bootstrapPeers { - t.Logf("Still waiting for %s", peerID) - } - } - }) -} diff --git a/functional-tests/network-deployment/logging_test.go b/functional-tests/network-deployment/logging_test.go deleted file mode 100644 index 661199175d..0000000000 --- a/functional-tests/network-deployment/logging_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package networkdeployment_test - -import ( - logging "github.com/ipfs/go-log/v2" -) - -func init() { - logging.SetAllLoggers(logging.LevelInfo) -} diff --git a/functional-tests/network-deployment/relay_check_test.go b/functional-tests/network-deployment/relay_check_test.go deleted file mode 100644 index 0cdabb9d4d..0000000000 --- a/functional-tests/network-deployment/relay_check_test.go +++ /dev/null @@ -1,316 +0,0 @@ -package networkdeployment_test - -import ( - "context" - "io" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/fixtures/networks" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/tools/fast" - 
"github.com/filecoin-project/go-filecoin/tools/fast/fastesting" - - "github.com/ipfs/go-cid" - pr "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/routing" - "github.com/multiformats/go-multiaddr" - "github.com/multiformats/go-multiaddr-dns" -) - -// TestRelayCheck is a two part test -// 1) Check that the relay peers are advertising their addresses under -// the correct dht key -// 2) Check that a node behind a NAT aquires a circuit relay address from -// one of the relay peers -func TestRelayCheck(t *testing.T) { - network := tf.DeploymentTest(t) - - ctx := context.Background() - ctx, env := fastesting.NewDeploymentEnvironment(ctx, t, network, fast.FilecoinOpts{}) - defer func() { - err := env.Teardown(ctx) - require.NoError(t, err) - }() - - client := env.RequireNewNodeStarted() - - // In this test we query the dht looking for providers of the relay key - // and verify that all of the expected providers show up at some point. - t.Run("Check for relay providers", func(t *testing.T) { - dhtKey, err := cid.Decode("zb2rhZ6FpTqFZyiAtpQFRKmybPMjq5A7oPHfmD5WeBko5kRAo") - require.NoError(t, err) - - maddrChan := make(chan multiaddr.Multiaddr, 16) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - eventChan, err := scanDhtProviders(ctx, t, client, dhtKey) - require.NoError(t, err) - - go func() { - defer close(maddrChan) - for event := range eventChan { - // There is only a single PeerInfo in the response see command `findprovs`. - pinfo := event.Responses[0] - - // For some reason if a peer responses to a query, it will not include its - // own addresses. This might be a bug. However, all it means is that we need - // to take at least two arounds from two different peers, which is largely - // just luck of the draw. 
- if len(pinfo.Addrs) == 0 { - t.Logf("No addresses returned for peer %s", pinfo.ID) - continue - } - - t.Logf("Found record for peer %s", pinfo.ID) - - // Converts the pinfo into a set of addresses - maddrs, err := pr.AddrInfoToP2pAddrs(pinfo) - if err != nil { - t.Logf("Failed to get maddrs") - continue - } - - for _, maddr := range maddrs { - maddrChan <- maddr - } - } - }() - - relayPeersAddrs := networkRelayPeers(network) - require.NotEmpty(t, relayPeersAddrs) - - // To verify that all of the relay peers are advertising correctly we need to - // see one of their addresses come through when we query the relay provider key. - // Below we construct a peer.ID mapping to a PeerInfo that contains addresses we - // expect to see. - // The address we expect to see is either the dns4 multiaddr from relayPeersAddrs, - // or the resolved ip4 address. - relayPeers, err := createResolvedPeerInfoMap(ctx, relayPeersAddrs) - require.NoError(t, err) - - for maddr := range maddrChan { - pinfo, err := pr.AddrInfoFromP2pAddr(maddr) - require.NoError(t, err) - - if _, ok := relayPeers[pinfo.ID]; !ok { - continue - } - - // pinfo will have only a single address as it comes from a single multiaddr - require.NotEmpty(t, pinfo.Addrs) - addr := pinfo.Addrs[0] - - t.Logf("Looking at addr %s", addr) - for _, a := range relayPeers[pinfo.ID].Addrs { - if addr.Equal(a) { - t.Logf("Found addr for peer %s", pinfo.ID) - delete(relayPeers, pinfo.ID) - } - } - - if len(relayPeers) == 0 { - cancel() - } - - for peerID := range relayPeers { - t.Logf("Still waiting for %s", peerID) - } - } - }) - - // In this test we want to verify that we retrieve a circuit relay address - // from one of our expected relay providers. 
- t.Run("Has circuit address", func(t *testing.T) { - details, err := client.ID(ctx) - require.NoError(t, err) - - maddrChan := make(chan multiaddr.Multiaddr, 16) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - go func() { - defer close(maddrChan) - for { - select { - case <-ctx.Done(): - return - case <-time.After(5 * time.Second): - details, err := client.ID(ctx) - assert.NoError(t, err) - - for _, maddr := range details.Addresses { - maddrChan <- maddr - } - } - } - }() - - relayPeersAddrs := networkRelayPeers(network) - require.NotEmpty(t, relayPeersAddrs) - - relayPeers, err := createResolvedPeerInfoMap(ctx, relayPeersAddrs) - require.NoError(t, err) - - // To verify that we have a circuit address from one of our relays we need to - // strip off the circuit component, and compare the address to the known addresses - // of our relays - - protop2p := multiaddr.ProtocolWithCode(multiaddr.P_P2P) - protocircuit := multiaddr.ProtocolWithCode(multiaddr.P_CIRCUIT) - - // /ipfs/ - peercomp, err := multiaddr.NewComponent(protop2p.Name, details.ID.String()) - require.NoError(t, err) - - // /p2p-circuit - relaycomp, err := multiaddr.NewComponent(protocircuit.Name, "") - require.NoError(t, err) - - // /p2p-circuit/ipfs/ - relaypeer := relaycomp.Encapsulate(peercomp) - - for maddr := range maddrChan { - if _, err := maddr.ValueForProtocol(multiaddr.P_CIRCUIT); err != nil { - continue - } - - t.Logf("Found circuit addr %s", maddr) - - relayaddr := maddr.Decapsulate(relaypeer) - pinfo, err := pr.AddrInfoFromP2pAddr(relayaddr) - require.NoError(t, err) - - if _, ok := relayPeers[pinfo.ID]; !ok { - continue - } - - // pinfo will have only a single address as it comes from a single multiaddr - require.NotEmpty(t, pinfo.Addrs) - addr := pinfo.Addrs[0] - - if _, ok := relayPeers[pinfo.ID]; !ok { - t.Logf("Found circuit address %s from unexpected peer %s", maddr, pinfo.ID) - continue - } - - for _, a := range relayPeers[pinfo.ID].Addrs { - if addr.Equal(a) { - 
t.Logf("Addr relays through %s", pinfo.ID) - cancel() - break - } - } - } - }) -} - -func createResolvedPeerInfoMap(ctx context.Context, addrs []string) (map[pr.ID]*pr.AddrInfo, error) { - protop2p := multiaddr.ProtocolWithCode(multiaddr.P_P2P) - - relayPeers := make(map[pr.ID]*pr.AddrInfo) - for _, addr := range addrs { - maddr, err := multiaddr.NewMultiaddr(addr) - if err != nil { - return nil, err - } - - pinfo, err := pr.AddrInfoFromP2pAddr(maddr) - if err != nil { - return nil, err - } - - // PeerInfo stores just the transport of the multiaddr and removes the peer - // component from the end. However, when we resolve the dns4 address to ip4 - // we get back the full address, so we want to strip the peer component - // for consistently - - // This is the /ipfs/ component - peercomp, err := multiaddr.NewComponent(protop2p.Name, pinfo.ID.String()) - if err != nil { - return nil, err - } - - rmaddrs, err := madns.Resolve(ctx, maddr) - if err != nil { - return nil, err - } - - for _, maddr := range rmaddrs { - pinfo.Addrs = append(pinfo.Addrs, maddr.Decapsulate(peercomp)) - } - - relayPeers[pinfo.ID] = pinfo - } - - return relayPeers, nil -} - -// scanDhtProviders runs a `findprovs` at least every 5 seconds and reads through all -// of the events looking for `notif.Provider` events. These events contain PeerInfo -// which we convert into a slice of multiaddrs and publish on our maddrChan. 
-func scanDhtProviders(ctx context.Context, t *testing.T, node *fast.Filecoin, dhtKey cid.Cid) (<-chan routing.QueryEvent, error) { - eventChan := make(chan routing.QueryEvent, 16) - - go func() { - defer close(eventChan) - for { - select { - case <-ctx.Done(): - return - case <-time.After(5 * time.Second): - t.Logf("Finding Providers for %s", dhtKey) - decoder, err := node.DHTFindProvs(ctx, dhtKey) - if err != nil { - t.Logf("Failed to run `findprovs`: %s", err) - continue - } - - // Read all of the events from `findprovs` - for { - var event routing.QueryEvent - if err := decoder.Decode(&event); err != nil { - if err == io.EOF { - break - } - - t.Logf("Decode failed %s", err) - continue - } - - if event.Type == routing.Provider { - if len(event.Responses) == 0 { - t.Logf("No responses for provider event") - continue - } - - eventChan <- event - } - } - } - } - }() - - return eventChan, nil -} - -// returns the list of peer address for the network bootstrap peers -func networkBootstrapPeers(network string) []string { - // Currently all bootstrap addresses are relay peers - - switch network { - case "interop": - return networks.Interop().Bootstrap.Addresses - } - - return []string{} -} - -// returns the list of peer address for network relay peers -func networkRelayPeers(network string) []string { - return networkBootstrapPeers(network) -} diff --git a/functional-tests/plege_sector_test.go b/functional-tests/plege_sector_test.go deleted file mode 100644 index c26eee6aba..0000000000 --- a/functional-tests/plege_sector_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package functional - -import ( - "context" - "testing" - "time" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/drand" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" - - "github.com/stretchr/testify/require" -) - -func TestMiningPledgeSector(t *testing.T) { - tf.FunctionalTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - genTime := int64(1000000000) - blockTime := 1 * time.Second - propDelay := 200 * time.Millisecond - fakeClock := clock.NewFake(time.Unix(genTime, 0)) - - genCfg := loadGenesisConfig(t, fixtureGenCfg()) - genCfg.Miners = append(genCfg.Miners, &gengen.CreateStorageMinerConfig{ - Owner: 1, - SealProofType: constants.DevSealProofType, - }) - seed := node.MakeChainSeed(t, genCfg) - chainClock := clock.NewChainClockFromClock(uint64(genTime), blockTime, propDelay, fakeClock) - - drandImpl := &drand.Fake{ - GenesisTime: time.Unix(genTime, 0).Add(-1 * blockTime), - FirstFilecoin: 0, - } - - bootstrapMiner := makeNode(ctx, t, seed, chainClock, drandImpl) - _, _, err := initNodeGenesisMiner(ctx, t, bootstrapMiner, seed, genCfg.Miners[0].Owner, fixturePresealPath()) - require.NoError(t, err) - - newMiner := makeNode(ctx, t, seed, chainClock, drandImpl) - seed.GiveKey(t, newMiner, 1) - - err = bootstrapMiner.Start(ctx) - require.NoError(t, err) - err = newMiner.Start(ctx) - require.NoError(t, err) - defer bootstrapMiner.Stop(ctx) - defer newMiner.Stop(ctx) - - node.ConnectNodes(t, newMiner, bootstrapMiner) - - // Have bootstrap miner mine continuously so newMiner's pledgeSector can put multiple messages on chain. 
- go simulateBlockMining(ctx, t, fakeClock, blockTime, bootstrapMiner) - - // create a miner - env := commands.CreateServerEnv(ctx, newMiner) - porcelainAPI := commands.GetPorcelainAPI(env) - peer := newMiner.Network().Network.GetPeerID() - - _, err = porcelainAPI.MinerCreate(ctx, seed.Addr(t, 1), types.NewAttoFILFromFIL(1), 10000, abi.RegisteredProof_StackedDRG2KiBSeal, peer, types.NewAttoFILFromFIL(5)) - require.NoError(t, err) - - // setup mining with new miner address and start mining - require.NoError(t, newMiner.SetupMining(ctx)) - err = newMiner.StorageMining.Start(ctx) - require.NoError(t, err) - - err = newMiner.PieceManager().PledgeSector(ctx) - require.NoError(t, err) - - // wait while checking to see if the new miner has added any sectors (indicating sealing was successful) - for i := 0; i < 100; i++ { - ts, err := newMiner.PorcelainAPI.ChainHead() - require.NoError(t, err) - - maddr, err := newMiner.BlockMining.BlockMiningAPI.MinerAddress() - require.NoError(t, err) - - status, err := newMiner.PorcelainAPI.MinerGetStatus(ctx, maddr, ts.Key()) - require.NoError(t, err) - - if status.SectorCount > 0 { - return - } - - time.Sleep(2 * time.Second) - } - t.Fatal("Did not add sectors in the allotted time") -} diff --git a/functional-tests/retrieval b/functional-tests/retrieval deleted file mode 100755 index b853ba0f98..0000000000 --- a/functional-tests/retrieval +++ /dev/null @@ -1,229 +0,0 @@ -#!/usr/bin/env bash - -set -eo pipefail - -DIR="$(dirname "${BASH_SOURCE[0]}")" - -source "$DIR/lib/helpers.bash" - -export GO_FILECOIN_LOG_LEVEL=3 -export FILECOIN_PROOFS_FAST_DELAY_SECONDS=1 -export RUST_LOG=info - -if [ -z "$1" ]; then - FIXTURES_PATH="./fixtures/test" - COMMIT_SECTOR_AND_POST_TIMEOUT=120 -else - FIXTURES_PATH="./fixtures/live" - COMMIT_SECTOR_AND_POST_TIMEOUT=3600 -fi - -go run ./build/*.go build - -if [ -z "$2" ]; then - AUTO_SEAL_INTERVAL_SECONDS="0" -else - AUTO_SEAL_INTERVAL_SECONDS="$2" -fi - -# forward-declare stuff that we need to clean up 
-STORAGE_MN_PID="" -BOOTSTRAP_MN_PID="" -CL_PID="" -STORAGE_MN_REPO_DIR="" -BOOTSTRAP_MN_REPO_DIR="" -CL_REPO_DIR="" -STORAGE_MN_SECTOR_DIR="" -BOOTSTRAP_MN_SECTOR_DIR="" -CL_SECTOR_DIR="" -PIECE_1_PATH=$(mktemp) -PIECE_2_PATH=$(mktemp) -UNSEAL_PATH=$(mktemp) -BLOCK_TIME="5s" -HODL="HODL HODL HODL HODL HODL HODL HODL HODL HODL HODL HODL HODL HODL HODL HODL" - -if [ "${FIXTURES_PATH}" = "./fixtures/test" ] ; then - dd if=/dev/urandom of="${PIECE_1_PATH}" bs=1 count=500 - dd if=/dev/urandom of="${PIECE_2_PATH}" bs=1 count=500 -else - # Maximum number of user piece-bytes in a Live-configuration sector is - # equal to 266338304. Our first piece will not fill up the whole sector. - dd if=/dev/urandom of="${PIECE_1_PATH}" bs=$((1024*1024)) count=100 - # Adding this second piece will cause the sector sealing process to run. - dd if=/dev/urandom of="${PIECE_2_PATH}" bs=$((1024*1024)) count=100 -fi - -trap finish EXIT - -STORAGE_MN_REPO_DIR=$(mktemp -d $(mktemp -d /tmp/XXXXXX)/XXXXXX) -rm -rf $STORAGE_MN_REPO_DIR -STORAGE_MN_SECTOR_DIR=$(mktemp -d $(mktemp -d /tmp/XXXXXX)/XXXXXX) -STORAGE_MN_CMDAPI_PORT=$(free_port) -STORAGE_MN_SWARM_PORT=$(free_port) - -BOOTSTRAP_MN_REPO_DIR=$(mktemp -d $(mktemp -d /tmp/XXXXXX)/XXXXXX) -rm -rf $BOOTSTRAP_MN_REPO_DIR -BOOTSTRAP_MN_SECTOR_DIR=$(mktemp -d $(mktemp -d /tmp/XXXXXX)/XXXXXX) -BOOTSTRAP_MN_CMDAPI_PORT=$(free_port) -BOOTSTRAP_MN_SWARM_PORT=$(free_port) - -CL_REPO_DIR=$(mktemp -d $(mktemp -d /tmp/XXXXXX)/XXXXXX) -rm -rf $CL_REPO_DIR -CL_SECTOR_DIR=$(mktemp -d $(mktemp -d /tmp/XXXXXX)/XXXXXX) -CL_CMDAPI_PORT=$(free_port) -CL_SWARM_PORT=$(free_port) - -echo "" -echo "generating private keys..." -BOOTSTRAP_MN_MINER_FIL_ADDR=$(jq -r '.Miners[] | select(.Owner == 0).Address' < "${FIXTURES_PATH}/gen.json") - -echo "" -echo "initializing daemons..." 
-init_local_daemon "${BOOTSTRAP_MN_REPO_DIR}" "${BOOTSTRAP_MN_SECTOR_DIR}" "${BOOTSTRAP_MN_CMDAPI_PORT}" "${FIXTURES_PATH}/genesis.car" -init_local_daemon "${STORAGE_MN_REPO_DIR}" "${STORAGE_MN_SECTOR_DIR}" "${STORAGE_MN_CMDAPI_PORT}" "${FIXTURES_PATH}/genesis.car" -init_local_daemon "${CL_REPO_DIR}" "${CL_SECTOR_DIR}" "${CL_CMDAPI_PORT}" "${FIXTURES_PATH}/genesis.car" - -echo "" -echo "start daemons..." -start_daemon "${STORAGE_MN_REPO_DIR}" "${STORAGE_MN_CMDAPI_PORT}" "${STORAGE_MN_SWARM_PORT}" -STORAGE_MN_PID=$! -start_daemon "${BOOTSTRAP_MN_REPO_DIR}" "${BOOTSTRAP_MN_CMDAPI_PORT}" "${BOOTSTRAP_MN_SWARM_PORT}" -BOOTSTRAP_MN_PID=$! -start_daemon "${CL_REPO_DIR}" "${CL_CMDAPI_PORT}" "${CL_SWARM_PORT}" -CL_PID=$! - -sleep 2 - -echo "" -echo "client imports pieces..." -PIECE_1_CID=$(./go-filecoin client import --repodir="${CL_REPO_DIR}" < "${PIECE_1_PATH}") -PIECE_2_CID=$(./go-filecoin client import --repodir="${CL_REPO_DIR}" < "${PIECE_2_PATH}") - -echo "" -echo "importing private keys..." -BOOTSTRAP_MN_MINER_OWNER_FIL_ADDR=$(import_private_key 0 "${BOOTSTRAP_MN_REPO_DIR}") -CL_FIL_ADDRESS=$(import_private_key 1 "${CL_REPO_DIR}") -STORAGE_MN_MINER_OWNER_FIL_ADDR=$(import_private_key 2 "${STORAGE_MN_REPO_DIR}") - -echo "" -echo "ensure that miner address is set so that the bootstrap miner-node can mine..." -set_mining_address_in_config "${BOOTSTRAP_MN_MINER_FIL_ADDR}" "${BOOTSTRAP_MN_REPO_DIR}" - -echo "" -echo "node default address should match what's associated with imported SK..." -set_wallet_default_address_in_config "${CL_FIL_ADDRESS}" "${CL_REPO_DIR}" -set_wallet_default_address_in_config "${BOOTSTRAP_MN_MINER_OWNER_FIL_ADDR}" "${BOOTSTRAP_MN_REPO_DIR}" -set_wallet_default_address_in_config "${STORAGE_MN_MINER_OWNER_FIL_ADDR}" "${STORAGE_MN_REPO_DIR}" - -echo "" -echo "get storage mining node's libp2p identity..." -STORAGE_MN_PEER_ID=$(get_peer_id "${STORAGE_MN_REPO_DIR}") - -echo "" -echo "connecting daemons..." 
-swarm_connect "$(get_first_address "${CL_REPO_DIR}")" "${BOOTSTRAP_MN_REPO_DIR}" -swarm_connect "$(get_first_address "${BOOTSTRAP_MN_REPO_DIR}")" "${STORAGE_MN_REPO_DIR}" - -echo "" -echo "" -echo "" -echo "********************** BEGIN STORAGE PROTOCOL" -echo "" -echo "" -echo "" - -echo "" -echo "bootstrap miner node starts mining (so that messages can be processed)..." -./go-filecoin mining start \ - --repodir="$BOOTSTRAP_MN_REPO_DIR" \ - -echo "" -echo "bootstrap miner shares some funds with the storage miner..." -SEND_FIL_MSG_CID=$(send_fil "$BOOTSTRAP_MN_MINER_OWNER_FIL_ADDR" 100 "${STORAGE_MN_MINER_OWNER_FIL_ADDR}" "${BOOTSTRAP_MN_REPO_DIR}") - -echo "" -echo "block until FIL-transferring messages are in blockchain..." -message_wait "${SEND_FIL_MSG_CID}" "${BOOTSTRAP_MN_REPO_DIR}" -message_wait "${SEND_FIL_MSG_CID}" "${CL_REPO_DIR}" -message_wait "${SEND_FIL_MSG_CID}" "${STORAGE_MN_REPO_DIR}" - -echo "" -echo "create a new miner actor (for storage miner)..." -STORAGE_MN_MINER_FIL_ADDR=$(create_miner "${STORAGE_MN_REPO_DIR}") - -echo "" -echo "storage miner node starts mining (so that it processes storage proposals)..." -./go-filecoin mining start \ - --repodir="$STORAGE_MN_REPO_DIR" \ - -echo "" -echo "update miner's libp2p identity to match its node's..." -STORAGE_MN_MINER_UPDATE_PID_MSG_CID=$(miner_update_pid "${STORAGE_MN_MINER_FIL_ADDR}" "${STORAGE_MN_PEER_ID}" "${STORAGE_MN_REPO_DIR}") - -echo "" -echo "storage miner adds its ask to the market..." -STORAGE_MN_MINER_SET_PRICE_MSG_CID=$(set_price 10 10000 "${STORAGE_MN_REPO_DIR}") - -echo "" -echo "block until miner peer id-update and set price-messages appear in chains..." 
-message_wait "${STORAGE_MN_MINER_UPDATE_PID_MSG_CID}" "${BOOTSTRAP_MN_REPO_DIR}" -message_wait "${STORAGE_MN_MINER_UPDATE_PID_MSG_CID}" "${CL_REPO_DIR}" -message_wait "${STORAGE_MN_MINER_UPDATE_PID_MSG_CID}" "${STORAGE_MN_REPO_DIR}" -message_wait "${STORAGE_MN_MINER_SET_PRICE_MSG_CID}" "${BOOTSTRAP_MN_REPO_DIR}" -message_wait "${STORAGE_MN_MINER_SET_PRICE_MSG_CID}" "${CL_REPO_DIR}" -message_wait "${STORAGE_MN_MINER_SET_PRICE_MSG_CID}" "${STORAGE_MN_REPO_DIR}" - -echo "" -echo "client proposes a storage deal, which transfers file 1..." -PROPOSAL1_CID=$(./go-filecoin client propose-storage-deal "${STORAGE_MN_MINER_FIL_ADDR}" "${PIECE_1_CID}" 0 5 --repodir="$CL_REPO_DIR" --enc=json | jq -r '.ProposalCid["/"]') -echo "proposal 1 cid: $PROPOSAL1_CID" - -echo "" -echo "client proposes a storage deal, which transfers piece 2..." -PROPOSAL2_CID=$(./go-filecoin client propose-storage-deal "${STORAGE_MN_MINER_FIL_ADDR}" "${PIECE_2_CID}" 0 5 --repodir="$CL_REPO_DIR" --enc=json | jq -r '.ProposalCid["/"]') -echo "proposal 2 cid: $PROPOSAL2_CID" - -echo "" -echo "wait for commitSector sent by miner owner to be included in a block viewable by all nodes..." -wait_for_message_in_chain_by_method_and_sender commitSector "${STORAGE_MN_MINER_OWNER_FIL_ADDR}" "${CL_REPO_DIR}" "${COMMIT_SECTOR_AND_POST_TIMEOUT}" -wait_for_message_in_chain_by_method_and_sender commitSector "${STORAGE_MN_MINER_OWNER_FIL_ADDR}" "${BOOTSTRAP_MN_REPO_DIR}" "${COMMIT_SECTOR_AND_POST_TIMEOUT}" -wait_for_message_in_chain_by_method_and_sender commitSector "${STORAGE_MN_MINER_OWNER_FIL_ADDR}" "${STORAGE_MN_REPO_DIR}" "${COMMIT_SECTOR_AND_POST_TIMEOUT}" - -echo "" -echo "wait for submitPoSt, too..." 
-wait_for_message_in_chain_by_method_and_sender submitPoSt "${STORAGE_MN_MINER_OWNER_FIL_ADDR}" "${CL_REPO_DIR}" "${COMMIT_SECTOR_AND_POST_TIMEOUT}" -wait_for_message_in_chain_by_method_and_sender submitPoSt "${STORAGE_MN_MINER_OWNER_FIL_ADDR}" "${BOOTSTRAP_MN_REPO_DIR}" "${COMMIT_SECTOR_AND_POST_TIMEOUT}" -wait_for_message_in_chain_by_method_and_sender submitPoSt "${STORAGE_MN_MINER_OWNER_FIL_ADDR}" "${STORAGE_MN_REPO_DIR}" "${COMMIT_SECTOR_AND_POST_TIMEOUT}" - -echo "" -echo "storage deal 1 status:" -./go-filecoin client query-storage-deal "${PROPOSAL1_CID}" --repodir="$CL_REPO_DIR" --enc=json | jq . - -echo "" -echo "storage deal 2 status:" -./go-filecoin client query-storage-deal "${PROPOSAL2_CID}" --repodir="$CL_REPO_DIR" --enc=json | jq . - -echo "" -echo "" -echo "" -echo "********************** BEGIN RETRIEVAL PROTOCOL" -echo "" -echo "" -echo "" - -./go-filecoin retrieval-client retrieve-piece "${STORAGE_MN_MINER_FIL_ADDR}" "${PIECE_1_CID}" \ - --repodir="${CL_REPO_DIR}" > "${UNSEAL_PATH}" - -GOT=$(shasum < "${UNSEAL_PATH}") -EXPECTED=$(shasum < "${PIECE_1_PATH}") - -if [ "${GOT}" = "${EXPECTED}" ]; then - echo "Round trip passed!" - exit 0 -else - echo "Round trip Failed!, expected file" - echo "${UNSEAL_PATH}" - echo "to have same contents as file" - echo "${PIECE_1_PATH}" - exit 1 -fi diff --git a/functional-tests/run b/functional-tests/run deleted file mode 100755 index 63c54409bb..0000000000 --- a/functional-tests/run +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -e -set -o pipefail - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" - -# Go tests -go test -v "./$(basename $DIR)/..." -functional - -# Shell Tests -. 
$DIR/retrieval - - diff --git a/go.mod b/go.mod index c4ea375001..a3411f085b 100644 --- a/go.mod +++ b/go.mod @@ -1,105 +1,324 @@ -module github.com/filecoin-project/go-filecoin +module github.com/filecoin-project/venus -go 1.13 +go 1.18 require ( - contrib.go.opencensus.io/exporter/jaeger v0.1.0 - contrib.go.opencensus.io/exporter/prometheus v0.1.0 - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect - github.com/Microsoft/go-winio v0.4.14 // indirect - github.com/cskr/pubsub v1.0.2 - github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v0.7.3-0.20190315170154-87d593639c77 - github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/drand/drand v0.8.1 - github.com/drand/kyber v1.0.1-0.20200331114745-30e90cc60f99 - github.com/filecoin-project/chain-validation v0.0.6-0.20200518190139-483332336e8e - github.com/filecoin-project/filecoin-ffi v0.26.1-0.20200508175440-05b30afeb00d - github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be - github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2 - github.com/filecoin-project/go-bitfield v0.0.1 - github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/filecoin-project/go-data-transfer v0.3.0 - github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 - github.com/filecoin-project/go-fil-markets v0.2.7 - github.com/filecoin-project/go-leb128 v0.0.0-20190212224330-8d79a5489543 - github.com/filecoin-project/go-paramfetch v0.0.2-0.20200505180321-973f8949ea8e - github.com/filecoin-project/go-statestore v0.1.0 - github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/sector-storage v0.0.0-20200508203401-a74812ba12f3 - github.com/filecoin-project/specs-actors v0.5.3 - github.com/filecoin-project/storage-fsm v0.0.0-20200508212339-4980cb4c92b1 - 
github.com/fxamacker/cbor/v2 v2.2.0 - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect - github.com/golangci/golangci-lint v1.21.0 + contrib.go.opencensus.io/exporter/jaeger v0.2.1 + contrib.go.opencensus.io/exporter/prometheus v0.4.0 + github.com/DataDog/zstd v1.4.1 + github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee + github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d + github.com/ahmetb/go-linq/v3 v3.2.0 + github.com/awnumar/memguard v0.22.2 + github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833 + github.com/dchest/blake2b v1.0.0 + github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e + github.com/dgraph-io/badger/v2 v2.2007.3 + github.com/docker/go-units v0.4.0 + github.com/drand/drand v1.3.0 + github.com/drand/kyber v1.1.7 + github.com/dustin/go-humanize v1.0.0 + github.com/fatih/color v1.13.0 + github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f + github.com/filecoin-project/go-address v1.0.0 + github.com/filecoin-project/go-bitfield v0.2.4 + github.com/filecoin-project/go-cbor-util v0.0.1 + github.com/filecoin-project/go-commp-utils v0.1.3 + github.com/filecoin-project/go-crypto v0.0.1 + github.com/filecoin-project/go-data-transfer v1.15.2 + github.com/filecoin-project/go-fil-commcid v0.1.0 + github.com/filecoin-project/go-fil-markets v1.24.0-v17 + github.com/filecoin-project/go-jsonrpc v0.1.5 + github.com/filecoin-project/go-paramfetch v0.0.4 + github.com/filecoin-project/go-state-types v0.9.8 + github.com/filecoin-project/pubsub v1.0.0 + github.com/filecoin-project/specs-actors v0.9.15 + github.com/filecoin-project/specs-actors/v2 v2.3.6 + github.com/filecoin-project/specs-actors/v3 v3.1.2 + github.com/filecoin-project/specs-actors/v4 v4.0.2 + github.com/filecoin-project/specs-actors/v5 v5.0.6 + github.com/filecoin-project/specs-actors/v6 v6.0.2 + github.com/filecoin-project/specs-actors/v7 v7.0.1 + github.com/filecoin-project/specs-actors/v8 v8.0.1 + 
github.com/filecoin-project/specs-storage v0.4.1 + github.com/filecoin-project/test-vectors/schema v0.0.5 + github.com/filecoin-project/venus-auth v1.9.0 + github.com/fxamacker/cbor/v2 v2.4.0 + github.com/go-errors/errors v1.0.1 + github.com/go-kit/kit v0.12.0 + github.com/golang/mock v1.6.0 github.com/google/go-github v17.0.0+incompatible - github.com/google/go-querystring v1.0.0 // indirect - github.com/google/uuid v1.1.1 - github.com/ipfs/go-bitswap v0.2.8 - github.com/ipfs/go-block-format v0.0.2 - github.com/ipfs/go-blockservice v0.1.3 - github.com/ipfs/go-cid v0.0.5 - github.com/ipfs/go-datastore v0.4.4 - github.com/ipfs/go-ds-badger2 v0.0.0-20200211201106-609c9d2a39c7 - github.com/ipfs/go-fs-lock v0.0.1 - github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103 - github.com/ipfs/go-hamt-ipld v0.1.1-0.20200501020327-d53d20a7063e - github.com/ipfs/go-ipfs-blockstore v1.0.0 + github.com/google/uuid v1.3.0 + github.com/gorilla/websocket v1.5.0 + github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 + github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/golang-lru v0.5.4 + github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c + github.com/ipfs-force-community/metrics v1.0.1-0.20211022060227-11142a08b729 + github.com/ipfs/go-bitswap v0.10.2 + github.com/ipfs/go-block-format v0.0.3 + github.com/ipfs/go-blockservice v0.4.0 + github.com/ipfs/go-cid v0.2.0 + github.com/ipfs/go-datastore v0.5.1 + github.com/ipfs/go-ds-badger2 v0.1.2 + github.com/ipfs/go-fs-lock v0.0.7 + github.com/ipfs/go-graphsync v0.13.1 + github.com/ipfs/go-ipfs-blockstore v1.2.0 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-cmdkit v0.0.1 - github.com/ipfs/go-ipfs-cmds v0.0.1 - github.com/ipfs/go-ipfs-exchange-interface v0.0.1 - github.com/ipfs/go-ipfs-exchange-offline v0.0.1 - github.com/ipfs/go-ipfs-files v0.0.8 - github.com/ipfs/go-ipfs-keystore v0.0.1 - 
github.com/ipfs/go-ipfs-routing v0.1.0 - github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 - github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.0.9-0.20200520025255-8c45666d33d4 - github.com/ipfs/go-merkledag v0.3.1 - github.com/ipfs/go-path v0.0.7 - github.com/ipfs/go-unixfs v0.2.4 - github.com/ipfs/iptb v1.3.8-0.20190401234037-98ccf4228a73 - github.com/ipld/go-car v0.1.1-0.20200429200904-c222d793c339 - github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e - github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 + github.com/ipfs/go-ipfs-cmds v0.8.1 + github.com/ipfs/go-ipfs-ds-help v1.1.0 + github.com/ipfs/go-ipfs-exchange-interface v0.2.0 + github.com/ipfs/go-ipfs-exchange-offline v0.3.0 + github.com/ipfs/go-ipfs-files v0.1.1 + github.com/ipfs/go-ipld-cbor v0.0.6 + github.com/ipfs/go-ipld-format v0.4.0 + github.com/ipfs/go-log v1.0.5 + github.com/ipfs/go-log/v2 v2.5.1 + github.com/ipfs/go-merkledag v0.8.1 + github.com/ipfs/go-unixfs v0.3.1 + github.com/ipld/go-car v0.4.0 + github.com/ipld/go-car/v2 v2.4.1 github.com/jbenet/goprocess v0.1.4 - github.com/jstemmer/go-junit-report v0.9.1 - github.com/libp2p/go-libp2p v0.8.1 - github.com/libp2p/go-libp2p-autonat-svc v0.1.0 - github.com/libp2p/go-libp2p-circuit v0.2.1 - github.com/libp2p/go-libp2p-core v0.5.1 - github.com/libp2p/go-libp2p-kad-dht v0.1.1 - github.com/libp2p/go-libp2p-peerstore v0.2.2 - github.com/libp2p/go-libp2p-pubsub v0.2.6 - github.com/libp2p/go-libp2p-swarm v0.2.3 + github.com/libp2p/go-libp2p v0.22.0 + github.com/libp2p/go-libp2p-kad-dht v0.18.0 + github.com/libp2p/go-libp2p-pubsub v0.8.0 + github.com/libp2p/go-msgio v0.2.0 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/mitchellh/go-homedir v1.1.0 - github.com/morikuni/aec v1.0.0 // indirect - github.com/multiformats/go-multiaddr v0.2.1 - github.com/multiformats/go-multiaddr-dns v0.2.0 - github.com/multiformats/go-multiaddr-net v0.1.4 - 
github.com/multiformats/go-multihash v0.0.13 - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/otiai10/copy v1.0.2 + github.com/multiformats/go-multiaddr v0.6.0 + github.com/multiformats/go-multiaddr-dns v0.3.1 + github.com/multiformats/go-multihash v0.2.1 + github.com/multiformats/go-varint v0.0.6 + github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/pborman/uuid v1.2.0 github.com/pkg/errors v0.9.1 - github.com/pmezard/go-difflib v1.0.0 - github.com/prometheus/client_golang v1.5.1 - github.com/prometheus/common v0.9.1 - github.com/stretchr/testify v1.5.1 - github.com/whyrusleeping/cbor-gen v0.0.0-20200501014322-5f9941ef88e0 + github.com/prometheus/client_golang v1.12.1 + github.com/raulk/clock v1.1.0 + github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e + github.com/stretchr/testify v1.8.0 + github.com/whyrusleeping/cbor-gen v0.0.0-20220514204315-f29c37e9c44c github.com/whyrusleeping/go-logging v0.0.1 github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 - go.opencensus.io v0.22.3 - go.uber.org/zap v1.15.0 - golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 - gopkg.in/urfave/cli.v2 v2.0.0-20180128182452-d3ae77c26ac8 + go.opencensus.io v0.23.0 + go.uber.org/zap v1.22.0 + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e + golang.org/x/net v0.0.0-20220812174116-3211cb980234 + golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 + golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f + gopkg.in/cheggaaa/pb.v1 v1.0.28 + gorm.io/driver/mysql v1.1.1 + gorm.io/gorm v1.21.12 gotest.tools v2.2.0+incompatible ) -replace github.com/filecoin-project/filecoin-ffi => 
./vendors/filecoin-ffi +require ( + github.com/BurntSushi/toml v1.1.0 // indirect + github.com/Kubuxu/go-os-helper v0.0.1 // indirect + github.com/Stebalien/go-bitfield v0.0.1 // indirect + github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect + github.com/awnumar/memcall v0.0.0-20191004114545-73db50fd9f80 // indirect + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bep/debounce v1.2.0 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cheekybits/genny v1.0.0 // indirect + github.com/containerd/cgroups v1.0.4 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect + github.com/cskr/pubsub v1.0.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/deepmap/oapi-codegen v1.3.13 // indirect + github.com/dgraph-io/badger/v3 v3.2011.1 // indirect + github.com/dgraph-io/ristretto v0.0.4-0.20210122082011-bb5d392ed82d // indirect + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect + github.com/drand/kyber-bls12381 v0.2.1 // indirect + github.com/elastic/gosigar v0.14.2 // indirect + github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect + github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 // indirect + github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 // indirect + github.com/filecoin-project/go-ds-versioning v0.1.1 // indirect + github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect + github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect + 
github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-padreader v0.0.1 // indirect + github.com/filecoin-project/go-statemachine v1.0.2 // indirect + github.com/filecoin-project/go-statestore v0.2.0 // indirect + github.com/flynn/noise v1.0.0 // indirect + github.com/francoispqt/gojay v1.2.13 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/gbrlsnchs/jwt/v3 v3.0.1 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/gin-gonic/gin v1.7.0 // indirect + github.com/go-kit/log v0.2.0 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-playground/locales v0.13.0 // indirect + github.com/go-playground/universal-translator v0.17.0 // indirect + github.com/go-playground/validator/v10 v10.4.1 // indirect + github.com/go-redis/redis/v7 v7.0.0-beta // indirect + github.com/go-redis/redis_rate/v7 v7.0.1 // indirect + github.com/go-resty/resty/v2 v2.4.0 // indirect + github.com/go-sql-driver/mysql v1.6.0 // indirect + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/flatbuffers v2.0.0+incompatible // indirect + github.com/google/go-cmp v0.5.8 // indirect + github.com/google/go-querystring v1.0.0 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/huin/goupnp v1.0.3 
// indirect + github.com/influxdata/influxdb-client-go/v2 v2.2.2 // indirect + github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect + github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/go-bitfield v1.0.0 // indirect + github.com/ipfs/go-ipfs-delay v0.0.1 // indirect + github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect + github.com/ipfs/go-ipfs-pq v0.0.2 // indirect + github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipld-legacy v0.1.1 // indirect + github.com/ipfs/go-ipns v0.2.0 // indirect + github.com/ipfs/go-metrics-interface v0.0.1 // indirect + github.com/ipfs/go-peertaskqueue v0.7.1 // indirect + github.com/ipfs/go-unixfsnode v1.4.0 // indirect + github.com/ipfs/go-verifcid v0.0.1 // indirect + github.com/ipld/go-codec-dagpb v1.3.2 // indirect + github.com/ipld/go-ipld-prime v0.17.0 // indirect + github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.2 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 // indirect + github.com/klauspost/compress v1.15.1 // indirect + github.com/klauspost/cpuid/v2 v2.1.0 // indirect + github.com/koron/go-ssdp v0.0.3 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p-core v0.20.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect + github.com/libp2p/go-libp2p-record v0.2.0 // indirect + github.com/libp2p/go-nat v0.1.0 // 
indirect + github.com/libp2p/go-netroute v0.2.0 // indirect + github.com/libp2p/go-openssl v0.1.0 // indirect + github.com/libp2p/go-reuseport v0.2.0 // indirect + github.com/libp2p/go-yamux/v3 v3.1.2 // indirect + github.com/lucas-clemente/quic-go v0.28.1 // indirect + github.com/magefile/mage v1.11.0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect + github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-pointer v0.0.1 // indirect + github.com/mattn/go-runewidth v0.0.10 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/miekg/dns v1.1.50 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.0.4 // indirect + github.com/multiformats/go-base36 v0.1.0 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.1.1 // indirect + github.com/multiformats/go-multicodec v0.5.0 // indirect + github.com/multiformats/go-multistream v0.3.3 // indirect + github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect + 
github.com/onsi/gomega v1.17.0 // indirect + github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/statsd_exporter v0.21.0 // indirect + github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/rivo/uniseg v0.1.0 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/rs/cors v1.7.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.12.0 // indirect + github.com/stretchr/objx v0.4.0 // indirect + github.com/subosito/gotenv v1.4.0 // indirect + github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e // indirect + github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect + github.com/ugorji/go/codec v1.1.7 // indirect + github.com/urfave/cli/v2 v2.8.1 // indirect + github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect + github.com/x448/float16 v0.8.4 
// indirect + github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect + go.etcd.io/bbolt v1.3.6 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect + go.opentelemetry.io/otel/sdk v1.3.0 // indirect + go.opentelemetry.io/otel/trace v1.7.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go4.org v0.0.0-20200411211856-f5505b9728dd // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + golang.org/x/tools v0.1.12 // indirect + google.golang.org/api v0.81.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect + google.golang.org/grpc v1.46.2 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/ini.v1 v1.66.6 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/blake3 v1.1.7 // indirect +) + +replace ( + github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi + github.com/filecoin-project/go-jsonrpc => github.com/ipfs-force-community/go-jsonrpc v0.1.4-0.20210731021807-68e5207079bc + github.com/filecoin-project/test-vectors => ./extern/test-vectors +) diff --git a/go.sum b/go.sum index c9aac26a76..d255145ada 100644 --- a/go.sum +++ b/go.sum @@ -1,679 +1,1005 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -contrib.go.opencensus.io/exporter/jaeger v0.1.0 h1:WNc9HbA38xEQmsI40Tjd/MNU/g8byN2Of7lwIjv0Jdc= 
-contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= -contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg= -contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= -github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7 h1:PqzgE6kAMi81xWQA2QIVxjWkFHptGgC547vchpUbtFo= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod 
h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= 
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= +contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= +contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= +contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= 
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee 
h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ= +github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1 h1:EJiD2VUQyh5A9hWJLmc6iWg6yIcJ7jpBcwC8GMGXfDk= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= -github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= -github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/aead/siphash v1.0.1/go.mod 
h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ahmetb/go-linq/v3 v3.2.0 h1:BEuMfp+b59io8g5wYzNoFe9pWPalRklhlhbiU3hYZDE= +github.com/ahmetb/go-linq/v3 v3.2.0/go.mod h1:haQ3JfOeWK8HpVxMtHHEMPVgBKiYyQ+f1/kLZh/cj9U= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= 
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/awnumar/memcall v0.0.0-20191004114545-73db50fd9f80 h1:8kObYoBO4LNmQ+fLiScBfxEdxF1w2MHlvH/lr9MLaTg= +github.com/awnumar/memcall v0.0.0-20191004114545-73db50fd9f80/go.mod h1:S911igBPR9CThzd/hYQQmTc9SWNu3ZHIlCGaWsWsoJo= +github.com/awnumar/memguard v0.22.2 h1:tMxcq1WamhG13gigK8Yaj9i/CHNUO3fFlpS9ABBQAxw= +github.com/awnumar/memguard v0.22.2/go.mod h1:33OwJBHC+T4eEfFcDrQb78TMlBMBvcOPCXWU9xE34gM= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 
h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bombsimon/wsl v1.2.5 h1:9gTOkIwVtoDZywvX802SDHokeX4kW1cKnV8ZTVAPkRs= -github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= +github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo= +github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833 h1:yCfXxYaelOyqnia8F/Yng47qhmfC9nKTRIbYRrRueq4= +github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833/go.mod h1:8c4/i2VlovMO2gBnHGQPN5EJw+H0lx1u/5p+cgsXtCk= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= 
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd h1:qdGvebPBDuYDPGi1WCPjy1tGyMpmDK8IEapSsszn7HE= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723 h1:ZA/jbKoGcVAnER6pCHPEkGdZOV7U1oLUedErBHCUMs0= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= 
+github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod 
h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= github.com/coreos/etcd 
v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosiner/argv v0.1.0/go.mod h1:EusR6TucWKX+zFgtdUsKT2Cvg45K5rtpCcWz4hK06d8= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d 
h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= -github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC5IlbaIF5Q7JNieBoACT7iW0YTxQHR0in0= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto 
v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/dchest/blake2b v1.0.0 h1:KK9LimVmE0MjRl9095XJmKqZ+iLxWATvlcpVFRtaw6s= github.com/dchest/blake2b v1.0.0/go.mod h1:U034kXgbJpCle2wSk5ybGIVhOSHCVLMDqOzcPEA0F7s= -github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f h1:6itBiEUtu+gOzXZWn46bM5/qm8LlV6/byR7Yflx/y6M= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/deepmap/oapi-codegen v1.3.13 h1:9HKGCsdJqE4dnrQ8VerFS0/1ZOJPmAhN+g8xgp8y3K4= +github.com/deepmap/oapi-codegen v1.3.13/go.mod h1:WAmG5dWY8/PYHt4vKxlt90NsbHMAOCiteYKZMiIRfOo= +github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= +github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200120142413-c3333a5a830e h1:Jz7uYxTCDVrtL5tzPxPu6o7Ybhom8Az7sWmjUO1OkQc= -github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200120142413-c3333a5a830e/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= -github.com/dgraph-io/ristretto 
v0.0.2-0.20200115201040-8f368f2f2ab3 h1:MQLRM35Pp0yAyBYksjbj1nZI/w6eyRY/mWoM1sFf4kU= -github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/badger/v2 v2.2007.3 h1:Sl9tQWz92WCbVSe8pj04Tkqlm2boW+KAxd+XSs58SQI= +github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/dgraph-io/badger/v3 v3.2011.1 h1:Hmyof0WMEF/QtutX5SQHzIMnJQxb/IrSzhjckV2SD6g= +github.com/dgraph-io/badger/v3 v3.2011.1/go.mod h1:0rLLrQpKVQAL0or/lBLMQznhr6dWWX7h5AKnmnqx268= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.4-0.20210122082011-bb5d392ed82d h1:eQYOG6A4td1tht0NdJB9Ls6DsXRGb2Ft6X9REU/MbbE= +github.com/dgraph-io/ristretto v0.0.4-0.20210122082011-bb5d392ed82d/go.mod h1:tv2ec8nA7vRpSYX7/MbP52ihrUMXIHit54CQMq8npXQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/distribution v2.7.1+incompatible 
h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190315170154-87d593639c77 h1:jRmfPRYK6rNVHTzLc2Fr8mqRprfTrwKUOCM4Kope1HE= -github.com/docker/docker v0.7.3-0.20190315170154-87d593639c77/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/drand/bls12-381 v0.3.2 h1:RImU8Wckmx8XQx1tp1q04OV73J9Tj6mmpQLYDP7V1XE= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v0.8.1 h1:wAGnZKa+HbyNvRQOwLGIVnJR14o9kS/0+w9VroJ1AO0= -github.com/drand/drand v0.8.1/go.mod h1:ZdzIrSqqEYZvMiS1UuZlJs3WTb9uLz1I9uH0icYPqoE= +github.com/drand/drand v1.3.0 h1:k/w/PtHzmlU6OmfoAqgirWyrJ4FZH8ESlJrsKF20UkM= +github.com/drand/drand v1.3.0/go.mod h1:D6kAVlxufq1gi71YCGfzN455JrXF4Q272ZJEG975fzo= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= -github.com/drand/kyber v1.0.1-0.20200331114745-30e90cc60f99 h1:BxLbcT0yq9ii6ShXn7U+0oXB2ABfEfw6GutaVPxoj2Y= -github.com/drand/kyber v1.0.1-0.20200331114745-30e90cc60f99/go.mod h1:Rzu9PGFt3q8d7WWdrHmR8dktHucO0dSTWlMYrgqjSpA= +github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= +github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber v1.1.7 h1:YnOshFoGYSOdhf4K8BiDw4XL/l6caL92vsodAsVQbJI= +github.com/drand/kyber v1.1.7/go.mod h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= +github.com/drand/kyber-bls12381 v0.2.0/go.mod 
h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= +github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= +github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= -github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= -github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= -github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= -github.com/filecoin-project/chain-validation v0.0.6-0.20200518190139-483332336e8e h1:3x2eL2t3ZkMOHt1b5WS5aVWyJeo5+WjWCT77QdPGSwk= -github.com/filecoin-project/chain-validation v0.0.6-0.20200518190139-483332336e8e/go.mod h1:6B3uenDcH8n+PKqgzUtZmgyCzKy4qpiLwJ5aw7Rj2xQ= -github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= -github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= -github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= -github.com/filecoin-project/go-amt-ipld/v2 
v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2 h1:jamfsxfK0Q9yCMHt8MPWx7Aa/O9k2Lve8eSc6FILYGQ= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= -github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= -github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= -github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= -github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= -github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/filecoin-project/dagstore v0.5.2 h1:Nd6oXdnolbbVhpMpkYT5PJHOjQp4OBSntHpMV5pxj3c= +github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.6/go.mod h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE= +github.com/filecoin-project/go-address v1.0.0 h1:IrexI0kpADLaPP+CdmU3CVAUqnW/FQC0KTmz4lVKiFU= +github.com/filecoin-project/go-address v1.0.0/go.mod h1:5t3z6qPmIADZBtuE9EIzi0EwzcRy2nVhpo0I/c1r0OA= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= +github.com/filecoin-project/go-amt-ipld/v2 
v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= +github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= +github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 h1:XM81BJ4/6h3FV0WfFjh74cIDIgqMbJsMBLM0fIuLUUk= +github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE= +github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= +github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= +github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= +github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= +github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= +github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 h1:4cITW0pwgvqLs86Q9bWQa34+jBfR1V687bDkmv2DgnA= +github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837/go.mod h1:e2YBjSblNVoBckkbv3PPqsq71q98oFkFqL7s1etViGo= github.com/filecoin-project/go-crypto 
v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v0.3.0 h1:BwBrrXu9Unh9JjjX4GAc5FfzUNioor/aATIjfc7JTBg= -github.com/filecoin-project/go-data-transfer v0.3.0/go.mod h1:cONglGP4s/d+IUQw5mWZrQK+FQATQxr3AXzi4dRh0l4= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= -github.com/filecoin-project/go-fil-markets v0.2.7 h1:bgdK/e+xW15aVZLtdFLzAHdrx1hqtGF9veg2lstLK6o= -github.com/filecoin-project/go-fil-markets v0.2.7/go.mod h1:LI3VFHse33aU0djAmFQ8+Hg39i0J8ibAoppGu6TbgkA= -github.com/filecoin-project/go-leb128 v0.0.0-20190212224330-8d79a5489543 h1:aMJGfgqe1QDhAVwxRg5fjCRF533xHidiKsugk7Vvzug= -github.com/filecoin-project/go-leb128 v0.0.0-20190212224330-8d79a5489543/go.mod h1:mjrHv1cDGJWDlGmC0eDc1E5VJr8DmL9XMUcaFwiuKg8= -github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6 h1:92PET+sx1Hb4W/8CgFwGuxaKbttwY+UNspYZTvXY0vs= -github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6/go.mod h1:0HgYnrkeSU4lu1p+LEOeDpFsNBssa0OGGriWdA4hvaE= -github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 h1:eYxi6vI5CyeXD15X1bB3bledDXbqKxqf0wQzTLgwYwA= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200505180321-973f8949ea8e h1:R+HNoQWirMBOhQC+L1OpYUVbvMjB+jq1hx5LmLFvNfA= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200505180321-973f8949ea8e/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= -github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9 
h1:k9qVR9ItcziSB2rxtlkN/MDWNlbsI6yzec+zjUatLW0= -github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= -github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= +github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= +github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-data-transfer v1.15.2 h1:PzqsFr2Q/onMGKrGh7TtRT0dKsJcVJrioJJnjnKmxlk= +github.com/filecoin-project/go-data-transfer v1.15.2/go.mod h1:qXOJ3IF5dEJQHykXXTwcaRxu17bXAxr+LglXzkL6bZQ= +github.com/filecoin-project/go-ds-versioning v0.1.1 h1:JiyBqaQlwC+UM0WhcBtVEeT3XrX59mQhT8U3p7nu86o= +github.com/filecoin-project/go-ds-versioning v0.1.1/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= +github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= +github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= +github.com/filecoin-project/go-fil-markets v1.24.0-v17 h1:YjT0usMeR6kdAo3RBfftTPe5bNIgNmBbo5YzJHF1iLk= +github.com/filecoin-project/go-fil-markets v1.24.0-v17/go.mod h1:JW/UHkHDqP4MikCIIWNY5IHvTTsdv/zNMk9jJXKzhIU= +github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= +github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= 
+github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= +github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= +github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= +github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= +github.com/filecoin-project/go-paramfetch v0.0.4 h1:H+Me8EL8T5+79z/KHYQQcT8NVOzYVqXIi7nhb48tdm8= +github.com/filecoin-project/go-paramfetch v0.0.4/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= +github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= +github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.4/go.mod h1:xCA/WfKlC2zcn3fUmDv4IrzznwS98X5XW/irUP3Lhxg= +github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= +github.com/filecoin-project/go-state-types v0.1.8/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= +github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= +github.com/filecoin-project/go-state-types v0.9.8 h1:xkdITiR7h691z1tWOhNCJxHI+cq+Mq7ATkpHQ7f1gu8= +github.com/filecoin-project/go-state-types v0.9.8/go.mod h1:+HCZifUV+e8TlQkgll22Ucuiq8OrVJkK+4Kh4u75iiw= 
+github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-statemachine v1.0.2 h1:421SSWBk8GIoCoWYYTE/d+qCWccgmRH0uXotXRDjUbc= +github.com/filecoin-project/go-statemachine v1.0.2/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/sector-storage v0.0.0-20200508203401-a74812ba12f3 h1:WezmdxkWlnTe9xLzIitUrsvUVmjmWDEEuAe9l8A+Os0= -github.com/filecoin-project/sector-storage v0.0.0-20200508203401-a74812ba12f3/go.mod h1:B+xzopr/oWZJz2hBL5Ekb7Obcum5ntmfbaAUlaaho28= -github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA= -github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= -github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= -github.com/filecoin-project/specs-actors v0.4.1-0.20200509020627-3c96f54f3d7d/go.mod h1:UW3ft23q6VS8wQoNqLWjENsu9gu1uh6lxOd+H8cwhT8= -github.com/filecoin-project/specs-actors v0.5.2/go.mod h1:r5btrNzZD0oBkEz1pohv80gSCXQnqGrD0kYwOTiExyE= -github.com/filecoin-project/specs-actors v0.5.3 h1:fdq8Gx0izhnUKl6sYEtI4SUEjT2U6W2w06HeqLz5vmw= -github.com/filecoin-project/specs-actors v0.5.3/go.mod h1:r5btrNzZD0oBkEz1pohv80gSCXQnqGrD0kYwOTiExyE= -github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= -github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102/go.mod 
h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= -github.com/filecoin-project/storage-fsm v0.0.0-20200508212339-4980cb4c92b1 h1:qsiHXwP9Nz20xHzbKlR4K3hifvaTzE2K8iwsnRhHR68= -github.com/filecoin-project/storage-fsm v0.0.0-20200508212339-4980cb4c92b1/go.mod h1:3wZV/yMjSlKkP80kXnJSeYRGwsWKd08XGAVIQKvcyYQ= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNdofHZoGPjfNaAo5Q= +github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= +github.com/filecoin-project/index-provider v0.8.1 h1:ggoBWvMSWR91HZQCWfv8SZjoTGNyJBwNMLuN9bJZrbU= +github.com/filecoin-project/pubsub v1.0.0 h1:ZTmT27U07e54qV1mMiQo4HDr0buo8I1LDHBYLXlsNXM= +github.com/filecoin-project/pubsub v1.0.0/go.mod h1:GkpB33CcUtUNrLPhJgfdy4FDx4OMNR9k+46DHx/Lqrg= +github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors v0.9.15-0.20220514164640-94e0d5e123bd/go.mod h1:pjGEe3QlWtK20ju/aFRsiArbMX6Cn8rqEhhsiCM9xYE= +github.com/filecoin-project/specs-actors v0.9.15 h1:3VpKP5/KaDUHQKAMOg4s35g/syDaEBueKLws0vbsjMc= +github.com/filecoin-project/specs-actors v0.9.15/go.mod h1:pjGEe3QlWtK20ju/aFRsiArbMX6Cn8rqEhhsiCM9xYE= +github.com/filecoin-project/specs-actors/v2 v2.3.6 h1:UxnWTfQd7JsOae39/aHCK0m1IBjdcyymCJfqxuSkn+g= +github.com/filecoin-project/specs-actors/v2 v2.3.6/go.mod h1:DJMpxVRXvev9t8P0XWA26RmTzN+MHiL9IlItVLT0zUc= +github.com/filecoin-project/specs-actors/v3 v3.1.2 h1:Gq3gAbvdGLA/D0GKz1IJfewt9Fh7gA32TPt46Xv+1Cw= +github.com/filecoin-project/specs-actors/v3 v3.1.2/go.mod h1:uOJn+m6W8OW/1mdWMEvxeM1cjQPxmps7s1Z4bJ9V4kY= +github.com/filecoin-project/specs-actors/v4 v4.0.2 h1:VTsv30kIf1Keo8Jlu6Omco+2Ud0pG4EN5UAzyYCibh8= +github.com/filecoin-project/specs-actors/v4 v4.0.2/go.mod h1:zT0GVFxwFS93prGK0b/rMd1sePjRQKfAuodQ9DFAd6Y= +github.com/filecoin-project/specs-actors/v5 v5.0.6 
h1:TLtA9hT3pHQF5vB83GmB+m6anw9u6MjdT+VVn/lyC+c= +github.com/filecoin-project/specs-actors/v5 v5.0.6/go.mod h1:myb/UGwESp0V1f1BACXSUrFgTWLvGUoG0ZZH7eqriFM= +github.com/filecoin-project/specs-actors/v6 v6.0.2 h1:K1xPRJoW5PBvb08QF9+4w1AjcnqwR6BjTmeltQFCvWo= +github.com/filecoin-project/specs-actors/v6 v6.0.2/go.mod h1:wnfVvPnYmzPZilNvSqCSSA/ZQX3rdV/U/Vf9EIoQhrI= +github.com/filecoin-project/specs-actors/v7 v7.0.1 h1:w72xCxijK7xs1qzmJiw+WYJaVt2EPHN8oiwpA1Ay3/4= +github.com/filecoin-project/specs-actors/v7 v7.0.1/go.mod h1:tPLEYXoXhcpyLh69Ccq91SOuLXsPWjHiY27CzawjUEk= +github.com/filecoin-project/specs-actors/v8 v8.0.1 h1:4u0tIRJeT5G7F05lwLRIsDnsrN+bJ5Ixj6h49Q7uE2Y= +github.com/filecoin-project/specs-actors/v8 v8.0.1/go.mod h1:UYIPg65iPWoFw5NEftREdJwv9b/5yaLKdCgTvNI/2FA= +github.com/filecoin-project/specs-storage v0.4.1 h1:yvLEaLZj8f+uByhNC4mFOtCUyL2wQku+NGBp6hjTe9M= +github.com/filecoin-project/specs-storage v0.4.1/go.mod h1:Z2eK6uMwAOSLjek6+sy0jNV2DSsMEENziMUz0GHRFBw= +github.com/filecoin-project/storetheindex v0.4.17 h1:w0dVc954TGPukoVbidlYvn9Xt+wVhk5vBvrqeJiRo8I= +github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= +github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= +github.com/filecoin-project/venus-auth v1.9.0 h1:GH0o/jPdF55/U/uLoMzrqR9+DOsMf5oWM/X4UPuyWPA= +github.com/filecoin-project/venus-auth v1.9.0/go.mod h1:Ckj8F/iuSgXnCb9LvH0IiPR7swJZQAhabDOxVycLGWs= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/franela/goblin 
v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fxamacker/cbor/v2 v2.2.0 h1:6eXqdDDe588rSYAi1HfZKbx6YYQO4mxQ9eC6xYpU/JQ= -github.com/fxamacker/cbor/v2 v2.2.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= +github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gbrlsnchs/jwt/v3 v3.0.1 h1:lbUmgAKpxnClrKloyIwpxm4OuWeDl5wLk52G91ODPw4= +github.com/gbrlsnchs/jwt/v3 v3.0.1/go.mod h1:AncDcjXz18xetI3A6STfXq2w+LuTx8pQ8bGEwRN8zVM= +github.com/getkin/kin-openapi v0.13.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.0 
h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU= +github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db h1:GYXWx7Vr3+zv833u+8IoXbNnQY0AdXsxAgI0kX7xcwA= -github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-delve/delve v1.5.0/go.mod h1:c6b3a1Gry6x8a4LGCe/CWzrocrfaHvkUxCj3k4bvSUQ= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-lintpack/lintpack v0.5.2 h1:DI5mA3+eKdWeJ40nU4d6Wc26qmdG8RCi/btYq0TuRN0= -github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= 
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= 
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-redis/redis/v7 v7.0.0-beta h1:sm826nuE9AVZl06YSag54VTSpbGdIUMXCXXOHh48nFU= +github.com/go-redis/redis/v7 v7.0.0-beta/go.mod h1:dohSoK1cSNPaisjbZhSk7RYyPhVx2k+4sAbJdPK5KPs= +github.com/go-redis/redis_rate/v7 v7.0.1 h1:qpJUfKFkEF2zQSD1GnlC3oeZMd+E7ym55HU49BZKqbY= +github.com/go-redis/redis_rate/v7 v7.0.1/go.mod h1:IWxoSa694TQvppZ53Y5yZtqSfHKflOx+xtSw1TsSoT4= +github.com/go-resty/resty/v2 v2.4.0 h1:s6TItTLejEI+2mn98oijC5w/Rk2YU+OA6x0mnZN6r6k= +github.com/go-resty/resty/v2 v2.4.0/go.mod h1:B88+xCTEwvfD94NOuE6GS1wMlnoKNY8eEiNizfNwOwA= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= 
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6 h1:aTBUNRTatDDU24gbOEKEoLiDwxtc98ga6K/iMTm6fvs= -github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086 h1:EIMuvbE9fbtQtimdLe5yeXjuC5CeKbQt8zH6GwtIrhM= -github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= -github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30 h1:zRJPftZJNLPDiOtvYbFRwjSbaJAcVOf80TeEmWGe2kQ= -github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= -github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8 h1:vVouagbdmqTVlCIAxpyYsNNTbkKZ3V66VpKOLU/s6W4= -github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= -github.com/go-toolsmith/pkgload v1.0.0 h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= 
-github.com/go-toolsmith/typep v1.0.0 h1:zKymWyA1TRYvqYrYDrfEMZULyrhcnGY3x7LDKU2XQaA= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b h1:ekuhfTjngPhisSjOJ0QWKpPQE8/rbknHaes6WVJj5Hw= -github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= 
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= +github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da 
h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.4/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod 
h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 h1:YYWNAGTKWhKpcLLt7aSj/odlKrSrelQwlovBpDuf19w= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3 h1:pe9JHs3cHHDQgOFXJJdYkK6fLz2PWyYtP4hthoCMvs8= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= -github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee h1:J2XAy40+7yz70uaOiMbNnluTg7gyQhtGqLQncQh+4J8= -github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.21.0 h1:HxAxpR8Z0M8omihvQdsD3PF0qPjlqYqp2vMJzstoKeI= -github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= 
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770 h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1aiNg8JbBMO2+E1WIQ+j/gL4SQqGPg= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod 
h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-dap v0.2.0/go.mod h1:5q8aYQFnHOAZEMP+6vmq25HKYAEwE+LF5yh7JKrrhSQ= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= 
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.17 h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod 
h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= 
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= 
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY= -github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= -github.com/gxed/go-shellwords v1.0.3 h1:2TP32H4TAklZUdz84oj95BJhVnIrRasyx2j1cqH5K38= -github.com/gxed/go-shellwords v1.0.3/go.mod h1:N7paucT91ByIjmVJHhvoarjoQnmsi3Jd3vH7VqgtMxQ= -github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= -github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= -github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099 h1:vQqOW42RRM5LoM/1K5dK940VipLqpH8lEVGrMz+mNjU= -github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k= +github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 h1:BpJ2o0OR5FV7vrkDYfXYVJQeMNWa8RhklZOpW2ITAIQ= +github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= 
+github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= 
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= +github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c h1:aY2hhxLhjEAbfXOx2nRJxCXezC6CO2V/yN+OCr1srtk= +github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.0.0 
h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= +github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= +github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/ipfs/bbloom v0.0.1 h1:s7KkiBPfxCeDVo47KySjK0ACPc5GJRUxFpdyWEuDjhw= +github.com/influxdata/influxdb-client-go/v2 v2.2.2 h1:O0CGIuIwQafvAxttAJ/VqMKfbWWn2Mt8rbOmaM2Zj4w= +github.com/influxdata/influxdb-client-go/v2 v2.2.2/go.mod h1:fa/d1lAdUHxuc1jedx30ZfNG573oQTQmUni3N6pcW+0= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/ipfs-force-community/go-jsonrpc v0.1.4-0.20210731021807-68e5207079bc h1:L4JH2Ltl/Embq4qYezs3RsIYW1BB/fB9TfUkk42FOzU= +github.com/ipfs-force-community/go-jsonrpc v0.1.4-0.20210731021807-68e5207079bc/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= +github.com/ipfs-force-community/metrics 
v1.0.1-0.20211022060227-11142a08b729 h1:elS3KmzAMVrcZpmP2RMEjs9Zlwh6LfhJTfYQdj4TREs= +github.com/ipfs-force-community/metrics v1.0.1-0.20211022060227-11142a08b729/go.mod h1:mn40SioMuKtjmRumHFy/fJ26Pn028XuDjUJE9dorjyw= github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= +github.com/ipfs/go-bitfield v1.0.0 h1:y/XHm2GEmD9wKngheWNNCNL0pzrWXZwCdQGv1ikXknQ= +github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.1.8 h1:38X1mKXkiU6Nzw4TOSWD8eTVY5eX3slQunv3QEWfXKg= -github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.2.8 h1:5tQrbyyRS3DkzvcM5n+bVjdSAHLgvH7D+1LopndhUII= -github.com/ipfs/go-bitswap v0.2.8/go.mod h1:2Yjog0GMdH8+AsxkE0DI9D2mANaUTxbVVav0pPoZoug= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= +github.com/ipfs/go-bitswap v0.10.2 h1:B81RIwkTnIvSYT1ZCzxjYTeF0Ek88xa9r1AMpTfk+9Q= +github.com/ipfs/go-bitswap v0.10.2/go.mod h1:+fZEvycxviZ7c+5KlKwTzLm0M28g2ukCPqiuLfJk4KA= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= -github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= -github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= +github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= 
+github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM= -github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= +github.com/ipfs/go-blockservice v0.4.0 h1:7MUijAW5SqdsqEW/EhnNFRJXVF8mGU5aGhZ3CQaCWbY= +github.com/ipfs/go-blockservice v0.4.0/go.mod h1:kRjO3wlGW9mS1aKuiCeGhx9K1DagQ10ACpVO59qgAx4= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= -github.com/ipfs/go-cid v0.0.4 h1:UlfXKrZx1DjZoBhQHmNHLC1fK1dUJDN20Y28A7s+gJ8= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= -github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= -github.com/ipfs/go-datastore v0.0.1 h1:AW/KZCScnBWlSb5JbnEnLKFWXL224LBEh/9KXXOrUms= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= +github.com/ipfs/go-cid v0.2.0 
h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0= +github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.0.5 h1:q3OfiOZV5rlsK1H5V8benjeUApRfMGs4Mrhmr6NriQo= github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.0 h1:TOxI04l8CmO4zGtesENhzm4PwkFwJXY3rKiYaaMf9fI= -github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.1 h1:F4k0TkTAZGLFzBOrVKDAvch6JZtuN4NHkfdcEZL50aI= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger v0.0.2 h1:7ToQt7QByBhOTuZF2USMv+PGlMcBC7FW7FdgQ4FCsoo= github.com/ipfs/go-ds-badger v0.0.2/go.mod 
h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= -github.com/ipfs/go-ds-badger v0.0.5 h1:dxKuqw5T1Jm8OuV+lchA76H9QZFyPKZeLuT6bN42hJQ= github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= -github.com/ipfs/go-ds-badger2 v0.0.0-20200211201106-609c9d2a39c7 h1:2P493YpV0SsG9c0btHfZt9eZCO+tzLAelQyrwQQcey0= -github.com/ipfs/go-ds-badger2 v0.0.0-20200211201106-609c9d2a39c7/go.mod h1:d/QTAGj3T4lF4CuFpywNnAQ0RbffuDc1BtGFAvuYWls= +github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-badger2 v0.1.2 h1:sQc2q1gaXrv8YFNeUtxil0neuyDf9hnVHfLsi7lpXfE= +github.com/ipfs/go-ds-badger2 v0.1.2/go.mod h1:3FtQmDv6fMubygEfU43bsFelYpIiXX/XEYA54l9eCwg= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-fs-lock v0.0.1 h1:XHX8uW4jQBYWHj59XXcjg7BHlHxV9ZOYs6Y43yb7/l0= -github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y= -github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103 h1:SD+bXod/pOWKJCGj0tG140ht8Us5k+3JBcHw0PVYTho= -github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e h1:bUtmeXx6JpjxRPlMdlKfPXC5kKhLHuueXKgs1Txb9ZU= -github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= -github.com/ipfs/go-hamt-ipld v0.0.15-0.20200204200533-99b8553ef242/go.mod h1:kq3Pi+UP3oHhAdKexE+kHHYRKMoFNuGero0R7q3hWGg= -github.com/ipfs/go-hamt-ipld v0.1.1-0.20200501020327-d53d20a7063e h1:Klv6s+kbuhh0JVpGFmFK2t6AtZxJfAnVneQHh1DlFOo= 
-github.com/ipfs/go-hamt-ipld v0.1.1-0.20200501020327-d53d20a7063e/go.mod h1:giiPqWYCnRBYpNTsJ/EX1ojldX5kTXrXYckSJQ7ko9M= -github.com/ipfs/go-ipfs-blockstore v0.0.1 h1:O9n3PbmTYZoNhkgkEyrXTznbmktIXif62xLX+8dPHzc= +github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-filestore v1.2.0 h1:O2wg7wdibwxkEDcl7xkuQsPvJFRBVgVSsOJ/GP6z3yU= +github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= +github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= +github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= +github.com/ipfs/go-graphsync v0.13.1 h1:lWiP/WLycoPUYyj3IDEi1GJNP30kFuYOvimcfeuZyQs= +github.com/ipfs/go-graphsync v0.13.1/go.mod h1:y8e8G6CmZeL9Srvx1l15CtGiRdf3h5JdQuqPz/iYL0A= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= -github.com/ipfs/go-ipfs-blockstore v0.1.0 h1:V1GZorHFUIB6YgTJQdq7mcaIpUfCM3fCyVi+MTo9O88= -github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ= -github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v1.0.0 h1:pmFp5sFYsYVvMOp9X01AK3s85usVcLvkBTRsN6SnfUA= -github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= 
+github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= +github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-chunker v0.0.1 h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= github.com/ipfs/go-ipfs-cmdkit v0.0.1 h1:X6YXEAjUljTzevE6DPUKXSqcgf+4FXzcn5B957F5MXo= github.com/ipfs/go-ipfs-cmdkit v0.0.1/go.mod h1:9FtbMdUabcSqv/G4/8WCxSLxkZxn/aZEFrxxqnVcRbg= -github.com/ipfs/go-ipfs-cmds v0.0.1 h1:wPTynLMa+JImcTsPaVmrUDP8mJ3S8HQVUWixnKi7+k4= -github.com/ipfs/go-ipfs-cmds v0.0.1/go.mod h1:k7I8PptE2kCJchR3ta546LRyxl4/uBYbLQHOJM0sUQ8= +github.com/ipfs/go-ipfs-cmds v0.8.1 h1:El661DBWqdqwgz7B9xwKyUpigwqk6BBBHb5B8DfJP00= +github.com/ipfs/go-ipfs-cmds v0.8.1/go.mod h1:y0bflH6m4g6ary4HniYt98UqbrVnRxmRarzeMdLIUn0= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-ds-help v0.1.1 h1:IW/bXGeaAZV2VH0Kuok+Ohva/zHkHmeLFBxC1k7mNPc= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.0.0 
h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= -github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= +github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y= +github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= -github.com/ipfs/go-ipfs-files v0.0.1 h1:OroTsI58plHGX70HPLKy6LQhPR3HZJ5ip61fYlo6POM= -github.com/ipfs/go-ipfs-files v0.0.1/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.3 h1:ME+QnC3uOyla1ciRPezDW0ynQYK2ikOh9OCKAEg4uUA= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= +github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= +github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0= -github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= -github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= +github.com/ipfs/go-ipfs-files 
v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= -github.com/ipfs/go-ipfs-keystore v0.0.1 h1:sE4lNCZYl7OsgZp4Nmm4TCvIN3ub5tTDfjT6lIh6Brk= -github.com/ipfs/go-ipfs-keystore v0.0.1/go.mod h1:5WjcKN1ESzCVzYKo5JvO1iYHLE0n626HL/cr3dSkqBs= +github.com/ipfs/go-ipfs-files v0.1.1 h1:/MbEowmpLo9PJTEQk16m9rKzUHjeP4KRU9nWJyJO324= +github.com/ipfs/go-ipfs-files v0.1.1/go.mod h1:8xkIrMWH+Y5P7HvJ4Yc5XWwIW2e52dyXUiC0tZyjDbM= github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= -github.com/ipfs/go-ipfs-pq v0.0.1 h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU= github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-routing v0.0.1 h1:394mZeTLcbM/LDO12PneBYvkZAUA+nRnmC0lAzDXKOY= -github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= -github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod 
h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= -github.com/ipfs/go-ipld-cbor v0.0.3 h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= -github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 h1:jIVle1vGSzxyUhseYNEqd7qcDVRrIbJ7UxGwao70cF0= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= +github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= -github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= -github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= -github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= +github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ= +github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipld-legacy v0.1.0/go.mod 
h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= +github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= +github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= +github.com/ipfs/go-ipns v0.2.0 h1:BgmNtQhqOw5XEZ8RAfWEpK4DhqaYiuP6h71MhIp7xXU= +github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-log v1.0.0 h1:BW3LQIiZzpNyolt84yvKNCd3FU+AK4VDw1hnHR+1aiI= github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= -github.com/ipfs/go-log v1.0.2 h1:s19ZwJxH8rPWzypjcDpqPLIyV7BnbLqvpli3iZoqYK0= github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= -github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.2 h1:xguurydRdfKMJjKyxNXNU8lYP0VZH1NUwJRwUorjuEw= github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.9-0.20200520025255-8c45666d33d4 h1:jxy4/1nHY8atvUQf/pRhFHGFySBWQPIfLHG1ODS7R4M= -github.com/ipfs/go-log/v2 v2.0.9-0.20200520025255-8c45666d33d4/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-merkledag 
v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= -github.com/ipfs/go-merkledag v0.2.3 h1:aMdkK9G1hEeNvn3VXfiEMLY0iJnbiQQUHnM0HFJREsE= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.4.0/go.mod h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= -github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= +github.com/ipfs/go-merkledag v0.8.1 h1:N3yrqSre/ffvdwtHL4MXy0n7XH+VzN8DlzDrJySPa94= +github.com/ipfs/go-merkledag v0.8.1/go.mod h1:uYUlWE34GhbcTjGuUDEcdPzsEtOdnOupL64NgSRjmWI= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= -github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= -github.com/ipfs/go-peertaskqueue 
v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.1.1 h1:+gPjbI+V3NktXZOqJA1kzbms2pYmhjgQQal0MzZrOAY= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= -github.com/ipfs/go-todocounter v0.0.1 h1:kITWA5ZcQZfrUnDNkRn04Xzh0YFaDFXsoO2A81Eb6Lw= -github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= -github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= -github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.1 h1:7PLjon3RZwRQMgOTvYccZ+mjzkmds/7YzSWKFlBAypE= +github.com/ipfs/go-peertaskqueue v0.7.1/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-unixfs v0.3.1 h1:LrfED0OGfG98ZEegO4/xiprx2O+yS+krCMQSp7zLVv8= +github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= +github.com/ipfs/go-unixfsnode v1.4.0 h1:9BUxHBXrbNi8mWHc6j+5C580WJqtVw9uoeEKn4tMhwA= +github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= -github.com/ipfs/iptb v1.3.8-0.20190401234037-98ccf4228a73 h1:aVEkLO+VpBjWcEh6XuhRus91Pd2Wj4p6cgcq/gS0er8= -github.com/ipfs/iptb v1.3.8-0.20190401234037-98ccf4228a73/go.mod 
h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= -github.com/ipld/go-car v0.1.1-0.20200429200904-c222d793c339 h1:YEYaf6mrrjoTfGpi7MajslcGvhP23Sh0b3ubcGYRMw0= -github.com/ipld/go-car v0.1.1-0.20200429200904-c222d793c339/go.mod h1:eajxljm6I8o3LitnFeVEmucwZmz7+yLSiKce9yYMefg= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e h1:ZISbJlM0urTANR9KRfRaqlBmyOj5uUtxs2r4Up9IXsA= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1 h1:K1Ysr7kgIlo7YQkPqdkA6H7BVdIugvuAz7OQUTJxLdE= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= +github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= +github.com/ipld/go-car v0.4.0 h1:U6W7F1aKF/OJMHovnOVdst2cpQE5GhmHibQkAixgNcQ= +github.com/ipld/go-car v0.4.0/go.mod h1:Uslcn4O9cBKK9wqHm/cLTFacg6RAPv6LZx2mxd2Ypl4= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.4.1 h1:9S+FYbQzQJ/XzsdiOV13W5Iu/i+gUnr6csbSD9laFEg= +github.com/ipld/go-car/v2 v2.4.1/go.mod h1:zjpRf0Jew9gHqSvjsKVyoq9OY9SWoEKdYCQUKVaaPT0= +github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= +github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY= +github.com/ipld/go-codec-dagpb v1.3.2 h1:MZQUIjanHXXfDuYmtWYT8nFbqfFsZuyHClj6VDmSXr4= +github.com/ipld/go-codec-dagpb v1.3.2/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= +github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= +github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= 
+github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.16.0/go.mod h1:axSCuOCBPqrH+gvXr2w9uAOulJqBPhHPT2PjoiiU1qA= +github.com/ipld/go-ipld-prime v0.17.0 h1:+U2peiA3aQsE7mrXjD2nYZaZrCcakoz2Wge8K42Ld8g= +github.com/ipld/go-ipld-prime v0.17.0/go.mod h1:aYcKm5TIvGfY8P3QBKz/2gKcLxzJ1zDaD+o0bOowhgs= +github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= -github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= -github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec h1:DQqZhhDvrTrEQ3Qod5yfavcA064e53xlQ+xajiorXgM= github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c 
h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= -github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2 h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A= github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= -github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= -github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod 
h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.2 h1:eVKgfIdy9b6zbWBMgFpfDPoAMifwSZagU9HmEU6zgiI= +github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 h1:qBCV/RLV02TSfQa7tFmxTihnG+u+7JXByOkhlkR5rmQ= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3 h1:Iy7Ifq2ysilWU4QlCx/97OoI4xT1IV7i8byT/EyIT/M= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3/go.mod h1:BYpt4ufZiIGv2nXn4gMxnfKV306n3mWXgNu/d2TqdTU= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 
h1:51kHw7l/dUDdOdW06AlUGT5jnpj6nqQSILebcsikSjA= +github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= 
+github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJuqPYs= +github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= +github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/pty v1.1.3/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/libp2p/go-addr-util v0.0.1 h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-buffer-pool v0.0.1 h1:9Rrn/H46cXjaA2HQ5Y8lyhOS1NhTkZ4yuEs2r3Eechg= +github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= -github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= -github.com/libp2p/go-conn-security v0.0.1 h1:4kMMrqrt9EUNCNjX1xagSJC+bq16uqjMe9lk1KBMVNs= -github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= -github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= -github.com/libp2p/go-conn-security-multistream v0.1.0 h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0= +github.com/libp2p/go-buffer-pool v0.1.0 
h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-eventbus v0.1.0 h1:mlawomSAjjkk97QnYiEmHsLu7E136+2oCWSHRUvMfzQ= +github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= -github.com/libp2p/go-flow-metrics v0.0.1 h1:0gxuFd2GuK7IIP5pKljLwps6TvcuYgvG7Atqi3INF5s= +github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= -github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-libp2p v0.0.30/go.mod h1:XWT8FGHlhptAv1+3V/+J5mEpzyui/5bvFsNuWYs611A= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= -github.com/libp2p/go-libp2p v0.6.0 h1:EFArryT9N7AVA70LCcOh8zxsW+FeDnxwcpWQx9k7+GM= -github.com/libp2p/go-libp2p v0.6.0/go.mod 
h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= -github.com/libp2p/go-libp2p v0.6.1 h1:mxabyJf4l6AmotDOKObwSfBNBWjL5VYXysVFLUMAuB8= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= -github.com/libp2p/go-libp2p v0.8.1 h1:6AK178W4GmfGxV+L51bd54/fSWEjNR+S0DO0odk/CwI= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= -github.com/libp2p/go-libp2p-autonat v0.1.0 h1:aCWAu43Ri4nU0ZPO7NyLzUvvfqd0nE3dX0R/ZGYVgOU= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= +github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= +github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= +github.com/libp2p/go-libp2p v0.22.0 h1:2Tce0kHOp5zASFKJbNzRElvh0iZwdtG5uZheNW8chIw= +github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4= +github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= +github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= +github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= -github.com/libp2p/go-libp2p-autonat v0.1.1 h1:WLBZcIRsjZlWdAZj9CiBSvU2wQXoUOiS1Zk1tM7DTJI= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod 
h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= -github.com/libp2p/go-libp2p-autonat v0.2.2 h1:4dlgcEEugTFWSvdG2UIFxhnOMpX76QaZSRAtXmYB8n4= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat-svc v0.1.0 h1:28IM7iWMDclZeVkpiFQaWVANwXwE7zLlpbnS7yXxrfs= -github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= -github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= +github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= +github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= -github.com/libp2p/go-libp2p-blankhost v0.1.4 h1:I96SWjR4rK9irDHcHq3XHN6hawCRTPUADzkJacgZLvk= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= +github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= -github.com/libp2p/go-libp2p-circuit v0.1.4 h1:Phzbmrg3BkVzbqd4ZZ149JxCuUWu2wZcXf/Kr6hZJj8= github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= -github.com/libp2p/go-libp2p-circuit v0.2.1 h1:BDiBcQxX/ZJJ/yDl3sqZt1bjj4PkZCEi7IEpwxXr13k= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod 
h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= +github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= +github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= @@ -681,721 +1007,1023 @@ github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7O github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= -github.com/libp2p/go-libp2p-core v0.3.0 h1:F7PqduvrztDtFsAa/bcheQ3azmNo+Nq7m8hQY5GiUW8= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.0 h1:FBQ1fpq2Fo/ClyjojVJ5AKXlKhvNc/B6U0O+7AN1ffE= github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.1 h1:6Cu7WljPQtGY2krBlMoD8L/zH3tMUsCbqNFH7cZwCoI= github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-crypto v0.0.1 h1:JNQd8CmoGTohO/akqrH16ewsqZpci2CbgYH/LmYl8gw= -github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= -github.com/libp2p/go-libp2p-crypto 
v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= -github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= +github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= +github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= +github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= +github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.13.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.20.0 h1:PGKM74+T+O/FaZNARNW32i90RMBHCcgd/hkum2UQ5eY= +github.com/libp2p/go-libp2p-core v0.20.0/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= 
github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= -github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2TUSBHFlOCetzYdbZL5I= -github.com/libp2p/go-libp2p-discovery v0.1.0 h1:j+R6cokKcGbnZLf4kcNwpx6mDEUPF3N6SrqMymQhmvs= github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= -github.com/libp2p/go-libp2p-discovery v0.2.0 h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= -github.com/libp2p/go-libp2p-discovery v0.3.0 h1:+JnYBRLzZQtRq0mK3xhyjBwHytLmJXMTZkQfbw+UrGA= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-host v0.0.1 h1:dnqusU+DheGcdxrE718kG4XgHNuL2n9eEv8Rg5zy8hQ= -github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= -github.com/libp2p/go-libp2p-host v0.0.3 h1:BB/1Z+4X0rjKP5lbQTmjEjLbDVbrcmLOlA6QDsN5/j4= -github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.1 h1:Q9EkNSLAOF+u90L88qmE9z/fTdjLh8OsJwGw74mkwk4= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.5 h1:KG/KNYL2tYzXAfMvQN5K1aAGTYSYUMJ1prgYa2/JI1E= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= -github.com/libp2p/go-libp2p-interface-pnet v0.0.1 h1:7GnzRrBTJHEsofi1ahFdPN9Si6skwXQE9UqR2S+Pkh8= -github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= -github.com/libp2p/go-libp2p-kad-dht v0.1.1 h1:IH6NQuoUv5w5e1O8Jc3KyVDtr0rNd0G9aaADpLI1xVo= 
-github.com/libp2p/go-libp2p-kad-dht v0.1.1/go.mod h1:1kj2Rk5pX3/0RwqMm9AMNCT7DzcMHYhgDN5VTi+cY0M= -github.com/libp2p/go-libp2p-kbucket v0.2.0 h1:FB2a0VkOTNGTP5gu/I444u4WabNM9V1zCkQcWb7zajI= -github.com/libp2p/go-libp2p-kbucket v0.2.0/go.mod h1:JNymBToym3QXKBMKGy3m29+xprg0EVr/GJFHxFEdgh8= -github.com/libp2p/go-libp2p-loggables v0.0.1 h1:HVww9oAnINIxbt69LJNkxD8lnbfgteXR97Xm4p3l9ps= -github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= -github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= +github.com/libp2p/go-libp2p-kad-dht v0.18.0 h1:akqO3gPMwixR7qFSFq70ezRun97g5hrA/lBW9jrjUYM= +github.com/libp2p/go-libp2p-kad-dht v0.18.0/go.mod h1:Gb92MYIPm3K2pJLGn8wl0m8wiKDvHrYpg+rOd0GzzPA= +github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= +github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= -github.com/libp2p/go-libp2p-metrics v0.0.1 h1:yumdPC/P2VzINdmcKZd0pciSUCpou+s0lwYCjBbzQZU= -github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= -github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= -github.com/libp2p/go-libp2p-mplex v0.2.1 h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= -github.com/libp2p/go-libp2p-mplex v0.2.2 h1:+Ld7YDAfVERQ0E+qqjE7o6fHwKuM0SqTzYiwN1lVVSA= github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod 
h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= -github.com/libp2p/go-libp2p-mplex v0.2.3 h1:2zijwaJvpdesST2MXpI5w9wWFRgYtMcpRX7rrw0jmOo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-nat v0.0.4 h1:+KXK324yaY701On8a0aGjTnw8467kW3ExKcqW2wwmyw= +github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= +github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= -github.com/libp2p/go-libp2p-nat v0.0.5 h1:/mH8pXFVKleflDL1YwqMg27W9GD8kjEx7NY0P6eGc98= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= -github.com/libp2p/go-libp2p-net v0.0.1 h1:xJ4Vh4yKF/XKb8fd1Ev0ebAGzVjMxXzrxG2kjtU+F5Q= -github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= -github.com/libp2p/go-libp2p-net v0.0.2 h1:qP06u4TYXfl7uW/hzqPhlVVTSA2nw1B/bHBJaUnbh6M= -github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= -github.com/libp2p/go-libp2p-netutil v0.0.1 h1:LgD6+skofkOx8z6odD9+MZHKjupv3ng1u6KRhaADTnA= -github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= -github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= +github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-peer v0.0.1 h1:0qwAOljzYewINrU+Kndoc+1jAL7vzY/oY2Go4DCGfyY= -github.com/libp2p/go-libp2p-peer v0.0.1/go.mod 
h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= -github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= -github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= +github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= -github.com/libp2p/go-libp2p-peerstore v0.0.1 h1:twKovq8YK5trLrd3nB7PD2Zu9JcyAIdm7Bz9yBWjhq8= -github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= -github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-peerstore v0.1.3 h1:wMgajt1uM2tMiqf4M+4qWKVyyFc8SfA+84VV9glZq1M= github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= -github.com/libp2p/go-libp2p-peerstore v0.2.0 h1:XcgJhI8WyUOCbHyRLNEX5542YNj8hnLSJ2G1InRjDhk= github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.2 h1:iqc/m03jHn5doXN3+kS6JKvqQRHEltiXljQB85iVHWE= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= +github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= 
+github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= +github.com/libp2p/go-libp2p-peerstore v0.8.0 h1:bzTG693TA1Ju/zKmUCQzDLSqiJnyRFVwPpuloZ/OZtI= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= -github.com/libp2p/go-libp2p-protocol v0.0.1 h1:+zkEmZ2yFDi5adpVE3t9dqh/N9TbpFWywowzeEzBbLM= -github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= -github.com/libp2p/go-libp2p-protocol v0.1.0 h1:HdqhEyhg0ToCaxgMhnOmUO8snQtt/kQlcjVk3UoJU3c= -github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= -github.com/libp2p/go-libp2p-pubsub v0.2.6 h1:ypZaukCFrtD8cNeeb9nnWG4MD2Y1T0p22aQ+f7FKJig= -github.com/libp2p/go-libp2p-pubsub v0.2.6/go.mod h1:5jEp7R3ItQ0pgcEMrPZYE9DQTg/H3CTc7Mu1j2G4Y5o= -github.com/libp2p/go-libp2p-record v0.0.1 h1:zN7AS3X46qmwsw5JLxdDuI43cH5UYwovKxHPjKBYQxw= -github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= -github.com/libp2p/go-libp2p-record v0.1.0 h1:wHwBGbFzymoIl69BpgwIu0O6ta3TXGcMPvHUAcodzRc= +github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= +github.com/libp2p/go-libp2p-pubsub v0.8.0 h1:KygfDpaa9AeUPGCVcpVenpXNFauDn+5kBYu3EjcL3Tg= +github.com/libp2p/go-libp2p-pubsub v0.8.0/go.mod h1:e4kT+DYjzPUYGZeWk4I+oxCSYTXizzXii5LDRRhjKSw= +github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= +github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= +github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= -github.com/libp2p/go-libp2p-record v0.1.1 
h1:ZJK2bHXYUBqObHX+rHLSNrM3M8fmJUlUHrodDPPATmY= -github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= -github.com/libp2p/go-libp2p-routing v0.0.1 h1:hPMAWktf9rYi3ME4MG48qE7dq1ofJxiQbfdvpNntjhc= -github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= -github.com/libp2p/go-libp2p-routing v0.1.0 h1:hFnj3WR3E2tOcKaGpyzfP4gvFZ3t8JkQmbapN0Ct+oU= -github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= -github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= +github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= +github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.2.0 h1:ywzZBsWEEz2KNTn5RtzauEDq5RFEefPsttXYwAWqHng= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= -github.com/libp2p/go-libp2p-secio v0.2.1 h1:eNWbJTdyPA7NxhP7J3c5lT97DC5d+u+IldkgCYFTPVA= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= -github.com/libp2p/go-libp2p-secio v0.2.2 h1:rLLPvShPQAcY6eNurKNZq3eZjPWfU9kXF2eI9jIYdrg= github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= -github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8= github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= -github.com/libp2p/go-libp2p-swarm v0.2.2 h1:T4hUpgEs2r371PweU3DuH7EOmBIdTBCwWs+FLcgx3bQ= github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= -github.com/libp2p/go-libp2p-swarm v0.2.3 h1:uVkCb8Blfg7HQ/f30TyHn1g/uCwXsAET7pU0U59gx/A= github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod 
h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= +github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= +github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= +github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.4 h1:Qev57UR47GcLPXWjrunv5aLIQGO4n9mhI/8/EIrEEFc= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.1.0 h1:WaFRj/t3HdMZGNZqnU2pS7pDRBmMeoDx7/HDNpeyT9U= github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= +github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= +github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= +github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.5.0/go.mod 
h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= -github.com/libp2p/go-libp2p-transport v0.0.5 h1:pV6+UlRxyDpASSGD+60vMvdifSCby6JkJDfi+yUMHac= -github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= -github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= -github.com/libp2p/go-libp2p-transport-upgrader v0.1.1 h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw= +github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= -github.com/libp2p/go-libp2p-transport-upgrader v0.2.0 h1:5EhPgQhXZNyfL22ERZTUoVp9UVVbNowWNVtELQaKCHk= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= -github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= +github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= +github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= 
+github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= -github.com/libp2p/go-libp2p-yamux v0.2.1 h1:Q3XYNiKCC2vIxrvUJL+Jg1kiyeEaIDNKLjgEjo3VQdI= github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= -github.com/libp2p/go-libp2p-yamux v0.2.2 h1:eGvbqWqWY9S5lrpe2gA0UCOLCdzCgYSAR3vo/xCsNQg= github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= -github.com/libp2p/go-libp2p-yamux v0.2.5 h1:MuyItOqz03oi8npvjgMJxgnhllJLZnO/dKVOpTZ9+XI= github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= -github.com/libp2p/go-libp2p-yamux v0.2.7 h1:vzKu0NVtxvEIDGCv6mjKRcK0gipSgaXmJZ6jFv0d/dk= github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= -github.com/libp2p/go-maddr-filter v0.0.1 h1:apvYTg0aIxxQyBX+XHKOR+0+lYhGs1Yv+JmTH9nyl5I= -github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= +github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= +github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= +github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.5 h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg= github.com/libp2p/go-maddr-filter v0.0.5/go.mod 
h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= -github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= -github.com/libp2p/go-mplex v0.1.0 h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0= github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= -github.com/libp2p/go-mplex v0.1.1 h1:huPH/GGRJzmsHR9IZJJsrSwIM5YE2gL4ssgl1YWb/ps= github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.1.2 h1:qOg1s+WdGLlpkrczDqmhYzyk3vCfsQ8+RxRTQjOZWwI= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-msgio v0.0.2 h1:ivPvEKHxmVkTClHzg6RXTYHqaJQ0V9cDbq+6lKb3UV0= +github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-nat v0.0.3 h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI= +github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= +github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= +github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= -github.com/libp2p/go-nat 
v0.0.4 h1:KbizNnq8YIf7+Hn7+VFL/xE0eDrkPru2zIO9NMwL8UQ= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-netroute v0.1.2 h1:UHhB35chwgvcRI392znJA3RCBtZ3MpE3ahNCN5MR4Xg= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= +github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= +github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= +github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= +github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= github.com/libp2p/go-reuseport v0.0.1/go.mod 
h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport-transport v0.0.2 h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4= +github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= +github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= +github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-reuseport-transport v0.0.3 h1:zzOeXnTooCkRvoH+bSXEfXhn76+LAiwoneM0gnXjF2M= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= -github.com/libp2p/go-sockaddr v0.0.2 h1:tCuXfpA9rq7llM/v834RKc/Xvovy/AqM9kHvTV/jY/Q= +github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= +github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-stream-muxer v0.0.1 h1:Ce6e2Pyu+b5MC1k3eeFtAax0pW4gc6MosYSLV05UeLw= +github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= -github.com/libp2p/go-stream-muxer v0.1.0 h1:3ToDXUzx8pDC6RfuOzGsUYP5roMDthbUKRdMRRhqAqY= -github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= -github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= -github.com/libp2p/go-stream-muxer-multistream v0.2.0 h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg= 
github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= -github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= -github.com/libp2p/go-tcp-transport v0.1.0 h1:IGhowvEqyMFknOar4FWCKSWE0zL36UFKQtiRQD60/8o= +github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= -github.com/libp2p/go-tcp-transport v0.1.1 h1:yGlqURmqgNA2fvzjSgZNlHcsd/IulAnKM8Ncu+vlqnw= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= -github.com/libp2p/go-tcp-transport v0.2.0 h1:YoThc549fzmNJIh7XjHVtMIFaEDRtIrtWciG5LyYAPo= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-testutil v0.0.1 h1:Xg+O0G2HIMfHqBOBDcMS1iSZJ3GEcId4qOxCQvsGZHk= -github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= -github.com/libp2p/go-testutil v0.1.0 h1:4QhjaWGO89udplblLVpgGDOQjzFlRavZOjuEnz2rLMc= +github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= +github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= -github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= -github.com/libp2p/go-ws-transport v0.1.0 h1:F+0OvvdmPTDsVc4AjPHjV7L7Pk1B7D5QwtDcKE2oag4= github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= -github.com/libp2p/go-ws-transport v0.2.0 h1:MJCw2OrPA9+76YNRvdo1wMnSOxb9Bivj6sVFY1Xrj6w= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= -github.com/libp2p/go-ws-transport v0.3.0 h1:mjo6pL5aVR9rCjl9wNq3DupbaQlyR61pzoOT2MdtxaA= 
github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= +github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.3 h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.0 h1:FsYzT16Wq2XqUGJsBbOxoz9g+dFklvNi7jN6YFPfl7U= github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.3 h1:mWuzZRCAeTBFdynLlsYgA/EIeMOLr8XY04wa52NRhsE= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5 h1:ibuz4naPAully0pN6J/kmUARiqLpnDQIzI/8GCOrljg= github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= +github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= +github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= +github.com/libp2p/go-yamux/v3 v3.1.2/go.mod 
h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= +github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= +github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= +github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.11.0 h1:C/55Ywp9BpgVVclD3lRnSYCwXTYxmSppIgLeDYlNuls= +github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb 
h1:RHba4YImhrUVQDHUCe2BNSOz4tVy2yGyXhvYDvxGgeE= -github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= +github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= +github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= +github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= +github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= +github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= 
+github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable v0.0.0-20170327083344-ded68f7a9561/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-isatty v0.0.10/go.mod 
h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= +github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod 
h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 
h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.1.0 h1:U41/2erhAKcmSI14xh/ZTUdBPOzDOIfS93ibzUSl8KM= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod 
h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mmcloughlin/avo v0.0.0-20201105074841-5d2f697d268f/go.mod h1:6aKT4zZIrpGqB3RpFU14ByCSSyKY6LfJz4J/JJChHfI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= -github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= github.com/mr-tron/base58 v1.1.2/go.mod 
h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= +github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.2 h1:RBysRCv5rv3FWlhKWKoXv8tnsCUpEpIZpCmqAGZos2s= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.4 h1:WgMSI84/eRLdbptXMkMWDXPjPq7SPLIgGUVm2eroyU4= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.1 h1:rVAztJYMhCQ7vEFr8FvxW3mS+HF2eY/oPbOMeS0ZDnE= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= -github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.2.1 
h1:SgG/cw5vqyB5QQe5FPe2TqggU9WtrA9X4nZw7LlVqOI= github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= +github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= +github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= +github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= +github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg= +github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.0.2 h1:/Bbsgsy3R6e3jf2qBahzNHzww6usYaZ0NhNH3sqdFS8= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= -github.com/multiformats/go-multiaddr-fmt v0.0.1 h1:5YjeOIzbX8OTKVaN72aOzGIYW7PnrZrnkDyOfAWRSMA= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multiaddr-net v0.0.1 
h1:76O59E3FavvHqNg7jvzWzsPSW5JSi/ek0E4eiDVbg9g= github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= -github.com/multiformats/go-multiaddr-net v0.1.0 h1:ZepO8Ezwovd+7b5XPPDhQhayk1yt0AJpzQBpq9fejx4= github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.1 h1:jFFKUuXTXv+3ARyHZi3XUqQO+YWMKgBdhEvuGRfnL6s= github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.2 h1:P7zcBH9FRETdPkDrylcXVjQLQ2t1JQtNItZULWNWgeg= github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= -github.com/multiformats/go-multiaddr-net v0.1.3 h1:q/IYAvoPKuRzGeERn3uacWgm0LIWkLZBAvO5DxSzq3g= github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.4 h1:g6gwydsfADqFvrHoMkS0n9Ok9CG6F7ytOH/bJDkhIOY= github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= -github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= +github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multicodec v0.3.0/go.mod 
h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs= +github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= -github.com/multiformats/go-multihash v0.0.8 h1:wrYcW5yxSi3dU07n5jnuS5PrNwyHy0zRHGVoUugWvXg= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multistream v0.0.1 h1:JV4VfSdY9n7ECTtY59/TlSyFCzRILvYx4T4Ws8ZgihU= -github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= 
+github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= +github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.1 h1:JlAdpIFhBhGRLxe9W6Om0w++Gd6KMWoFPZL/dEnm9nI= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= +github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= +github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.2 h1:6sUvyh2YHpJCb8RZ6eYzj6iJQ4+chWYmyIHxszqlPTA= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod 
h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= -github.com/nikkolasg/slog v0.0.0-20170921200349-3c8d441d7a1e h1:07zdEcJ4Fble5uWsqKpjW19699kQWRLXP+RZh1a6ZRg= -github.com/nikkolasg/slog v0.0.0-20170921200349-3c8d441d7a1e/go.mod h1:79GLCU4P87rYvYYACbNwVyc1WmRvkwQbYnybpCmRXzg= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= 
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega 
v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df h1:vdYtBU6zvL7v+Tr+0xFM/qhahw/EvY8DMMunZHKH6eE= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= 
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/otiai10/copy v1.0.2 h1:DDNipYy6RkIkjMwy+AWzgKiNTyj2RUI9yEMeETEpVyc= -github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95 h1:+OLn68pqasWca0z5ryit9KGfp3sUsW4Lqg32iRMJyzs= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/mint v1.3.0 h1:Ady6MKVezQwHBkGzLFbrsywyp09Ah7rkmfjV3Bcr5uc= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/pelletier/go-toml v1.2.0 
h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= +github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= +github.com/peterh/liner v0.0.0-20170317030525-88609521dc4b/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/pierrec/lz4 
v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14 h1:2m16U/rLwVaRdz7ANkHtHTodP3zTP3N451MADg64x5k= github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt 
v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0/go.mod 
h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs 
v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= +github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= +github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= +github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= +github.com/raulk/go-watchdog v1.2.0/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= 
+github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d h1:BzRvVq1EHuIjxpijCEKpAxzKUUMurOQ4sknehIATRh8= -github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod 
h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do= -github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= +github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon 
v0.0.0-20170922171312-37c2f522c041 h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= 
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod 
h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smola/gocompat v0.2.0 h1:6b1oIMlUXIpz//VKEDzPVBK8KG7beVwmHIUEBIs/Pns= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sourcegraph/go-diff v0.5.1 h1:gO6i5zugwzo1RVTvgvfwCOSVegNuvnNi6bAD1QCmkHs= -github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= -github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a h1:/eS3yfGjQKG+9kayBkj0ip1BGhq6zJ3eaVksphxAaek= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/openssl 
v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v0.0.0-20170417170307-b6cb39589372/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 
h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170417173400-9e4c21054fa1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/src-d/envconfig v1.0.0 h1:/AJi6DtjFhZKNx3OB2qMsq7y4yT5//AeSZIe7rk+PX8= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e h1:mOtuXaRAbVZsxAHVdPR3IjfmN8T1h2iczJLynhLybf8= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= +github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial 
v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e h1:T5PdfK/M1xyrHwynxMIVMWLS7f/qHwfslZphxtGnw7s= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= -github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q= -github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/twitchyliquid64/golang-asm v0.15.0/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= 
-github.com/ultraware/funlen v0.0.2 h1:Av96YVBwwNSe4MLR7iI/BIa3VyI7/djnto/pK3Uxbdo= -github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli/v2 v2.0.0 h1:+HU9SCbu8GnEUFtIBfuUNXN39ofWViIEJIp6SURMpCg= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= -github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4= github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= -github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517 h1:ChMKTho2hWKpks/nD/FL2KqM1wuVt62oJeiE8+eFpGs= -github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4= +github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= -github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod 
h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.10.0 h1:E86YlUMYfwIacEsQGlnTvjk1IgYkyTGjPhF0RnwTCmw= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= -github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= -github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= -github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0 h1:efb/4CnrubzNGqQOeHErxyQ6rIsJb7GcgeSDF7fqWeI= +github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5 h1:EYxr08r8x6r/5fLEAMMkida1BVgxVXE4LfZv/XV+znU= +github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5/go.mod 
h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= +github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= +github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= -github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 h1:WXhVOwj2USAXB5oMDwRl3piOux2XMV9TANaYxXHdkoE= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200501014322-5f9941ef88e0 h1:dmdwCOVtJAm7qwONARangN4jgCisVFmSJ486JZ1LYaA= -github.com/whyrusleeping/cbor-gen v0.0.0-20200501014322-5f9941ef88e0/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen 
v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20220514204315-f29c37e9c44c h1:6VPKXBDRt7mDUyiHx9X8ROnPYFDf3L7OfEuKCI5dZDI= +github.com/whyrusleeping/cbor-gen v0.0.0-20220514204315-f29c37e9c44c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= -github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/go-logging v0.0.1 
h1:fwpzlmT0kRC/Fmd0MdmGgJG/CXIZ6gFq46FQZjprUcc= github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= -github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg= github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 h1:ctS9Anw/KozviCCtK6VWMz5kPL9nbQzbQY4yfqlIV4M= github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1/go.mod h1:tKH72zYNt/exx6/5IQO6L9LoQ0rEjd5SbbWaDTs9Zso= -github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA= github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= -github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xorcare/golden v0.6.0 h1:E8emU8bhyMIEpYmgekkTUaw4vtcrRE+Wa0c5wYIcgXc= +github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= -go.dedis.ch/kyber/v3 v3.0.9 h1:i0ZbOQocHUjfFasBiUql5zVeC7u/vahFd96DFA8UOWk= go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt 
v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= +go.opentelemetry.io/otel v1.7.0/go.mod 
h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= +go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.starlark.net v0.0.0-20190702223751-32f345186213/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= 
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go4.org v0.0.0-20190218023631-ce4c26f7be8e h1:m9LfARr2VIOW0vsV19kEKp/sWQvZnGobA8JHui/XJoY= -go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= +go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= +golang.org/x/arch v0.0.0-20201008161808-52c3e6f60cff/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 h1:jsG6UpNLt9iAsb0S2AGW28DveNzzgmbXR+ENoPjUeIU= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto 
v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200427165652-729f1e841bcc h1:ZGI/fILM2+ueot/UixBSoj9188jCAxVHEZEGhqq67I4= -golang.org/x/crypto v0.0.0-20200427165652-729f1e841bcc/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto 
v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp 
v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190227160552-c95aed5357e7 h1:C2F/nMkR/9sfUTpvR3QrjBuTdvMUC/cFajkphs1YLQo= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= 
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE= 
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1405,157 +2033,468 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190302025703-b6889370fb10 h1:xQJI9OEiErEQ++DoXOHqEpzsGMrAv2Q2jyCpi7DmfpQ= golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd h1:r7DufRZuZbWB7j439YfAzP8RPDa9unLkpwQKUYbIMPI= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69 
h1:rOhMmluY6kLMhdnrivzec6lLgaVbMHMn2ISQXJeJ5EM= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200427175716-29b57079015a h1:08u6b1caTT9MQY4wSbmsd4Ulm6DmgNYnbImBuZjGJow= -golang.org/x/sys v0.0.0-20200427175716-29b57079015a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138 
h1:H3uGjxCR/6Ds0Mjgyp7LMK81+LvmbvWWEnJhzk1Pi9E= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361 h1:RIIXAeV6GvDBuADKumTODatUqANFZ+5BPMnzsy4hulY= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 h1:OXjomkWHhzUx4+HldlJ2TsMxJdWgEo5CTtspD1wdhdk= -golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201105001634-bc3cf281b174/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2 h1:iTp+3yyl/KOtxa/d1/JUE0GGSoR6FuW5udver22iwpw= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= 
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api 
v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.81.0 h1:o8WF5AvfidafWbFjsRyupxyEQJNUWxLZJCK5NXrxZZ8= +google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= 
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20200406120821-33397c535dc2 h1:KlOjjpQjL4dqscfbhtQvAnRMm5PaRTchHHczffkUiq0= -google.golang.org/genproto v0.0.0-20200406120821-33397c535dc2/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto 
v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= 
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc 
v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc 
v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= 
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod 
h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= +gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= -gopkg.in/src-d/go-log.v1 v1.0.1 h1:heWvX7J6qbGWbeFS/aRmiy1eYaT+QMV6wNvHDyMjQV4= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v2 v2.0.0-20180128182452-d3ae77c26ac8 h1:Ggy3mWN4l3PUFPfSG0YB3n5fVYggzysUmiUQ89SnX6Y= -gopkg.in/urfave/cli.v2 v2.0.0-20180128182452-d3ae77c26ac8/go.mod h1:cKXr3E0k4aosgycml1b5z33BVV6hai1Kh7uDgFOkbcs= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/mysql v1.1.1 h1:yr1bpyqiwuSPJ4aGGUX9nu46RHXlF8RASQVb1QQNcvo= +gorm.io/driver/mysql v1.1.1/go.mod h1:KdrTanmfLPPyAOeYGyG+UpDys7/7eeWT1zCq+oekYnU= +gorm.io/gorm v1.21.9/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= +gorm.io/gorm v1.21.12 h1:3fQM0Eiz7jcJEhPggHEpoYnsGZqynMzverL77DV40RM= +gorm.io/gorm v1.21.12/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 
h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= -howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f h1:Cq7MalBHYACRd6EesksG1Q8EoIAKOsiZviGKbOLIej4= -mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod 
h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/internal/app/go-filecoin/connectors/common.go b/internal/app/go-filecoin/connectors/common.go deleted file mode 100644 index a913a31d1c..0000000000 --- a/internal/app/go-filecoin/connectors/common.go +++ /dev/null @@ -1,35 +0,0 @@ -package connectors - -import ( - "github.com/filecoin-project/specs-actors/actors/abi" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" -) - -type chainState interface { - GetTipSet(key block.TipSetKey) (block.TipSet, error) - Head() block.TipSetKey -} - -func GetChainHead(m chainState) (tipSetToken []byte, tipSetEpoch abi.ChainEpoch, err error) { - tsk := m.Head() - - ts, err := m.GetTipSet(tsk) - if err != nil { - return nil, 0, xerrors.Errorf("failed to get tip: %w", err) - } - - h, err := ts.Height() - if err != nil { - return nil, 0, err - } - - tok, err := encoding.Encode(tsk) - if err != nil { - return nil, 0, xerrors.Errorf("failed to marshal TipSetKey to CBOR byte slice for TipSetToken: %w", err) - } - - return tok, h, nil -} diff --git a/internal/app/go-filecoin/connectors/fsm_chain/chain_connector.go b/internal/app/go-filecoin/connectors/fsm_chain/chain_connector.go deleted file mode 100644 index 15b21f9b64..0000000000 --- a/internal/app/go-filecoin/connectors/fsm_chain/chain_connector.go +++ /dev/null @@ -1,42 +0,0 @@ -package fsmchain - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - fsm "github.com/filecoin-project/storage-fsm" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" -) - -// ChainConnector uses the chain store to provide a 
ChainHead method -type ChainConnector struct { - chainStore *chain.Store -} - -var _ fsm.Chain = new(ChainConnector) - -func NewChainConnector(chainStore *chain.Store) ChainConnector { - return ChainConnector{chainStore: chainStore} -} - -func (a *ChainConnector) ChainHead(ctx context.Context) (fsm.TipSetToken, abi.ChainEpoch, error) { - // TODO: use the provided context - ts, err := a.chainStore.GetTipSet(a.chainStore.GetHead()) - if err != nil { - return nil, 0, err - } - - tok, err := encoding.Encode(ts.Key()) - if err != nil { - return nil, 0, err - } - - height, err := ts.Height() - if err != nil { - return nil, 0, err - } - - return tok, height, err -} diff --git a/internal/app/go-filecoin/connectors/fsm_events/connector.go b/internal/app/go-filecoin/connectors/fsm_events/connector.go deleted file mode 100644 index de18e88395..0000000000 --- a/internal/app/go-filecoin/connectors/fsm_events/connector.go +++ /dev/null @@ -1,85 +0,0 @@ -package fsmeventsconnector - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - fsm "github.com/filecoin-project/storage-fsm" - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsampler" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" -) - -var log = logging.Logger("fsm_events") // nolint: deadcode - -type FiniteStateMachineEventsConnector struct { - scheduler *chainsampler.HeightThresholdScheduler - tsp chain.TipSetProvider -} - -var _ fsm.Events = new(FiniteStateMachineEventsConnector) - -func New(scheduler *chainsampler.HeightThresholdScheduler, tsp chain.TipSetProvider) FiniteStateMachineEventsConnector { - return FiniteStateMachineEventsConnector{ - scheduler: scheduler, - tsp: tsp, - } -} - -func (f FiniteStateMachineEventsConnector) ChainAt(hnd fsm.HeightHandler, rev fsm.RevertHandler, confidence int, h abi.ChainEpoch) error { - // wait for an epoch past the target 
that gives us some confidence it won't reorg - l := f.scheduler.AddListener(h + abi.ChainEpoch(confidence)) - - ctx := context.Background() - - go func() { - var handledToken fsm.TipSetToken - for { - select { - case <-l.DoneCh: - return - case err := <-l.ErrCh: - log.Warn(err) - return - case tsk := <-l.HitCh: - ts, err := f.tsp.GetTipSet(tsk) - if err != nil { - log.Error(err) - return - } - - targetTipset, err := chain.FindTipsetAtEpoch(ctx, ts, h, f.tsp) - if err != nil { - log.Error(err) - return - } - - handledToken, err := encoding.Encode(targetTipset.Key()) - if err != nil { - log.Error(err) - return - } - - sampleHeight, err := targetTipset.Height() - if err != nil { - log.Error(err) - return - } - err = hnd(ctx, handledToken, sampleHeight) - if err != nil { - log.Error(err) - return - } - case <-l.InvalidCh: - err := rev(ctx, handledToken) - if err != nil { - log.Error(err) - return - } - } - } - }() - return nil -} diff --git a/internal/app/go-filecoin/connectors/fsm_node/connector.go b/internal/app/go-filecoin/connectors/fsm_node/connector.go deleted file mode 100644 index 3479d7499e..0000000000 --- a/internal/app/go-filecoin/connectors/fsm_node/connector.go +++ /dev/null @@ -1,279 +0,0 @@ -package fsmnodeconnector - -import ( - "bytes" - "context" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/crypto" - fsm "github.com/filecoin-project/storage-fsm" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cst" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -type FiniteStateMachineNodeConnector struct { - minerAddr address.Address - waiter *msg.Waiter - chain *chain.Store - chainState *cst.ChainStateReadWriter - stateViewer *appstate.TipSetStateViewer - outbox *message.Outbox -} - -var _ fsm.SealingAPI = new(FiniteStateMachineNodeConnector) - -func New(minerAddr address.Address, waiter *msg.Waiter, chain *chain.Store, viewer *appstate.TipSetStateViewer, outbox *message.Outbox, chainState *cst.ChainStateReadWriter) *FiniteStateMachineNodeConnector { - return &FiniteStateMachineNodeConnector{ - minerAddr: minerAddr, - chain: chain, - chainState: chainState, - outbox: outbox, - stateViewer: viewer, - waiter: waiter, - } -} - -func (f *FiniteStateMachineNodeConnector) StateWaitMsg(ctx context.Context, mcid cid.Cid) (fsm.MsgLookup, error) { - var lookup fsm.MsgLookup - err := f.waiter.Wait(ctx, mcid, msg.DefaultMessageWaitLookback, func(blk *block.Block, message *types.SignedMessage, r *vm.MessageReceipt) error { - lookup.Height = blk.Height - receipt := fsm.MessageReceipt{ - ExitCode: r.ExitCode, - Return: r.ReturnValue, - GasUsed: int64(r.GasUsed), - } - lookup.Receipt = receipt - - // find tip set key at block height - tsHead, err := f.chain.GetTipSet(f.chain.GetHead()) - if err != nil { - return err - } - tsAtHeight, err := chain.FindTipsetAtEpoch(ctx, tsHead, blk.Height, f.chain) - if err != nil { - return err - } - - tsk := tsAtHeight.Key() - token, err := encoding.Encode(tsk) - if err != nil { - return err - } - - lookup.TipSetTok = token - return nil - }) - if err != 
nil { - return fsm.MsgLookup{}, err - } - - return lookup, err -} - -func (f *FiniteStateMachineNodeConnector) StateComputeDataCommitment(ctx context.Context, _ address.Address, sectorType abi.RegisteredProof, deals []abi.DealID, tok fsm.TipSetToken) (cid.Cid, error) { - view, err := f.stateViewForToken(tok) - if err != nil { - return cid.Undef, err - } - - return view.MarketComputeDataCommitment(ctx, sectorType, deals) -} - -func (f *FiniteStateMachineNodeConnector) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok fsm.TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) { - view, err := f.stateViewForToken(tok) - if err != nil { - return nil, err - } - - info, found, err := view.MinerGetPrecommittedSector(ctx, maddr, sectorNumber) - if err != nil { - return nil, err - } - - if !found { - return nil, fmt.Errorf("Could not find pre-committed sector for miner %s", maddr.String()) - } - - return info, nil -} - -func (f *FiniteStateMachineNodeConnector) StateMinerSectorSize(ctx context.Context, maddr address.Address, tok fsm.TipSetToken) (abi.SectorSize, error) { - view, err := f.stateViewForToken(tok) - if err != nil { - return 0, err - } - - conf, err := view.MinerSectorConfiguration(ctx, maddr) - if err != nil { - return 0, err - } - return conf.SectorSize, err -} - -func (f *FiniteStateMachineNodeConnector) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok fsm.TipSetToken) (address.Address, error) { - view, err := f.stateViewForToken(tok) - if err != nil { - return address.Undef, err - } - - _, worker, err := view.MinerControlAddresses(ctx, maddr) - return worker, err -} - -func (f *FiniteStateMachineNodeConnector) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok fsm.TipSetToken) (market.DealProposal, market.DealState, error) { - view, err := f.stateViewForToken(tok) - if err != nil { - return market.DealProposal{}, market.DealState{}, err - } - - deal, err := 
view.MarketDealProposal(ctx, dealID) - if err != nil { - return market.DealProposal{}, market.DealState{}, err - } - - state, found, err := view.MarketDealState(ctx, dealID) - if err != nil { - return market.DealProposal{}, market.DealState{}, err - } else if !found { - // The FSM actually ignores this value because it calls this before the sector is committed. - // But it can't tolerate returning an error here for not found. - // See https://github.com/filecoin-project/storage-fsm/issues/18 - state = &market.DealState{ - SectorStartEpoch: -1, - LastUpdatedEpoch: -1, - SlashEpoch: -1, - } - } - - return deal, *state, err -} - -func (f *FiniteStateMachineNodeConnector) StateMinerDeadlines(ctx context.Context, maddr address.Address, tok fsm.TipSetToken) (*miner.Deadlines, error) { - var tsk block.TipSetKey - err := encoding.Decode(tok, &tsk) - if err != nil { - return nil, err - } - - view, err := f.stateViewer.StateView(tsk) - if err != nil { - return nil, err - } - - return view.MinerDeadlines(ctx, maddr) -} - -func (f *FiniteStateMachineNodeConnector) StateMinerInitialPledgeCollateral(context.Context, address.Address, abi.SectorNumber, fsm.TipSetToken) (big.Int, error) { - // The FSM uses this result to attach value equal to the collateral to the ProveCommit message sent from the - // worker account. This isn't absolutely necessary if the miner actor already has sufficient unlocked balance. - // The initial pledge requirement calculations are currently very difficult to access, so I'm returning - // zero here pending a proper implementation after cleaning up the actors. 
- // TODO https://github.com/filecoin-project/go-filecoin/issues/4035 - return big.Zero(), nil -} - -func (f *FiniteStateMachineNodeConnector) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, gasPrice big.Int, gasLimit int64, params []byte) (cid.Cid, error) { - mcid, cerr, err := f.outbox.SendEncoded( - ctx, - from, - to, - value, - gasPrice, - gas.Unit(gasLimit), - true, - method, - params, - ) - if err != nil { - return cid.Undef, err - } - err = <-cerr - if err != nil { - return cid.Undef, err - } - return mcid, nil -} - -func (f *FiniteStateMachineNodeConnector) ChainHead(_ context.Context) (fsm.TipSetToken, abi.ChainEpoch, error) { - ts, err := f.chain.GetTipSet(f.chain.GetHead()) - if err != nil { - return fsm.TipSetToken{}, 0, err - } - - epoch, err := ts.Height() - if err != nil { - return fsm.TipSetToken{}, 0, err - } - - tok, err := encoding.Encode(ts.Key()) - if err != nil { - return fsm.TipSetToken{}, 0, err - } - - return tok, epoch, nil -} - -func (f *FiniteStateMachineNodeConnector) ChainGetRandomness(ctx context.Context, tok fsm.TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - var tsk block.TipSetKey - if err := encoding.Decode(tok, &tsk); err != nil { - return abi.Randomness{}, err - } - return f.chainState.SampleChainRandomness(ctx, tsk, personalization, randEpoch, entropy) -} - -func (f *FiniteStateMachineNodeConnector) ChainGetTicket(ctx context.Context, tok fsm.TipSetToken) (abi.SealRandomness, abi.ChainEpoch, error) { - var tsk block.TipSetKey - if err := encoding.Decode(tok, &tsk); err != nil { - return abi.SealRandomness{}, 0, err - } - - ts, err := f.chain.GetTipSet(tsk) - if err != nil { - return abi.SealRandomness{}, 0, err - } - - epoch, err := ts.Height() - if err != nil { - return abi.SealRandomness{}, 0, err - } - - randomEpoch := epoch - miner.ChainFinalityish - - buf := new(bytes.Buffer) - err = 
f.minerAddr.MarshalCBOR(buf) - if err != nil { - return abi.SealRandomness{}, 0, err - } - - randomness, err := f.ChainGetRandomness(ctx, tok, crypto.DomainSeparationTag_SealRandomness, randomEpoch, buf.Bytes()) - return abi.SealRandomness(randomness), randomEpoch, err -} - -func (f *FiniteStateMachineNodeConnector) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - return f.chainState.ReadObj(ctx, obj) -} - -func (f *FiniteStateMachineNodeConnector) stateViewForToken(tok fsm.TipSetToken) (*appstate.View, error) { - var tsk block.TipSetKey - err := encoding.Decode(tok, &tsk) - if err != nil { - return nil, err - } - - return f.stateViewer.StateView(tsk) -} diff --git a/internal/app/go-filecoin/connectors/fsm_storage/repo_storage_connector.go b/internal/app/go-filecoin/connectors/fsm_storage/repo_storage_connector.go deleted file mode 100644 index d5536795b3..0000000000 --- a/internal/app/go-filecoin/connectors/fsm_storage/repo_storage_connector.go +++ /dev/null @@ -1,46 +0,0 @@ -package fsmstorage - -import ( - "errors" - - "github.com/filecoin-project/sector-storage/stores" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" -) - -type RepoStorageConnector struct { - inner repo.Repo -} - -var _ stores.LocalStorage = new(RepoStorageConnector) - -func NewRepoStorageConnector(r repo.Repo) *RepoStorageConnector { - return &RepoStorageConnector{inner: r} -} - -func (b *RepoStorageConnector) GetStorage() (stores.StorageConfig, error) { - rpt, err := b.inner.Path() - if err != nil { - return stores.StorageConfig{}, err - } - - scg := b.inner.Config().SectorBase - - spt, err := paths.GetSectorPath(scg.RootDirPath, rpt) - if err != nil { - return stores.StorageConfig{}, err - } - - out := stores.StorageConfig{StoragePaths: []stores.LocalPath{{Path: spt}}} - - if scg.PreSealedSectorsDirPath != "" { - out.StoragePaths = append(out.StoragePaths, stores.LocalPath{Path: 
scg.PreSealedSectorsDirPath}) - } - - return out, nil -} - -func (b *RepoStorageConnector) SetStorage(f func(*stores.StorageConfig)) error { - return errors.New("unsupported operation: manipulating store paths must happen through go-filecoin") -} diff --git a/internal/app/go-filecoin/connectors/retrieval_market/client.go b/internal/app/go-filecoin/connectors/retrieval_market/client.go deleted file mode 100644 index 342dade803..0000000000 --- a/internal/app/go-filecoin/connectors/retrieval_market/client.go +++ /dev/null @@ -1,173 +0,0 @@ -package retrievalmarketconnector - -import ( - "bytes" - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/specs-actors/actors/abi" - paychActor "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - xerrors "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// RetrievalClientConnector is the glue between go-filecoin and go-fil-markets' -// retrieval market interface -type RetrievalClientConnector struct { - bs blockstore.Blockstore - - // APIs/interfaces - paychMgr PaychMgrAPI - signer RetrievalSigner - cs ChainReaderAPI -} - -var _ retrievalmarket.RetrievalClientNode = new(RetrievalClientConnector) - -// NewRetrievalClientConnector creates a new RetrievalClientConnector -func NewRetrievalClientConnector( - bs blockstore.Blockstore, - cs ChainReaderAPI, - signer RetrievalSigner, - paychMgr PaychMgrAPI, -) *RetrievalClientConnector { - return &RetrievalClientConnector{ - bs: bs, - cs: cs, - paychMgr: paychMgr, - signer: signer, - } -} - -// 
GetOrCreatePaymentChannel gets or creates a payment channel and posts to chain -func (r *RetrievalClientConnector) GetOrCreatePaymentChannel(ctx context.Context, clientAddress address.Address, minerAddress address.Address, clientFundsAvailable abi.TokenAmount, tok shared.TipSetToken) (address.Address, cid.Cid, error) { - - if clientAddress == address.Undef || minerAddress == address.Undef { - return address.Undef, cid.Undef, xerrors.New("empty address") - } - chinfo, err := r.paychMgr.GetPaymentChannelByAccounts(clientAddress, minerAddress) - if err != nil { - return address.Undef, cid.Undef, err - } - if chinfo.IsZero() { - // create the payment channel - bal, err := r.getBalance(ctx, clientAddress, tok) - if err != nil { - return address.Undef, cid.Undef, err - } - - filAmt := types.NewAttoFIL(clientFundsAvailable.Int) - if bal.LessThan(filAmt) { - return address.Undef, cid.Undef, xerrors.New("not enough funds in wallet") - } - - return r.paychMgr.CreatePaymentChannel(clientAddress, minerAddress, clientFundsAvailable) - } - mcid, err := r.paychMgr.AddFundsToChannel(chinfo.UniqueAddr, clientFundsAvailable) - return chinfo.UniqueAddr, mcid, err -} - -// AllocateLane creates a new lane for this paymentChannel with 0 FIL in the lane -// Assumes AllocateLane is called after GetOrCreatePaymentChannel -func (r *RetrievalClientConnector) AllocateLane(paymentChannel address.Address) (lane uint64, err error) { - return r.paychMgr.AllocateLane(paymentChannel) -} - -// CreatePaymentVoucher creates a payment voucher for the retrieval client. 
-func (r *RetrievalClientConnector) CreatePaymentVoucher(ctx context.Context, paychAddr address.Address, amount abi.TokenAmount, lane uint64, tok shared.TipSetToken) (*paychActor.SignedVoucher, error) { - height, err := r.getBlockHeight(tok) - if err != nil { - return nil, err - } - - bal, err := r.getBalance(ctx, paychAddr, tok) - if err != nil { - return nil, err - } - if amount.GreaterThan(bal) { - return nil, xerrors.New("insufficient funds for voucher amount") - } - - chinfo, err := r.paychMgr.GetPaymentChannelInfo(paychAddr) - if err != nil { - return nil, err - } - v := paychActor.SignedVoucher{ - TimeLockMin: height + 1, - SecretPreimage: nil, // optional - Extra: nil, // optional - Lane: lane, - Nonce: chinfo.NextNonce, - Amount: amount, - MinSettleHeight: height + 1, - Merges: nil, - Signature: nil, - } - - var buf bytes.Buffer - if err := v.MarshalCBOR(&buf); err != nil { - return nil, err - } - - sig, err := r.signer.SignBytes(ctx, buf.Bytes(), chinfo.From) - if err != nil { - return nil, err - } - v.Signature = &sig - - if err := r.paychMgr.AddVoucherToChannel(paychAddr, &v); err != nil { - return nil, err - } - return &v, nil -} - -func (r *RetrievalClientConnector) WaitForPaymentChannelAddFunds(messageCID cid.Cid) error { - return r.paychMgr.WaitForAddFundsMessage(context.Background(), messageCID) -} - -func (r *RetrievalClientConnector) WaitForPaymentChannelCreation(messageCID cid.Cid) (address.Address, error) { - return r.paychMgr.WaitForCreatePaychMessage(context.Background(), messageCID) -} - -func (r *RetrievalClientConnector) getBlockHeight(tok shared.TipSetToken) (abi.ChainEpoch, error) { - ts, err := r.getTipSet(tok) - if err != nil { - return 0, err - } - return ts.Height() -} - -func (r *RetrievalClientConnector) getBalance(ctx context.Context, account address.Address, tok shared.TipSetToken) (types.AttoFIL, error) { - var tsk block.TipSetKey - if err := encoding.Decode(tok, &tsk); err != nil { - return types.ZeroAttoFIL, xerrors.Wrapf(err, 
"failed to marshal TipSetToken into a TipSetKey") - } - - actor, err := r.cs.GetActorAt(ctx, tsk, account) - if err != nil { - return types.ZeroAttoFIL, err - } - - return actor.Balance, nil -} - -func (r *RetrievalClientConnector) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - return connectors.GetChainHead(r.cs) -} - -func (r *RetrievalClientConnector) getTipSet(tok shared.TipSetToken) (block.TipSet, error) { - var tsk block.TipSetKey - if err := encoding.Decode(tok, &tsk); err != nil { - return block.TipSet{}, xerrors.Wrapf(err, "failed to marshal TipSetToken into a TipSetKey") - } - - return r.cs.GetTipSet(tsk) -} diff --git a/internal/app/go-filecoin/connectors/retrieval_market/client_test.go b/internal/app/go-filecoin/connectors/retrieval_market/client_test.go deleted file mode 100644 index 140c10d474..0000000000 --- a/internal/app/go-filecoin/connectors/retrieval_market/client_test.go +++ /dev/null @@ -1,399 +0,0 @@ -package retrievalmarketconnector_test - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - specs "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - specst "github.com/filecoin-project/specs-actors/support/testing" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - . 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/retrieval_market" - pch "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" - paychtest "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel/testing" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cst" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor/builtin" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -func TestRetrievalClientConnector_GetOrCreatePaymentChannel(t *testing.T) { - testflags.IntegrationTest(t) - ctx := context.Background() - - paych := specst.NewActorAddr(t, "paych") - balance := abi.NewTokenAmount(1000) - channelAmt := abi.NewTokenAmount(101) - - t.Run("if the payment channel does not exist", func(t *testing.T) { - t.Run("returns a message CID to wait for", func(t *testing.T) { - bs, cs, client, miner, genTs := testSetup(ctx, t, balance) - pchMgr, fakePaychAPI := makePaychMgr(ctx, t, client, miner, paych) - fakePaychAPI.ExpectedMsgCid, fakePaychAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, client, miner, paych, channelAmt, exitcode.Ok, 2) - - rmc := NewRetrievalMarketClientFakeAPI(t) - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - tok, err := encoding.Encode(genTs.Key()) - require.NoError(t, err) - - rmc.MsgSendCid = shared_testutil.GenerateCids(1)[0] - - expectedAddr, mcid, err := 
rcnc.GetOrCreatePaymentChannel(ctx, client, miner, channelAmt, tok) - require.NoError(t, err) - assert.Equal(t, address.Undef, expectedAddr) - assert.False(t, mcid.Equals(rmc.MsgSendCid)) - }) - t.Run("Errors if there aren't enough funds in wallet", func(t *testing.T) { - bs, cs, client, miner, genTs := testSetup(ctx, t, balance) - pchMgr, _ := makePaychMgr(ctx, t, client, miner, paych) - rmc := NewRetrievalMarketClientFakeAPI(t) - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - tok, err := encoding.Encode(genTs.Key()) - require.NoError(t, err) - - res, mcid, err := rcnc.GetOrCreatePaymentChannel(ctx, client, miner, big.NewInt(2000), tok) - assert.EqualError(t, err, "not enough funds in wallet") - assert.Equal(t, address.Undef, res) - assert.True(t, mcid.Equals(cid.Undef)) - }) - - t.Run("Errors if client or minerWallet addr is invalid", func(t *testing.T) { - bs, cs, client, miner, genTs := testSetup(ctx, t, balance) - pchMgr, _ := makePaychMgr(ctx, t, client, miner, paych) - rmc := NewRetrievalMarketClientFakeAPI(t) - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - tok, err := encoding.Encode(genTs.Key()) - require.NoError(t, err) - - _, mcid, err := rcnc.GetOrCreatePaymentChannel(ctx, client, address.Undef, channelAmt, tok) - assert.EqualError(t, err, "empty address") - assert.True(t, mcid.Equals(cid.Undef)) - }) - }) - - t.Run("if payment channel exists, returns payment channel addr and cid for add funds msg", func(t *testing.T) { - bs, cs, client, miner, genTs := testSetup(ctx, t, balance) - pchMgr, fakePaychAPI := makePaychMgr(ctx, t, client, miner, paych) - - fakePaychAPI.ExpectedMsgCid, fakePaychAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, client, miner, paych, channelAmt, exitcode.Ok, 2) - - rmc := NewRetrievalMarketClientFakeAPI(t) - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - - tok, err := encoding.Encode(genTs.Key()) - require.NoError(t, err) - - requireCreatePaymentChannel(ctx, t, fakePaychAPI, 
pchMgr, channelAmt, client, miner, paych) - - fakePaychAPI.ExpectedMsgCid, fakePaychAPI.ExpectedResult = paychtest.GenSendFundsMessage(client, paych, channelAmt, exitcode.Ok, 4) - - actualChID, mcid, err := rcnc.GetOrCreatePaymentChannel(ctx, client, miner, channelAmt, tok) - require.NoError(t, err) - assert.False(t, mcid.Equals(cid.Undef)) - assert.Equal(t, paych, actualChID) - }) -} - -func TestRetrievalClientConnector_AllocateLane(t *testing.T) { - testflags.IntegrationTest(t) - ctx := context.Background() - bs, cs, client, miner, _ := testSetup(ctx, t, abi.NewTokenAmount(100)) - - paych := specst.NewIDAddr(t, 101) - channelAmt := abi.NewTokenAmount(10) - pchMgr, fakePaychAPI := makePaychMgr(ctx, t, client, miner, paych) - fakePaychAPI.ExpectedMsgCid, fakePaychAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, client, miner, paych, channelAmt, exitcode.Ok, 2) - - t.Run("Errors if payment channel does not exist", func(t *testing.T) { - rmc := NewRetrievalMarketClientFakeAPI(t) - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - - addr, err := address.NewIDAddress(12345) - require.NoError(t, err) - res, err := rcnc.AllocateLane(addr) - assert.EqualError(t, err, "No state for /t012345") - assert.Zero(t, res) - }) - t.Run("Increments and returns lastLane val", func(t *testing.T) { - rmc := NewRetrievalMarketClientFakeAPI(t) - - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - requireCreatePaymentChannel(ctx, t, fakePaychAPI, pchMgr, channelAmt, client, miner, paych) - - lane, err := rcnc.AllocateLane(paych) - require.NoError(t, err) - - chinfo, err := pchMgr.GetPaymentChannelInfo(paych) - require.NoError(t, err) - require.Equal(t, chinfo.NextLane-1, lane) - }) -} - -func TestRetrievalClientConnector_CreatePaymentVoucher(t *testing.T) { - testflags.IntegrationTest(t) - ctx := context.Background() - balance := abi.NewTokenAmount(1000) - bs, cs, client, miner, genTs := testSetup(ctx, t, balance) - paych := specst.NewIDAddr(t, 101) - 
expVoucherAmt := big.NewInt(10) - channelAmt := abi.NewTokenAmount(101) - - pchActor := actor.NewActor(shared_testutil.GenerateCids(1)[0], channelAmt, cid.Undef) - cs.SetActor(paych, pchActor) - - tok, err := encoding.Encode(genTs.Key()) - require.NoError(t, err) - - t.Run("Returns a voucher with a signature", func(t *testing.T) { - pchMgr, fakePaychAPI := makePaychMgr(ctx, t, client, miner, paych) - fakePaychAPI.ExpectedMsgCid, fakePaychAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, client, miner, paych, channelAmt, exitcode.Ok, 2) - - rmc := NewRetrievalMarketClientFakeAPI(t) - rmc.StubSignature(nil) - - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - requireCreatePaymentChannel(ctx, t, fakePaychAPI, pchMgr, channelAmt, client, miner, paych) - - lane, err := rcnc.AllocateLane(paych) - require.NoError(t, err) - assert.Equal(t, uint64(0), lane) - - voucher, err := rcnc.CreatePaymentVoucher(ctx, paych, expVoucherAmt, lane, tok) - require.NoError(t, err) - assert.Equal(t, expVoucherAmt, voucher.Amount) - assert.Equal(t, lane, voucher.Lane) - assert.Equal(t, uint64(2), voucher.Nonce) - assert.NotNil(t, voucher.Signature) - chinfo, err := pchMgr.GetPaymentChannelInfo(paych) - require.NoError(t, err) - // nil SecretPreimage gets stored as zero value. 
- voucher.SecretPreimage = []byte{} - assert.True(t, chinfo.HasVoucher(voucher)) - }) - - t.Run("Each lane or voucher increases NextNonce", func(t *testing.T) { - pchMgr, fakePaychAPI := makePaychMgr(ctx, t, client, miner, paych) - fakePaychAPI.ExpectedMsgCid, fakePaychAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, client, miner, paych, channelAmt, exitcode.Ok, 2) - - rmc := NewRetrievalMarketClientFakeAPI(t) - rmc.StubSignature(nil) - - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - requireCreatePaymentChannel(ctx, t, fakePaychAPI, pchMgr, channelAmt, client, miner, paych) - - chinfo, err := pchMgr.GetPaymentChannelInfo(paych) - require.NoError(t, err) - require.Equal(t, uint64(0), chinfo.NextLane) - require.Equal(t, uint64(1), chinfo.NextNonce) - - expectedNonce := uint64(10) // 3 lanes + 3*2 vouchers + 1 - for i := 0; i <= 2; i++ { - lane, err := rcnc.AllocateLane(paych) - require.NoError(t, err) - for j := 0; j <= 1; j++ { - amt := int64(i + j + 1) - newAmt := big.NewInt(amt) - _, err := rcnc.CreatePaymentVoucher(ctx, paych, newAmt, lane, tok) - require.NoError(t, err) - } - } - chinfo, err = pchMgr.GetPaymentChannelInfo(paych) - require.NoError(t, err) - assert.Equal(t, expectedNonce, chinfo.NextNonce) - }) - - t.Run("Errors if can't get block height/head tipset", func(t *testing.T) { - pchMgr, _ := makePaychMgr(ctx, t, client, miner, paych) - - _, _, _, localCs, _ := requireNewEmptyChainStore(ctx, t) - messageStore := chain.NewMessageStore(bs) - cs := cst.NewChainStateReadWriter(localCs, messageStore, bs, builtin.DefaultActors) - - rmc := NewRetrievalMarketClientFakeAPI(t) - - badTsKey := block.NewTipSetKey(shared_testutil.GenerateCids(1)[0]) - badTok, err := encoding.Encode(badTsKey) - require.NoError(t, err) - - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - res, err := rcnc.CreatePaymentVoucher(ctx, paych, abi.NewTokenAmount(1), 0, badTok) - assert.EqualError(t, err, "Key not found in tipindex") - assert.Nil(t, res) - 
}) - - t.Run("Errors if payment channel does not exist", func(t *testing.T) { - badAddr := specst.NewIDAddr(t, 990) - pchMgr, _ := makePaychMgr(ctx, t, client, miner, badAddr) - - rmc := NewRetrievalMarketClientFakeAPI(t) - rmc.StubSignature(nil) - - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - voucher, err := rcnc.CreatePaymentVoucher(ctx, badAddr, big.NewInt(100), 1, tok) - assert.EqualError(t, err, "No such address t0990") - assert.Nil(t, voucher) - }) - - t.Run("errors if not enough balance in payment channel", func(t *testing.T) { - pchMgr, fakePaychAPI := makePaychMgr(ctx, t, client, miner, paych) - - fakePaychAPI.ExpectedMsgCid, fakePaychAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, client, miner, paych, channelAmt, exitcode.Ok, 2) - - poorActor := actor.NewActor(shared_testutil.GenerateCids(1)[0], channelAmt, cid.Undef) - cs.SetActor(paych, poorActor) - - rmc := NewRetrievalMarketClientFakeAPI(t) - rmc.StubSignature(nil) - - requireCreatePaymentChannel(ctx, t, fakePaychAPI, pchMgr, channelAmt, client, miner, paych) - - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - lane, err := rcnc.AllocateLane(paych) - require.NoError(t, err) - - tooMuch := abi.NewTokenAmount(channelAmt.Int64() + 1) - voucher, err := rcnc.CreatePaymentVoucher(ctx, paych, tooMuch, lane, tok) - assert.EqualError(t, err, "insufficient funds for voucher amount") - assert.Nil(t, voucher) - }) - - t.Run("errors if lane is invalid", func(t *testing.T) { - pchMgr, fakePaychAPI := makePaychMgr(ctx, t, client, miner, paych) - - fakePaychAPI.ExpectedMsgCid, fakePaychAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, client, miner, paych, channelAmt, exitcode.Ok, 2) - rmc := NewRetrievalMarketClientFakeAPI(t) - rmc.StubSignature(nil) - - requireCreatePaymentChannel(ctx, t, fakePaychAPI, pchMgr, channelAmt, client, miner, paych) - - // check when no lanes allocated - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - voucher, err := 
rcnc.CreatePaymentVoucher(ctx, paych, expVoucherAmt, 0, tok) - require.Nil(t, voucher) - assert.EqualError(t, err, "lane does not exist 0") - require.Nil(t, voucher) - - lane, err := rcnc.AllocateLane(paych) - require.NoError(t, err) - - // check when there is a lane allocated - voucher, err = rcnc.CreatePaymentVoucher(ctx, paych, expVoucherAmt, lane+1, tok) - require.Nil(t, voucher) - assert.EqualError(t, err, "lane does not exist 1") - }) - - t.Run("Errors if can't sign bytes", func(t *testing.T) { - pchMgr, fakePaychAPI := makePaychMgr(ctx, t, client, miner, paych) - fakePaychAPI.ExpectedMsgCid, fakePaychAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, client, miner, paych, channelAmt, exitcode.Ok, 2) - - rmc := NewRetrievalMarketClientFakeAPI(t) - rmc.SigErr = errors.New("signature failure") - rmc.StubSignature(errors.New("signature failure")) - - rcnc := NewRetrievalClientConnector(bs, cs, rmc, pchMgr) - requireCreatePaymentChannel(ctx, t, fakePaychAPI, pchMgr, channelAmt, client, miner, paych) - - lane, err := rcnc.AllocateLane(paych) - require.NoError(t, err) - - voucher, err := rcnc.CreatePaymentVoucher(ctx, paych, big.NewInt(1), lane, tok) - assert.EqualError(t, err, "signature failure") - assert.Nil(t, voucher) - }) -} - -func testSetup(ctx context.Context, t *testing.T, bal abi.TokenAmount) (bstore.Blockstore, *message.FakeProvider, address.Address, address.Address, block.TipSet) { - _, builder, genTs, chainStore, st1 := requireNewEmptyChainStore(ctx, t) - rootBlk := builder.AppendBlockOnBlocks() - block.RequireNewTipSet(t, rootBlk) - require.NoError(t, chainStore.SetHead(ctx, genTs)) - root, err := st1.Commit(ctx) - require.NoError(t, err) - - // add tipset and state to chainstore - require.NoError(t, chainStore.PutTipSetMetadata(ctx, &chain.TipSetMetadata{ - TipSet: genTs, - TipSetStateRoot: root, - TipSetReceipts: types.EmptyReceiptsCID, - })) - - ds := repo.NewInMemoryRepo().ChainDatastore() - bs := bstore.NewBlockstore(ds) - - 
fakeProvider := message.NewFakeProvider(t) - fakeProvider.Builder = builder - clientAddr := specst.NewIDAddr(t, 102) - clientActor := actor.NewActor(specs.AccountActorCodeID, bal, cid.Undef) - fakeProvider.SetHead(genTs.Key()) - fakeProvider.SetActor(clientAddr, clientActor) - - minerAddr := specst.NewIDAddr(t, 101) - - return bs, fakeProvider, clientAddr, minerAddr, genTs -} - -func requireCreatePaymentChannel(ctx context.Context, t *testing.T, testAPI *paychtest.FakePaymentChannelAPI, m *pch.Manager, balance abi.TokenAmount, client, miner, paych address.Address) { - - _, mcid, err := m.CreatePaymentChannel(client, miner, balance) - require.NoError(t, err) - - // give goroutine a chance to update channel store - time.Sleep(100 * time.Millisecond) - require.True(t, testAPI.ExpectedMsgCid.Equals(mcid)) - assertChannel(t, paych, m, true) -} - -func requireNewEmptyChainStore(ctx context.Context, t *testing.T) (cid.Cid, *chain.Builder, block.TipSet, *chain.Store, state.Tree) { - store := cbor.NewMemCborStore() - - // Cribbed from chain/store_test - st1 := state.NewState(store) - root, err := st1.Commit(ctx) - require.NoError(t, err) - - // link testing state to test block - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - r := repo.NewInMemoryRepo() - - // setup chain store - ds := r.Datastore() - cs := chain.NewStore(ds, store, chain.NewStatusReporter(), genTS.At(0).Cid()) - return root, builder, genTS, cs, st1 -} - -func makePaychMgr(ctx context.Context, t *testing.T, client, miner, paych address.Address) (*pch.Manager, *paychtest.FakePaymentChannelAPI) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - viewer := paychtest.NewFakeStateViewer(t) - pchMgr := pch.NewManager(context.Background(), ds, testAPI, testAPI, viewer) - - viewer.GetFakeStateView().AddActorWithState(paych, client, miner, address.Undef) - return pchMgr, testAPI -} - -func assertChannel(t *testing.T, paych 
address.Address, pchMgr *pch.Manager, exists bool) { - has, err := pchMgr.ChannelExists(paych) - assert.NoError(t, err) - assert.Equal(t, has, exists) -} diff --git a/internal/app/go-filecoin/connectors/retrieval_market/common.go b/internal/app/go-filecoin/connectors/retrieval_market/common.go deleted file mode 100644 index 1e9b4d9fcc..0000000000 --- a/internal/app/go-filecoin/connectors/retrieval_market/common.go +++ /dev/null @@ -1,45 +0,0 @@ -package retrievalmarketconnector - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - paychActor "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" -) - -// ChainReaderAPI is the subset of the Wallet interface needed by the retrieval client node -type ChainReaderAPI interface { - // GetBalance gets the balance in AttoFIL for a given address - Head() block.TipSetKey - GetTipSet(key block.TipSetKey) (block.TipSet, error) - GetActorAt(ctx context.Context, tipKey block.TipSetKey, addr address.Address) (*actor.Actor, error) -} - -// RetrievalSigner is an interface with the ability to sign data -type RetrievalSigner interface { - SignBytes(ctx context.Context, data []byte, addr address.Address) (crypto.Signature, error) -} - -// PaychMgrAPI is an API used for communicating with payment channel actor and store. 
-type PaychMgrAPI interface { - AllocateLane(paychAddr address.Address) (uint64, error) - ChannelExists(paychAddr address.Address) (bool, error) - GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) - GetPaymentChannelInfo(paychAddr address.Address) (*paymentchannel.ChannelInfo, error) - GetPaymentChannelByAccounts(payer, payee address.Address) (*paymentchannel.ChannelInfo, error) - CreatePaymentChannel(payer, payee address.Address, amt abi.TokenAmount) (address.Address, cid.Cid, error) - AddFundsToChannel(paychAddr address.Address, amt abi.TokenAmount) (cid.Cid, error) - AddVoucherToChannel(paychAddr address.Address, voucher *paychActor.SignedVoucher) error - AddVoucher(paychAddr address.Address, voucher *paychActor.SignedVoucher, proof []byte, expected big.Int, tok shared.TipSetToken) (abi.TokenAmount, error) - WaitForCreatePaychMessage(ctx context.Context, mcid cid.Cid) (address.Address, error) - WaitForAddFundsMessage(ctx context.Context, mcid cid.Cid) error -} diff --git a/internal/app/go-filecoin/connectors/retrieval_market/fake_api.go b/internal/app/go-filecoin/connectors/retrieval_market/fake_api.go deleted file mode 100644 index 5d1872601c..0000000000 --- a/internal/app/go-filecoin/connectors/retrieval_market/fake_api.go +++ /dev/null @@ -1,122 +0,0 @@ -package retrievalmarketconnector - -import ( - "context" - "io" - "math/rand" - "os" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - xerrors "github.com/pkg/errors" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// RetrievalMarketClientFakeAPI is a test API that satisfies all needed interface methods -// for a RetrievalMarketClient -type RetrievalMarketClientFakeAPI struct { - t *testing.T - 
AllocateLaneErr error - - PayChBalanceErr error - - CreatePaymentChannelErr error - WorkerAddr address.Address - WorkerAddrErr error - Nonce uint64 - NonceErr error - - Sig crypto.Signature - SigErr error - - MsgSendCid cid.Cid - MsgSendErr error - - SendNewVoucherErr error - ExpectedVouchers map[address.Address]*paymentchannel.VoucherInfo - ActualVouchers map[address.Address]bool - - ExpectedSectorIDs map[uint64]string - ActualSectorIDs map[uint64]bool - UnsealErr error -} - -func (rmFake *RetrievalMarketClientFakeAPI) ChannelExists(_ address.Address) (bool, error) { - return true, nil -} - -// NewRetrievalMarketClientFakeAPI creates an instance of a test API that satisfies all needed -// interface methods for a RetrievalMarketClient. -func NewRetrievalMarketClientFakeAPI(t *testing.T) *RetrievalMarketClientFakeAPI { - return &RetrievalMarketClientFakeAPI{ - t: t, - WorkerAddr: requireMakeTestFcAddr(t), - Nonce: rand.Uint64(), - ExpectedVouchers: make(map[address.Address]*paymentchannel.VoucherInfo), - ActualVouchers: make(map[address.Address]bool), - ExpectedSectorIDs: make(map[uint64]string), - ActualSectorIDs: make(map[uint64]bool), - } -} - -// -------------- API METHODS -// NextNonce mocks getting an actor's next nonce -func (rmFake *RetrievalMarketClientFakeAPI) NextNonce(_ context.Context, _ address.Address) (uint64, error) { - rmFake.Nonce++ - return rmFake.Nonce, rmFake.NonceErr -} - -// SignBytes mocks signing data -func (rmFake *RetrievalMarketClientFakeAPI) SignBytes(_ context.Context, _ []byte, _ address.Address) (crypto.Signature, error) { - return rmFake.Sig, rmFake.SigErr -} - -// UnsealSector mocks unsealing. 
Assign a filename to ExpectedSectorIDs[sectorID] to -// test -func (rmFake *RetrievalMarketClientFakeAPI) UnsealSector(_ context.Context, sectorID uint64) (io.ReadCloser, error) { - if rmFake.UnsealErr != nil { - return nil, rmFake.UnsealErr - } - name, ok := rmFake.ExpectedSectorIDs[sectorID] - if !ok { - return nil, xerrors.New("RetrievalMarketClientFakeAPI: sectorID does not exist") - } - rc, err := os.OpenFile(name, os.O_RDONLY, 0500) - require.NoError(rmFake.t, err) - rmFake.ActualSectorIDs[sectorID] = true - return rc, nil -} - -// --------------- Testing methods - -// StubMessageResponse sets up a message, message receipt and return value for a create payment -// channel message -func (rmFake *RetrievalMarketClientFakeAPI) StubSignature(sigError error) { - mockSigner, _ := types.NewMockSignersAndKeyInfo(1) - addr1 := mockSigner.Addresses[0] - - sig, err := mockSigner.SignBytes(context.TODO(), []byte("pork chops and applesauce"), addr1) - require.NoError(rmFake.t, err) - - signature := crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: sig.Data, - } - rmFake.Sig = signature - rmFake.SigErr = sigError -} - -// requireMakeTestFcAddr generates a random ID addr for test -func requireMakeTestFcAddr(t *testing.T) address.Address { - res, err := address.NewIDAddress(rand.Uint64()) - require.NoError(t, err) - return res -} - -var _ RetrievalSigner = &RetrievalMarketClientFakeAPI{} -var _ UnsealerAPI = &RetrievalMarketClientFakeAPI{} diff --git a/internal/app/go-filecoin/connectors/retrieval_market/provider.go b/internal/app/go-filecoin/connectors/retrieval_market/provider.go deleted file mode 100644 index 5c7496906f..0000000000 --- a/internal/app/go-filecoin/connectors/retrieval_market/provider.go +++ /dev/null @@ -1,120 +0,0 @@ -package retrievalmarketconnector - -import ( - "bufio" - "context" - "io" - "math" - - "github.com/filecoin-project/go-address" - retmkt "github.com/filecoin-project/go-fil-markets/retrievalmarket" - rmnet 
"github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - blockstore "github.com/ipfs/go-ipfs-blockstore" - xerrors "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors" -) - -// MaxInt is the max value of an Int -const MaxInt = int(^uint(0) >> 1) - -// RetrievalProviderConnector is the glue between go-filecoin and retrieval market provider API -type RetrievalProviderConnector struct { - chainReader ChainReaderAPI - bstore blockstore.Blockstore - net rmnet.RetrievalMarketNetwork - paychMgr PaychMgrAPI - unsealer UnsealerAPI -} - -var _ retmkt.RetrievalProviderNode = &RetrievalProviderConnector{} - -// UnsealerAPI is the API required for unsealing a sectorgi -type UnsealerAPI interface { - UnsealSector(ctx context.Context, sectorID uint64) (io.ReadCloser, error) -} - -// NewRetrievalProviderConnector creates a new RetrievalProviderConnector -func NewRetrievalProviderConnector(net rmnet.RetrievalMarketNetwork, us UnsealerAPI, - bs blockstore.Blockstore, paychMgr PaychMgrAPI, chainReader ChainReaderAPI) *RetrievalProviderConnector { - return &RetrievalProviderConnector{ - bstore: bs, - net: net, - paychMgr: paychMgr, - unsealer: us, - chainReader: chainReader, - } -} - -// UnsealSector unseals the sector given by sectorId and offset with length `length` -// It rejects offsets > int size and length > int64 size; the interface wants -// uint64s. This would return a bufio overflow error anyway, but the check -// is provided as a debugging convenience for the consumer of this function. 
-func (r *RetrievalProviderConnector) UnsealSector(ctx context.Context, sectorID uint64, - offset uint64, length uint64) (io.ReadCloser, error) { - // reject anything that's a real uint64 rather than trying to get cute - // and offset that much or copy into a buf that large - if offset >= uint64(MaxInt) { - return nil, xerrors.New("offset overflows int") - } - if length >= math.MaxInt64 { - return nil, xerrors.New("length overflows int64") - } - - unsealedSector, err := r.unsealer.UnsealSector(ctx, sectorID) - if err != nil { - return nil, err - } - return newWrappedReadCloser(unsealedSector, offset, length) -} - -type limitedOffsetReadCloser struct { - originalRC io.ReadCloser - limitedReader io.Reader -} - -func newWrappedReadCloser(originalRc io.ReadCloser, offset, length uint64) (io.ReadCloser, error) { - bufr := bufio.NewReader(originalRc) - _, err := bufr.Discard(int(offset)) - if err != nil { - return nil, err - } - limitedR := io.LimitedReader{R: bufr, N: int64(length)} - return &limitedOffsetReadCloser{ - originalRC: originalRc, - limitedReader: &limitedR, - }, nil -} - -func (wrc limitedOffsetReadCloser) Read(p []byte) (int, error) { - return wrc.limitedReader.Read(p) -} -func (wrc limitedOffsetReadCloser) Close() error { - return wrc.originalRC.Close() -} - -// SavePaymentVoucher stores the provided payment voucher. 
-// Returns the difference between voucher amount and largest previous voucher amount, and -// error if this amount is less than `expected` amount -func (r *RetrievalProviderConnector) SavePaymentVoucher(_ context.Context, paymentChannel address.Address, voucher *paych.SignedVoucher, proof []byte, expected abi.TokenAmount, tok shared.TipSetToken) (actual abi.TokenAmount, err error) { - actual, err = r.paychMgr.AddVoucher(paymentChannel, voucher, proof, expected, tok) - - if err != nil { - return abi.NewTokenAmount(0), err - } - - return actual, nil -} - -// GetMinerWorkerAddress produces the worker address for the provided storage -// miner address from the tipset for the provided token. -func (r *RetrievalProviderConnector) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { - return r.paychMgr.GetMinerWorkerAddress(ctx, miner, tok) -} - -func (r *RetrievalProviderConnector) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - return connectors.GetChainHead(r.chainReader) -} diff --git a/internal/app/go-filecoin/connectors/retrieval_market/provider_test.go b/internal/app/go-filecoin/connectors/retrieval_market/provider_test.go deleted file mode 100644 index 8bb3834df0..0000000000 --- a/internal/app/go-filecoin/connectors/retrieval_market/provider_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package retrievalmarketconnector_test - -import ( - "context" - "errors" - "io/ioutil" - "math/rand" - "os" - "reflect" - "testing" - - "github.com/filecoin-project/go-address" - gfmtut "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - specst "github.com/filecoin-project/specs-actors/support/testing" - 
"github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - . "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/retrieval_market" - pch "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" - paychtest "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel/testing" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestNewRetrievalProviderNodeConnector(t *testing.T) { - tf.UnitTest(t) - rmnet := gfmtut.NewTestRetrievalMarketNetwork(gfmtut.TestNetworkParams{}) - pm := piecemanager.NewFiniteStateMachineBackEnd(nil, nil) - bs := blockstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())) - - pchMgr, _ := makePaychMgr(context.Background(), t, - specst.NewIDAddr(t, 99), - specst.NewIDAddr(t, 100), - specst.NewActorAddr(t, "foobar")) - rpc := NewRetrievalProviderConnector(rmnet, &pm, bs, pchMgr, nil) - assert.NotZero(t, rpc) -} - -func TestRetrievalProviderConnector_UnsealSector(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - sectorID := rand.Uint64() - fixtureFile, err := ioutil.TempFile(".", "file") - require.NoError(t, err) - fileData := "somedata" - _, err = fixtureFile.WriteString(fileData) - require.NoError(t, err) - defer func() { _ = os.Remove(fixtureFile.Name()) }() - - intSz := reflect.TypeOf(0).Size()*8 - 1 - maxOffset := uint64(1 << intSz) - - testCases := []struct { - name string - offset, length, expectedLen uint64 - unsealErr error - expectedErr string - }{ - {name: "happy path", offset: 2, length: 6, 
expectedLen: 6, expectedErr: ""}, - {name: "happy even if length more than file length", offset: 2, length: 9999, expectedLen: 6, expectedErr: ""}, - {name: "returns error if Unseal errors", unsealErr: errors.New("boom"), expectedErr: "boom"}, - {name: "returns EOF if offset more than file length", offset: 9999, expectedErr: "EOF"}, - {name: "returns error if offset > int64", offset: maxOffset, expectedErr: "offset overflows int"}, - {name: "returns error if length > int64", length: 1 << 63, expectedErr: "length overflows int64"}, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - rmp, rpc := unsealTestSetup(ctx, t) - rmp.ExpectedSectorIDs[sectorID] = fixtureFile.Name() - - if tc.expectedErr != "" { - rmp.UnsealErr = tc.unsealErr - _, err := rpc.UnsealSector(ctx, sectorID, tc.offset, tc.length) - assert.EqualError(t, err, tc.expectedErr) - } else { - res, err := rpc.UnsealSector(ctx, sectorID, tc.offset, tc.length) - require.NoError(t, err) - readBytes := make([]byte, tc.length+1) - readlen, err := res.Read(readBytes) - require.NoError(t, err) - assert.Equal(t, int(tc.expectedLen), readlen) - - // check that it read something & the offset worked - assert.Equal(t, fileData[2:], string(readBytes[0:6])) - } - }) - } -} - -func unsealTestSetup(ctx context.Context, t *testing.T) (*RetrievalMarketClientFakeAPI, *RetrievalProviderConnector) { - rmnet := gfmtut.NewTestRetrievalMarketNetwork(gfmtut.TestNetworkParams{}) - bs := blockstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())) - rmp := NewRetrievalMarketClientFakeAPI(t) - pchMgr, _ := makePaychMgr(ctx, t, - specst.NewIDAddr(t, 99), - specst.NewIDAddr(t, 100), - specst.NewActorAddr(t, "foobar")) - rpc := NewRetrievalProviderConnector(rmnet, rmp, bs, pchMgr, nil) - return rmp, rpc -} - -func TestRetrievalProviderConnector_SavePaymentVoucher(t *testing.T) { - ctx := context.Background() - - rmnet := gfmtut.NewTestRetrievalMarketNetwork(gfmtut.TestNetworkParams{}) - pm := 
piecemanager.NewFiniteStateMachineBackEnd(nil, nil) - - bs := blockstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())) - pchan := specst.NewIDAddr(t, 100) - clientAddr := specst.NewIDAddr(t, 101) - minerAddr := specst.NewIDAddr(t, 102) - root := gfmtut.GenerateCids(1)[0] - tsk := block.NewTipSetKey(root) - tok, err := encoding.Encode(tsk) - require.NoError(t, err) - - voucher := &paych.SignedVoucher{ - Lane: rand.Uint64(), - Nonce: rand.Uint64(), - Amount: big.NewInt(rand.Int63()), - MinSettleHeight: abi.ChainEpoch(99), - SecretPreimage: []byte{}, - } - proof := []byte("proof") - - t.Run("saves payment voucher and returns voucher amount if new", func(t *testing.T) { - viewer, pchMgr := makeViewerAndManager(ctx, t, clientAddr, minerAddr, pchan) - viewer.GetFakeStateView().AddActorWithState(pchan, clientAddr, minerAddr, address.Undef) - rmp := NewRetrievalMarketClientFakeAPI(t) - // simulate creating payment channel - rmp.ExpectedVouchers[pchan] = &pch.VoucherInfo{Voucher: voucher, Proof: proof} - - rpc := NewRetrievalProviderConnector(rmnet, &pm, bs, pchMgr, nil) - - tokenamt, err := rpc.SavePaymentVoucher(ctx, pchan, voucher, proof, voucher.Amount, tok) - assert.NoError(t, err) - assert.True(t, voucher.Amount.Equals(tokenamt)) - - chinfo, err := pchMgr.GetPaymentChannelInfo(pchan) - require.NoError(t, err) - assert.True(t, chinfo.HasVoucher(voucher)) - }) - - t.Run("errors if manager fails to save voucher, does not store new channel info", func(t *testing.T) { - viewer, pchMgr := makeViewerAndManager(ctx, t, clientAddr, minerAddr, pchan) - viewer.GetFakeStateView().AddActorWithState(pchan, clientAddr, minerAddr, address.Undef) - viewer.GetFakeStateView().PaychActorPartiesErr = errors.New("boom") - - rmp := NewRetrievalMarketClientFakeAPI(t) - rmp.ExpectedVouchers[pchan] = &pch.VoucherInfo{Voucher: voucher, Proof: proof} - rpc := NewRetrievalProviderConnector(rmnet, &pm, bs, pchMgr, nil) - _, err := rpc.SavePaymentVoucher(ctx, pchan, voucher, proof, 
voucher.Amount, tok) - assert.EqualError(t, err, "boom") - - _, err = pchMgr.GetPaymentChannelInfo(pchan) - require.EqualError(t, err, "No state for /t0100: datastore: key not found") - }) -} - -func makeViewerAndManager(ctx context.Context, t *testing.T, client, miner, paych address.Address) (*paychtest.FakeStateViewer, *pch.Manager) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - viewer := paychtest.NewFakeStateViewer(t) - pchMgr := pch.NewManager(context.Background(), ds, testAPI, testAPI, viewer) - blockHeight := uint64(1234) - balance := types.NewAttoFILFromFIL(1000) - - testAPI.ExpectedMsgCid, testAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, client, miner, paych, balance, exitcode.Ok, blockHeight) - return viewer, pchMgr -} diff --git a/internal/app/go-filecoin/connectors/sectors/persisted.go b/internal/app/go-filecoin/connectors/sectors/persisted.go deleted file mode 100644 index 2e36f1a6ba..0000000000 --- a/internal/app/go-filecoin/connectors/sectors/persisted.go +++ /dev/null @@ -1,31 +0,0 @@ -package sectors - -import ( - "sync" - - "github.com/filecoin-project/go-storedcounter" - "github.com/filecoin-project/specs-actors/actors/abi" - fsm "github.com/filecoin-project/storage-fsm" - "github.com/ipfs/go-datastore" -) - -// PersistedSectorNumberCounter dispenses unique sector numbers using a -// monotonically increasing internal counter -type PersistedSectorNumberCounter struct { - inner *storedcounter.StoredCounter - innerLk sync.Mutex -} - -var _ fsm.SectorIDCounter = new(PersistedSectorNumberCounter) - -func (s *PersistedSectorNumberCounter) Next() (abi.SectorNumber, error) { - s.innerLk.Lock() - defer s.innerLk.Unlock() - i, err := s.inner.Next() - return abi.SectorNumber(i), err -} - -func NewPersistedSectorNumberCounter(ds datastore.Batching) fsm.SectorIDCounter { - sc := storedcounter.New(ds, datastore.NewKey("/storage/nextid")) - return 
&PersistedSectorNumberCounter{inner: sc} -} diff --git a/internal/app/go-filecoin/connectors/storage_market/client.go b/internal/app/go-filecoin/connectors/storage_market/client.go deleted file mode 100644 index d9e08b2b32..0000000000 --- a/internal/app/go-filecoin/connectors/storage_market/client.go +++ /dev/null @@ -1,244 +0,0 @@ -package storagemarketconnector - -import ( - "context" - "reflect" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - spaminer "github.com/filecoin-project/specs-actors/actors/builtin/miner" - spapow "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-hamt-ipld" - cbor "github.com/ipfs/go-ipld-cbor" - xerrors "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// StorageClientNodeConnector adapts the node to provide the correct interface to the storage client. 
-type StorageClientNodeConnector struct { - connectorCommon - - clientAddr ClientAddressGetter - cborStore cbor.IpldStore -} - -type ClientAddressGetter func() (address.Address, error) - -var _ storagemarket.StorageClientNode = &StorageClientNodeConnector{} - -// NewStorageClientNodeConnector creates a new connector -func NewStorageClientNodeConnector( - cbor cbor.IpldStore, - cs chainReader, - w *msg.Waiter, - s types.Signer, - ob *message.Outbox, - ca ClientAddressGetter, - sv *appstate.Viewer, -) *StorageClientNodeConnector { - return &StorageClientNodeConnector{ - connectorCommon: connectorCommon{cs, sv, w, s, ob}, - cborStore: cbor, - clientAddr: ca, - } -} - -// AddFunds adds storage market funds for a storage client -func (s *StorageClientNodeConnector) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { - clientAddr, err := s.clientAddr() - if err != nil { - return cid.Undef, err - } - return s.addFunds(ctx, clientAddr, addr, amount) -} - -// EnsureFunds compares the passed amount to the available balance for an address, and will add funds if necessary -func (s *StorageClientNodeConnector) EnsureFunds(ctx context.Context, addr, walletAddr address.Address, amount abi.TokenAmount, tok shared.TipSetToken) (cid.Cid, error) { - balance, err := s.GetBalance(ctx, addr, tok) - if err != nil { - return cid.Undef, err - } - - if balance.Available.LessThan(amount) { - return s.AddFunds(ctx, addr, big.Sub(amount, balance.Available)) - } - - return cid.Undef, err -} - -// ListClientDeals returns all deals published on chain for the given account -func (s *StorageClientNodeConnector) ListClientDeals(ctx context.Context, addr address.Address, tok shared.TipSetToken) ([]storagemarket.StorageDeal, error) { - return s.listDeals(ctx, tok, func(proposal *market.DealProposal, _ *market.DealState) bool { - return proposal.Client == addr - }) -} - -// ListStorageProviders finds all miners that will provide storage -func (s 
*StorageClientNodeConnector) ListStorageProviders(ctx context.Context, tok shared.TipSetToken) ([]*storagemarket.StorageProviderInfo, error) { - var tsk block.TipSetKey - if err := encoding.Decode(tok, &tsk); err != nil { - return nil, xerrors.Wrapf(err, "failed to marshal TipSetToken into a TipSetKey") - } - - var spState spapow.State - err := s.chainStore.GetActorStateAt(ctx, tsk, builtin.StoragePowerActorAddr, &spState) - if err != nil { - return nil, err - } - - infos := []*storagemarket.StorageProviderInfo{} - powerHamt, err := hamt.LoadNode(ctx, s.cborStore, spState.Claims) - if err != nil { - return nil, err - } - - err = powerHamt.ForEach(ctx, func(minerAddrStr string, _ interface{}) error { - minerAddr, err := address.NewFromString(minerAddrStr) - if err != nil { - return err - } - - var mState spaminer.State - err = s.chainStore.GetActorStateAt(ctx, tsk, minerAddr, &mState) - if err != nil { - return err - } - - info := mState.Info - infos = append(infos, &storagemarket.StorageProviderInfo{ - Address: minerAddr, - Owner: info.Owner, - Worker: info.Worker, - SectorSize: uint64(info.SectorSize), - PeerID: info.PeerId, - }) - return nil - }) - if err != nil { - return nil, err - } - - return infos, nil -} - -// ValidatePublishedDeal validates a deal has been published correctly -// Adapted from https://github.com/filecoin-project/lotus/blob/3b34eba6124d16162b712e971f0db2ee108e0f67/markets/storageadapter/client.go#L156 -func (s *StorageClientNodeConnector) ValidatePublishedDeal(ctx context.Context, deal storagemarket.ClientDeal) (dealID abi.DealID, err error) { - var unsigned types.UnsignedMessage - var receipt *vm.MessageReceipt - - // TODO: This is an inefficient way to discover a deal ID. See if we can find it uniquely on chain some other way or store the dealID when the message first lands (#4066). 
- // give the wait 30 seconds to avoid races - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - // Fetch receipt to return dealId - about2Days := uint64(24 * 60) - err = s.waiter.Wait(ctx, *deal.PublishMessage, about2Days, func(_ *block.Block, msg *types.SignedMessage, rcpt *vm.MessageReceipt) error { - unsigned = msg.Message - receipt = rcpt - return nil - }) - if err != nil { - return 0, err - } - - tok, err := encoding.Encode(s.chainStore.Head()) - if err != nil { - return 0, err - } - - minerWorker, err := s.GetMinerWorkerAddress(ctx, deal.Proposal.Provider, tok) - if err != nil { - return 0, err - } - - if unsigned.From != minerWorker { - return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s", unsigned.From, deal.Proposal.Provider) - } - - if unsigned.To != builtin.StorageMarketActorAddr { - return 0, xerrors.Errorf("deal publish message wasn't set to StorageMarket actor (to=%s)", unsigned.To) - } - - if unsigned.Method != builtin.MethodsMarket.PublishStorageDeals { - return 0, xerrors.Errorf("deal publish message called incorrect method (method=%d)", unsigned.Method) - } - - var params market.PublishStorageDealsParams - err = encoding.Decode(unsigned.Params, ¶ms) - if err != nil { - return 0, err - } - - msgProposals := params.Deals - // The return value doesn't recapitulate the whole deal. If inspection is required, we should look up the deal - // in the market actor state. 
- - for _, proposal := range msgProposals { - if reflect.DeepEqual(proposal.Proposal, deal.Proposal) { - var ret market.PublishStorageDealsReturn - err := encoding.Decode(receipt.ReturnValue, &ret) - if err != nil { - return 0, err - } - return ret.IDs[0], nil - } - } - - return 0, xerrors.Errorf("published deal does not match ClientDeal") -} - -// SignProposal uses the local wallet to sign the given proposal -func (s *StorageClientNodeConnector) SignProposal(ctx context.Context, signer address.Address, proposal market.DealProposal) (*market.ClientDealProposal, error) { - buf, err := encoding.Encode(&proposal) - if err != nil { - return nil, err - } - - signature, err := s.SignBytes(ctx, signer, buf) - if err != nil { - return nil, err - } - - return &market.ClientDealProposal{ - Proposal: proposal, - ClientSignature: *signature, - }, nil -} - -// GetDefaultWalletAddress returns the default account for this node -func (s *StorageClientNodeConnector) GetDefaultWalletAddress(_ context.Context) (address.Address, error) { - return s.clientAddr() -} - -// ValidateAskSignature ensures the given ask has been signed correctly -func (s *StorageClientNodeConnector) ValidateAskSignature(ctx context.Context, signed *storagemarket.SignedStorageAsk, tok shared.TipSetToken) (bool, error) { - ask := signed.Ask - - buf, err := encoding.Encode(ask) - if err != nil { - return false, err - } - - return s.VerifySignature(ctx, *signed.Signature, ask.Miner, buf, tok) -} - -// EventLogger logs new events on the storage client -func (s *StorageClientNodeConnector) EventLogger(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - log.Infof("Event: %s, Proposal CID: %s, State: %s, Message: %s", storagemarket.ClientEvents[event], deal.ProposalCid, storagemarket.DealStates[deal.State], deal.Message) -} diff --git a/internal/app/go-filecoin/connectors/storage_market/common.go b/internal/app/go-filecoin/connectors/storage_market/common.go deleted file mode 100644 index 
9d6ed16771..0000000000 --- a/internal/app/go-filecoin/connectors/storage_market/common.go +++ /dev/null @@ -1,250 +0,0 @@ -package storagemarketconnector - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - spasm "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -var log = logging.Logger("storage-protocol") - -type chainReader interface { - Head() block.TipSetKey - GetTipSet(block.TipSetKey) (block.TipSet, error) - GetTipSetStateRoot(ctx context.Context, tipKey block.TipSetKey) (cid.Cid, error) - GetActorStateAt(ctx context.Context, tipKey block.TipSetKey, addr address.Address, out interface{}) error - StateView(key block.TipSetKey) (*state.View, error) - 
cbor.IpldStore -} - -// WorkerGetter is a function that can retrieve the miner worker for the given address from actor state -type WorkerGetter func(ctx context.Context, minerAddr address.Address, baseKey block.TipSetKey) (address.Address, error) - -type connectorCommon struct { - chainStore chainReader - stateViewer *appstate.Viewer - waiter *msg.Waiter - signer types.Signer - outbox *message.Outbox -} - -// MostRecentStateId returns the state key from the current head of the chain. -func (c *connectorCommon) GetChainHead(_ context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { // nolint: golint - return connectors.GetChainHead(c.chainStore) -} - -func (c *connectorCommon) WaitForMessage(ctx context.Context, mcid cid.Cid, onCompletion func(exitcode.ExitCode, []byte, error) error) error { - return c.waiter.Wait(ctx, mcid, msg.DefaultMessageWaitLookback, func(b *block.Block, message *types.SignedMessage, r *vm.MessageReceipt) error { - return onCompletion(r.ExitCode, r.ReturnValue, nil) - }) -} - -func (c *connectorCommon) addFunds(ctx context.Context, fromAddr address.Address, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { - mcid, _, err := c.outbox.Send( - ctx, - fromAddr, - builtin.StorageMarketActorAddr, - types.NewAttoFIL(amount.Int), - types.NewGasPrice(1), - gas.NewGas(5000), - true, - builtin.MethodsMarket.AddBalance, - &addr, - ) - return mcid, err -} - -// SignBytes uses the local wallet to sign the bytes with the given address -func (c *connectorCommon) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { - sig, err := c.signer.SignBytes(ctx, b, signer) - return &sig, err -} - -func (c *connectorCommon) GetBalance(ctx context.Context, addr address.Address, tok shared.TipSetToken) (storagemarket.Balance, error) { - var tsk block.TipSetKey - if err := encoding.Decode(tok, &tsk); err != nil { - return storagemarket.Balance{}, xerrors.Errorf("failed to marshal TipSetToken into a TipSetKey: 
%w", err) - } - - // Direct state access should be replaced with use of the state view. - var smState spasm.State - err := c.chainStore.GetActorStateAt(ctx, tsk, builtin.StorageMarketActorAddr, &smState) - if err != nil { - return storagemarket.Balance{}, err - } - - view, err := c.chainStore.StateView(tsk) - if err != nil { - return storagemarket.Balance{}, err - } - resAddr, err := view.InitResolveAddress(ctx, addr) - if err != nil { - return storagemarket.Balance{}, err - } - - available, err := c.getBalance(ctx, smState.EscrowTable, resAddr) - if err != nil { - return storagemarket.Balance{}, err - } - - locked, err := c.getBalance(ctx, smState.LockedTable, resAddr) - if err != nil { - return storagemarket.Balance{}, err - } - - return storagemarket.Balance{ - Available: abi.NewTokenAmount(available.Int64()), - Locked: abi.NewTokenAmount(locked.Int64()), - }, nil -} - -func (c *connectorCommon) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { - view, err := c.loadStateView(tok) - if err != nil { - return address.Undef, err - } - - _, fcworker, err := view.MinerControlAddresses(ctx, miner) - if err != nil { - return address.Undef, err - } - - return fcworker, nil -} - -func (c *connectorCommon) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, cb storagemarket.DealSectorCommittedCallback) error { - view, err := c.chainStore.StateView(c.chainStore.Head()) - if err != nil { - cb(err) - return err - } - - resolvedProvider, err := view.InitResolveAddress(ctx, provider) - if err != nil { - cb(err) - return err - } - - err = c.waiter.WaitPredicate(ctx, msg.DefaultMessageWaitLookback, func(msg *types.SignedMessage, msgCid cid.Cid) bool { - resolvedTo, err := view.InitResolveAddress(ctx, msg.Message.To) - if err != nil { - return false - } - - if resolvedTo != resolvedProvider { - return false - } - - if msg.Message.Method != builtin.MethodsMiner.ProveCommitSector { 
- return false - } - - // that's enough for us to check chain state - view, err = c.chainStore.StateView(c.chainStore.Head()) - if err != nil { - return false - } - - _, found, err := view.MarketDealState(ctx, dealID) - if err != nil { - return false - } - - return found - }, func(b *block.Block, signedMessage *types.SignedMessage, receipt *vm.MessageReceipt) error { - return nil - }) - - cb(err) - return err -} - -func (c *connectorCommon) getBalance(ctx context.Context, root cid.Cid, addr address.Address) (abi.TokenAmount, error) { - // These should be replaced with methods on the state view - table, err := adt.AsBalanceTable(state.StoreFromCbor(ctx, c.chainStore), root) - if err != nil { - return abi.TokenAmount{}, err - } - - hasBalance, err := table.Has(addr) - if err != nil { - return big.Zero(), err - } - balance := abi.NewTokenAmount(0) - if hasBalance { - balance, err = table.Get(addr) - if err != nil { - return big.Zero(), err - } - } - return balance, nil -} - -func (c *connectorCommon) listDeals(ctx context.Context, tok shared.TipSetToken, predicate func(proposal *spasm.DealProposal, dealState *spasm.DealState) bool) ([]storagemarket.StorageDeal, error) { - view, err := c.loadStateView(tok) - if err != nil { - return nil, err - } - - // Deals are not indexed in (expensive) chain state. - // This iterates *all* deal states, loads the associated proposals, and filters by provider. - // This is going to be really slow until we find a place to index deals, either here or in the module. 
- deals := []storagemarket.StorageDeal{} - err = view.MarketDealStatesForEach(ctx, func(dealId abi.DealID, state *spasm.DealState) error { - proposal, err := view.MarketDealProposal(ctx, dealId) - if err != nil { - return xerrors.Errorf("no proposal for deal %d: %w", dealId, err) - } - if predicate(&proposal, state) { - deals = append(deals, storagemarket.StorageDeal{ - DealProposal: proposal, - DealState: *state, - }) - } - return nil - }) - return deals, err -} - -func (c *connectorCommon) VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, plaintext []byte, tok shared.TipSetToken) (bool, error) { - view, err := c.loadStateView(tok) - if err != nil { - return false, err - } - - validator := state.NewSignatureValidator(view) - - return nil == validator.ValidateSignature(ctx, plaintext, signer, signature), nil -} - -func (c *connectorCommon) loadStateView(tok shared.TipSetToken) (*appstate.View, error) { - var tsk block.TipSetKey - if err := encoding.Decode(tok, &tsk); err != nil { - return nil, xerrors.Errorf("failed to marshal tok to a tipset key: %w", err) - } - return c.chainStore.StateView(tsk) -} diff --git a/internal/app/go-filecoin/connectors/storage_market/provider.go b/internal/app/go-filecoin/connectors/storage_market/provider.go deleted file mode 100644 index 2d53712983..0000000000 --- a/internal/app/go-filecoin/connectors/storage_market/provider.go +++ /dev/null @@ -1,204 +0,0 @@ -package storagemarketconnector - -import ( - "context" - "io" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - spaminer "github.com/filecoin-project/specs-actors/actors/builtin/miner" - 
"github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - "github.com/pkg/errors" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// StorageProviderNodeConnector adapts the node to provide an interface for the storage provider -type StorageProviderNodeConnector struct { - connectorCommon - - minerAddr address.Address - chainStore chainReader - outbox *message.Outbox - pieceManager piecemanager.PieceManager -} - -var _ storagemarket.StorageProviderNode = &StorageProviderNodeConnector{} - -// NewStorageProviderNodeConnector creates a new connector -func NewStorageProviderNodeConnector(ma address.Address, - cs chainReader, - ob *message.Outbox, - w *msg.Waiter, - pm piecemanager.PieceManager, - s types.Signer, - sv *appstate.Viewer, -) *StorageProviderNodeConnector { - return &StorageProviderNodeConnector{ - connectorCommon: connectorCommon{cs, sv, w, s, ob}, - chainStore: cs, - minerAddr: ma, - outbox: ob, - pieceManager: pm, - } -} - -// AddFunds adds storage market funds for a storage provider -func (s *StorageProviderNodeConnector) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { - tok, err := encoding.Encode(s.chainStore.Head()) - if err != nil { - return cid.Undef, err - } - - workerAddr, err := s.GetMinerWorkerAddress(ctx, s.minerAddr, tok) - if err != nil { - return cid.Undef, err - } - - return 
s.addFunds(ctx, workerAddr, addr, amount) -} - -// EnsureFunds compares the passed amount to the available balance for an address, and will add funds if necessary -func (s *StorageProviderNodeConnector) EnsureFunds(ctx context.Context, addr, walletAddr address.Address, amount abi.TokenAmount, tok shared.TipSetToken) (cid.Cid, error) { - balance, err := s.GetBalance(ctx, addr, tok) - if err != nil { - return cid.Undef, err - } - - if balance.Available.LessThan(amount) { - return s.AddFunds(ctx, addr, big.Sub(amount, balance.Available)) - } - - return cid.Undef, err -} - -// PublishDeals publishes storage deals on chain -func (s *StorageProviderNodeConnector) PublishDeals(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) { - params := market.PublishStorageDealsParams{Deals: []market.ClientDealProposal{deal.ClientDealProposal}} - - tok, err := encoding.Encode(s.chainStore.Head()) - if err != nil { - return cid.Undef, err - } - - workerAddr, err := s.GetMinerWorkerAddress(ctx, s.minerAddr, tok) - if err != nil { - return cid.Undef, err - } - - mcid, _, err := s.outbox.Send( - ctx, - workerAddr, - builtin.StorageMarketActorAddr, - types.ZeroAttoFIL, - types.NewGasPrice(1), - gas.NewGas(10000), - true, - builtin.MethodsMarket.PublishStorageDeals, - ¶ms, - ) - - if err != nil { - return cid.Undef, err - } - - return mcid, err -} - -// ListProviderDeals lists all deals for the given provider -func (s *StorageProviderNodeConnector) ListProviderDeals(ctx context.Context, addr address.Address, tok shared.TipSetToken) ([]storagemarket.StorageDeal, error) { - return s.listDeals(ctx, tok, func(proposal *market.DealProposal, dealState *market.DealState) bool { - return proposal.Provider == addr - }) -} - -// OnDealComplete adds the piece to the storage provider -func (s *StorageProviderNodeConnector) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) error { - // TODO: callback. 
- return s.pieceManager.SealPieceIntoNewSector(ctx, deal.DealID, deal.Proposal.StartEpoch, deal.Proposal.EndEpoch, pieceSize, pieceReader) -} - -// LocatePieceForDealWithinSector finds the sector, offset and length of a piece associated with the given deal id -func (s *StorageProviderNodeConnector) LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, tok shared.TipSetToken) (sectorNumber uint64, offset uint64, length uint64, err error) { - var tsk block.TipSetKey - if err := encoding.Decode(tok, &tsk); err != nil { - return 0, 0, 0, xerrors.Errorf("failed to marshal TipSetToken into a TipSetKey: %w", err) - } - - var smState market.State - err = s.chainStore.GetActorStateAt(ctx, tsk, builtin.StorageMarketActorAddr, &smState) - if err != nil { - return 0, 0, 0, err - } - - stateStore := state.StoreFromCbor(ctx, s.chainStore) - proposals, err := adt.AsArray(stateStore, smState.Proposals) - if err != nil { - return 0, 0, 0, err - } - - var minerState spaminer.State - err = s.chainStore.GetActorStateAt(ctx, tsk, s.minerAddr, &minerState) - if err != nil { - return 0, 0, 0, err - } - - precommitted, err := adt.AsMap(stateStore, minerState.PreCommittedSectors) - if err != nil { - return 0, 0, 0, err - } - - var sectorInfo spaminer.SectorPreCommitOnChainInfo - err = precommitted.ForEach(§orInfo, func(key string) error { - k, err := adt.ParseIntKey(key) - if err != nil { - return err - } - sectorNumber = uint64(k) - - for _, deal := range sectorInfo.Info.DealIDs { - if deal == dealID { - offset = uint64(0) - for _, did := range sectorInfo.Info.DealIDs { - var proposal market.DealProposal - found, err := proposals.Get(uint64(did), &proposal) - if err != nil { - return err - } - if !found { - return errors.Errorf("Could not find miner deal %d in storage market state", did) - } - - if did == dealID { - sectorNumber = uint64(k) - length = uint64(proposal.PieceSize) - return nil // Found! 
- } - offset += uint64(proposal.PieceSize) - } - } - } - return errors.New("Deal not found") - }) - return -} - -// EventLogger logs new events on the storage provider -func (s *StorageProviderNodeConnector) EventLogger(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - log.Infof("Event: %s, Proposal CID: %s, State: %s, Message: %s", storagemarket.ProviderEvents[event], deal.ProposalCid, storagemarket.DealStates[deal.State], deal.Message) -} diff --git a/internal/app/go-filecoin/internal/submodule/block_mining_submodule.go b/internal/app/go-filecoin/internal/submodule/block_mining_submodule.go deleted file mode 100644 index 3e7df5c70a..0000000000 --- a/internal/app/go-filecoin/internal/submodule/block_mining_submodule.go +++ /dev/null @@ -1,47 +0,0 @@ -package submodule - -import ( - "context" - "sync" - - "github.com/filecoin-project/go-filecoin/internal/pkg/mining" - "github.com/filecoin-project/go-filecoin/internal/pkg/postgenerator" - mining_protocol "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/mining" -) - -// BlockMiningSubmodule enhances the `Node` with block mining capabilities. -type BlockMiningSubmodule struct { - BlockMiningAPI *mining_protocol.API - - // Mining stuff. - AddNewlyMinedBlock newBlockFunc - // cancelMining cancels the context for block production and sector commitments. - CancelMining context.CancelFunc - MiningWorker *mining.DefaultWorker - MiningScheduler mining.Scheduler - Mining struct { - sync.Mutex - IsMining bool - } - MiningDoneWg *sync.WaitGroup - - // Inject non-default post generator here or leave nil for default - PoStGenerator postgenerator.PoStGenerator -} - -type newBlockFunc func(context.Context, mining.FullBlock) - -// NewBlockMiningSubmodule creates a new block mining submodule. 
-func NewBlockMiningSubmodule(ctx context.Context, gen postgenerator.PoStGenerator) (BlockMiningSubmodule, error) { - return BlockMiningSubmodule{ - // BlockMiningAPI: nil, - // AddNewlyMinedBlock: nil, - // cancelMining: nil, - // MiningWorker: nil, - // MiningScheduler: nil, - // mining: nil, - // miningDoneWg: nil, - // MessageSub: nil, - PoStGenerator: gen, - }, nil -} diff --git a/internal/app/go-filecoin/internal/submodule/blockservice_submoodule.go b/internal/app/go-filecoin/internal/submodule/blockservice_submoodule.go deleted file mode 100644 index 16c532062e..0000000000 --- a/internal/app/go-filecoin/internal/submodule/blockservice_submoodule.go +++ /dev/null @@ -1,26 +0,0 @@ -package submodule - -import ( - "context" - - bserv "github.com/ipfs/go-blockservice" -) - -// BlockServiceSubmodule enhances the `Node` with networked key/value fetching capabilities. -// -// TODO: split chain data from piece data (issue: https://github.com/filecoin-project/go-filecoin/issues/3481) -// Note: at present: -// - `BlockService` is shared by chain/graphsync and piece/bitswap data -type BlockServiceSubmodule struct { - // Blockservice is a higher level interface for fetching data - Blockservice bserv.BlockService -} - -// NewBlockserviceSubmodule creates a new block service submodule. 
-func NewBlockserviceSubmodule(ctx context.Context, blockstore *BlockstoreSubmodule, network *NetworkSubmodule) (BlockServiceSubmodule, error) { - bservice := bserv.New(blockstore.Blockstore, network.Bitswap) - - return BlockServiceSubmodule{ - Blockservice: bservice, - }, nil -} diff --git a/internal/app/go-filecoin/internal/submodule/blockstore_submodule.go b/internal/app/go-filecoin/internal/submodule/blockstore_submodule.go deleted file mode 100644 index a33ae7536b..0000000000 --- a/internal/app/go-filecoin/internal/submodule/blockstore_submodule.go +++ /dev/null @@ -1,41 +0,0 @@ -package submodule - -import ( - "context" - - ds "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" - - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" -) - -// BlockstoreSubmodule enhances the `Node` with local key/value storing capabilities. -// -// TODO: split chain data from piece data (issue: https://github.com/filecoin-project/go-filecoin/issues/3481) -// Note: at present: -// - `Blockstore` is shared by chain/graphsync and piece/bitswap data -// - `cborStore` is used for chain state and shared with piece data exchange for deals at the moment. -type BlockstoreSubmodule struct { - // Blockstore is the un-networked blocks interface - Blockstore bstore.Blockstore - - // cborStore is a wrapper for a `cbor.IpldStore` that works on the local IPLD-Cbor objects stored in `Blockstore`. - CborStore *cborutil.IpldStore -} - -type blockstoreRepo interface { - Datastore() ds.Batching -} - -// NewBlockstoreSubmodule creates a new block store submodule. 
-func NewBlockstoreSubmodule(ctx context.Context, repo blockstoreRepo) (BlockstoreSubmodule, error) { - // set up block store - bs := bstore.NewBlockstore(repo.Datastore()) - // setup a ipldCbor on top of the local store - ipldCborStore := cborutil.NewIpldStore(bs) - - return BlockstoreSubmodule{ - Blockstore: bs, - CborStore: ipldCborStore, - }, nil -} diff --git a/internal/app/go-filecoin/internal/submodule/chain_submodule.go b/internal/app/go-filecoin/internal/submodule/chain_submodule.go deleted file mode 100644 index 73efd57bac..0000000000 --- a/internal/app/go-filecoin/internal/submodule/chain_submodule.go +++ /dev/null @@ -1,82 +0,0 @@ -package submodule - -import ( - "context" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cst" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/slashing" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor/builtin" - "github.com/filecoin-project/go-filecoin/internal/pkg/vmsupport" -) - -// ChainSubmodule enhances the `Node` with chain capabilities. 
-type ChainSubmodule struct { - ChainReader *chain.Store - MessageStore *chain.MessageStore - State *cst.ChainStateReadWriter - - Sampler *chain.Sampler - ActorState *appstate.TipSetStateViewer - Processor *consensus.DefaultProcessor - - StatusReporter *chain.StatusReporter -} - -// xxx go back to using an interface here -/*type nodeChainReader interface { - GenesisCid() cid.Cid - GetHead() block.TipSetKey - GetTipSet(block.TipSetKey) (block.TipSet, error) - GetTipSetState(ctx context.Context, tsKey block.TipSetKey) (state.Tree, error) - GetTipSetStateRoot(tsKey block.TipSetKey) (cid.Cid, error) - GetTipSetReceiptsRoot(tsKey block.TipSetKey) (cid.Cid, error) - HeadEvents() *ps.PubSub - Load(context.Context) error - Stop() -} -*/ -type chainRepo interface { - ChainDatastore() repo.Datastore -} - -type chainConfig interface { - GenesisCid() cid.Cid -} - -// NewChainSubmodule creates a new chain submodule. -func NewChainSubmodule(config chainConfig, repo chainRepo, blockstore *BlockstoreSubmodule, verifier *ProofVerificationSubmodule) (ChainSubmodule, error) { - // initialize chain store - chainStatusReporter := chain.NewStatusReporter() - chainStore := chain.NewStore(repo.ChainDatastore(), blockstore.CborStore, chainStatusReporter, config.GenesisCid()) - - actorState := appstate.NewTipSetStateViewer(chainStore, blockstore.CborStore) - messageStore := chain.NewMessageStore(blockstore.Blockstore) - chainState := cst.NewChainStateReadWriter(chainStore, messageStore, blockstore.Blockstore, builtin.DefaultActors) - faultChecker := slashing.NewFaultChecker(chainState) - syscalls := vmsupport.NewSyscalls(faultChecker, verifier.ProofVerifier) - processor := consensus.NewDefaultProcessor(syscalls, chainState) - - return ChainSubmodule{ - ChainReader: chainStore, - MessageStore: messageStore, - ActorState: actorState, - State: chainState, - Processor: processor, - StatusReporter: chainStatusReporter, - }, nil -} - -type chainNode interface { - Chain() ChainSubmodule -} - -// 
Start loads the chain from disk. -func (c *ChainSubmodule) Start(ctx context.Context, node chainNode) error { - return node.Chain().ChainReader.Load(ctx) -} diff --git a/internal/app/go-filecoin/internal/submodule/discovery_submodule.go b/internal/app/go-filecoin/internal/submodule/discovery_submodule.go deleted file mode 100644 index df5f6a70d6..0000000000 --- a/internal/app/go-filecoin/internal/submodule/discovery_submodule.go +++ /dev/null @@ -1,109 +0,0 @@ -package submodule - -import ( - "context" - "time" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/discovery" - "github.com/filecoin-project/go-filecoin/internal/pkg/net" - "github.com/filecoin-project/go-filecoin/internal/pkg/util/moresync" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/pkg/errors" -) - -var log = logging.Logger("node") // nolint: deadcode - -// DiscoverySubmodule enhances the `Node` with peer discovery capabilities. -type DiscoverySubmodule struct { - Bootstrapper *discovery.Bootstrapper - BootstrapReady *moresync.Latch - - // PeerTracker maintains a list of peers. - PeerTracker *discovery.PeerTracker - - // HelloHandler handle peer connections for the "hello" protocol. - HelloHandler *discovery.HelloProtocolHandler -} - -type discoveryConfig interface { - GenesisCid() cid.Cid -} - -// NewDiscoverySubmodule creates a new discovery submodule. 
-func NewDiscoverySubmodule(ctx context.Context, config discoveryConfig, bsConfig *config.BootstrapConfig, network *NetworkSubmodule) (DiscoverySubmodule, error) { - periodStr := bsConfig.Period - period, err := time.ParseDuration(periodStr) - if err != nil { - return DiscoverySubmodule{}, errors.Wrapf(err, "couldn't parse bootstrap period %s", periodStr) - } - - // bootstrapper maintains connections to some subset of addresses - ba := bsConfig.Addresses - bpi, err := net.PeerAddrsToAddrInfo(ba) - if err != nil { - return DiscoverySubmodule{}, errors.Wrapf(err, "couldn't parse bootstrap addresses [%s]", ba) - } - - minPeerThreshold := bsConfig.MinPeerThreshold - - // create a bootstrapper - bootstrapper := discovery.NewBootstrapper(bpi, network.Host, network.Host.Network(), network.Router, minPeerThreshold, period) - - // set up peer tracking - peerTracker := discovery.NewPeerTracker(network.Host.ID()) - - return DiscoverySubmodule{ - Bootstrapper: bootstrapper, - BootstrapReady: moresync.NewLatch(uint(minPeerThreshold)), - PeerTracker: peerTracker, - HelloHandler: discovery.NewHelloProtocolHandler(network.Host, config.GenesisCid(), network.NetworkName), - }, nil -} - -type discoveryNode interface { - Network() NetworkSubmodule - Chain() ChainSubmodule - Syncer() SyncerSubmodule -} - -// Start starts the discovery submodule for a node. It blocks until bootstrap -// satisfies the configured security conditions. -func (m *DiscoverySubmodule) Start(node discoveryNode) error { - // Start bootstrapper. - m.Bootstrapper.Start(context.Background()) - - // Register peer tracker disconnect function with network. 
- m.PeerTracker.RegisterDisconnect(node.Network().Host.Network()) - - // Start up 'hello' handshake service - peerDiscoveredCallback := func(ci *block.ChainInfo) { - m.PeerTracker.Track(ci) - m.BootstrapReady.Done() - err := node.Syncer().ChainSyncManager.BlockProposer().SendHello(ci) - if err != nil { - log.Errorf("error receiving chain info from hello %s: %s", ci, err) - return - } - } - - // chain head callback - chainHeadCallback := func() (block.TipSet, error) { - return node.Chain().State.GetTipSet(node.Chain().State.Head()) - } - - // Register the "hello" protocol with the network - m.HelloHandler.Register(peerDiscoveredCallback, chainHeadCallback) - - // Wait for bootstrap to be sufficient connected - m.BootstrapReady.Wait() - - return nil -} - -// Stop stops the discovery submodule. -func (m *DiscoverySubmodule) Stop() { - m.Bootstrapper.Stop() -} diff --git a/internal/app/go-filecoin/internal/submodule/libp2p.go b/internal/app/go-filecoin/internal/submodule/libp2p.go deleted file mode 100644 index f83bf7cb37..0000000000 --- a/internal/app/go-filecoin/internal/submodule/libp2p.go +++ /dev/null @@ -1,144 +0,0 @@ -package submodule - -import ( - "context" - - "github.com/jbenet/goprocess" - "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/event" - net "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/peerstore" - "github.com/libp2p/go-libp2p-core/protocol" - pstoremem "github.com/libp2p/go-libp2p-peerstore/pstoremem" - multiaddr "github.com/multiformats/go-multiaddr" - errors "github.com/pkg/errors" -) - -type noopLibP2PHost struct{} - -func (noopLibP2PHost) ID() peer.ID { - return "" -} - -func (noopLibP2PHost) Peerstore() peerstore.Peerstore { - return pstoremem.NewPeerstore() -} - -func (noopLibP2PHost) Addrs() []multiaddr.Multiaddr { - return []multiaddr.Multiaddr{} -} - -func (noopLibP2PHost) EventBus() event.Bus { - panic("NYI") -} - -func 
(noopLibP2PHost) Network() net.Network { - return noopLibP2PNetwork{} -} - -func (noopLibP2PHost) Mux() protocol.Switch { - panic("implement me") -} - -func (noopLibP2PHost) Connect(ctx context.Context, pi peer.AddrInfo) error { - return errors.New("Connect called on noopLibP2PHost") -} - -func (noopLibP2PHost) SetStreamHandler(pid protocol.ID, handler net.StreamHandler) { - -} - -func (noopLibP2PHost) SetStreamHandlerMatch(protocol.ID, func(string) bool, net.StreamHandler) { - -} - -func (noopLibP2PHost) RemoveStreamHandler(pid protocol.ID) { - panic("implement me") -} - -func (noopLibP2PHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (net.Stream, error) { - return nil, errors.New("NewStream on noopLibP2PHost") -} - -func (noopLibP2PHost) Close() error { - return nil -} - -func (noopLibP2PHost) ConnManager() connmgr.ConnManager { - return &connmgr.NullConnMgr{} -} - -type noopLibP2PNetwork struct{} - -func (noopLibP2PNetwork) Peerstore() peerstore.Peerstore { - panic("implement me") -} - -func (noopLibP2PNetwork) LocalPeer() peer.ID { - panic("implement me") -} - -func (noopLibP2PNetwork) DialPeer(context.Context, peer.ID) (net.Conn, error) { - panic("implement me") -} - -func (noopLibP2PNetwork) ClosePeer(peer.ID) error { - panic("implement me") -} - -func (noopLibP2PNetwork) Connectedness(peer.ID) net.Connectedness { - panic("implement me") -} - -func (noopLibP2PNetwork) Peers() []peer.ID { - return []peer.ID{} -} - -func (noopLibP2PNetwork) Conns() []net.Conn { - return []net.Conn{} -} - -func (noopLibP2PNetwork) ConnsToPeer(p peer.ID) []net.Conn { - return []net.Conn{} -} - -func (noopLibP2PNetwork) Notify(net.Notifiee) { - -} - -func (noopLibP2PNetwork) StopNotify(net.Notifiee) { - panic("implement me") -} - -func (noopLibP2PNetwork) Close() error { - panic("implement me") -} - -func (noopLibP2PNetwork) SetStreamHandler(net.StreamHandler) { - panic("implement me") -} - -func (noopLibP2PNetwork) SetConnHandler(net.ConnHandler) { - 
panic("implement me") -} - -func (noopLibP2PNetwork) NewStream(context.Context, peer.ID) (net.Stream, error) { - panic("implement me") -} - -func (noopLibP2PNetwork) Listen(...multiaddr.Multiaddr) error { - panic("implement me") -} - -func (noopLibP2PNetwork) ListenAddresses() []multiaddr.Multiaddr { - panic("implement me") -} - -func (noopLibP2PNetwork) InterfaceListenAddresses() ([]multiaddr.Multiaddr, error) { - panic("implement me") -} - -func (noopLibP2PNetwork) Process() goprocess.Process { - panic("implement me") -} diff --git a/internal/app/go-filecoin/internal/submodule/messaging_submodule.go b/internal/app/go-filecoin/internal/submodule/messaging_submodule.go deleted file mode 100644 index be70c9a9d0..0000000000 --- a/internal/app/go-filecoin/internal/submodule/messaging_submodule.go +++ /dev/null @@ -1,71 +0,0 @@ -package submodule - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/journal" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/net/msgsub" - "github.com/filecoin-project/go-filecoin/internal/pkg/net/pubsub" -) - -// MessagingSubmodule enhances the `Node` with internal messaging capabilities. -type MessagingSubmodule struct { - // Incoming messages for block mining. - Inbox *message.Inbox - - // Messages sent and not yet mined. - Outbox *message.Outbox - - // Network Fields - MessageTopic *pubsub.Topic - MessageSub pubsub.Subscription - - MsgPool *message.Pool - MsgSigVal *consensus.MessageSignatureValidator -} - -type messagingConfig interface { - Journal() journal.Journal -} - -type messagingRepo interface { - Config() *config.Config -} - -// NewMessagingSubmodule creates a new discovery submodule. 
-func NewMessagingSubmodule(ctx context.Context, config messagingConfig, repo messagingRepo, network *NetworkSubmodule, chain *ChainSubmodule, wallet *WalletSubmodule) (MessagingSubmodule, error) { - msgSyntaxValidator := consensus.NewMessageSyntaxValidator() - msgSignatureValidator := consensus.NewMessageSignatureValidator(chain.State) - msgPool := message.NewPool(repo.Config().Mpool, msgSyntaxValidator) - inbox := message.NewInbox(msgPool, message.InboxMaxAgeTipsets, chain.ChainReader, chain.MessageStore) - - // setup messaging topic. - // register block validation on pubsub - mtv := msgsub.NewMessageTopicValidator(msgSyntaxValidator, msgSignatureValidator) - if err := network.pubsub.RegisterTopicValidator(mtv.Topic(network.NetworkName), mtv.Validator(), mtv.Opts()...); err != nil { - return MessagingSubmodule{}, errors.Wrap(err, "failed to register message validator") - } - topic, err := network.pubsub.Join(msgsub.Topic(network.NetworkName)) - if err != nil { - return MessagingSubmodule{}, err - } - - msgQueue := message.NewQueue() - outboxPolicy := message.NewMessageQueuePolicy(chain.MessageStore, message.OutboxMaxAgeRounds) - msgPublisher := message.NewDefaultPublisher(pubsub.NewTopic(topic), msgPool) - outbox := message.NewOutbox(wallet.Signer, msgSyntaxValidator, msgQueue, msgPublisher, outboxPolicy, chain.ChainReader, chain.State, config.Journal().Topic("outbox")) - - return MessagingSubmodule{ - Inbox: inbox, - Outbox: outbox, - MessageTopic: pubsub.NewTopic(topic), - // MessageSub: nil, - MsgPool: msgPool, - MsgSigVal: msgSignatureValidator, - }, nil -} diff --git a/internal/app/go-filecoin/internal/submodule/network_submodule.go b/internal/app/go-filecoin/internal/submodule/network_submodule.go deleted file mode 100644 index d3c1d46d21..0000000000 --- a/internal/app/go-filecoin/internal/submodule/network_submodule.go +++ /dev/null @@ -1,216 +0,0 @@ -package submodule - -import ( - "context" - "time" - - "github.com/ipfs/go-bitswap" - bsnet 
"github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - graphsync "github.com/ipfs/go-graphsync" - graphsyncimpl "github.com/ipfs/go-graphsync/impl" - gsnet "github.com/ipfs/go-graphsync/network" - gsstoreutil "github.com/ipfs/go-graphsync/storeutil" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - offroute "github.com/ipfs/go-ipfs-routing/offline" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p" - autonatsvc "github.com/libp2p/go-libp2p-autonat-svc" - circuit "github.com/libp2p/go-libp2p-circuit" - "github.com/libp2p/go-libp2p-core/host" - p2pmetrics "github.com/libp2p/go-libp2p-core/metrics" - "github.com/libp2p/go-libp2p-core/routing" - dht "github.com/libp2p/go-libp2p-kad-dht" - dhtopts "github.com/libp2p/go-libp2p-kad-dht/opts" - libp2pps "github.com/libp2p/go-libp2p-pubsub" - rhost "github.com/libp2p/go-libp2p/p2p/host/routed" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" - ma "github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/discovery" - "github.com/filecoin-project/go-filecoin/internal/pkg/net" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" -) - -// NetworkSubmodule enhances the `Node` with networking capabilities. -type NetworkSubmodule struct { - NetworkName string - - Host host.Host - - // Router is a router from IPFS - Router routing.Routing - - pubsub *libp2pps.PubSub - - // TODO: split chain bitswap from storage bitswap (issue: ???) 
- Bitswap exchange.Interface - - Network *net.Network - - GraphExchange graphsync.GraphExchange -} - -type blankValidator struct{} - -func (blankValidator) Validate(_ string, _ []byte) error { return nil } -func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil } - -type networkConfig interface { - GenesisCid() cid.Cid - OfflineMode() bool - IsRelay() bool - Libp2pOpts() []libp2p.Option -} - -type networkRepo interface { - Config() *config.Config - Datastore() ds.Batching -} - -// NewNetworkSubmodule creates a new network submodule. -func NewNetworkSubmodule(ctx context.Context, config networkConfig, repo networkRepo, blockstore *BlockstoreSubmodule) (NetworkSubmodule, error) { - bandwidthTracker := p2pmetrics.NewBandwidthCounter() - libP2pOpts := append(config.Libp2pOpts(), libp2p.BandwidthReporter(bandwidthTracker)) - - networkName, err := retrieveNetworkName(ctx, config.GenesisCid(), blockstore.CborStore) - if err != nil { - return NetworkSubmodule{}, err - } - - // set up host - var peerHost host.Host - var router routing.Routing - validator := blankValidator{} - var pubsubMessageSigning bool - if !config.OfflineMode() { - makeDHT := func(h host.Host) (routing.Routing, error) { - r, err := dht.New( - ctx, - h, - dhtopts.Datastore(repo.Datastore()), - dhtopts.NamespacedValidator("v", validator), - dhtopts.Protocols(net.FilecoinDHT(networkName)), - ) - if err != nil { - return nil, errors.Wrap(err, "failed to setup routing") - } - router = r - return r, err - } - - var err error - peerHost, err = buildHost(ctx, config, libP2pOpts, repo, makeDHT) - if err != nil { - return NetworkSubmodule{}, err - } - // require message signing in online mode when we have priv key - pubsubMessageSigning = true - } else { - router = offroute.NewOfflineRouter(repo.Datastore(), validator) - peerHost = rhost.Wrap(noopLibP2PHost{}, router) - pubsubMessageSigning = false - } - - // Set up libp2p network - // The gossipsub heartbeat timeout needs to be set 
sufficiently low - // to enable publishing on first connection. The default of one - // second is not acceptable for tests. - libp2pps.GossipSubHeartbeatInterval = 100 * time.Millisecond - gsub, err := libp2pps.NewGossipSub(ctx, peerHost, libp2pps.WithMessageSigning(pubsubMessageSigning), libp2pps.WithDiscovery(&discovery.NoopDiscovery{})) - if err != nil { - return NetworkSubmodule{}, errors.Wrap(err, "failed to set up network") - } - - // set up bitswap - nwork := bsnet.NewFromIpfsHost(peerHost, router) - //nwork := bsnet.NewFromIpfsHost(innerHost, router) - bswap := bitswap.New(ctx, nwork, blockstore.Blockstore) - - // set up pinger - pingService := ping.NewPingService(peerHost) - - // set up graphsync - graphsyncNetwork := gsnet.NewFromLibp2pHost(peerHost) - loader := gsstoreutil.LoaderForBlockstore(blockstore.Blockstore) - storer := gsstoreutil.StorerForBlockstore(blockstore.Blockstore) - gsync := graphsyncimpl.New(ctx, graphsyncNetwork, loader, storer, graphsyncimpl.RejectAllRequestsByDefault()) - - // build network - network := net.New(peerHost, net.NewRouter(router), bandwidthTracker, net.NewPinger(peerHost, pingService)) - // build the network submdule - return NetworkSubmodule{ - NetworkName: networkName, - Host: peerHost, - Router: router, - pubsub: gsub, - Bitswap: bswap, - GraphExchange: gsync, - Network: network, - }, nil -} - -func retrieveNetworkName(ctx context.Context, genCid cid.Cid, cborStore cbor.IpldStore) (string, error) { - var genesis block.Block - err := cborStore.Get(ctx, genCid, &genesis) - if err != nil { - return "", errors.Wrapf(err, "failed to get block %s", genCid.String()) - } - - return appstate.NewView(cborStore, genesis.StateRoot.Cid).InitNetworkName(ctx) -} - -// buildHost determines if we are publically dialable. If so use public -// Address, if not configure node to announce relay address. 
-func buildHost(ctx context.Context, config networkConfig, libP2pOpts []libp2p.Option, repo networkRepo, makeDHT func(host host.Host) (routing.Routing, error)) (host.Host, error) { - // Node must build a host acting as a libp2p relay. Additionally it - // runs the autoNAT service which allows other nodes to check for their - // own dialability by having this node attempt to dial them. - makeDHTRightType := func(h host.Host) (routing.PeerRouting, error) { - return makeDHT(h) - } - - if config.IsRelay() { - cfg := repo.Config() - publicAddr, err := ma.NewMultiaddr(cfg.Swarm.PublicRelayAddress) - if err != nil { - return nil, err - } - publicAddrFactory := func(lc *libp2p.Config) error { - lc.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr { - if cfg.Swarm.PublicRelayAddress == "" { - return addrs - } - return append(addrs, publicAddr) - } - return nil - } - relayHost, err := libp2p.New( - ctx, - libp2p.EnableRelay(circuit.OptHop), - libp2p.EnableAutoRelay(), - libp2p.Routing(makeDHTRightType), - publicAddrFactory, - libp2p.ChainOptions(libP2pOpts...), - ) - if err != nil { - return nil, err - } - // Set up autoNATService as a streamhandler on the host. - _, err = autonatsvc.NewAutoNATService(ctx, relayHost) - if err != nil { - return nil, err - } - return relayHost, nil - } - return libp2p.New( - ctx, - libp2p.EnableAutoRelay(), - libp2p.Routing(makeDHTRightType), - libp2p.ChainOptions(libP2pOpts...), - ) -} diff --git a/internal/app/go-filecoin/internal/submodule/proof_verification_submodule.go b/internal/app/go-filecoin/internal/submodule/proof_verification_submodule.go deleted file mode 100644 index d675e4deef..0000000000 --- a/internal/app/go-filecoin/internal/submodule/proof_verification_submodule.go +++ /dev/null @@ -1,17 +0,0 @@ -package submodule - -import ( - "github.com/filecoin-project/sector-storage/ffiwrapper" -) - -// ProofVerificationSubmodule adds proof verification capabilities to the node. 
-type ProofVerificationSubmodule struct { - ProofVerifier ffiwrapper.Verifier -} - -// NewProofVerificationSubmodule creates a new proof verification submodule. -func NewProofVerificationSubmodule(verifier ffiwrapper.Verifier) ProofVerificationSubmodule { - return ProofVerificationSubmodule{ - ProofVerifier: verifier, - } -} diff --git a/internal/app/go-filecoin/internal/submodule/retrieval_protocol_submodule.go b/internal/app/go-filecoin/internal/submodule/retrieval_protocol_submodule.go deleted file mode 100644 index fa48ec534b..0000000000 --- a/internal/app/go-filecoin/internal/submodule/retrieval_protocol_submodule.go +++ /dev/null @@ -1,77 +0,0 @@ -package submodule - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/piecestore" - iface "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery" - impl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-storedcounter" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/libp2p/go-libp2p-core/host" - - retmkt "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/retrieval_market" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cst" - "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" -) - -// RetrievalProviderDSPrefix is a prefix for all datastore keys related to the retrieval provider -const RetrievalProviderDSPrefix = "/retrievalmarket/provider" - -// RetrievalCounterDSKey is the datastore key for the stored counter used by the retrieval counter -const RetrievalCounterDSKey = "/retrievalmarket/client/counter" - -// RetrievalClientDSPrefix is a prefix for all datastore keys related to the retrieval clients -const 
RetrievalClientDSPrefix = "/retrievalmarket/client" - -// RetrievalProtocolSubmodule enhances the node with retrieval protocol -// capabilities. -type RetrievalProtocolSubmodule struct { - client iface.RetrievalClient - provider iface.RetrievalProvider -} - -// NewRetrievalProtocolSubmodule creates a new retrieval protocol submodule. -func NewRetrievalProtocolSubmodule( - bs blockstore.Blockstore, - ds datastore.Batching, - cr *cst.ChainStateReadWriter, - host host.Host, - providerAddr address.Address, - signer retmkt.RetrievalSigner, - pchMgrAPI retmkt.PaychMgrAPI, - pieceManager piecemanager.PieceManager, -) (*RetrievalProtocolSubmodule, error) { - - retrievalDealPieceStore := piecestore.NewPieceStore(namespace.Wrap(ds, datastore.NewKey(PieceStoreDSPrefix))) - - netwk := network.NewFromLibp2pHost(host) - pnode := retmkt.NewRetrievalProviderConnector(netwk, pieceManager, bs, pchMgrAPI, nil) - - marketProvider, err := impl.NewProvider(providerAddr, pnode, netwk, retrievalDealPieceStore, bs, namespace.Wrap(ds, datastore.NewKey(RetrievalProviderDSPrefix))) - if err != nil { - return nil, err - } - - cnode := retmkt.NewRetrievalClientConnector(bs, cr, signer, pchMgrAPI) - counter := storedcounter.New(ds, datastore.NewKey(RetrievalCounterDSKey)) - - resolver := discovery.Multi(discovery.NewLocal(namespace.Wrap(ds, datastore.NewKey(DiscoveryDSPrefix)))) - marketClient, err := impl.NewClient(netwk, bs, cnode, resolver, namespace.Wrap(ds, datastore.NewKey(RetrievalClientDSPrefix)), counter) - if err != nil { - return nil, err - } - - return &RetrievalProtocolSubmodule{marketClient, marketProvider}, nil -} - -func (rps *RetrievalProtocolSubmodule) Client() iface.RetrievalClient { - return rps.client -} - -func (rps *RetrievalProtocolSubmodule) Provider() iface.RetrievalProvider { - return rps.provider -} diff --git a/internal/app/go-filecoin/internal/submodule/storage_mining_submodule.go b/internal/app/go-filecoin/internal/submodule/storage_mining_submodule.go deleted file 
mode 100644 index 6c701d705e..0000000000 --- a/internal/app/go-filecoin/internal/submodule/storage_mining_submodule.go +++ /dev/null @@ -1,188 +0,0 @@ -package submodule - -import ( - "context" - "sync" - - "github.com/filecoin-project/go-address" - sectorstorage "github.com/filecoin-project/sector-storage" - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/sector-storage/stores" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - fsm "github.com/filecoin-project/storage-fsm" - "github.com/ipfs/go-datastore" - - fsmchain "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/fsm_chain" - fsmeventsconnector "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/fsm_events" - fsmnodeconnector "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/fsm_node" - fsmstorage "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/fsm_storage" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/sectors" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsampler" - "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" - "github.com/filecoin-project/go-filecoin/internal/pkg/poster" - "github.com/filecoin-project/go-filecoin/internal/pkg/postgenerator" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" -) - -// StorageMiningSubmodule enhances the `Node` with storage mining capabilities. -type StorageMiningSubmodule struct { - started bool - startedLk sync.RWMutex - - // StorageMining is used by the miner to fill and seal sectors. 
- PieceManager piecemanager.PieceManager - - // PoStGenerator generates election PoSts - PoStGenerator postgenerator.PoStGenerator - - hs *chainsampler.HeightThresholdScheduler - fsm *fsm.Sealing - poster *poster.Poster -} - -// NewStorageMiningSubmodule creates a new storage mining submodule. -func NewStorageMiningSubmodule( - minerAddr address.Address, - ds datastore.Batching, - c *ChainSubmodule, - m *MessagingSubmodule, - mw *msg.Waiter, - stateViewer *appstate.Viewer, - sealProofType abi.RegisteredProof, - r repo.Repo, - postGeneratorOverride postgenerator.PoStGenerator, -) (*StorageMiningSubmodule, error) { - chainThresholdScheduler := chainsampler.NewHeightThresholdScheduler(c.ChainReader) - - ccn := fsmchain.NewChainConnector(c.ChainReader) - - sdx := stores.NewIndex() - - fcg := ffiwrapper.Config{ - SealProofType: sealProofType, - } - - scg := sectorstorage.SealerConfig{AllowPreCommit1: true, AllowPreCommit2: true, AllowCommit: true} - - mgr, err := sectorstorage.New(context.TODO(), fsmstorage.NewRepoStorageConnector(r), sdx, &fcg, scg, []string{}, nil) - if err != nil { - return nil, err - } - - sid := sectors.NewPersistedSectorNumberCounter(ds) - - // FSM requires id address to work correctly. 
Resolve it now and hope it's stable - // - minerAddrID, err := resolveMinerAddress(context.TODO(), c, minerAddr, stateViewer) - if err != nil { - return nil, err - } - - ncn := fsmnodeconnector.New(minerAddrID, mw, c.ChainReader, c.ActorState, m.Outbox, c.State) - - ppStart, err := getMinerProvingPeriod(c, minerAddr, stateViewer) - if err != nil { - return nil, err - } - - pcp := fsm.NewBasicPreCommitPolicy(&ccn, abi.ChainEpoch(2*60*24), ppStart%miner.WPoStProvingPeriod) - - fsmConnector := fsmeventsconnector.New(chainThresholdScheduler, c.State) - fsm := fsm.New(ncn, fsmConnector, minerAddrID, ds, mgr, sid, ffiwrapper.ProofVerifier, &pcp) - - bke := piecemanager.NewFiniteStateMachineBackEnd(fsm, sid) - - modu := &StorageMiningSubmodule{ - PieceManager: &bke, - hs: chainThresholdScheduler, - fsm: fsm, - poster: poster.NewPoster(minerAddr, m.Outbox, mgr, c.State, stateViewer, mw), - } - - // allow the caller to provide a thing which generates fake PoSts - if postGeneratorOverride == nil { - modu.PoStGenerator = mgr.Prover - } else { - modu.PoStGenerator = postGeneratorOverride - } - - return modu, nil -} - -// Start starts the StorageMiningSubmodule -func (s *StorageMiningSubmodule) Start(ctx context.Context) error { - s.startedLk.Lock() - defer s.startedLk.Unlock() - - if s.started { - return nil - } - - err := s.fsm.Run(ctx) - if err != nil { - return err - } - - s.started = true - return nil -} - -// Stop stops the StorageMiningSubmodule -func (s *StorageMiningSubmodule) Stop(ctx context.Context) error { - s.startedLk.Lock() - defer s.startedLk.Unlock() - - if !s.started { - return nil - } - - err := s.fsm.Stop(ctx) - if err != nil { - return err - } - - s.poster.StopPoSting() - s.started = false - return nil -} - -// HandleNewHead submits a new chain head for possible fallback PoSt. 
-func (s *StorageMiningSubmodule) HandleNewHead(ctx context.Context, newHead block.TipSet) error { - s.startedLk.RLock() - defer s.startedLk.RUnlock() - - if !s.started { - return nil - } - - err := s.hs.HandleNewTipSet(ctx, newHead) - if err != nil { - return err - } - - return s.poster.HandleNewHead(ctx, newHead) -} - -func getMinerProvingPeriod(c *ChainSubmodule, minerAddr address.Address, viewer *appstate.Viewer) (abi.ChainEpoch, error) { - tsk := c.ChainReader.GetHead() - root, err := c.ChainReader.GetTipSetStateRoot(tsk) - if err != nil { - return 0, err - } - view := viewer.StateView(root) - return view.MinerProvingPeriodStart(context.Background(), minerAddr) -} - -func resolveMinerAddress(ctx context.Context, c *ChainSubmodule, minerAddr address.Address, viewer *appstate.Viewer) (address.Address, error) { - tsk := c.ChainReader.GetHead() - root, err := c.ChainReader.GetTipSetStateRoot(tsk) - if err != nil { - return address.Undef, err - } - view := viewer.StateView(root) - return view.InitResolveAddress(ctx, minerAddr) -} diff --git a/internal/app/go-filecoin/internal/submodule/storage_networking_submodule.go b/internal/app/go-filecoin/internal/submodule/storage_networking_submodule.go deleted file mode 100644 index 438d5957bf..0000000000 --- a/internal/app/go-filecoin/internal/submodule/storage_networking_submodule.go +++ /dev/null @@ -1,20 +0,0 @@ -package submodule - -import ( - "context" - - exchange "github.com/ipfs/go-ipfs-exchange-interface" -) - -// StorageNetworkingSubmodule enhances the `Node` with data transfer capabilities. -type StorageNetworkingSubmodule struct { - // Exchange is the interface for fetching data from other nodes. - Exchange exchange.Interface -} - -// NewStorgeNetworkingSubmodule creates a new storage networking submodule. 
-func NewStorgeNetworkingSubmodule(ctx context.Context, network *NetworkSubmodule) (StorageNetworkingSubmodule, error) { - return StorageNetworkingSubmodule{ - Exchange: network.Bitswap, - }, nil -} diff --git a/internal/app/go-filecoin/internal/submodule/storage_protocol_submodule.go b/internal/app/go-filecoin/internal/submodule/storage_protocol_submodule.go deleted file mode 100644 index c2aa86c1a9..0000000000 --- a/internal/app/go-filecoin/internal/submodule/storage_protocol_submodule.go +++ /dev/null @@ -1,171 +0,0 @@ -package submodule - -import ( - "context" - "os" - - "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/go-storedcounter" - - "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer" - graphsyncimpl "github.com/filecoin-project/go-data-transfer/impl/graphsync" - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery" - iface "github.com/filecoin-project/go-fil-markets/storagemarket" - impl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - smvalid "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" - smnetwork "github.com/filecoin-project/go-fil-markets/storagemarket/network" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - "github.com/ipfs/go-graphsync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/libp2p/go-libp2p-core/host" - "github.com/pkg/errors" - - storagemarketconnector "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/storage_market" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// DiscoveryDSPrefix is a prefix for all datastore keys used by the local -const DiscoveryDSPrefix = "/deals/local" - -// ClientDSPrefix is a prefix for all datastore keys used by a storage client -const ClientDSPrefix = "/deals/client" - -// ProviderDSPrefix is a prefix for all datastore keys used by the storage provider -const ProviderDSPrefix = "/deals/provider" - -// DTCounterDSKey is the datastore key for the stored counter used by data transfer -const DTCounterDSKey = "/datatransfer/counter" - -// PieceStoreDSPrefix is a prefix for all datastore keys used by the piecestore -const PieceStoreDSPrefix = "/piecestore" - -// AskDSKey is the datastore key for the stored ask used by the storage provider -const AskDSKey = "/deals/latest-ask" - -// StorageProtocolSubmodule enhances the node with storage protocol -// capabilities. -type StorageProtocolSubmodule struct { - StorageClient iface.StorageClient - StorageProvider iface.StorageProvider - dataTransfer datatransfer.Manager - requestValidator *smvalid.UnifiedRequestValidator - pieceManager piecemanager.PieceManager -} - -// NewStorageProtocolSubmodule creates a new storage protocol submodule. 
-func NewStorageProtocolSubmodule( - ctx context.Context, - clientAddr storagemarketconnector.ClientAddressGetter, - c *ChainSubmodule, - m *MessagingSubmodule, - mw *msg.Waiter, - s types.Signer, - h host.Host, - ds datastore.Batching, - bs blockstore.Blockstore, - gsync graphsync.GraphExchange, - stateViewer *appstate.Viewer, -) (*StorageProtocolSubmodule, error) { - cnode := storagemarketconnector.NewStorageClientNodeConnector(cborutil.NewIpldStore(bs), c.State, mw, s, m.Outbox, clientAddr, stateViewer) - dtStoredCounter := storedcounter.New(ds, datastore.NewKey(DTCounterDSKey)) - dt := graphsyncimpl.NewGraphSyncDataTransfer(h, gsync, dtStoredCounter) - clientDs := namespace.Wrap(ds, datastore.NewKey(ClientDSPrefix)) - validator := smvalid.NewUnifiedRequestValidator(nil, statestore.New(clientDs)) - err := dt.RegisterVoucherType(&smvalid.StorageDataTransferVoucher{}, validator) - if err != nil { - return nil, err - } - - local := discovery.NewLocal(namespace.Wrap(ds, datastore.NewKey(DiscoveryDSPrefix))) - client, err := impl.NewClient(smnetwork.NewFromLibp2pHost(h), bs, dt, local, clientDs, cnode) - if err != nil { - return nil, errors.Wrap(err, "error creating storage client") - } - - sm := &StorageProtocolSubmodule{ - StorageClient: client, - dataTransfer: dt, - requestValidator: validator, - } - sm.StorageClient.SubscribeToEvents(cnode.EventLogger) - return sm, nil -} - -func (sm *StorageProtocolSubmodule) AddStorageProvider( - ctx context.Context, - minerAddr address.Address, - c *ChainSubmodule, - m *MessagingSubmodule, - mw *msg.Waiter, - pm piecemanager.PieceManager, - s types.Signer, - h host.Host, - ds datastore.Batching, - bs blockstore.Blockstore, - gsync graphsync.GraphExchange, - repoPath string, - sealProofType abi.RegisteredProof, - stateViewer *appstate.Viewer, -) error { - sm.pieceManager = pm - - pnode := storagemarketconnector.NewStorageProviderNodeConnector(minerAddr, c.State, m.Outbox, mw, pm, s, stateViewer) - - pieceStagingPath, err := 
paths.PieceStagingDir(repoPath) - if err != nil { - return err - } - - // ensure pieces directory exists - err = os.MkdirAll(pieceStagingPath, 0700) - if err != nil { - return err - } - - fs, err := filestore.NewLocalFileStore(filestore.OsPath(pieceStagingPath)) - if err != nil { - return err - } - - providerDs := namespace.Wrap(ds, datastore.NewKey(ProviderDSPrefix)) - sm.requestValidator.SetPushDeals(statestore.New(providerDs)) - ps := piecestore.NewPieceStore(namespace.Wrap(ds, datastore.NewKey(PieceStoreDSPrefix))) - storedAsk, err := storedask.NewStoredAsk(ds, datastore.NewKey(AskDSKey), pnode, minerAddr) - if err != nil { - return err - } - sm.StorageProvider, err = impl.NewProvider(smnetwork.NewFromLibp2pHost(h), providerDs, bs, fs, ps, sm.dataTransfer, pnode, minerAddr, sealProofType, storedAsk) - if err == nil { - sm.StorageProvider.SubscribeToEvents(pnode.EventLogger) - } - return err -} - -func (sm *StorageProtocolSubmodule) Provider() (iface.StorageProvider, error) { - if sm.StorageProvider == nil { - return nil, errors.New("Mining has not been started so storage provider is not available") - } - return sm.StorageProvider, nil -} - -func (sm *StorageProtocolSubmodule) Client() iface.StorageClient { - return sm.StorageClient -} - -func (sm *StorageProtocolSubmodule) PieceManager() (piecemanager.PieceManager, error) { - if sm.StorageProvider == nil { - return nil, errors.New("Mining has not been started so piece manager is not available") - } - return sm.pieceManager, nil -} diff --git a/internal/app/go-filecoin/internal/submodule/syncer_submodule.go b/internal/app/go-filecoin/internal/submodule/syncer_submodule.go deleted file mode 100644 index 7db886e006..0000000000 --- a/internal/app/go-filecoin/internal/submodule/syncer_submodule.go +++ /dev/null @@ -1,141 +0,0 @@ -package submodule - -import ( - "context" - "time" - - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-graphsync" - 
"github.com/libp2p/go-libp2p-core/peer" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/fetcher" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/net/blocksub" - "github.com/filecoin-project/go-filecoin/internal/pkg/net/pubsub" - "github.com/filecoin-project/go-filecoin/internal/pkg/slashing" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" -) - -// SyncerSubmodule enhances the node with chain syncing capabilities -type SyncerSubmodule struct { - BlockTopic *pubsub.Topic - BlockSub pubsub.Subscription - ChainSelector nodeChainSelector - Consensus consensus.Protocol - FaultDetector slashing.ConsensusFaultDetector - ChainSyncManager *chainsync.Manager - Drand drand.IFace - - // cancelChainSync cancels the context for chain sync subscriptions and handlers. - CancelChainSync context.CancelFunc - // faultCh receives detected consensus faults - faultCh chan slashing.ConsensusFault -} - -type syncerConfig interface { - GenesisCid() cid.Cid - BlockTime() time.Duration - ChainClock() clock.ChainEpochClock - Drand() drand.IFace -} - -type nodeChainSelector interface { - Weight(context.Context, block.TipSet, cid.Cid) (fbig.Int, error) - IsHeavier(ctx context.Context, a, b block.TipSet, aStateID, bStateID cid.Cid) (bool, error) -} - -// NewSyncerSubmodule creates a new chain submodule. 
-func NewSyncerSubmodule(ctx context.Context, config syncerConfig, blockstore *BlockstoreSubmodule, network *NetworkSubmodule, - discovery *DiscoverySubmodule, chn *ChainSubmodule, postVerifier consensus.EPoStVerifier) (SyncerSubmodule, error) { - // setup validation - blkValid := consensus.NewDefaultBlockValidator(config.ChainClock(), chn.MessageStore, chn.State) - msgValid := consensus.NewMessageSyntaxValidator() - syntax := consensus.WrappedSyntaxValidator{ - BlockSyntaxValidator: blkValid, - MessageSyntaxValidator: msgValid, - } - - // register block validation on pubsub - btv := blocksub.NewBlockTopicValidator(blkValid) - if err := network.pubsub.RegisterTopicValidator(btv.Topic(network.NetworkName), btv.Validator(), btv.Opts()...); err != nil { - return SyncerSubmodule{}, errors.Wrap(err, "failed to register block validator") - } - - // setup topic. - topic, err := network.pubsub.Join(blocksub.Topic(network.NetworkName)) - if err != nil { - return SyncerSubmodule{}, err - } - - genBlk, err := chn.ChainReader.GetGenesisBlock(ctx) - if err != nil { - return SyncerSubmodule{}, errors.Wrap(err, "failed to locate genesis block during node build") - } - - // setup default drand - d := config.Drand() - - // set up consensus - elections := consensus.NewElectionMachine(chn.State) - sampler := chain.NewSampler(chn.ChainReader, genBlk.Ticket) - tickets := consensus.NewTicketMachine(sampler) - stateViewer := consensus.AsDefaultStateViewer(state.NewViewer(blockstore.CborStore)) - nodeConsensus := consensus.NewExpected(blockstore.CborStore, blockstore.Blockstore, chn.Processor, &stateViewer, - config.BlockTime(), elections, tickets, postVerifier, chn.ChainReader, config.ChainClock(), d) - nodeChainSelector := consensus.NewChainSelector(blockstore.CborStore, &stateViewer, config.GenesisCid()) - - // setup fecher - network.GraphExchange.RegisterIncomingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) { - _, has 
:= requestData.Extension(fetcher.ChainsyncProtocolExtension) - if has { - // TODO: Don't just validate every request with the extension -- support only known selectors - // TODO: use separate block store for the chain (supported in GraphSync) - hookActions.ValidateRequest() - } - }) - fetcher := fetcher.NewGraphSyncFetcher(ctx, network.GraphExchange, blockstore.Blockstore, syntax, config.ChainClock(), discovery.PeerTracker) - faultCh := make(chan slashing.ConsensusFault) - faultDetector := slashing.NewConsensusFaultDetector(faultCh) - - chainSyncManager, err := chainsync.NewManager(nodeConsensus, blkValid, nodeChainSelector, chn.ChainReader, chn.MessageStore, fetcher, config.ChainClock(), faultDetector) - if err != nil { - return SyncerSubmodule{}, err - } - - return SyncerSubmodule{ - BlockTopic: pubsub.NewTopic(topic), - // BlockSub: nil, - Consensus: nodeConsensus, - ChainSelector: nodeChainSelector, - ChainSyncManager: &chainSyncManager, - Drand: d, - // cancelChainSync: nil, - faultCh: faultCh, - }, nil -} - -type syncerNode interface { -} - -// Start starts the syncer submodule for a node. 
-func (s *SyncerSubmodule) Start(ctx context.Context, _node syncerNode) error { - go func() { - for { - select { - case <-ctx.Done(): - return - case <-s.faultCh: - // TODO #3690 connect this up to a slasher that sends messages - // to outbound queue to carry out penalization - } - } - }() - return s.ChainSyncManager.Start(ctx) -} diff --git a/internal/app/go-filecoin/internal/submodule/wallet_submodule.go b/internal/app/go-filecoin/internal/submodule/wallet_submodule.go deleted file mode 100644 index 5651089101..0000000000 --- a/internal/app/go-filecoin/internal/submodule/wallet_submodule.go +++ /dev/null @@ -1,35 +0,0 @@ -package submodule - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" - "github.com/pkg/errors" -) - -// WalletSubmodule enhances the `Node` with a "Wallet" and FIL transfer capabilities. -type WalletSubmodule struct { - Wallet *wallet.Wallet - Signer types.Signer -} - -type walletRepo interface { - WalletDatastore() repo.Datastore -} - -// NewWalletSubmodule creates a new storage protocol submodule. 
-func NewWalletSubmodule(ctx context.Context, repo walletRepo, chain *ChainSubmodule) (WalletSubmodule, error) { - backend, err := wallet.NewDSBackend(repo.WalletDatastore()) - if err != nil { - return WalletSubmodule{}, errors.Wrap(err, "failed to set up wallet backend") - } - fcWallet := wallet.New(backend) - - return WalletSubmodule{ - Wallet: fcWallet, - Signer: state.NewSigner(chain.ActorState, chain.ChainReader, fcWallet), - }, nil -} diff --git a/internal/app/go-filecoin/node/block.go b/internal/app/go-filecoin/node/block.go deleted file mode 100644 index fe9a23f8aa..0000000000 --- a/internal/app/go-filecoin/node/block.go +++ /dev/null @@ -1,85 +0,0 @@ -package node - -import ( - "context" - - "github.com/pkg/errors" - "go.opencensus.io/trace" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics/tracing" - "github.com/filecoin-project/go-filecoin/internal/pkg/mining" - "github.com/filecoin-project/go-filecoin/internal/pkg/net/blocksub" - "github.com/filecoin-project/go-filecoin/internal/pkg/net/pubsub" -) - -// AddNewBlock receives a newly mined block and stores, validates and propagates it to the network. -func (node *Node) AddNewBlock(ctx context.Context, o mining.FullBlock) (err error) { - b := o.Header - ctx, span := trace.StartSpan(ctx, "Node.AddNewBlock") - span.AddAttributes(trace.StringAttribute("block", b.Cid().String())) - defer tracing.AddErrorEndSpan(ctx, span, &err) - - // Put block in storage wired to an exchange so this node and other - // nodes can fetch it. 
- log.Debugf("putting block in bitswap exchange: %s", b.Cid().String()) - blkCid, err := node.Blockstore.CborStore.Put(ctx, b) - if err != nil { - return errors.Wrap(err, "could not add new block to online storage") - } - - // Publish blocksub message - log.Debugf("publishing new block: %s", b.Cid().String()) - go func() { - payload, err := blocksub.MakePayload(o.Header, o.BLSMessages, o.SECPMessages) - if err != nil { - log.Errorf("failed to create blocksub payload: %s", err) - } - err = node.syncer.BlockTopic.Publish(ctx, payload) - if err != nil { - log.Errorf("failed to publish on blocksub: %s", err) - } - }() - - log.Debugf("syncing new block: %s", b.Cid().String()) - ci := block.NewChainInfo(node.Host().ID(), node.Host().ID(), block.NewTipSetKey(blkCid), b.Height) - return node.syncer.ChainSyncManager.BlockProposer().SendOwnBlock(ci) -} - -func (node *Node) handleBlockSub(ctx context.Context, msg pubsub.Message) (err error) { - sender := msg.GetSender() - source := msg.GetSource() - // ignore messages from self - if sender == node.Host().ID() || source == node.Host().ID() { - return nil - } - - ctx, span := trace.StartSpan(ctx, "Node.handleBlockSub") - defer tracing.AddErrorEndSpan(ctx, span, &err) - - var payload blocksub.Payload - err = encoding.Decode(msg.GetData(), &payload) - if err != nil { - return errors.Wrapf(err, "failed to decode blocksub payload from source: %s, sender: %s", source, sender) - } - - header := &payload.Header - span.AddAttributes(trace.StringAttribute("block", header.Cid().String())) - log.Infof("Received new block %s from peer %s", header.Cid(), sender) - log.Debugf("Received new block sender: %s source: %s, %s", sender, source, header) - - // The block we went to all that effort decoding is dropped on the floor! - // Don't be too quick to change that, though: the syncer re-fetching the block - // is currently critical to reliable validation. 
- // See https://github.com/filecoin-project/go-filecoin/issues/2962 - // TODO Implement principled trusting of ChainInfo's - // to address in #2674 - chainInfo := block.NewChainInfo(source, sender, block.NewTipSetKey(header.Cid()), header.Height) - err = node.syncer.ChainSyncManager.BlockProposer().SendGossipBlock(chainInfo) - if err != nil { - return errors.Wrapf(err, "failed to notify syncer of new block, block: %s", header.Cid()) - } - - return nil -} diff --git a/internal/app/go-filecoin/node/block_propagate_test.go b/internal/app/go-filecoin/node/block_propagate_test.go deleted file mode 100644 index b0881e9e40..0000000000 --- a/internal/app/go-filecoin/node/block_propagate_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package node_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/filecoin-project/go-address" - specsbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - . 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/proofs" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/version" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -func TestBlockPropsManyNodes(t *testing.T) { - tf.IntegrationTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - numNodes := 4 - _, nodes, fakeClock, blockTime := makeNodesBlockPropTests(t, numNodes) - - StartNodes(t, nodes) - defer StopNodes(nodes) - - minerNode := nodes[0] - - ConnectNodes(t, minerNode, nodes[1]) - ConnectNodes(t, nodes[1], nodes[2]) - ConnectNodes(t, nodes[2], nodes[3]) - - // Advance node's time so that it is epoch 1 - fakeClock.Advance(blockTime) - nextBlk, err := minerNode.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - // Wait for network connection notifications to propagate - time.Sleep(time.Millisecond * 100) - - equal := false - for i := 0; i < 30; i++ { - for j := 1; j < numNodes; j++ { - otherHead := nodes[j].PorcelainAPI.ChainHeadKey() - assert.NotNil(t, otherHead) - equal = otherHead.ToSlice()[0].Equals(nextBlk.Cid()) - if equal { - break - } - time.Sleep(time.Millisecond * 20) - } - } - - assert.True(t, equal, "failed to sync chains") -} - -func TestChainSyncA(t *testing.T) { - tf.IntegrationTest(t) - - ctx := context.Background() - _, nodes, fakeClock, blockTime := 
makeNodesBlockPropTests(t, 2) - - StartNodes(t, nodes) - defer StopNodes(nodes) - - ConnectNodes(t, nodes[0], nodes[1]) - - fakeClock.Advance(blockTime) - _, err := nodes[0].BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - fakeClock.Advance(blockTime) - _, err = nodes[0].BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - fakeClock.Advance(blockTime) - thirdBlock, err := nodes[0].BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - equal := false - for i := 0; i < 30; i++ { - otherHead := nodes[1].PorcelainAPI.ChainHeadKey() - assert.NotNil(t, otherHead) - equal = otherHead.ToSlice()[0].Equals(thirdBlock.Cid()) - if equal { - break - } - time.Sleep(time.Millisecond * 50) - } - - assert.True(t, equal, "failed to sync chains") -} - -func TestChainSyncWithMessages(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - /* setup */ - // genesis has two accounts - genCfg := &gengen.GenesisCfg{} - require.NoError(t, gengen.MinerConfigs(MakeTestGenCfg(t, 1).Miners)(genCfg)) - require.NoError(t, gengen.GenKeys(3, "1000000")(genCfg)) - require.NoError(t, gengen.NetworkName(version.TEST)(genCfg)) - cs := MakeChainSeed(t, genCfg) - genUnixSeconds := int64(1234567890) - genTime := time.Unix(genUnixSeconds, 0) - fakeClock := clock.NewFake(genTime) - blockTime := 30 * time.Second - propDelay := 6 * time.Second - c := clock.NewChainClockFromClock(uint64(genUnixSeconds), blockTime, propDelay, fakeClock) - - // first node is the message sender. - builder1 := test.NewNodeBuilder(t). - WithBuilderOpt(ChainClockConfigOption(c)). - WithGenesisInit(cs.GenesisInitFunc). - WithBuilderOpt(VerifierConfigOption(&proofs.FakeVerifier{})). - WithBuilderOpt(MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)). 
- WithBuilderOpt(DrandConfigOption(drand.NewFake(genTime))) - nodeSend := builder1.Build(ctx) - senderAddress := cs.GiveKey(t, nodeSend, 1) - - // second node is receiver - builder2 := test.NewNodeBuilder(t). - WithBuilderOpt(ChainClockConfigOption(c)). - WithGenesisInit(cs.GenesisInitFunc). - WithBuilderOpt(VerifierConfigOption(&proofs.FakeVerifier{})). - WithBuilderOpt(MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)). - WithBuilderOpt(DrandConfigOption(drand.NewFake(genTime))) - nodeReceive := builder2.Build(ctx) - receiverAddress := cs.GiveKey(t, nodeReceive, 2) - - // third node is miner - builder3 := test.NewNodeBuilder(t). - WithBuilderOpt(ChainClockConfigOption(c)). - WithGenesisInit(cs.GenesisInitFunc). - WithBuilderOpt(VerifierConfigOption(&proofs.FakeVerifier{})). - WithBuilderOpt(MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)). - WithBuilderOpt(PoStGeneratorOption(&consensus.TestElectionPoster{})). - WithBuilderOpt(DrandConfigOption(drand.NewFake(genTime))) - nodeMine := builder3.Build(ctx) - cs.GiveKey(t, nodeMine, 0) - cs.GiveMiner(t, nodeMine, 0) - - StartNodes(t, []*Node{nodeSend, nodeReceive, nodeMine}) - ConnectNodes(t, nodeSend, nodeMine) - ConnectNodes(t, nodeMine, nodeSend) - ConnectNodes(t, nodeMine, nodeReceive) - ConnectNodes(t, nodeReceive, nodeMine) - - /* collect initial balance values */ - senderStart, err := nodeSend.PorcelainAPI.WalletBalance(ctx, senderAddress) - require.NoError(t, err) - receiverStart, err := nodeReceive.PorcelainAPI.WalletBalance(ctx, receiverAddress) - require.NoError(t, err) - gasPrice := types.NewGasPrice(1) - expGasCost := gas.NewGas(242).ToTokens(gasPrice) // DRAGONS -- this is brittle need a better way to predict this. 
- - /* send message from SendNode */ - sendVal := specsbig.NewInt(100) - _, _, err = nodeSend.PorcelainAPI.MessageSend( - ctx, - senderAddress, - receiverAddress, - sendVal, - gasPrice, - gas.NewGas(1000), - builtin.MethodSend, - adt.Empty, - ) - require.NoError(t, err) - smsgs, err := nodeMine.PorcelainAPI.MessagePoolWait(ctx, 1) - require.NoError(t, err) - require.Equal(t, 1, len(smsgs)) - uCid, err := smsgs[0].Message.Cid() // Message waiter needs unsigned cid for bls - require.NoError(t, err) - - /* mine block with message */ - fakeClock.Advance(blockTime) - fmt.Printf("about to mining once\n") - _, err = nodeMine.BlockMining.BlockMiningAPI.MiningOnce(ctx) - require.NoError(t, err) - fmt.Printf("finished mining once\n") - /* verify new state */ - _, err = nodeReceive.PorcelainAPI.MessageWaitDone(ctx, uCid) - require.NoError(t, err) - _, err = nodeSend.PorcelainAPI.MessageWaitDone(ctx, uCid) - require.NoError(t, err) - - senderEnd, err := nodeSend.PorcelainAPI.WalletBalance(ctx, senderAddress) - require.NoError(t, err) - receiverEnd, err := nodeReceive.PorcelainAPI.WalletBalance(ctx, receiverAddress) - require.NoError(t, err) - - assert.Equal(t, senderStart, specsbig.Add(specsbig.Add(senderEnd, sendVal), expGasCost)) - assert.Equal(t, receiverEnd, specsbig.Add(receiverStart, sendVal)) -} - -// makeNodes makes at least two nodes, a miner and a client; numNodes is the total wanted -func makeNodesBlockPropTests(t *testing.T, numNodes int) (address.Address, []*Node, clock.Fake, time.Duration) { - seed := MakeChainSeed(t, MakeTestGenCfg(t, 3)) - ctx := context.Background() - genUnixSeconds := int64(1234567890) - genTime := time.Unix(genUnixSeconds, 0) - fc := clock.NewFake(genTime) - blockTime := 30 * time.Second - propDelay := 6 * time.Second - c := clock.NewChainClockFromClock(1234567890, blockTime, propDelay, fc) - - builder := test.NewNodeBuilder(t). - WithGenesisInit(seed.GenesisInitFunc). - WithBuilderOpt(ChainClockConfigOption(c)). 
- WithBuilderOpt(VerifierConfigOption(&proofs.FakeVerifier{})). - WithBuilderOpt(PoStGeneratorOption(&consensus.TestElectionPoster{})). - WithBuilderOpt(MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)). - WithBuilderOpt(DrandConfigOption(drand.NewFake(genTime))). - WithInitOpt(PeerKeyOpt(PeerKeys[0])) - minerNode := builder.Build(ctx) - seed.GiveKey(t, minerNode, 0) - mineraddr, _ := seed.GiveMiner(t, minerNode, 0) - - nodes := []*Node{minerNode} - - nodeLimit := 1 - if numNodes > 2 { - nodeLimit = numNodes - } - builder2 := test.NewNodeBuilder(t). - WithGenesisInit(seed.GenesisInitFunc). - WithBuilderOpt(ChainClockConfigOption(c)). - WithBuilderOpt(VerifierConfigOption(&proofs.FakeVerifier{})). - WithBuilderOpt(PoStGeneratorOption(&consensus.TestElectionPoster{})). - WithBuilderOpt(MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)). - WithBuilderOpt(DrandConfigOption(drand.NewFake(genTime))) - - for i := 0; i < nodeLimit; i++ { - nodes = append(nodes, builder2.Build(ctx)) - } - return mineraddr, nodes, fc, blockTime -} diff --git a/internal/app/go-filecoin/node/builder.go b/internal/app/go-filecoin/node/builder.go deleted file mode 100644 index 4dd50206e3..0000000000 --- a/internal/app/go-filecoin/node/builder.go +++ /dev/null @@ -1,388 +0,0 @@ -package node - -import ( - "context" - "time" - - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-merkledag" - "github.com/libp2p/go-libp2p" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/internal/submodule" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing" - 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cfg" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cst" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/dag" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/journal" - "github.com/filecoin-project/go-filecoin/internal/pkg/postgenerator" - drandapi "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/storage" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/version" -) - -// Builder is a helper to aid in the construction of a filecoin node. -type Builder struct { - blockTime time.Duration - libp2pOpts []libp2p.Option - offlineMode bool - verifier ffiwrapper.Verifier - postGen postgenerator.PoStGenerator - propDelay time.Duration - repo repo.Repo - journal journal.Journal - isRelay bool - chainClock clock.ChainEpochClock - genCid cid.Cid - drand drand.IFace -} - -// BuilderOpt is an option for building a filecoin node. -type BuilderOpt func(*Builder) error - -// OfflineMode enables or disables offline mode. -func OfflineMode(offlineMode bool) BuilderOpt { - return func(c *Builder) error { - c.offlineMode = offlineMode - return nil - } -} - -// IsRelay configures node to act as a libp2p relay. -func IsRelay() BuilderOpt { - return func(c *Builder) error { - c.isRelay = true - return nil - } -} - -// BlockTime sets the blockTime. 
-func BlockTime(blockTime time.Duration) BuilderOpt { - return func(c *Builder) error { - c.blockTime = blockTime - return nil - } -} - -// PropagationDelay sets the time the node needs to wait for blocks to arrive before mining. -func PropagationDelay(propDelay time.Duration) BuilderOpt { - return func(c *Builder) error { - c.propDelay = propDelay - return nil - } -} - -// Libp2pOptions returns a builder option that sets up the libp2p node -func Libp2pOptions(opts ...libp2p.Option) BuilderOpt { - return func(b *Builder) error { - // Quietly having your options overridden leads to hair loss - if len(b.libp2pOpts) > 0 { - panic("Libp2pOptions can only be called once") - } - b.libp2pOpts = opts - return nil - } -} - -// VerifierConfigOption returns a function that sets the verifier to use in the node consensus -func VerifierConfigOption(verifier ffiwrapper.Verifier) BuilderOpt { - return func(c *Builder) error { - c.verifier = verifier - return nil - } -} - -// PoStGeneratorOption returns a builder option that sets the post generator to -// use during block generation -func PoStGeneratorOption(generator postgenerator.PoStGenerator) BuilderOpt { - return func(b *Builder) error { - b.postGen = generator - return nil - } -} - -// ChainClockConfigOption returns a function that sets the chainClock to use in the node. -func ChainClockConfigOption(clk clock.ChainEpochClock) BuilderOpt { - return func(c *Builder) error { - c.chainClock = clk - return nil - } -} - -// DrandConfigOption returns a function that sets the node's drand interface -func DrandConfigOption(d drand.IFace) BuilderOpt { - return func(c *Builder) error { - c.drand = d - return nil - } -} - -// JournalConfigOption returns a function that sets the journal to use in the node. 
-func JournalConfigOption(jrl journal.Journal) BuilderOpt { - return func(c *Builder) error { - c.journal = jrl - return nil - } -} - -// MonkeyPatchNetworkParamsOption returns a function that sets global vars in the -// binary's specs actor dependency to change network parameters that live there -func MonkeyPatchNetworkParamsOption(params *config.NetworkParamsConfig) BuilderOpt { - return func(c *Builder) error { - if params.ConsensusMinerMinPower > 0 { - power.ConsensusMinerMinPower = big.NewIntUnsigned(params.ConsensusMinerMinPower) - } - if len(params.ReplaceProofTypes) > 0 { - newSupportedTypes := make(map[abi.RegisteredProof]struct{}) - for _, proofType := range params.ReplaceProofTypes { - newSupportedTypes[abi.RegisteredProof(proofType)] = struct{}{} - } - // Switch reference rather than mutate in place to avoid concurrent map mutation (in tests). - miner.SupportedProofTypes = newSupportedTypes - } - return nil - } -} - -// MonkeyPatchSetProofTypeOption returns a function that sets package variable -// SuppurtedProofTypes to be only the given registered proof type -func MonkeyPatchSetProofTypeOption(proofType abi.RegisteredProof) BuilderOpt { - return func(c *Builder) error { - // Switch reference rather than mutate in place to avoid concurrent map mutation (in tests). - miner.SupportedProofTypes = map[abi.RegisteredProof]struct{}{proofType: {}} - return nil - } -} - -// New creates a new node. 
-func New(ctx context.Context, opts ...BuilderOpt) (*Node, error) { - // initialize builder and set base values - n := &Builder{ - offlineMode: false, - blockTime: clock.DefaultEpochDuration, - propDelay: clock.DefaultPropagationDelay, - verifier: ffiwrapper.ProofVerifier, - } - - // apply builder options - for _, o := range opts { - if err := o(n); err != nil { - return nil, err - } - } - - // build the node - return n.build(ctx) -} - -func (b *Builder) build(ctx context.Context) (*Node, error) { - // - // Set default values on un-initialized fields - // - - if b.repo == nil { - b.repo = repo.NewInMemoryRepo() - } - - var err error - - if b.journal == nil { - b.journal = journal.NewNoopJournal() - } - - // fetch genesis block id - b.genCid, err = readGenesisCid(b.repo.Datastore()) - if err != nil { - return nil, err - } - - // create the node - nd := &Node{ - OfflineMode: b.offlineMode, - Repo: b.repo, - } - - nd.Blockstore, err = submodule.NewBlockstoreSubmodule(ctx, b.repo) - if err != nil { - return nil, errors.Wrap(err, "failed to build node.Blockstore") - } - - nd.network, err = submodule.NewNetworkSubmodule(ctx, (*builder)(b), b.repo, &nd.Blockstore) - if err != nil { - return nil, errors.Wrap(err, "failed to build node.Network") - } - - nd.Discovery, err = submodule.NewDiscoverySubmodule(ctx, (*builder)(b), b.repo.Config().Bootstrap, &nd.network) - if err != nil { - return nil, errors.Wrap(err, "failed to build node.Discovery") - } - - nd.VersionTable, err = version.ConfigureProtocolVersions(nd.network.NetworkName) - if err != nil { - return nil, err - } - - nd.Blockservice, err = submodule.NewBlockserviceSubmodule(ctx, &nd.Blockstore, &nd.network) - if err != nil { - return nil, errors.Wrap(err, "failed to build node.Blockservice") - } - - nd.ProofVerification = submodule.NewProofVerificationSubmodule(b.verifier) - - nd.chain, err = submodule.NewChainSubmodule((*builder)(b), b.repo, &nd.Blockstore, &nd.ProofVerification) - if err != nil { - return nil, 
errors.Wrap(err, "failed to build node.Chain") - } - if b.drand == nil { - genBlk, err := nd.chain.ChainReader.GetGenesisBlock(ctx) - if err != nil { - return nil, errors.Wrap(err, "failed to construct drand grpc") - } - dGRPC, err := DefaultDrandIfaceFromConfig(b.repo.Config(), genBlk.Timestamp) - if err != nil { - return nil, err - } - b.drand = dGRPC - } - - if b.chainClock == nil { - // get the genesis block time from the chainsubmodule - geneBlk, err := nd.chain.ChainReader.GetGenesisBlock(ctx) - if err != nil { - return nil, err - } - b.chainClock = clock.NewChainClock(geneBlk.Timestamp, b.blockTime, b.propDelay) - } - nd.ChainClock = b.chainClock - - nd.syncer, err = submodule.NewSyncerSubmodule(ctx, (*builder)(b), &nd.Blockstore, &nd.network, &nd.Discovery, &nd.chain, nd.ProofVerification.ProofVerifier) - if err != nil { - return nil, errors.Wrap(err, "failed to build node.Syncer") - } - - nd.Wallet, err = submodule.NewWalletSubmodule(ctx, b.repo, &nd.chain) - if err != nil { - return nil, errors.Wrap(err, "failed to build node.Wallet") - } - - nd.Messaging, err = submodule.NewMessagingSubmodule(ctx, (*builder)(b), b.repo, &nd.network, &nd.chain, &nd.Wallet) - if err != nil { - return nil, errors.Wrap(err, "failed to build node.Messaging") - } - - nd.StorageNetworking, err = submodule.NewStorgeNetworkingSubmodule(ctx, &nd.network) - if err != nil { - return nil, errors.Wrap(err, "failed to build node.StorageNetworking") - } - - nd.BlockMining, err = submodule.NewBlockMiningSubmodule(ctx, b.postGen) - if err != nil { - return nil, errors.Wrap(err, "failed to build node.BlockMining") - } - - waiter := msg.NewWaiter(nd.chain.ChainReader, nd.chain.MessageStore, nd.Blockstore.Blockstore, nd.Blockstore.CborStore) - - nd.PorcelainAPI = porcelain.New(plumbing.New(&plumbing.APIDeps{ - Chain: nd.chain.State, - Sync: cst.NewChainSyncProvider(nd.syncer.ChainSyncManager), - Config: cfg.NewConfig(b.repo), - DAG: 
dag.NewDAG(merkledag.NewDAGService(nd.Blockservice.Blockservice)), - Expected: nd.syncer.Consensus, - MsgPool: nd.Messaging.MsgPool, - MsgPreviewer: msg.NewPreviewer(nd.chain.ChainReader, nd.Blockstore.CborStore, nd.Blockstore.Blockstore, nd.chain.Processor), - MsgWaiter: waiter, - Network: nd.network.Network, - Outbox: nd.Messaging.Outbox, - PieceManager: nd.PieceManager, - Wallet: nd.Wallet.Wallet, - })) - - nd.StorageProtocol, err = submodule.NewStorageProtocolSubmodule( - ctx, - nd.PorcelainAPI.WalletDefaultAddress, - &nd.chain, - &nd.Messaging, - waiter, - nd.Wallet.Signer, - nd.Host(), - nd.Repo.Datastore(), - nd.Blockstore.Blockstore, - nd.network.GraphExchange, - state.NewViewer(nd.Blockstore.CborStore), - ) - if err != nil { - return nil, err - } - - nd.StorageAPI = storage.NewAPI(nd.StorageProtocol) - nd.DrandAPI = drandapi.New(b.drand, nd.PorcelainAPI) - - return nd, nil -} - -// Repo returns the repo. -func (b Builder) Repo() repo.Repo { - return b.repo -} - -// Builder private method accessors for impl's - -type builder Builder - -func (b builder) GenesisCid() cid.Cid { - return b.genCid -} - -func (b builder) BlockTime() time.Duration { - return b.blockTime -} - -func (b builder) Repo() repo.Repo { - return b.repo -} - -func (b builder) IsRelay() bool { - return b.isRelay -} - -func (b builder) ChainClock() clock.ChainEpochClock { - return b.chainClock -} - -func (b builder) Journal() journal.Journal { - return b.journal -} - -func (b builder) Libp2pOpts() []libp2p.Option { - return b.libp2pOpts -} - -func (b builder) OfflineMode() bool { - return b.offlineMode -} - -func (b builder) Drand() drand.IFace { - return b.drand -} - -func DefaultDrandIfaceFromConfig(cfg *config.Config, fcGenTS uint64) (drand.IFace, error) { - drandConfig := cfg.Drand - addrs := make([]drand.Address, len(drandConfig.Addresses)) - for i, a := range drandConfig.Addresses { - addrs[i] = drand.NewAddress(a, drandConfig.Secure) - } - return drand.NewGRPC(addrs, 
drandConfig.DistKey, time.Unix(drandConfig.StartTimeUnix, 0), - time.Unix(int64(fcGenTS), 0), time.Duration(drandConfig.RoundSeconds)*time.Second) -} diff --git a/internal/app/go-filecoin/node/config.go b/internal/app/go-filecoin/node/config.go deleted file mode 100644 index cf6f8258e3..0000000000 --- a/internal/app/go-filecoin/node/config.go +++ /dev/null @@ -1,43 +0,0 @@ -package node - -import ( - libp2p "github.com/libp2p/go-libp2p" - ci "github.com/libp2p/go-libp2p-core/crypto" - errors "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" -) - -// OptionsFromRepo takes a repo and returns options that configure a node -// to use the given repo. -func OptionsFromRepo(r repo.Repo) ([]BuilderOpt, error) { - sk, err := privKeyFromKeystore(r) - if err != nil { - return nil, err - } - - cfg := r.Config() - cfgopts := []BuilderOpt{ - // Libp2pOptions can only be called once, so add all options here. - Libp2pOptions( - libp2p.ListenAddrStrings(cfg.Swarm.Address), - libp2p.Identity(sk), - ), - } - - dsopt := func(c *Builder) error { - c.repo = r - return nil - } - - return append(cfgopts, dsopt), nil -} - -func privKeyFromKeystore(r repo.Repo) (ci.PrivKey, error) { - sk, err := r.Keystore().Get("self") - if err != nil { - return nil, errors.Wrap(err, "failed to get key from keystore") - } - - return sk, nil -} diff --git a/internal/app/go-filecoin/node/helpers.go b/internal/app/go-filecoin/node/helpers.go deleted file mode 100644 index cf0af16188..0000000000 --- a/internal/app/go-filecoin/node/helpers.go +++ /dev/null @@ -1,30 +0,0 @@ -package node - -import ( - "context" - "encoding/json" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/net/pubsub" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/pkg/errors" -) - -type pubSubHandler func(ctx context.Context, msg pubsub.Message) error - -// readGenesisCid is a helper function that queries 
the provided datastore for -// an entry with the genesisKey cid, returning if found. -func readGenesisCid(ds datastore.Datastore) (cid.Cid, error) { - bb, err := ds.Get(chain.GenesisKey) - if err != nil { - return cid.Undef, errors.Wrap(err, "failed to read genesisKey") - } - - var c cid.Cid - err = json.Unmarshal(bb, &c) - if err != nil { - return cid.Undef, errors.Wrap(err, "failed to cast genesisCid") - } - return c, nil -} diff --git a/internal/app/go-filecoin/node/init.go b/internal/app/go-filecoin/node/init.go deleted file mode 100644 index 64ef07f05d..0000000000 --- a/internal/app/go-filecoin/node/init.go +++ /dev/null @@ -1,348 +0,0 @@ -package node - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/sector-storage/stores" - "github.com/filecoin-project/specs-actors/actors/abi" - fsm "github.com/filecoin-project/storage-fsm" - "github.com/google/uuid" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" - keystore "github.com/ipfs/go-ipfs-keystore" - acrypto "github.com/libp2p/go-libp2p-core/crypto" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/sectors" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/genesis" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" -) - -const defaultPeerKeyBits = 2048 - -// 
initCfg contains configuration for initializing a node's repo. -type initCfg struct { - peerKey acrypto.PrivKey - defaultKey *crypto.KeyInfo - initImports []*crypto.KeyInfo -} - -// InitOpt is an option for initialization of a node's repo. -type InitOpt func(*initCfg) - -// PeerKeyOpt sets the private key for a node's 'self' libp2p identity. -// If unspecified, initialization will create a new one. -func PeerKeyOpt(k acrypto.PrivKey) InitOpt { - return func(opts *initCfg) { - opts.peerKey = k - } -} - -// DefaultKeyOpt sets the private key for the wallet's default account. -// If unspecified, initialization will create a new one. -func DefaultKeyOpt(ki *crypto.KeyInfo) InitOpt { - return func(opts *initCfg) { - opts.defaultKey = ki - } -} - -// ImportKeyOpt imports the provided key during initialization. -func ImportKeyOpt(ki *crypto.KeyInfo) InitOpt { - return func(opts *initCfg) { - opts.initImports = append(opts.initImports, ki) - } -} - -// Init initializes a Filecoin repo with genesis state and keys. -// This will always set the configuration for wallet default address (to the specified default -// key or a newly generated one), but otherwise leave the repo's config object intact. -// Make further configuration changes after initialization. 
-func Init(ctx context.Context, r repo.Repo, gen genesis.InitFunc, opts ...InitOpt) error { - cfg := new(initCfg) - for _, o := range opts { - o(cfg) - } - - bs := bstore.NewBlockstore(r.Datastore()) - cst := cborutil.NewIpldStore(bs) - chainstore, err := chain.Init(ctx, r, bs, cst, gen) - if err != nil { - return errors.Wrap(err, "Could not Init Node") - } - - if err := initPeerKey(r.Keystore(), cfg.peerKey); err != nil { - return err - } - - backend, err := wallet.NewDSBackend(r.WalletDatastore()) - if err != nil { - return errors.Wrap(err, "failed to open wallet datastore") - } - w := wallet.New(backend) - - defaultKey, err := initDefaultKey(w, cfg.defaultKey) - if err != nil { - return err - } - err = importInitKeys(w, cfg.initImports) - if err != nil { - return err - } - - defaultAddress, err := defaultKey.Address() - if err != nil { - return errors.Wrap(err, "failed to extract address from default key") - } - r.Config().Wallet.DefaultAddress = defaultAddress - if err = r.ReplaceConfig(r.Config()); err != nil { - return errors.Wrap(err, "failed to write config") - } - - genesisBlock, err := chainstore.GetGenesisBlock(ctx) - if err != nil { - return err - } - return InitSectors(ctx, r, genesisBlock) -} - -func initPeerKey(store keystore.Keystore, key acrypto.PrivKey) error { - var err error - if key == nil { - key, _, err = acrypto.GenerateKeyPair(acrypto.RSA, defaultPeerKeyBits) - if err != nil { - return errors.Wrap(err, "failed to create peer key") - } - } - if err := store.Put("self", key); err != nil { - return errors.Wrap(err, "failed to store private key") - } - return nil -} - -func initDefaultKey(w *wallet.Wallet, key *crypto.KeyInfo) (*crypto.KeyInfo, error) { - var err error - if key == nil { - key, err = w.NewKeyInfo() - if err != nil { - return nil, errors.Wrap(err, "failed to create default key") - } - } else { - if _, err := w.Import(key); err != nil { - return nil, errors.Wrap(err, "failed to import default key") - } - } - return key, nil -} - 
-func importInitKeys(w *wallet.Wallet, importKeys []*crypto.KeyInfo) error { - for _, ki := range importKeys { - _, err := w.Import(ki) - if err != nil { - return err - } - } - return nil -} - -func InitSectors(ctx context.Context, rep repo.Repo, genesisBlock *block.Block) error { - cfg := rep.Config() - - rpt, err := rep.Path() - if err != nil { - return err - } - - spt, err := paths.GetSectorPath(cfg.SectorBase.RootDirPath, rpt) - if err != nil { - return err - } - - if err := ensureSectorDirAndMetadata(false, spt); err != nil { - return err - } - - if cfg.SectorBase.PreSealedSectorsDirPath != "" && cfg.Mining.MinerAddress != address.Undef { - if err := ensureSectorDirAndMetadata(true, cfg.SectorBase.PreSealedSectorsDirPath); err != nil { - return err - } - - if err := importPreSealedSectorMetadata(ctx, rep, genesisBlock, cfg.Mining.MinerAddress); err != nil { - return err - } - } - return nil -} - -// Save the provided slice of sector metadata (corresponding to pre-sealed -// sectors) to the keyspace used by the finite-state machine. 
-func persistGenesisFSMState(rep repo.Repo, info []fsm.SectorInfo) error { - for idx := range info { - key := datastore.NewKey(fsm.SectorStorePrefix).ChildString(fmt.Sprint(info[idx].SectorNumber)) - - b, err := encoding.Encode(&info[idx]) - if err != nil { - return err - } - - if err := rep.Datastore().Put(key, b); err != nil { - return err - } - } - - return nil -} - -func importPreSealedSectorMetadata(ctx context.Context, rep repo.Repo, genesisBlock *block.Block, maddr address.Address) error { - stateFSM, err := createGenesisFSMState(ctx, rep, genesisBlock, maddr) - if err != nil { - return err - } - - err = persistGenesisFSMState(rep, stateFSM) - if err != nil { - return err - } - - max := abi.SectorNumber(0) - for idx := range stateFSM { - if stateFSM[idx].SectorNumber > max { - max = stateFSM[idx].SectorNumber - } - } - - // Increment the sector number counter until it is ready to dispense numbers - // outside of the range of numbers already consumed by the pre-sealed - // sectors. - cnt := sectors.NewPersistedSectorNumberCounter(rep.Datastore()) - for { - num, err := cnt.Next() - if err != nil { - return err - } - if num > max { - break - } - } - - return nil -} - -func ensureSectorDirAndMetadata(containsPreSealedSectors bool, dirPath string) error { - _, err := os.Stat(filepath.Join(dirPath, stores.MetaFile)) - if os.IsNotExist(err) { - // TODO: Set the appropriate permissions. - _ = os.MkdirAll(dirPath, 0777) - - dirMeta := stores.LocalStorageMeta{ - ID: stores.ID(uuid.New().String()), - Weight: 10, - CanSeal: true, - CanStore: true, - } - - if containsPreSealedSectors { - dirMeta.CanSeal = false - dirMeta.CanStore = false - dirMeta.Weight = 0 - } - - b, err := json.MarshalIndent(&dirMeta, "", " ") - if err != nil { - return err - } - - // TODO: Set the appropriate permissions. 
- if err := ioutil.WriteFile(filepath.Join(dirPath, stores.MetaFile), b, 0777); err != nil { - return err - } - } else if err != nil { - return err - } - - return nil -} - -// Produce a slice of fsm.SectorInfo (used to seed the storage finite-state -// machine with pre-sealed sectors) for a storage miner given a newly-minted -// genesis block. -func createGenesisFSMState(ctx context.Context, rep repo.Repo, genesisBlock *block.Block, maddr address.Address) ([]fsm.SectorInfo, error) { - view := state.NewViewer(cborutil.NewIpldStore(bstore.NewBlockstore(rep.Datastore()))).StateView(genesisBlock.StateRoot.Cid) - - conf, err := view.MinerSectorConfiguration(ctx, maddr) - if err != nil { - return nil, err - } - - // Loop through all the sectors in the genesis miner's proving set and - // persist to the shared (with storage-fsm) data store the relevant bits of - // sector and deal metadata. - var out []fsm.SectorInfo - - err = view.MinerSectorsForEach(ctx, maddr, func(sectorNumber abi.SectorNumber, sealedCID cid.Cid, proofType abi.RegisteredProof, dealIDs []abi.DealID) error { - pieces := make([]fsm.Piece, len(dealIDs)) - for idx := range dealIDs { - deal, err := view.MarketDealProposal(ctx, dealIDs[idx]) - if err != nil { - return err - } - - pieces[idx] = fsm.Piece{ - Piece: abi.PieceInfo{ - Size: deal.PieceSize, - PieceCID: deal.PieceCID, - }, - DealInfo: &fsm.DealInfo{ - DealID: dealIDs[idx], - DealSchedule: fsm.DealSchedule{ - StartEpoch: deal.StartEpoch, - EndEpoch: deal.EndEpoch, - }, - }, - } - } - - unsealedCID, err := view.MarketComputeDataCommitment(ctx, conf.SealProofType, dealIDs) - if err != nil { - return err - } - - out = append(out, fsm.SectorInfo{ - State: fsm.Proving, - SectorNumber: sectorNumber, - SectorType: proofType, - Pieces: pieces, - TicketValue: abi.SealRandomness{}, - TicketEpoch: 0, - PreCommit1Out: nil, - CommD: &unsealedCID, - CommR: &sealedCID, - Proof: nil, - PreCommitMessage: nil, - SeedValue: abi.InteractiveSealRandomness{}, - 
SeedEpoch: 0, - CommitMessage: nil, - }) - - return nil - }) - if err != nil { - return nil, err - } - - return out, nil -} diff --git a/internal/app/go-filecoin/node/message.go b/internal/app/go-filecoin/node/message.go deleted file mode 100644 index 7771ff016c..0000000000 --- a/internal/app/go-filecoin/node/message.go +++ /dev/null @@ -1,33 +0,0 @@ -package node - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/internal/pkg/net/pubsub" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func (node *Node) processMessage(ctx context.Context, pubSubMsg pubsub.Message) (err error) { - sender := pubSubMsg.GetSender() - - // ignore messages from self - if sender == node.Host().ID() { - return nil - } - - unmarshaled := &types.SignedMessage{} - if err := unmarshaled.Unmarshal(pubSubMsg.GetData()); err != nil { - return err - } - // TODO #3566 This is redundant with pubsub repeater validation. - // We should do this in one call, maybe by waiting on pool add in repeater? - err = node.Messaging.MsgSigVal.Validate(ctx, unmarshaled) - if err != nil { - return err - } - - log.Debugf("Received new message %s from peer %s", unmarshaled, pubSubMsg.GetSender()) - - _, err = node.Messaging.Inbox.Add(ctx, unmarshaled) - return err -} diff --git a/internal/app/go-filecoin/node/message_propagate_test.go b/internal/app/go-filecoin/node/message_propagate_test.go deleted file mode 100644 index 46f7f514a5..0000000000 --- a/internal/app/go-filecoin/node/message_propagate_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package node_test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/specs-actors/actors/abi" - - . 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/proofs" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/version" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" - specsbig "github.com/filecoin-project/specs-actors/actors/abi/big" -) - -// TestMessagePropagation is a high level check that messages are propagated between message -// pools of connected nodes. -func TestMessagePropagation(t *testing.T) { - tf.UnitTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Generate a key and install an account actor at genesis which will be able to send messages. - genCfg := &gengen.GenesisCfg{} - require.NoError(t, gengen.GenKeys(1, "1000000")(genCfg)) - require.NoError(t, gengen.NetworkName(version.TEST)(genCfg)) - - cs := MakeChainSeed(t, genCfg) - - // Initialize the first node to be the message sender. - builder1 := test.NewNodeBuilder(t) - builder1.WithGenesisInit(cs.GenesisInitFunc) - builder1.WithBuilderOpt(VerifierConfigOption(&proofs.FakeVerifier{})) - builder1.WithBuilderOpt(MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)) - - sender := builder1.Build(ctx) - senderAddress := cs.GiveKey(t, sender, 0) - - // Initialize other nodes to receive the message. 
- builder2 := test.NewNodeBuilder(t) - builder2.WithGenesisInit(cs.GenesisInitFunc) - builder2.WithBuilderOpt(VerifierConfigOption(&proofs.FakeVerifier{})) - builder2.WithBuilderOpt(MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)) - receiverCount := 2 - receivers := builder2.BuildMany(ctx, receiverCount) - - nodes := append([]*Node{sender}, receivers...) - StartNodes(t, nodes) - defer StopNodes(nodes) - - // Connect nodes in series - ConnectNodes(t, nodes[0], nodes[1]) - ConnectNodes(t, nodes[1], nodes[2]) - // Wait for network connection notifications to propagate - time.Sleep(time.Millisecond * 200) - - require.Equal(t, 0, len(nodes[1].Messaging.Inbox.Pool().Pending())) - require.Equal(t, 0, len(nodes[2].Messaging.Inbox.Pool().Pending())) - require.Equal(t, 0, len(nodes[0].Messaging.Inbox.Pool().Pending())) - - fooMethod := abi.MethodNum(7232) - - t.Run("message propagates", func(t *testing.T) { - _, _, err := sender.PorcelainAPI.MessageSend( - ctx, - senderAddress, - builtin.InitActorAddr, - specsbig.NewInt(100), - types.NewGasPrice(1), - gas.Unit(5000), - fooMethod, - adt.Empty, - ) - require.NoError(t, err) - - require.NoError(t, th.WaitForIt(50, 100*time.Millisecond, func() (bool, error) { - return len(nodes[0].Messaging.Inbox.Pool().Pending()) == 1 && - len(nodes[1].Messaging.Inbox.Pool().Pending()) == 1 && - len(nodes[2].Messaging.Inbox.Pool().Pending()) == 1, nil - }), "failed to propagate messages") - - assert.True(t, nodes[0].Messaging.Inbox.Pool().Pending()[0].Message.Method == fooMethod) - assert.True(t, nodes[1].Messaging.Inbox.Pool().Pending()[0].Message.Method == fooMethod) - assert.True(t, nodes[2].Messaging.Inbox.Pool().Pending()[0].Message.Method == fooMethod) - }) -} diff --git a/internal/app/go-filecoin/node/node.go b/internal/app/go-filecoin/node/node.go deleted file mode 100644 index 238aaa81f4..0000000000 --- a/internal/app/go-filecoin/node/node.go +++ /dev/null @@ -1,706 +0,0 @@ -package node - -import ( - "context" - "fmt" 
- "reflect" - "runtime" - - "github.com/filecoin-project/go-address" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - bserv "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/internal/submodule" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" - "github.com/filecoin-project/go-filecoin/internal/pkg/mining" - "github.com/filecoin-project/go-filecoin/internal/pkg/net/pubsub" - "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" - "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/drand" - mining_protocol "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/mining" - "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/storage" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/version" -) - -var log = logging.Logger("node") // nolint: deadcode - -var ( - // ErrNoMinerAddress is returned when the node is not configured to have any miner addresses. - ErrNoMinerAddress = errors.New("no miner addresses configured") -) - -// Node represents a full Filecoin node. 
-type Node struct { - // OfflineMode, when true, disables libp2p. - OfflineMode bool - - // ChainClock is a chainClock used by the node for chain epoch. - ChainClock clock.ChainEpochClock - - // Repo is the repo this node was created with. - // - // It contains all persistent artifacts of the filecoin node. - Repo repo.Repo - - PorcelainAPI *porcelain.API - DrandAPI *drand.API - StorageAPI *storage.API - - // - // Core services - // - - Blockstore submodule.BlockstoreSubmodule - network submodule.NetworkSubmodule - Blockservice submodule.BlockServiceSubmodule - Discovery submodule.DiscoverySubmodule - - // - // Subsystems - // - - chain submodule.ChainSubmodule - syncer submodule.SyncerSubmodule - BlockMining submodule.BlockMiningSubmodule - StorageMining *submodule.StorageMiningSubmodule - - // - // Supporting services - // - - Wallet submodule.WalletSubmodule - Messaging submodule.MessagingSubmodule - StorageNetworking submodule.StorageNetworkingSubmodule - ProofVerification submodule.ProofVerificationSubmodule - - // - // Protocols - // - - VersionTable *version.ProtocolVersionTable - StorageProtocol *submodule.StorageProtocolSubmodule - RetrievalProtocol *submodule.RetrievalProtocolSubmodule -} - -// Start boots up the node. -func (node *Node) Start(ctx context.Context) error { - if err := metrics.RegisterPrometheusEndpoint(node.Repo.Config().Observability.Metrics); err != nil { - return errors.Wrap(err, "failed to setup metrics") - } - - if err := metrics.RegisterJaeger(node.network.Host.ID().Pretty(), node.Repo.Config().Observability.Tracing); err != nil { - return errors.Wrap(err, "failed to setup tracing") - } - - err := node.chain.Start(ctx, node) - if err != nil { - return err - } - - // Only set these up if there is a miner configured. 
- if _, err := node.MiningAddress(); err == nil { - if err := node.setupStorageMining(ctx); err != nil { - log.Errorf("setup mining failed: %v", err) - return err - } - } - - // TODO: defer establishing these API endpoints until the chain is synced when the commands - // can handle their absence: https://github.com/filecoin-project/go-filecoin/issues/3137 - err = node.setupProtocols() - if err != nil { - return errors.Wrap(err, "failed to set up protocols:") - } - - var syncCtx context.Context - syncCtx, node.syncer.CancelChainSync = context.WithCancel(context.Background()) - - // Wire up propagation of new chain heads from the chain store to other components. - head, err := node.PorcelainAPI.ChainHead() - if err != nil { - return errors.Wrap(err, "failed to get chain head") - } - go node.handleNewChainHeads(syncCtx, head) - - if !node.OfflineMode { - - // Subscribe to block pubsub topic to learn about new chain heads. - node.syncer.BlockSub, err = node.pubsubscribe(syncCtx, node.syncer.BlockTopic, node.handleBlockSub) - if err != nil { - log.Error(err) - } - - // Subscribe to the message pubsub topic to learn about messages to mine into blocks. - // TODO: defer this subscription until after mining (block production) is started: - // https://github.com/filecoin-project/go-filecoin/issues/2145. - // This is blocked by https://github.com/filecoin-project/go-filecoin/issues/2959, which - // is necessary for message_propagate_test to start mining before testing this behaviour. - node.Messaging.MessageSub, err = node.pubsubscribe(syncCtx, node.Messaging.MessageTopic, node.processMessage) - if err != nil { - return err - } - - // Start node discovery - if err := node.Discovery.Start(node); err != nil { - return err - } - - if err := node.syncer.Start(syncCtx, node); err != nil { - return err - } - - // Wire up syncing and possible mining - go node.doMiningPause(syncCtx) - } - - return nil -} - -// Subscribes a handler function to a pubsub topic. 
-func (node *Node) pubsubscribe(ctx context.Context, topic *pubsub.Topic, handler pubSubHandler) (pubsub.Subscription, error) { - sub, err := topic.Subscribe() - if err != nil { - return nil, errors.Wrapf(err, "failed to subscribe") - } - go node.handleSubscription(ctx, sub, handler) - return sub, nil -} - -func (node *Node) setIsMining(isMining bool) { - node.BlockMining.Mining.Lock() - defer node.BlockMining.Mining.Unlock() - node.BlockMining.Mining.IsMining = isMining -} - -func (node *Node) handleNewMiningOutput(ctx context.Context, miningOutCh <-chan mining.FullBlock) { - defer func() { - node.BlockMining.MiningDoneWg.Done() - }() - for { - select { - case <-ctx.Done(): - return - case output, ok := <-miningOutCh: - if !ok { - log.Errorf("scheduler stopped. stopping mining.") - node.StopMining(context.Background()) - return - } - - node.BlockMining.MiningDoneWg.Add(1) - go func() { - if node.IsMining() { - node.BlockMining.AddNewlyMinedBlock(ctx, output) - } - node.BlockMining.MiningDoneWg.Done() - }() - } - } - -} - -func (node *Node) handleNewChainHeads(ctx context.Context, firstHead block.TipSet) { - newHeadCh := node.chain.ChainReader.HeadEvents().Sub(chain.NewHeadTopic) - defer log.Infof("new head handler exited") - defer node.chain.ChainReader.HeadEvents().Unsub(newHeadCh) - - handler := message.NewHeadHandler(node.Messaging.Inbox, node.Messaging.Outbox, node.chain.ChainReader, firstHead) - - for { - log.Debugf("waiting for new head") - select { - case ts, ok := <-newHeadCh: - if !ok { - log.Errorf("failed new head channel receive") - return - } - newHead, ok := ts.(block.TipSet) - if !ok { - log.Errorf("non-tipset published on heaviest tipset channel") - continue - } - height, _ := newHead.Height() - log.Debugf("received new head height %s, key %s", height, newHead.Key()) - - if node.StorageMining != nil { - log.Debugf("storage mining handling new head") - if err := node.StorageMining.HandleNewHead(ctx, newHead); err != nil { - log.Error(err) - } - } - 
- log.Debugf("message pool handling new head") - if err := handler.HandleNewHead(ctx, newHead); err != nil { - log.Error(err) - } - case <-ctx.Done(): - return - } - } -} - -func (node *Node) cancelSubscriptions() { - if node.syncer.CancelChainSync != nil { - node.syncer.CancelChainSync() - } - - if node.syncer.BlockSub != nil { - node.syncer.BlockSub.Cancel() - node.syncer.BlockSub = nil - } - - if node.Messaging.MessageSub != nil { - node.Messaging.MessageSub.Cancel() - node.Messaging.MessageSub = nil - } -} - -// Stop initiates the shutdown of the node. -func (node *Node) Stop(ctx context.Context) { - node.StopMining(ctx) - - node.cancelSubscriptions() - node.chain.ChainReader.Stop() - - if node.StorageMining != nil { - if err := node.StorageMining.Stop(ctx); err != nil { - fmt.Printf("error stopping storage miner: %s\n", err) - } - node.StorageMining = nil - } - - if err := node.Host().Close(); err != nil { - fmt.Printf("error closing host: %s\n", err) - } - - if err := node.Repo.Close(); err != nil { - fmt.Printf("error closing repo: %s\n", err) - } - - node.Discovery.Stop() - - fmt.Println("stopping filecoin :(") -} - -func (node *Node) addNewlyMinedBlock(ctx context.Context, o mining.FullBlock) { - log.Debugf("Got a newly mined block from the mining worker: %s", o.Header) - if err := node.AddNewBlock(ctx, o); err != nil { - log.Warnf("error adding new mined block: %s. err: %s", o.Header.Cid().String(), err.Error()) - } -} - -func (node *Node) addMinedBlockSynchronous(ctx context.Context, o mining.FullBlock) error { - wait := node.syncer.ChainSyncManager.BlockProposer().WaiterForTarget(block.NewTipSetKey(o.Header.Cid())) - err := node.AddNewBlock(ctx, o) - if err != nil { - return err - } - err = wait() - return err -} - -// MiningAddress returns the address of the mining actor mining on behalf of -// the node. 
-func (node *Node) MiningAddress() (address.Address, error) { - addr := node.Repo.Config().Mining.MinerAddress - if addr.Empty() { - return address.Undef, ErrNoMinerAddress - } - - return addr, nil -} - -// SetupMining initializes all the functionality the node needs to start mining. -// This method is idempotent. -func (node *Node) SetupMining(ctx context.Context) error { - // ensure we have a miner actor before we even consider mining - minerAddr, err := node.MiningAddress() - if err != nil { - return errors.Wrap(err, "failed to get mining address") - } - head := node.PorcelainAPI.ChainHeadKey() - view, err := node.PorcelainAPI.MinerStateView(head) - if err != nil { - return errors.Wrap(err, "failed to load state view") - } - _, _, err = view.MinerControlAddresses(ctx, minerAddr) - if err != nil { - return errors.Wrap(err, "failed to get miner actor") - } - - // ensure we've got our storage mining submodule configured - if node.StorageMining == nil { - if err := node.setupStorageMining(ctx); err != nil { - return err - } - } - - if node.RetrievalProtocol == nil { - if err := node.setupRetrievalMining(ctx); err != nil { - return err - } - } - // ensure we have a mining worker - if node.BlockMining.MiningWorker == nil { - if node.BlockMining.MiningWorker, err = node.CreateMiningWorker(ctx); err != nil { - return err - } - } - - if err := node.StorageMining.Start(ctx); err != nil { - fmt.Printf("error starting storage miner: %s\n", err) - } - - if err := node.StorageProtocol.StorageProvider.Start(ctx); err != nil { - fmt.Printf("error starting storage provider: %s\n", err) - } - - return nil -} - -func (node *Node) setupStorageMining(ctx context.Context) error { - if node.StorageMining != nil { - return errors.New("storage mining submodule has already been initialized") - } - - minerAddr, err := node.MiningAddress() - if err != nil { - return err - } - - head := node.Chain().ChainReader.GetHead() - status, err := node.PorcelainAPI.MinerGetStatus(ctx, minerAddr, 
head) - if err != nil { - return err - } - - repoPath, err := node.Repo.Path() - if err != nil { - return err - } - - sealProofType := status.SealProofType - - cborStore := node.Blockstore.CborStore - - waiter := msg.NewWaiter(node.chain.ChainReader, node.chain.MessageStore, node.Blockstore.Blockstore, cborStore) - - // TODO: rework these modules so they can be at least partially constructed during the building phase #3738 - stateViewer := state.NewViewer(cborStore) - - node.StorageMining, err = submodule.NewStorageMiningSubmodule(minerAddr, node.Repo.Datastore(), &node.chain, &node.Messaging, waiter, stateViewer, sealProofType, node.Repo, node.BlockMining.PoStGenerator) - if err != nil { - return err - } - - return node.StorageProtocol.AddStorageProvider( - ctx, - minerAddr, - &node.chain, - &node.Messaging, - waiter, - node.StorageMining.PieceManager, - node.Wallet.Signer, - node.Host(), - node.Repo.Datastore(), - node.Blockstore.Blockstore, - node.network.GraphExchange, - repoPath, - sealProofType, - stateViewer, - ) -} - -func (node *Node) setupRetrievalMining(ctx context.Context) error { - providerAddr, err := node.MiningAddress() - if err != nil { - return errors.Wrap(err, "failed to get mining address") - } - - waiter := msg.NewWaiter(node.chain.ChainReader, node.chain.MessageStore, node.Blockstore.Blockstore, node.Blockstore.CborStore) - - mgrStateViewer := paymentchannel.NewManagerStateViewer(node.Chain().ChainReader, node.Blockstore.CborStore) - paychMgr := paymentchannel.NewManager( - ctx, - node.Repo.Datastore(), - waiter, - node.Messaging.Outbox, - mgrStateViewer) - - rp, err := submodule.NewRetrievalProtocolSubmodule( - node.Blockstore.Blockstore, - node.Repo.Datastore(), - node.chain.State, - node.Host(), - providerAddr, - node.Wallet.Signer, - paychMgr, - node.PieceManager(), - ) - if err != nil { - return errors.Wrap(err, "failed to build node.RetrievalProtocol") - } - node.RetrievalProtocol = rp - return nil -} - -func (node *Node) 
doMiningPause(ctx context.Context) { - // doMiningPause receives state transition signals from the syncer - // dispatcher allowing syncing to make progress. - // - // When mining, the node passes these signals along to the scheduler - // pausing and continuing mining based on syncer state. - catchupCh := node.Syncer().ChainSyncManager.TransitionChannel() - for { - select { - case <-ctx.Done(): - return - case toCatchup, ok := <-catchupCh: - if !ok { - return - } - if node.BlockMining.MiningScheduler == nil { - // drop syncer transition signals if not mining - continue - } - if toCatchup { - node.BlockMining.MiningScheduler.Pause() - } else { - node.BlockMining.MiningScheduler.Continue() - } - } - } -} - -// StartMining causes the node to start feeding blocks to the mining worker and initializes -// the StorageMining for the mining address. -func (node *Node) StartMining(ctx context.Context) error { - if node.IsMining() { - return errors.New("Node is already mining") - } - - err := node.SetupMining(ctx) - if err != nil { - return errors.Wrap(err, "failed to setup mining") - } - - if node.BlockMining.MiningScheduler == nil { - node.BlockMining.MiningScheduler = mining.NewScheduler(node.BlockMining.MiningWorker, node.PorcelainAPI.ChainHead, node.ChainClock) - } else if node.BlockMining.MiningScheduler.IsStarted() { - return fmt.Errorf("miner scheduler already started") - } - - // The block mining scheduler Start() accepts a long-running context, and stopping is performed by cancellation of - // that context. - // The storage mining module and provider take the immediate context, hopefully don't run any goroutines that - // shut down when that context is done (which is ~immediately), and provide explicit Stop() methods instead. - // We should pick one consistent way of doing things. 
- var miningCtx context.Context - miningCtx, node.BlockMining.CancelMining = context.WithCancel(context.Background()) - - outCh, doneWg := node.BlockMining.MiningScheduler.Start(miningCtx) - - node.BlockMining.MiningDoneWg = doneWg - node.BlockMining.AddNewlyMinedBlock = node.addNewlyMinedBlock - node.BlockMining.MiningDoneWg.Add(1) - go node.handleNewMiningOutput(miningCtx, outCh) - - node.setIsMining(true) - - return nil -} - -// StopMining stops mining on new blocks. -func (node *Node) StopMining(ctx context.Context) { - node.setIsMining(false) - - if node.BlockMining.CancelMining != nil { - node.BlockMining.CancelMining() - } - - if node.BlockMining.MiningDoneWg != nil { - node.BlockMining.MiningDoneWg.Wait() - } - - if node.StorageMining != nil { - err := node.StorageMining.Stop(ctx) - if err != nil { - log.Warn("Error stopping storage miner", err) - } - } -} - -func (node *Node) handleSubscription(ctx context.Context, sub pubsub.Subscription, handler pubSubHandler) { - for { - received, err := sub.Next(ctx) - if err != nil { - if ctx.Err() != context.Canceled { - log.Errorf("error reading message from topic %s: %s", sub.Topic(), err) - } - return - } - - if err := handler(ctx, received); err != nil { - handlerName := runtime.FuncForPC(reflect.ValueOf(handler).Pointer()).Name() - if err != context.Canceled { - log.Errorf("error in handler %s for topic %s: %s", handlerName, sub.Topic(), err) - } - } - } -} - -// setupProtocols creates protocol clients and miners, then sets the node's APIs -// for each -func (node *Node) setupProtocols() error { - blockMiningAPI := mining_protocol.New( - node.MiningAddress, - node.addMinedBlockSynchronous, - node.chain.ChainReader, - node.IsMining, - node.SetupMining, - node.StartMining, - node.StopMining, - node.GetMiningWorker, - node.ChainClock, - ) - - node.BlockMining.BlockMiningAPI = &blockMiningAPI - return nil -} - -// GetMiningWorker ensures mining is setup and then returns the worker -func (node *Node) 
GetMiningWorker(ctx context.Context) (*mining.DefaultWorker, error) { - if err := node.SetupMining(ctx); err != nil { - return nil, err - } - return node.BlockMining.MiningWorker, nil -} - -// CreateMiningWorker creates a mining.Worker for the node using the configured -// getStateTree, getWeight, and getAncestors functions for the node -func (node *Node) CreateMiningWorker(ctx context.Context) (*mining.DefaultWorker, error) { - minerAddr, err := node.MiningAddress() - if err != nil { - return nil, errors.Wrap(err, "failed to get mining address") - } - - head := node.PorcelainAPI.ChainHeadKey() - view, err := node.PorcelainAPI.MinerStateView(head) - if err != nil { - return nil, errors.Wrapf(err, "failed to load miner state") - } - owner, _, err := view.MinerControlAddresses(ctx, minerAddr) - if err != nil { - return nil, errors.Wrapf(err, "failed to read miner control addresses") - } - - poster := node.BlockMining.PoStGenerator - if poster == nil { - poster = node.StorageMining.PoStGenerator - } - genBlk, err := node.Chain().ChainReader.GetGenesisBlock(ctx) - if err != nil { - return nil, err - } - sampler := chain.NewSampler(node.Chain().ChainReader, genBlk.Ticket) - - return mining.NewDefaultWorker(mining.WorkerParameters{ - API: node.PorcelainAPI, - - MinerAddr: minerAddr, - MinerOwnerAddr: owner, - WorkerSigner: node.Wallet.Signer, - - GetStateTree: node.chain.ChainReader.GetTipSetState, - GetWeight: node.getWeight, - Election: consensus.NewElectionMachine(node.PorcelainAPI), - TicketGen: consensus.NewTicketMachine(sampler), - TipSetMetadata: node.chain.ChainReader, - - MessageSource: node.Messaging.Inbox.Pool(), - MessageStore: node.chain.MessageStore, - MessageQualifier: consensus.NewMessagePenaltyChecker(node.Chain().State), - Blockstore: node.Blockstore.Blockstore, - Clock: node.ChainClock, - Poster: poster, - ChainState: node.chain.ChainReader, - Drand: node.Syncer().Drand, - }), nil -} - -// getWeight is the default GetWeight function for the mining 
worker. -func (node *Node) getWeight(ctx context.Context, ts block.TipSet) (fbig.Int, error) { - parent, err := ts.Parents() - if err != nil { - return fbig.Zero(), err - } - var baseStRoot cid.Cid - if parent.Empty() { - // use genesis state as parent state of genesis block - baseStRoot, err = node.chain.ChainReader.GetTipSetStateRoot(ts.Key()) - } else { - baseStRoot, err = node.chain.ChainReader.GetTipSetStateRoot(parent) - } - if err != nil { - return fbig.Zero(), err - } - return node.syncer.ChainSelector.Weight(ctx, ts, baseStRoot) -} - -// -- Accessors - -// Host returns the nodes host. -func (node *Node) Host() host.Host { - return node.network.Host -} - -// PieceManager returns the node's PieceManager. -func (node *Node) PieceManager() piecemanager.PieceManager { - return node.StorageMining.PieceManager -} - -// BlockService returns the nodes blockservice. -func (node *Node) BlockService() bserv.BlockService { - return node.Blockservice.Blockservice -} - -// CborStore returns the nodes cborStore. -func (node *Node) CborStore() *cborutil.IpldStore { - return node.Blockstore.CborStore -} - -// IsMining returns a boolean indicating whether the node is mining blocks. -func (node *Node) IsMining() bool { - node.BlockMining.Mining.Lock() - defer node.BlockMining.Mining.Unlock() - return node.BlockMining.Mining.IsMining -} - -// Chain returns the chain submodule. -func (node *Node) Chain() submodule.ChainSubmodule { - return node.chain -} - -// Syncer returns the syncer submodule. -func (node *Node) Syncer() submodule.SyncerSubmodule { - return node.syncer -} - -// Network returns the network submodule. 
-func (node *Node) Network() submodule.NetworkSubmodule { - return node.network -} diff --git a/internal/app/go-filecoin/node/node_test.go b/internal/app/go-filecoin/node/node_test.go deleted file mode 100644 index 12f388c34e..0000000000 --- a/internal/app/go-filecoin/node/node_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package node_test - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/proofs" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -func TestNodeConstruct(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithGenesisInit(gengen.DefaultGenesis) - builder.WithBuilderOpt(node.FakeProofVerifierBuilderOpts()...) - nd := builder.Build(ctx) - assert.NotNil(t, nd.Host) - - nd.Stop(context.Background()) -} - -func TestNodeNetworking(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithGenesisInit(gengen.DefaultGenesis) - builder.WithBuilderOpt(node.FakeProofVerifierBuilderOpts()...) 
- nds := builder.BuildMany(ctx, 2) - nd1, nd2 := nds[0], nds[1] - - pinfo := peer.AddrInfo{ - ID: nd2.Host().ID(), - Addrs: nd2.Host().Addrs(), - } - - err := nd1.Host().Connect(ctx, pinfo) - assert.NoError(t, err) - - nd1.Stop(ctx) - nd2.Stop(ctx) -} - -func TestConnectsToBootstrapNodes(t *testing.T) { - tf.UnitTest(t) - - t.Run("no bootstrap nodes no problem", func(t *testing.T) { - ctx := context.Background() - - r := repo.NewInMemoryRepo() - r.Config().Swarm.Address = "/ip4/0.0.0.0/tcp/0" - - require.NoError(t, node.Init(ctx, r, gengen.DefaultGenesis)) - r.Config().Bootstrap.Addresses = []string{} - opts, err := node.OptionsFromRepo(r) - require.NoError(t, err) - - nd, err := node.New(ctx, opts...) - require.NoError(t, err) - assert.NoError(t, nd.Start(ctx)) - defer nd.Stop(ctx) - }) - - t.Run("connects to bootstrap nodes", func(t *testing.T) { - ctx := context.Background() - - // These are two bootstrap nodes we'll connect to. - builder := test.NewNodeBuilder(t) - builder.WithGenesisInit(gengen.DefaultGenesis) - builder.WithBuilderOpt(node.FakeProofVerifierBuilderOpts()...) - nds := builder.BuildMany(ctx, 2) - node.StartNodes(t, nds) - nd1, nd2 := nds[0], nds[1] - - // Gotta be a better way to do this? - peer1 := fmt.Sprintf("%s/ipfs/%s", nd1.Host().Addrs()[0].String(), nd1.Host().ID().Pretty()) - peer2 := fmt.Sprintf("%s/ipfs/%s", nd2.Host().Addrs()[0].String(), nd2.Host().ID().Pretty()) - - // Create a node with the nodes above as bootstrap nodes. - r := repo.NewInMemoryRepo() - r.Config().Swarm.Address = "/ip4/0.0.0.0/tcp/0" - - require.NoError(t, node.Init(ctx, r, gengen.DefaultGenesis)) - r.Config().Bootstrap.Addresses = []string{peer1, peer2} - - opts, err := node.OptionsFromRepo(r) - require.NoError(t, err) - nd, err := node.New(ctx, opts...) 
- require.NoError(t, err) - nd.Discovery.Bootstrapper.MinPeerThreshold = 2 - nd.Discovery.Bootstrapper.Period = 10 * time.Millisecond - assert.NoError(t, nd.Start(ctx)) - defer nd.Stop(ctx) - - // Ensure they're connected. - connected := false - // poll until we are connected, to avoid flaky tests - for i := 0; i <= 30; i++ { - l1 := len(nd.Host().Network().ConnsToPeer(nd1.Host().ID())) - l2 := len(nd.Host().Network().ConnsToPeer(nd2.Host().ID())) - - connected = l1 == 1 && l2 == 1 - if connected { - break - } - time.Sleep(10 * time.Millisecond) - } - - assert.True(t, connected, "failed to connect") - }) -} - -func TestNodeInit(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithGenesisInit(gengen.DefaultGenesis) - builder.WithBuilderOpt(node.FakeProofVerifierBuilderOpts()...) - builder.WithBuilderOpt(node.OfflineMode(true)) - - nd := builder.Build(ctx) - - assert.NoError(t, nd.Start(ctx)) - - assert.NotEqual(t, 0, nd.PorcelainAPI.ChainHeadKey().Len()) - nd.Stop(ctx) -} - -func TestNodeStartMining(t *testing.T) { - t.Skip("Skip pending storage market integration #3731") - tf.UnitTest(t) - - ctx := context.Background() - - seed := node.MakeChainSeed(t, node.MakeTestGenCfg(t, 100)) - builder := test.NewNodeBuilder(t) - builder.WithInitOpt(node.PeerKeyOpt(node.PeerKeys[0])) - builder.WithGenesisInit(seed.GenesisInitFunc) - minerNode := builder.Build(ctx) - - seed.GiveKey(t, minerNode, 0) - seed.GiveMiner(t, minerNode, 0) // TODO: update to accommodate new go-fil-markets integration - // Start mining give error for fail to get miner actor from the heaviest tipset stateroot - assert.Contains(t, minerNode.StartMining(ctx).Error(), "failed to setup mining") - - assert.NoError(t, minerNode.Start(ctx)) - - t.Run("Start/Stop/Start results in a MiningScheduler that is started", func(t *testing.T) { - assert.NoError(t, minerNode.StartMining(ctx)) - defer minerNode.StopMining(ctx) - assert.True(t, 
minerNode.BlockMining.MiningScheduler.IsStarted()) - minerNode.StopMining(ctx) - assert.False(t, minerNode.BlockMining.MiningScheduler.IsStarted()) - assert.NoError(t, minerNode.StartMining(ctx)) - assert.True(t, minerNode.BlockMining.MiningScheduler.IsStarted()) - }) - - t.Run("Start + Start gives an error message saying mining is already started", func(t *testing.T) { - assert.NoError(t, minerNode.StartMining(ctx)) - defer minerNode.StopMining(ctx) - err := minerNode.StartMining(ctx) - assert.Error(t, err, "node is already mining") - }) -} - -func TestOptionWithError(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - r := repo.NewInMemoryRepo() - assert.NoError(t, node.Init(ctx, r, gengen.DefaultGenesis)) - - opts, err := node.OptionsFromRepo(r) - assert.NoError(t, err) - - scaryErr := errors.New("i am an error grrrr") - errOpt := func(c *node.Builder) error { - return scaryErr - } - - opts = append(opts, errOpt) - - _, err = node.New(ctx, opts...) - assert.Error(t, err, scaryErr) - -} - -func TestNodeConfig(t *testing.T) { - tf.UnitTest(t) - - defaultCfg := config.NewDefaultConfig() - - // fake mining - verifier := &proofs.FakeVerifier{} - - configBlockTime := 99 - configPropagationDelay := 20 - - builderOptions := []node.BuilderOpt{ - node.VerifierConfigOption(verifier), - node.BlockTime(time.Duration(configBlockTime)), - node.PropagationDelay(time.Duration(configPropagationDelay)), - } - - initOpts := []node.InitOpt{} - - builder := test.NewNodeBuilder(t) - builder.WithGenesisInit(gengen.DefaultGenesis) - builder.WithInitOpt(initOpts...) - builder.WithBuilderOpt(builderOptions...) 
- builder.WithBuilderOpt(node.OfflineMode(true)) - - n := builder.Build(context.Background()) - cfg := n.Repo.Config() - - assert.Equal(t, true, n.OfflineMode) - assert.Equal(t, defaultCfg.Mining, cfg.Mining) - assert.Equal(t, &config.SwarmConfig{ - Address: "/ip4/127.0.0.1/tcp/0", - }, cfg.Swarm) -} diff --git a/internal/app/go-filecoin/node/test/api.go b/internal/app/go-filecoin/node/test/api.go deleted file mode 100644 index ed421bdfce..0000000000 --- a/internal/app/go-filecoin/node/test/api.go +++ /dev/null @@ -1,145 +0,0 @@ -package test - -import ( - "context" - "encoding/json" - "fmt" - "os" - "strings" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" -) - -// NodeAPI wraps an in-process Node to provide a command API server and client for testing. -type NodeAPI struct { - node *node.Node - tb testing.TB -} - -// NewNodeAPI creates a wrangler for a node. -func NewNodeAPI(node *node.Node, tb testing.TB) *NodeAPI { - return &NodeAPI{node, tb} -} - -// RunNodeAPI creates a new API server and `Run()`s it. -func RunNodeAPI(ctx context.Context, node *node.Node, tb testing.TB) (client *Client, stop func()) { - api := NewNodeAPI(node, tb) - return api.Run(ctx) -} - -// Node returns the node backing the API. -func (a *NodeAPI) Node() *node.Node { - return a.node -} - -// Run start s a command API server for the node. -// Returns a client proxy and a function to terminate the NodeAPI server. 
-func (a *NodeAPI) Run(ctx context.Context) (client *Client, stop func()) { - ready := make(chan interface{}) - terminate := make(chan os.Signal, 1) - - go func() { - err := commands.RunAPIAndWait(ctx, a.node, a.node.Repo.Config().API, ready, terminate) - require.NoError(a.tb, err) - }() - <-ready - - addr, err := a.node.Repo.APIAddr() - require.NoError(a.tb, err) - require.NotEmpty(a.tb, addr, "empty API address") - - return &Client{addr, a.tb}, func() { - close(terminate) - } -} - -// Client is an in-process client to a command API. -type Client struct { - address string - tb testing.TB -} - -// Address returns the address string to which the client sends command RPCs. -func (c *Client) Address() string { - return c.address -} - -// Run runs a CLI command and returns its output. -func (c *Client) Run(ctx context.Context, command ...string) *th.CmdOutput { - c.tb.Helper() - args := []string{ - "go-filecoin", // A dummy first arg is required, simulating shell invocation. - fmt.Sprintf("--cmdapiaddr=%s", c.address), - } - args = append(args, command...) - - // Create pipes for the client to write stdout and stderr. - readStdOut, writeStdOut, err := os.Pipe() - require.NoError(c.tb, err) - readStdErr, writeStdErr, err := os.Pipe() - require.NoError(c.tb, err) - var readStdin *os.File // no stdin needed - - exitCode, err := commands.Run(ctx, args, readStdin, writeStdOut, writeStdErr) - // Close the output side of the pipes so that ReadAll() on the read ends can complete. - require.NoError(c.tb, writeStdOut.Close()) - require.NoError(c.tb, writeStdErr.Close()) - - out := th.ReadOutput(c.tb, command, readStdOut, readStdErr) - if err != nil { - out.SetInvocationError(err) - } else { - out.SetStatus(exitCode) - } - require.NoError(c.tb, err, "client execution error") - - return out -} - -// RunSuccess runs a command and asserts that it succeeds (status of zero and logs no errors). 
-func (c *Client) RunSuccess(ctx context.Context, command ...string) *th.CmdOutput { - output := c.Run(ctx, command...) - output.AssertSuccess() - return output -} - -// RunFail runs a command and asserts that it fails with a specified message on stderr. -func (c *Client) RunFail(ctx context.Context, err string, command ...string) *th.CmdOutput { - output := c.Run(ctx, command...) - output.AssertFail(err) - return output -} - -// RunJSON runs a command, asserts success, and parses the response as JSON. -func (c *Client) RunJSON(ctx context.Context, command ...string) map[string]interface{} { - out := c.RunSuccess(ctx, command...) - var parsed map[string]interface{} - require.NoError(c.tb, json.Unmarshal([]byte(out.ReadStdout()), &parsed)) - return parsed -} - -// RunMarshaledJSON runs a command, asserts success, and marshals the JSON response. -func (c *Client) RunMarshaledJSON(ctx context.Context, result interface{}, command ...string) { - out := c.RunSuccess(ctx, command...) - require.NoError(c.tb, json.Unmarshal([]byte(out.ReadStdout()), &result)) -} - -// RunSuccessFirstLine executes the given command, asserts success and returns -// the first line of stdout. -func (c *Client) RunSuccessFirstLine(ctx context.Context, args ...string) string { - return c.RunSuccessLines(ctx, args...)[0] -} - -// RunSuccessLines executes the given command, asserts success and returns -// an array of lines of the stdout. -func (c *Client) RunSuccessLines(ctx context.Context, args ...string) []string { - output := c.RunSuccess(ctx, args...) 
- result := output.ReadStdoutTrimNewlines() - return strings.Split(result, "\n") -} diff --git a/internal/app/go-filecoin/node/test/builder.go b/internal/app/go-filecoin/node/test/builder.go deleted file mode 100644 index b528e5c2b2..0000000000 --- a/internal/app/go-filecoin/node/test/builder.go +++ /dev/null @@ -1,138 +0,0 @@ -package test - -import ( - "context" - "io/ioutil" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/genesis" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -// NodeBuilder creates and configures Filecoin nodes for in-process testing. -// This is intended to replace use of GenNode and the various other node construction entry points -// that end up there. -// Note that (August 2019) there are two things called "config": the configuration read in from -// file to the config.Config structure, and node.Config which is really just some dependency -// injection. This builder avoids exposing the latter directly. -type NodeBuilder struct { - // Initialisation function for the genesis block and state. - gif genesis.InitFunc - // Options to the repo initialisation. - initOpts []node.InitOpt - // Mutations to be applied to node config after initialisation. - configMutations []node.ConfigOpt - // Mutations to be applied to the node builder config before building. - builderOpts []node.BuilderOpt - - tb testing.TB -} - -// NewNodeBuilder creates a new node builder. -func NewNodeBuilder(tb testing.TB) *NodeBuilder { - return &NodeBuilder{ - gif: gengen.MakeGenesisFunc(gengen.NetworkName("gfctest")), - initOpts: []node.InitOpt{}, - configMutations: []node.ConfigOpt{ - // Default configurations that make sense for integration tests. 
- // The can be overridden by subsequent `withConfigChanges`. - node.ConfigOpt(func(c *config.Config) { - // Bind only locally, defer port selection until binding. - c.API.Address = "/ip4/127.0.0.1/tcp/0" - c.Swarm.Address = "/ip4/127.0.0.1/tcp/0" - }), - }, - builderOpts: []node.BuilderOpt{}, - tb: tb, - } -} - -// WithGenesisInit sets the built nodes' genesis function. -func (b *NodeBuilder) WithGenesisInit(gif genesis.InitFunc) *NodeBuilder { - b.gif = gif - return b -} - -// WithInitOpt adds one or more options to repo initialisation. -func (b *NodeBuilder) WithInitOpt(opts ...node.InitOpt) *NodeBuilder { - b.initOpts = append(b.initOpts, opts...) - return b -} - -// WithBuilderOpt adds one or more node building options to node creation. -func (b *NodeBuilder) WithBuilderOpt(opts ...node.BuilderOpt) *NodeBuilder { - b.builderOpts = append(b.builderOpts, opts...) - return b -} - -// WithConfig adds a configuration mutation function to be invoked after repo initialisation. -func (b *NodeBuilder) WithConfig(cm node.ConfigOpt) *NodeBuilder { - b.configMutations = append(b.configMutations, cm) - return b -} - -// Build creates a node as specified by this builder. -// This many be invoked multiple times to create many nodes. -func (b *NodeBuilder) Build(ctx context.Context) *node.Node { - // Initialise repo. - repo := repo.NewInMemoryRepo() - - // Apply configuration changes (must happen before node.OptionsFromRepo()). - sectorDir, err := ioutil.TempDir("", "go-fil-test-sectors") - b.requireNoError(err) - repo.Config().SectorBase.RootDirPath = sectorDir - for _, m := range b.configMutations { - m(repo.Config()) - } - - b.requireNoError(node.Init(ctx, repo, b.gif, b.initOpts...)) - - // Initialize the node. - repoConfigOpts, err := node.OptionsFromRepo(repo) - b.requireNoError(err) - - nd, err := node.New(ctx, append(repoConfigOpts, b.builderOpts...)...) - b.requireNoError(err) - return nd -} - -// BuildAndStart build a node and starts it. 
-func (b *NodeBuilder) BuildAndStart(ctx context.Context) *node.Node { - n := b.Build(ctx) - err := n.Start(ctx) - b.requireNoError(err) - return n -} - -// BuildAndStartAPI is a convenience function composing BuildAndStart with -// RunNodeAPI -func (b *NodeBuilder) BuildAndStartAPI(ctx context.Context) (*node.Node, *Client, func()) { - n := b.BuildAndStart(ctx) - c, apiDone := RunNodeAPI(ctx, n, b.tb) - done := func() { - apiDone() - n.Stop(ctx) - } - return n, c, done -} - -func (b *NodeBuilder) requireNoError(err error) { - b.tb.Helper() - require.NoError(b.tb, err) -} - -// BuildMany builds numNodes nodes with the builder's configuration. -func (b *NodeBuilder) BuildMany(ctx context.Context, numNodes int) []*node.Node { - var out []*node.Node - for i := 0; i < numNodes; i++ { - nd := b.Build(ctx) - out = append(out, nd) - } - - return out -} diff --git a/internal/app/go-filecoin/node/test/setup.go b/internal/app/go-filecoin/node/test/setup.go deleted file mode 100644 index 61c02bb60d..0000000000 --- a/internal/app/go-filecoin/node/test/setup.go +++ /dev/null @@ -1,170 +0,0 @@ -package test - -import ( - "context" - "encoding/json" - "os" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/build/project" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -const blockTime = builtin.EpochDurationSeconds * time.Second - -// 
MustCreateNodesWithBootstrap creates an in-process test setup capable of testing communication between nodes. -// Every setup will have one bootstrap node (the first node that is called) that is setup to have power to mine. -// All of the proofs for the set-up are fake (but additional nodes will still need to create miners and add storage to -// gain power). All nodes will be started and connected to each other. The returned cancel function ensures all nodes -// are stopped when the test is over. -func MustCreateNodesWithBootstrap(ctx context.Context, t *testing.T, additionalNodes uint) ([]*node.Node, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - nodes := make([]*node.Node, 1+additionalNodes) - - // create bootstrap miner - seed, genCfg, fakeClock, chainClock := CreateBootstrapSetup(t) - nodes[0] = CreateBootstrapMiner(ctx, t, seed, chainClock, genCfg) - - // create additional nodes - for i := uint(0); i < additionalNodes; i++ { - node := NewNodeBuilder(t). - WithGenesisInit(seed.GenesisInitFunc). - WithConfig(node.DefaultAddressConfigOpt(seed.Addr(t, int(i+1)))). - WithBuilderOpt(node.PoStGeneratorOption(&consensus.TestElectionPoster{})). - WithBuilderOpt(node.FakeProofVerifierBuilderOpts()...). - WithBuilderOpt(node.ChainClockConfigOption(chainClock)). - WithBuilderOpt(node.DrandConfigOption(drand.NewFake(chainClock.StartTimeOfEpoch(0)))). 
- Build(ctx) - addr := seed.GiveKey(t, node, int(i+1)) - err := node.PorcelainAPI.ConfigSet("wallet.defaultAddress", addr.String()) - require.NoError(t, err) - err = node.Start(ctx) - require.NoError(t, err) - nodes[i+1] = node - } - - // connect all nodes - for i := 0; i < len(nodes); i++ { - for j := 0; j < i; j++ { - node.ConnectNodes(t, nodes[i], nodes[j]) - } - } - - // start simulated mining and wait for shutdown - go func() { - for { - select { - case <-ctx.Done(): - return - default: - RequireMineOnce(ctx, t, fakeClock, nodes[0]) - } - } - }() - - return nodes, cancel -} - -func RequireMineOnce(ctx context.Context, t *testing.T, fakeClock clock.Fake, node *node.Node) *block.Block { - fakeClock.Advance(blockTime) - blk, err := node.BlockMining.BlockMiningAPI.MiningOnce(ctx) - - // fail only if ctx not done - select { - case <-ctx.Done(): - return nil - default: - require.NoError(t, err) - } - - return blk -} - -func CreateBootstrapSetup(t *testing.T) (*node.ChainSeed, *gengen.GenesisCfg, clock.Fake, clock.ChainEpochClock) { - // set up paths and fake clock. - genTime := int64(1000000000) - fakeClock := clock.NewFake(time.Unix(genTime, 0)) - propDelay := 6 * time.Second - - // Load genesis config fixture. - genCfgPath := project.Root("fixtures/setup.json") - genCfg := loadGenesisConfig(t, genCfgPath) - genCfg.Miners = append(genCfg.Miners, &gengen.CreateStorageMinerConfig{ - Owner: 5, - SealProofType: constants.DevSealProofType, - }) - seed := node.MakeChainSeed(t, genCfg) - chainClock := clock.NewChainClockFromClock(uint64(genTime), blockTime, propDelay, fakeClock) - - return seed, genCfg, fakeClock, chainClock -} - -func CreateBootstrapMiner(ctx context.Context, t *testing.T, seed *node.ChainSeed, chainClock clock.ChainEpochClock, genCfg *gengen.GenesisCfg) *node.Node { - // set up paths and fake clock. 
- presealPath := project.Root("fixtures/genesis-sectors") - minerAddress, err := address.NewIDAddress(106) - require.NoError(t, err) - - // create bootstrap miner - bootstrapMiner := NewNodeBuilder(t). - WithGenesisInit(seed.GenesisInitFunc). - WithBuilderOpt(node.FakeProofVerifierBuilderOpts()...). - WithBuilderOpt(node.PoStGeneratorOption(&consensus.TestElectionPoster{})). - WithBuilderOpt(node.ChainClockConfigOption(chainClock)). - WithBuilderOpt(node.DrandConfigOption(drand.NewFake(chainClock.StartTimeOfEpoch(0)))). - WithBuilderOpt(node.MonkeyPatchSetProofTypeOption(constants.DevRegisteredSealProof)). - WithConfig(func(c *config.Config) { - c.SectorBase.PreSealedSectorsDirPath = presealPath - c.Mining.MinerAddress = minerAddress - }). - Build(ctx) - - addr := seed.GiveKey(t, bootstrapMiner, 0) - err = bootstrapMiner.PorcelainAPI.ConfigSet("wallet.defaultAddress", addr.String()) - require.NoError(t, err) - - _, _, err = initNodeGenesisMiner(ctx, t, bootstrapMiner, seed, genCfg.Miners[0].Owner) - require.NoError(t, err) - err = bootstrapMiner.Start(ctx) - require.NoError(t, err) - - return bootstrapMiner -} - -func initNodeGenesisMiner(ctx context.Context, t *testing.T, nd *node.Node, seed *node.ChainSeed, minerIdx int) (address.Address, address.Address, error) { - seed.GiveKey(t, nd, minerIdx) - miner, owner := seed.GiveMiner(t, nd, 0) - - genesisBlock, err := nd.Chain().ChainReader.GetGenesisBlock(ctx) - require.NoError(t, err) - - err = node.InitSectors(ctx, nd.Repo, genesisBlock) - require.NoError(t, err) - return miner, owner, err -} - -func loadGenesisConfig(t *testing.T, path string) *gengen.GenesisCfg { - configFile, err := os.Open(path) - if err != nil { - t.Errorf("failed to open config file %s: %s", path, err) - } - defer func() { _ = configFile.Close() }() - - var cfg gengen.GenesisCfg - if err := json.NewDecoder(configFile).Decode(&cfg); err != nil { - t.Errorf("failed to parse config: %s", err) - } - return &cfg -} diff --git 
a/internal/app/go-filecoin/node/testing.go b/internal/app/go-filecoin/node/testing.go deleted file mode 100644 index e8dde7ac2a..0000000000 --- a/internal/app/go-filecoin/node/testing.go +++ /dev/null @@ -1,246 +0,0 @@ -package node - -import ( - "context" - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" - ds "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p-core/crypto" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/fixtures/fortest" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/proofs" - "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -// ChainSeed is a generalized struct for configuring node -type ChainSeed struct { - info *gengen.RenderedGenInfo - bstore blockstore.Blockstore -} - -// MakeChainSeed creates a chain seed struct (see above) from a given -// genesis config -func MakeChainSeed(t *testing.T, cfg *gengen.GenesisCfg) *ChainSeed { - t.Helper() - - mds := ds.NewMapDatastore() - bstore := blockstore.NewBlockstore(mds) - - info, err := gengen.GenGen(context.TODO(), cfg, bstore) - require.NoError(t, err) - - return &ChainSeed{ - info: info, - bstore: bstore, - } -} - -// GenesisInitFunc is a th.GenesisInitFunc using the chain seed -func (cs *ChainSeed) GenesisInitFunc(cst cbor.IpldStore, bs blockstore.Blockstore) (*block.Block, error) { - keys, err := cs.bstore.AllKeysChan(context.TODO()) - if err != nil { - return nil, err - } - - for k := range keys { - blk, err := cs.bstore.Get(k) - if err != nil { - return nil, err - } - - if err := bs.Put(blk); err != nil { - return 
nil, err - } - } - - var blk block.Block - if err := cst.Get(context.TODO(), cs.info.GenesisCid, &blk); err != nil { - return nil, err - } - - return &blk, nil -} - -// GiveKey gives the given key to the given node -func (cs *ChainSeed) GiveKey(t *testing.T, nd *Node, key int) address.Address { - t.Helper() - bcks := nd.Wallet.Wallet.Backends(wallet.DSBackendType) - require.Len(t, bcks, 1, "expected to get exactly one datastore backend") - - dsb := bcks[0].(*wallet.DSBackend) - kinfo := cs.info.Keys[key] - require.NoError(t, dsb.ImportKey(kinfo)) - - addr, err := kinfo.Address() - require.NoError(t, err) - - return addr -} - -// GiveMiner gives the specified miner to the node. Returns the address and the owner addresss -func (cs *ChainSeed) GiveMiner(t *testing.T, nd *Node, which int) (address.Address, address.Address) { - t.Helper() - cfg := nd.Repo.Config() - m := cs.info.Miners[which] - cfg.Mining.MinerAddress = m.Address - - require.NoError(t, nd.Repo.ReplaceConfig(cfg)) - - ownerAddr, err := cs.info.Keys[m.Owner].Address() - require.NoError(t, err) - - return m.Address, ownerAddr -} - -// Addr returns the address for the given key -func (cs *ChainSeed) Addr(t *testing.T, key int) address.Address { - t.Helper() - k := cs.info.Keys[key] - - a, err := k.Address() - if err != nil { - t.Fatal(err) - } - - return a -} - -// ConfigOpt mutates a node config post initialization -type ConfigOpt func(*config.Config) - -// MinerConfigOpt is a config option that sets a node's miner address to one of -// the chain seed's miner addresses -func (cs *ChainSeed) MinerConfigOpt(which int) ConfigOpt { - return func(cfg *config.Config) { - m := cs.info.Miners[which] - cfg.Mining.MinerAddress = m.Address - } -} - -// MinerInitOpt is a node init option that imports the key for the miner's owner -func (cs *ChainSeed) MinerInitOpt(which int) InitOpt { - kwhich := cs.info.Miners[which].Owner - kinfo := cs.info.Keys[kwhich] - return ImportKeyOpt(kinfo) -} - -// KeyInitOpt is a node init 
option that imports one of the chain seed's -// keys to a node's wallet -func (cs *ChainSeed) KeyInitOpt(which int) InitOpt { - kinfo := cs.info.Keys[which] - return ImportKeyOpt(kinfo) -} - -// FixtureChainSeed returns the genesis function that -func FixtureChainSeed(t *testing.T) *ChainSeed { - return MakeChainSeed(t, &fortest.TestGenGenConfig) -} - -// DefaultAddressConfigOpt is a node config option setting the default address -func DefaultAddressConfigOpt(addr address.Address) ConfigOpt { - return func(cfg *config.Config) { - cfg.Wallet.DefaultAddress = addr - } -} - -// ConnectNodes connects two nodes together -func ConnectNodes(t *testing.T, a, b *Node) { - t.Helper() - pi := peer.AddrInfo{ - ID: b.Host().ID(), - Addrs: b.Host().Addrs(), - } - - err := a.Host().Connect(context.TODO(), pi) - if err != nil { - t.Fatal(err) - } -} - -// FakeProofVerifierBuilderOpts returns default configuration for testing -func FakeProofVerifierBuilderOpts() []BuilderOpt { - return []BuilderOpt{ - VerifierConfigOption(&proofs.FakeVerifier{}), - } -} - -// StartNodes starts some nodes, failing on any error. -func StartNodes(t *testing.T, nds []*Node) { - t.Helper() - for _, nd := range nds { - if err := nd.Start(context.Background()); err != nil { - t.Fatal(err) - } - } -} - -// StopNodes initiates shutdown of some nodes. -func StopNodes(nds []*Node) { - for _, nd := range nds { - nd.Stop(context.Background()) - } -} - -// MustCreateStorageMinerResult contains the result of a CreateStorageMiner command -type MustCreateStorageMinerResult struct { - MinerAddress *address.Address - Err error -} - -// PeerKeys are a list of keys for peers that can be used in testing. -var PeerKeys = []crypto.PrivKey{ - mustGenKey(101), - mustGenKey(102), -} - -// MakeTestGenCfg returns a genesis configuration used for tests. 
-// This config has one miner with numSectors sectors and two accounts, -// the first is the miner's owner/worker and the accounts both have 10000 FIL -func MakeTestGenCfg(t *testing.T, numSectors int) *gengen.GenesisCfg { - commCfgs, err := gengen.MakeCommitCfgs(numSectors) - require.NoError(t, err) - return &gengen.GenesisCfg{ - KeysToGen: 2, - Miners: []*gengen.CreateStorageMinerConfig{ - { - Owner: 0, - PeerID: mustPeerID(PeerKeys[0]).Pretty(), - CommittedSectors: commCfgs, - SealProofType: constants.DevSealProofType, - }, - }, - Network: "gfctest", - PreallocatedFunds: []string{ - "10000", - "10000", - }, - } -} - -func mustGenKey(seed int64) crypto.PrivKey { - r := rand.New(rand.NewSource(seed)) - priv, _, err := crypto.GenerateEd25519Key(r) - if err != nil { - panic(err) - } - - return priv -} - -func mustPeerID(k crypto.PrivKey) peer.ID { - pid, err := peer.IDFromPrivateKey(k) - if err != nil { - panic(err) - } - return pid -} diff --git a/internal/app/go-filecoin/paths/paths.go b/internal/app/go-filecoin/paths/paths.go deleted file mode 100644 index 1ab36ef4e1..0000000000 --- a/internal/app/go-filecoin/paths/paths.go +++ /dev/null @@ -1,55 +0,0 @@ -package paths - -import ( - "os" - "path/filepath" - - "github.com/mitchellh/go-homedir" -) - -// node repo path defaults -const filPathVar = "FIL_PATH" -const defaultRepoDir = "~/.filecoin" - -// node sector storage path defaults -const filSectorPathVar = "FIL_SECTOR_PATH" -const defaultSectorDir = ".filecoin_sectors" -const defaultPieceStagingDir = "pieces" - -// GetRepoPath returns the path of the filecoin repo from a potential override -// string, the FIL_PATH environment variable and a default of ~/.filecoin/repo. 
-func GetRepoPath(override string) (string, error) { - // override is first precedence - if override != "" { - return homedir.Expand(override) - } - // Environment variable is second precedence - envRepoDir := os.Getenv(filPathVar) - if envRepoDir != "" { - return homedir.Expand(envRepoDir) - } - // Default is third precedence - return homedir.Expand(defaultRepoDir) -} - -// GetSectorPath returns the path of the filecoin sector storage from a -// potential override string, the FIL_SECTOR_PATH environment variable and a -// default of repoPath/../.filecoin_sectors. -func GetSectorPath(override, repoPath string) (string, error) { - // override is first precedence - if override != "" { - return homedir.Expand(override) - } - // Environment variable is second precedence - envRepoDir := os.Getenv(filSectorPathVar) - if envRepoDir != "" { - return homedir.Expand(envRepoDir) - } - // Default is third precedence: repoPath/../defaultSectorDir - return homedir.Expand(filepath.Join(repoPath, "../", defaultSectorDir)) -} - -// PieceStagingDir returns the path to the piece staging directory repo path -func PieceStagingDir(repoPath string) (string, error) { - return homedir.Expand(filepath.Join(repoPath, "../", defaultPieceStagingDir)) -} diff --git a/internal/app/go-filecoin/paymentchannel/integration_test.go b/internal/app/go-filecoin/paymentchannel/integration_test.go deleted file mode 100644 index 54806484a4..0000000000 --- a/internal/app/go-filecoin/paymentchannel/integration_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package paymentchannel_test - -import ( - "context" - "math/big" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - spect "github.com/filecoin-project/specs-actors/support/testing" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - dss 
"github.com/ipfs/go-datastore/sync" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - retrievalmarketconnector "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/connectors/retrieval_market" - pch "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" - paychtest "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel/testing" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -// TestAddFundsToChannel verifies that a call to GetOrCreatePaymentChannel sends -// funds to the actor if it already exists -func TestPaymentChannel(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - chainBuilder, bs, genTs := testSetup2(ctx, t) - ds := dss.MutexWrap(datastore.NewMapDatastore()) - - balance := abi.NewTokenAmount(1000000) - initActorUtil := paychtest.NewFakeInitActorUtil(ctx, t, balance) - root, err := chainBuilder.GetTipSetStateRoot(genTs.Key()) - require.NoError(t, err) - - initialChannelAmt := abi.NewTokenAmount(1200) - _, client, miner, paychID, paych := initActorUtil.StubCtorSendResponse(initialChannelAmt) - fakeProvider := message.NewFakeProvider(t) - fakeProvider.Builder = chainBuilder - clientActor := actor.NewActor(builtin.AccountActorCodeID, balance, root) - fakeProvider.SetHead(genTs.Key()) - fakeProvider.SetActor(client, 
clientActor) - - viewer := paychtest.NewFakeStateViewer(t) - - pchMgr := pch.NewManager(context.Background(), ds, initActorUtil, initActorUtil, viewer) - - viewer.GetFakeStateView().AddActorWithState(paych, client, miner, address.Undef) - - rmc := retrievalmarketconnector.NewRetrievalMarketClientFakeAPI(t) - - connector := retrievalmarketconnector.NewRetrievalClientConnector(bs, fakeProvider, rmc, pchMgr) - assert.NotNil(t, connector) - tok, err := encoding.Encode(genTs.Key()) - require.NoError(t, err) - - addr, mcid, err := connector.GetOrCreatePaymentChannel(ctx, client, miner, initialChannelAmt, tok) - require.NoError(t, err) - assert.Equal(t, address.Undef, addr) - - addr, err = connector.WaitForPaymentChannelCreation(mcid) - require.NoError(t, err) - assert.Equal(t, paych, addr) - - // make sure the channel info is there - chinfo, err := pchMgr.GetPaymentChannelInfo(paych) - require.NoError(t, err) - require.Equal(t, paych, chinfo.UniqueAddr) - - paychActorUtil := paychtest.FakePaychActorUtil{ - T: t, - Balance: types.NewAttoFIL(initialChannelAmt.Int), - PaychAddr: paych, - PaychIDAddr: paychID, - Client: client, - ClientID: spect.NewIDAddr(t, 999), - Miner: miner, - } - - fakeProvider.SetHead(genTs.Key()) - fakeProvider.SetActor(client, clientActor) - - viewer.GetFakeStateView().AddActorWithState(paychActorUtil.PaychAddr, paychActorUtil.Client, paychActorUtil.Miner, address.Undef) - assert.NotNil(t, connector) - - addVal := abi.NewTokenAmount(333) - expCid := paychActorUtil.StubSendFundsResponse(paychActorUtil.Client, addVal, exitcode.Ok, 1) - - // set up sends and waits to go to the payment channel actor util / harness - initActorUtil.DelegateSender(paychActorUtil.Send) - initActorUtil.DelegateWaiter(paychActorUtil.Wait) - - addr, mcid, err = connector.GetOrCreatePaymentChannel(ctx, client, miner, addVal, tok) - require.NoError(t, err) - assert.Equal(t, paychActorUtil.PaychAddr, addr) - assert.True(t, mcid.Equals(expCid)) - - err = 
connector.WaitForPaymentChannelAddFunds(mcid) - require.NoError(t, err) - - expBal := types.NewAttoFIL(big.NewInt(1533)) - assert.True(t, expBal.Equals(paychActorUtil.Balance)) -} - -func testSetup2(ctx context.Context, t *testing.T) (*chain.Builder, bstore.Blockstore, block.TipSet) { - _, builder, genTs, cs, st1 := requireNewEmptyChainStore(ctx, t) - rootBlk := builder.AppendBlockOnBlocks() - block.RequireNewTipSet(t, rootBlk) - require.NoError(t, cs.SetHead(ctx, genTs)) - root, err := st1.Commit(ctx) - require.NoError(t, err) - - // add tipset and state to chainstore - require.NoError(t, cs.PutTipSetMetadata(ctx, &chain.TipSetMetadata{ - TipSet: genTs, - TipSetStateRoot: root, - TipSetReceipts: types.EmptyReceiptsCID, - })) - - ds := repo.NewInMemoryRepo().ChainDatastore() - bs := bstore.NewBlockstore(ds) - return builder, bs, genTs -} - -func requireNewEmptyChainStore(ctx context.Context, t *testing.T) (cid.Cid, *chain.Builder, block.TipSet, *chain.Store, state.Tree) { - store := cbor.NewMemCborStore() - - // Cribbed from chain/store_test - st1 := state.NewState(store) - root, err := st1.Commit(ctx) - require.NoError(t, err) - - // link testing state to test block - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - r := repo.NewInMemoryRepo() - - // setup chain store - ds := r.Datastore() - cs := chain.NewStore(ds, store, chain.NewStatusReporter(), genTS.At(0).Cid()) - return root, builder, genTS, cs, st1 -} diff --git a/internal/app/go-filecoin/paymentchannel/manager.go b/internal/app/go-filecoin/paymentchannel/manager.go deleted file mode 100644 index 1781cc7942..0000000000 --- a/internal/app/go-filecoin/paymentchannel/manager.go +++ /dev/null @@ -1,403 +0,0 @@ -package paymentchannel - -import ( - "bytes" - "context" - "sync" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - 
"github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - initActor "github.com/filecoin-project/specs-actors/actors/builtin/init" - paychActor "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - xerrors "github.com/pkg/errors" - "github.com/prometheus/common/log" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -var defaultGasPrice = types.NewAttoFILFromFIL(actor.DefaultGasCost) -var defaultGasLimit = gas.NewGas(5000) -var zeroAmt = abi.NewTokenAmount(0) - -// Manager manages payment channel actor and the data paymentChannels operations. 
-type Manager struct { - ctx context.Context - paymentChannels *paychStore - sender MsgSender - waiter MsgWaiter - stateViewer ActorStateViewer -} - -// PaymentChannelStorePrefix is the prefix used in the datastore -var PaymentChannelStorePrefix = "/retrievaldeals/paymentchannel" - -// MsgWaiter is an interface for waiting for a message to appear on chain -type MsgWaiter interface { - Wait(ctx context.Context, msgCid cid.Cid, lookback uint64, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error -} - -// MsgSender is an interface for something that can post messages on chain -type MsgSender interface { - // Send posts a message to the chain - Send(ctx context.Context, - from, to address.Address, - value types.AttoFIL, - gasPrice types.AttoFIL, - gasLimit gas.Unit, - bcast bool, - method abi.MethodNum, - params interface{}) (out cid.Cid, pubErrCh chan error, err error) -} - -// ActorStateViewer is an interface to StateViewer that the Manager uses -type ActorStateViewer interface { - GetStateView(ctx context.Context, tok shared.TipSetToken) (ManagerStateView, error) -} - -// NewManager creates and returns a new paymentchannel.Manager -func NewManager(ctx context.Context, ds datastore.Batching, waiter MsgWaiter, sender MsgSender, viewer ActorStateViewer) *Manager { - s := statestore.New(namespace.Wrap(ds, datastore.NewKey(PaymentChannelStorePrefix))) - - store := paychStore{store: s} - - return &Manager{ctx, &store, sender, waiter, viewer} -} - -// AllocateLane adds a new lane to a payment channel entry -func (pm *Manager) AllocateLane(paychAddr address.Address) (laneID uint64, err error) { - err = pm.paymentChannels.Mutate(paychAddr, func(info *ChannelInfo) error { - laneID = info.NextLane - info.NextLane++ - info.NextNonce++ - return nil - }) - return laneID, err -} - -// GetPaymentChannelByAccounts looks up a payment channel via payer/payee -// returns an empty ChannelInfo if not found. 
-func (pm *Manager) GetPaymentChannelByAccounts(payer, payee address.Address) (*ChannelInfo, error) { - var chinfos []ChannelInfo - var found ChannelInfo - - if err := pm.paymentChannels.List(&chinfos); err != nil { - return nil, err - } - for _, chinfo := range chinfos { - if chinfo.From == payer && chinfo.To == payee { - found = chinfo - break - } - } - return &found, nil -} - -// GetPaymentChannelInfo retrieves channel info from the paymentChannels. -// Assumes channel exists. -func (pm *Manager) GetPaymentChannelInfo(paychAddr address.Address) (*ChannelInfo, error) { - storedState := pm.paymentChannels.Get(paychAddr) - if storedState == nil { - return nil, xerrors.New("no stored state") - } - var chinfo ChannelInfo - if err := storedState.Get(&chinfo); err != nil { - return nil, err - } - return &chinfo, nil -} - -// CreatePaymentChannel will send the message to the InitActor to create a paych.Actor. -// If successful, a new payment channel entry will be persisted to the -// paymentChannels via a message wait handler. 
Returns the created payment channel address -func (pm *Manager) CreatePaymentChannel(client, miner address.Address, amt abi.TokenAmount) (address.Address, cid.Cid, error) { - chinfo, err := pm.GetPaymentChannelByAccounts(client, miner) - if err != nil { - return address.Undef, cid.Undef, err - } - if !chinfo.IsZero() { - return address.Undef, cid.Undef, xerrors.Errorf("payment channel exists for client %s, miner %s", client, miner) - } - pm.paymentChannels.storeLk.Lock() - - execParams, err := PaychActorCtorExecParamsFor(client, miner) - if err != nil { - pm.paymentChannels.storeLk.Unlock() - return address.Undef, cid.Undef, err - } - - mcid, _, err := pm.sender.Send( - pm.ctx, - client, - builtin.InitActorAddr, - types.NewAttoFIL(amt.Int), - defaultGasPrice, - defaultGasLimit, - true, - builtin.MethodsInit.Exec, - &execParams, - ) - if err != nil { - pm.paymentChannels.storeLk.Unlock() - return address.Undef, cid.Undef, err - } - go pm.handlePaychCreateResult(pm.ctx, mcid, client, miner) - return address.Undef, mcid, nil -} - -// AddVoucherToChannel saves a new signed voucher entry to the payment store -// Assumes paychAddr channel has already been created. -// Called by retrieval client connector -func (pm *Manager) AddVoucherToChannel(paychAddr address.Address, voucher *paychActor.SignedVoucher) error { - return pm.saveNewVoucher(paychAddr, voucher, nil) -} - -// AddVoucher saves voucher to the store -// If payment channel record does not exist in store, it will be created. 
-// Each new voucher amount must be > the last largest voucher by at least `expected` -// Called by retrieval provider connector -func (pm *Manager) AddVoucher(paychAddr address.Address, voucher *paychActor.SignedVoucher, proof []byte, expected big.Int, tok shared.TipSetToken) (abi.TokenAmount, error) { - has, err := pm.ChannelExists(paychAddr) - if err != nil { - return zeroAmt, err - } - if !has { - return pm.providerCreatePaymentChannelWithVoucher(paychAddr, voucher, proof, tok) - } - - chinfo, err := pm.GetPaymentChannelInfo(paychAddr) - if err != nil { - return zeroAmt, err - } - // check that this voucher amount is sufficiently larger than the last, largest voucher amount. - largest := chinfo.LargestVoucherAmount() - delta := abi.TokenAmount{Int: abi.NewTokenAmount(0).Sub(voucher.Amount.Int, largest.Int)} - if expected.LessThan(delta) { - return zeroAmt, xerrors.Errorf("voucher amount insufficient") - } - if err = pm.saveNewVoucher(paychAddr, voucher, proof); err != nil { - return zeroAmt, err - } - - return delta, nil -} - -// ChannelExists returns whether paychAddr has a store entry, + error -// Exported for retrieval provider -func (pm *Manager) ChannelExists(paychAddr address.Address) (bool, error) { - return pm.paymentChannels.Has(paychAddr) -} - -// PaychActorCtorExecParamsFor constructs parameters to send a message to InitActor -// To construct a paychActor -func PaychActorCtorExecParamsFor(client, miner address.Address) (initActor.ExecParams, error) { - - ctorParams := paychActor.ConstructorParams{From: client, To: miner} - marshaled, err := encoding.Encode(ctorParams) - if err != nil { - return initActor.ExecParams{}, err - } - - p := initActor.ExecParams{ - CodeCID: builtin.PaymentChannelActorCodeID, - ConstructorParams: marshaled, - } - return p, nil -} - -// GetMinerWorkerAddress gets a miner worker address from the miner address -func (pm *Manager) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) 
(address.Address, error) { - view, err := pm.stateViewer.GetStateView(ctx, tok) - if err != nil { - return address.Undef, err - } - _, fcworker, err := view.MinerControlAddresses(ctx, miner) - return fcworker, err -} - -func (pm *Manager) WaitForCreatePaychMessage(ctx context.Context, mcid cid.Cid) (address.Address, error) { - var newPaychAddr address.Address - - handleResult := func(b *block.Block, sm *types.SignedMessage, mr *vm.MessageReceipt) error { - var res initActor.ExecReturn - if err := encoding.Decode(mr.ReturnValue, &res); err != nil { - return err - } - - newPaychAddr = res.RobustAddress - return nil - } - - err := pm.waiter.Wait(pm.ctx, mcid, msg.DefaultMessageWaitLookback, handleResult) - if err != nil { - return address.Undef, err - } - return newPaychAddr, nil -} - -func (pm *Manager) AddFundsToChannel(paychAddr address.Address, amt abi.TokenAmount) (cid.Cid, error) { - var chinfo ChannelInfo - st := pm.paymentChannels.Get(paychAddr) - if err := st.Get(&chinfo); err != nil { - return cid.Undef, err - } - - mcid, _, err := pm.sender.Send(context.TODO(), chinfo.From, paychAddr, amt, defaultGasPrice, defaultGasLimit, true, builtin.MethodSend, nil) - if err != nil { - return cid.Undef, err - } - // TODO: track amts in paych store by lane: https://github.com/filecoin-project/go-filecoin/issues/4046 - return mcid, nil -} - -func (pm *Manager) WaitForAddFundsMessage(ctx context.Context, mcid cid.Cid) error { - handleResult := func(b *block.Block, sm *types.SignedMessage, mr *vm.MessageReceipt) error { - if mr.ExitCode != exitcode.Ok { - return xerrors.Errorf("Add funds failed with exitcode %d", mr.ExitCode) - } - return nil - } - return pm.waiter.Wait(pm.ctx, mcid, msg.DefaultMessageWaitLookback, handleResult) -} - -// WaitForPaychCreateMsg waits for mcid to appear on chain and returns the robust address of the -// created payment channel -// TODO: set up channel tracking before knowing paych addr: 
https://github.com/filecoin-project/go-filecoin/issues/4045 -// -func (pm *Manager) handlePaychCreateResult(ctx context.Context, mcid cid.Cid, client, miner address.Address) { - defer pm.paymentChannels.storeLk.Unlock() - var paychAddr address.Address - - handleResult := func(_ *block.Block, _ *types.SignedMessage, mr *vm.MessageReceipt) error { - if mr.ExitCode != exitcode.Ok { - return xerrors.Errorf("create message failed with exit code %d", mr.ExitCode) - } - - var decodedReturn initActor.ExecReturn - if err := decodedReturn.UnmarshalCBOR(bytes.NewReader(mr.ReturnValue)); err != nil { - return err - } - paychAddr = decodedReturn.RobustAddress - return nil - } - - if err := pm.waiter.Wait(ctx, mcid, msg.DefaultMessageWaitLookback, handleResult); err != nil { - log.Errorf("payment channel creation failed because: %s", err.Error()) - return - } - - // TODO check again to make sure a payment channel has not been created for this From/To - chinfo := ChannelInfo{ - From: client, - To: miner, - NextLane: 0, - NextNonce: 1, - UniqueAddr: paychAddr, - } - if err := pm.paymentChannels.Begin(paychAddr, &chinfo); err != nil { - log.Error(err) - } -} - -// Called ONLY in context of a retrieval provider. 
-func (pm *Manager) providerCreatePaymentChannelWithVoucher(paychAddr address.Address, voucher *paychActor.SignedVoucher, proof []byte, tok shared.TipSetToken) (abi.TokenAmount, error) { - pm.paymentChannels.storeLk.Lock() - defer pm.paymentChannels.storeLk.Unlock() - view, err := pm.stateViewer.GetStateView(pm.ctx, tok) - if err != nil { - return zeroAmt, err - } - from, to, err := view.PaychActorParties(pm.ctx, paychAddr) - if err != nil { - return zeroAmt, err - } - // needs to "allocate" a lane as well as storing a voucher so this bumps - // lane once and nonce twice - chinfo := ChannelInfo{ - From: from, - To: to, - NextLane: 1, - NextNonce: 2, - UniqueAddr: paychAddr, - Vouchers: []*VoucherInfo{{Voucher: voucher, Proof: proof}}, - } - if err = pm.paymentChannels.Begin(paychAddr, &chinfo); err != nil { - return zeroAmt, err - } - return voucher.Amount, nil -} - -// saveNewVoucher saves a voucher to an existing payment channel -func (pm *Manager) saveNewVoucher(paychAddr address.Address, voucher *paychActor.SignedVoucher, proof []byte) error { - has, err := pm.paymentChannels.Has(paychAddr) - if err != nil { - return err - } - if !has { - return xerrors.Errorf("channel does not exist %s", paychAddr.String()) - } - if err := pm.paymentChannels.Mutate(paychAddr, func(info *ChannelInfo) error { - if info.NextLane <= voucher.Lane { - return xerrors.Errorf("lane does not exist %d", voucher.Lane) - } - if info.HasVoucher(voucher) { - return xerrors.Errorf("voucher already saved") - } - info.NextNonce++ - info.Vouchers = append(info.Vouchers, &VoucherInfo{ - Voucher: voucher, - Proof: proof, - }) - return nil - }); err != nil { - return err - } - return nil -} - -// paychStore is a thin threadsafe wrapper for StateStore -type paychStore struct { - storeLk sync.RWMutex - store *statestore.StateStore -} - -type mutator func(info *ChannelInfo) error - -func (ps *paychStore) Mutate(addr address.Address, m mutator) error { - ps.storeLk.Lock() - defer ps.storeLk.Unlock() - 
return ps.store.Get(addr).Mutate(m) -} -func (ps *paychStore) List(info *[]ChannelInfo) error { - ps.storeLk.RLock() - defer ps.storeLk.RUnlock() - return ps.store.List(info) -} -func (ps *paychStore) Get(addr address.Address) *statestore.StoredState { - ps.storeLk.RLock() - defer ps.storeLk.RUnlock() - return ps.store.Get(addr) -} -func (ps *paychStore) Has(addr address.Address) (bool, error) { - ps.storeLk.RLock() - defer ps.storeLk.RUnlock() - return ps.store.Has(addr) -} - -func (ps *paychStore) Begin(addr address.Address, info *ChannelInfo) error { - return ps.store.Begin(addr, info) -} diff --git a/internal/app/go-filecoin/paymentchannel/manager_test.go b/internal/app/go-filecoin/paymentchannel/manager_test.go deleted file mode 100644 index 83768ede34..0000000000 --- a/internal/app/go-filecoin/paymentchannel/manager_test.go +++ /dev/null @@ -1,353 +0,0 @@ -package paymentchannel_test - -import ( - "context" - "errors" - "fmt" - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - paychActor "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - spect "github.com/filecoin-project/specs-actors/support/testing" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - . 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" - paychtest "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel/testing" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestManager_GetPaymentChannelInfo(t *testing.T) { - tf.UnitTest(t) - t.Run("returns err if info does not exist", func(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - ctx := context.Background() - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - viewer := paychtest.NewFakeStateViewer(t) - m := NewManager(context.Background(), ds, testAPI, testAPI, viewer) - res, err := m.GetPaymentChannelInfo(spect.NewIDAddr(t, 1020)) - assert.EqualError(t, err, "No state for /t01020: datastore: key not found") - assert.Nil(t, res) - }) -} - -func TestManager_CreatePaymentChannel(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - viewer := paychtest.NewFakeStateViewer(t) - balance := abi.NewTokenAmount(301) - - t.Run("happy path", func(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - testAPI.ExpectedMsgCid = shared_testutil.GenerateCids(1)[0] - - m := NewManager(context.Background(), ds, testAPI, testAPI, viewer) - clientAddr, minerAddr, paychUniqueAddr, _ := requireSetupPaymentChannel(t, testAPI, m, balance) - exists, err := m.ChannelExists(paychUniqueAddr) - require.NoError(t, err) - assert.True(t, exists) - - chinfo, err := m.GetPaymentChannelInfo(paychUniqueAddr) - require.NoError(t, err) - require.NotNil(t, chinfo) - expectedChinfo := ChannelInfo{ - NextLane: 0, - NextNonce: 1, - From: clientAddr, - To: minerAddr, - UniqueAddr: 
paychUniqueAddr, - } - assert.Equal(t, expectedChinfo, *chinfo) - }) - t.Run("returns err and does not create channel if Send fails", func(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - testAPI.MsgSendErr = errors.New("sendboom") - clientAddr := spect.NewIDAddr(t, rand.Uint64()) - minerAddr := spect.NewIDAddr(t, rand.Uint64()) - paych := spect.NewActorAddr(t, "paych") - blockHeight := uint64(1234) - m := NewManager(context.Background(), ds, testAPI, testAPI, viewer) - - testAPI.ExpectedMsgCid, testAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, clientAddr, minerAddr, paych, balance, exitcode.Ok, blockHeight) - - addr, mcid, err := m.CreatePaymentChannel(clientAddr, minerAddr, balance) - assert.EqualError(t, err, "sendboom") - assert.True(t, mcid.Equals(cid.Undef)) - assert.Equal(t, address.Undef, addr) - }) - - t.Run("errors if payment channel exists", func(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - m := NewManager(context.Background(), ds, testAPI, testAPI, viewer) - clientAddr, minerAddr, _, _ := requireSetupPaymentChannel(t, testAPI, m, types.ZeroAttoFIL) - _, mcid, err := m.CreatePaymentChannel(clientAddr, minerAddr, balance) - assert.EqualError(t, err, "payment channel exists for client t0901, miner t0902") - assert.True(t, mcid.Equals(cid.Undef)) - }) -} - -func TestManager_AllocateLane(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - ds := dss.MutexWrap(datastore.NewMapDatastore()) - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - balance := big.NewInt(301) - - viewer := paychtest.NewFakeStateViewer(t) - m := NewManager(context.Background(), ds, testAPI, testAPI, viewer) - clientAddr, minerAddr, paychUniqueAddr, _ := requireSetupPaymentChannel(t, testAPI, m, balance) - - t.Run("saves a new lane", func(t *testing.T) { - lane, err := m.AllocateLane(paychUniqueAddr) - 
require.NoError(t, err) - assert.Equal(t, uint64(0), lane) - - chinfo, err := m.GetPaymentChannelInfo(paychUniqueAddr) - require.NoError(t, err) - require.NotNil(t, chinfo) - expectedChinfo := ChannelInfo{ - NextLane: 1, - NextNonce: 2, - From: clientAddr, - To: minerAddr, - UniqueAddr: paychUniqueAddr, - } - - assert.Equal(t, expectedChinfo, *chinfo) - }) - - t.Run("errors if update lane doesn't exist", func(t *testing.T) { - badAddr := spect.NewActorAddr(t, "nonexistent") - lane, err := m.AllocateLane(badAddr) - expErr := fmt.Sprintf("No state for /%s", badAddr.String()) - assert.EqualError(t, err, expErr) - assert.Zero(t, lane) - }) -} - -// AddVoucherToChannel is called by a retrieval client -func TestManager_AddVoucherToChannel(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - amt := big.NewInt(300) - balance := big.NewInt(301) - sig := crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: []byte("doesntmatter")} - root := shared_testutil.GenerateCids(1)[0] - - v := paychActor.SignedVoucher{ - Nonce: 2, - TimeLockMax: abi.ChainEpoch(12345), - TimeLockMin: abi.ChainEpoch(12346), - Amount: amt, - Signature: &sig, - SecretPreimage: []uint8{}, - } - newV := v - newV.Amount = abi.NewTokenAmount(500) - - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - - t.Run("happy path", func(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - viewer := paychtest.NewFakeStateViewer(t) - manager := NewManager(context.Background(), ds, testAPI, testAPI, viewer) - clientAddr, minerAddr, paychUniqueAddr, _ := requireSetupPaymentChannel(t, testAPI, manager, balance) - lane, err := manager.AllocateLane(paychUniqueAddr) - require.NoError(t, err) - v.Lane = lane - testAPI.ExpectedMsgCid, testAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, clientAddr, minerAddr, paychUniqueAddr, balance, exitcode.Ok, 42) - - assert.NoError(t, manager.AddVoucherToChannel(paychUniqueAddr, &v)) - }) - - t.Run("errors if channel doesn't exist", func(t 
*testing.T) { - _, manager := setupViewerManager(ctx, t, root) - assert.EqualError(t, manager.AddVoucherToChannel(spect.NewActorAddr(t, "not-there"), &v), "channel does not exist t2bfuuk4wniuwo2tfso3bfar55hf4d6zq4fbcagui") - }) - - t.Run("returns error if lane does not exist", func(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - viewer := paychtest.NewFakeStateViewer(t) - manager := NewManager(context.Background(), ds, testAPI, testAPI, viewer) - _, _, paychUniqueAddr, _ := requireSetupPaymentChannel(t, testAPI, manager, balance) - assert.EqualError(t, manager.AddVoucherToChannel(paychUniqueAddr, &v), "lane does not exist 0") - }) -} - -// AddVoucher is called by a retrieval provider -func TestManager_AddVoucher(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - paychAddr := spect.NewActorAddr(t, "abcd123") - paychIDAddr := spect.NewIDAddr(t, 103) - clientAddr := spect.NewIDAddr(t, 99) - minerAddr := spect.NewIDAddr(t, 100) - - root := shared_testutil.GenerateCids(1)[0] - proof := []byte("proof") - amt := big.NewInt(300) - sig := crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: []byte("doesntmatter")} - v := paychActor.SignedVoucher{ - Nonce: 2, - TimeLockMax: abi.ChainEpoch(12345), - TimeLockMin: abi.ChainEpoch(12346), - Lane: 0, - Amount: amt, - Signature: &sig, - SecretPreimage: []uint8{}, - } - - tsk := block.NewTipSetKey(root) - tok, err := encoding.Encode(tsk) - require.NoError(t, err) - - t.Run("Adding a valid voucher creates a payment channel info and saves the voucher", func(t *testing.T) { - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - ds := dss.MutexWrap(datastore.NewMapDatastore()) - viewer := paychtest.NewFakeStateViewer(t) - manager := NewManager(context.Background(), ds, testAPI, testAPI, viewer) - viewer.GetFakeStateView().AddActorWithState(paychAddr, clientAddr, minerAddr, address.Undef) - - increment := int64(10) - // increment voucher amount by 10, expect 10 - numVouchers := 3 - for i := 0; i < 
numVouchers; i++ { - newV := v - newV.Amount = abi.NewTokenAmount(increment * int64(i+1)) - resAmt, err := manager.AddVoucher(paychAddr, &newV, proof, abi.NewTokenAmount(increment), tok) - require.NoError(t, err) - assert.True(t, resAmt.Equals(abi.NewTokenAmount(increment))) - } - has, err := manager.ChannelExists(paychAddr) - require.NoError(t, err) - assert.True(t, has) - chinfo, err := manager.GetPaymentChannelInfo(paychAddr) - require.NoError(t, err) - require.NotNil(t, chinfo) - assert.Len(t, chinfo.Vouchers, numVouchers) - assert.Equal(t, abi.NewTokenAmount(30), chinfo.Vouchers[2].Voucher.Amount) - }) - - t.Run("returns error if we try to save the same voucher", func(t *testing.T) { - viewer, manager := setupViewerManager(ctx, t, root) - viewer.GetFakeStateView().AddActorWithState(paychAddr, clientAddr, minerAddr, paychIDAddr) - resAmt, err := manager.AddVoucher(paychAddr, &v, []byte("porkchops"), abi.NewTokenAmount(1), tok) - require.NoError(t, err) - assert.Equal(t, amt, resAmt) - - resAmt, err = manager.AddVoucher(paychAddr, &v, []byte("porkchops"), abi.NewTokenAmount(1), tok) - assert.EqualError(t, err, "voucher already saved") - assert.Equal(t, abi.NewTokenAmount(0), resAmt) - }) - - t.Run("returns error if marshaling fails", func(t *testing.T) { - viewer, manager := setupViewerManager(ctx, t, root) - viewer.GetFakeStateView().AddActorWithState(paychAddr, clientAddr, address.Undef, address.Undef) - resAmt, err := manager.AddVoucher(paychAddr, &v, []byte("applesauce"), abi.NewTokenAmount(1), tok) - assert.EqualError(t, err, "cannot marshal undefined address") - assert.Equal(t, abi.NewTokenAmount(0), resAmt) - }) - - t.Run("returns error if cannot get actor state/parties", func(t *testing.T) { - viewer, manager := setupViewerManager(ctx, t, root) - sv := viewer.GetFakeStateView() - sv.AddActorWithState(paychAddr, clientAddr, minerAddr, paychIDAddr) - sv.PaychActorPartiesErr = errors.New("boom") - resAmt, err := manager.AddVoucher(paychAddr, &v, 
[]byte("porkchops"), abi.NewTokenAmount(1), tok) - assert.EqualError(t, err, "boom") - assert.Equal(t, abi.NewTokenAmount(0), resAmt) - }) - - t.Run("returns error if voucher amount is insufficient", func(t *testing.T) { - viewer, manager := setupViewerManager(ctx, t, root) - - viewer.GetFakeStateView().AddActorWithState(paychAddr, clientAddr, minerAddr, paychIDAddr) - resAmt, err := manager.AddVoucher(paychAddr, &v, []byte("porkchops"), abi.NewTokenAmount(1), tok) - require.NoError(t, err) - _, err = manager.AllocateLane(paychAddr) - require.NoError(t, err) - - // newV.Amount - v.Amount (300) must be > expected (100), or returns error - amounts := []uint64{10, 300, 399} - for _, amt := range amounts { - newV := v - newV.Amount = types.NewAttoFILFromFIL(amt) - resAmt, err = manager.AddVoucher(paychAddr, &newV, []byte("porkchops"), abi.NewTokenAmount(1), tok) - assert.EqualError(t, err, "voucher amount insufficient") - assert.Equal(t, abi.NewTokenAmount(0), resAmt) - } - }) - -} - -func TestManager_GetMinerWorker(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - minerAddr := spect.NewIDAddr(t, 100) - minerWorkerAddr := spect.NewIDAddr(t, 101) - root := shared_testutil.GenerateCids(1)[0] - viewer, manager := setupViewerManager(ctx, t, root) - sv := viewer.GetFakeStateView() - - tsk := block.NewTipSetKey(root) - tok, err := encoding.Encode(tsk) - require.NoError(t, err) - - t.Run("happy path", func(t *testing.T) { - viewer.GetFakeStateView().AddMinerWithState(minerAddr, minerWorkerAddr) - res, err := manager.GetMinerWorkerAddress(ctx, minerAddr, tok) - assert.NoError(t, err) - assert.Equal(t, minerWorkerAddr, res) - }) - - t.Run("returns error if getting control addr fails", func(t *testing.T) { - sv.AddMinerWithState(minerAddr, minerWorkerAddr) - sv.MinerControlErr = errors.New("boom") - _, err := manager.GetMinerWorkerAddress(ctx, minerAddr, tok) - assert.EqualError(t, err, "boom") - }) -} - -func setupViewerManager(ctx context.Context, t *testing.T, 
root cid.Cid) (*paychtest.FakeStateViewer, *Manager) { - testAPI := paychtest.NewFakePaymentChannelAPI(ctx, t) - ds := dss.MutexWrap(datastore.NewMapDatastore()) - viewer := paychtest.NewFakeStateViewer(t) - return viewer, NewManager(context.Background(), ds, testAPI, testAPI, viewer) -} - -func requireSetupPaymentChannel(t *testing.T, testAPI *paychtest.FakePaymentChannelAPI, m *Manager, balance abi.TokenAmount) (address.Address, address.Address, address.Address, uint64) { - clientAddr := spect.NewIDAddr(t, 901) - minerAddr := spect.NewIDAddr(t, 902) - paychUniqueAddr := spect.NewActorAddr(t, "abcd123") - blockHeight := uint64(1234) - - testAPI.ExpectedMsgCid, testAPI.ExpectedResult = paychtest.GenCreatePaychActorMessage(t, clientAddr, minerAddr, paychUniqueAddr, balance, exitcode.Ok, blockHeight) - - _, mcid, err := m.CreatePaymentChannel(clientAddr, minerAddr, balance) - require.NoError(t, err) - - chinfo, err := m.GetPaymentChannelInfo(paychUniqueAddr) - require.NoError(t, err) - require.Equal(t, paychUniqueAddr, chinfo.UniqueAddr) - - // give goroutine a chance to update channel store - assert.True(t, testAPI.ExpectedMsgCid.Equals(testAPI.ActualWaitCid)) - assert.True(t, testAPI.ExpectedMsgCid.Equals(mcid)) - - return clientAddr, minerAddr, paychUniqueAddr, blockHeight -} diff --git a/internal/app/go-filecoin/paymentchannel/mgr_state_view_api.go b/internal/app/go-filecoin/paymentchannel/mgr_state_view_api.go deleted file mode 100644 index 81eebb56d7..0000000000 --- a/internal/app/go-filecoin/paymentchannel/mgr_state_view_api.go +++ /dev/null @@ -1,53 +0,0 @@ -package paymentchannel - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/state" -) - -// ManagerStateViewer is a wrapper for state viewer and state view to fulfill requirements for -// the paymentchannel.Manager -type ManagerStateViewer struct { - reader chainReader - viewer *state.Viewer -} - -// ManagerStateView is the subset of StateView that the ManagerStateViewer needs. -type ManagerStateView interface { - PaychActorParties(ctx context.Context, paychAddr address.Address) (from, to address.Address, err error) - MinerControlAddresses(ctx context.Context, addr address.Address) (owner, worker address.Address, err error) -} - -// ChainReader is the subset of the ChainReadWriter API that the Manager uses -type chainReader interface { - GetTipSetStateRoot(block.TipSetKey) (cid.Cid, error) -} - -// NewManagerStateViewer initializes a new ManagerStateViewer -func NewManagerStateViewer(cr chainReader, cs *cborutil.IpldStore) *ManagerStateViewer { - stateViewer := state.NewViewer(cs) - return &ManagerStateViewer{cr, stateViewer} -} - -// GetStateView gets a state view for the provided token `tok` -func (msv *ManagerStateViewer) GetStateView(ctx context.Context, tok shared.TipSetToken) (ManagerStateView, error) { - var tsk block.TipSetKey - if err := encoding.Decode(tok, &tsk); err != nil { - return nil, fmt.Errorf("failed to marshal TipSetToken into a TipSetKey: %w", err) - } - - root, err := msv.reader.GetTipSetStateRoot(tsk) - if err != nil { - return nil, fmt.Errorf("failed to get tip state: %w", err) - } - return msv.viewer.StateView(root), nil -} diff --git a/internal/app/go-filecoin/paymentchannel/testing/fake_init_actor_util.go b/internal/app/go-filecoin/paymentchannel/testing/fake_init_actor_util.go deleted file mode 100644 index 26ed211111..0000000000 --- a/internal/app/go-filecoin/paymentchannel/testing/fake_init_actor_util.go +++ /dev/null @@ -1,167 +0,0 @@ -package testing - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - 
"github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/filecoin-project/specs-actors/support/mock" - spect "github.com/filecoin-project/specs-actors/support/testing" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// FakeInitActorUtil fulfils the MsgSender and MsgWaiter interfaces for a Manager -// via the specs_actors mock runtime. It executes init.Actor exports directly. -type FakeInitActorUtil struct { - t *testing.T - ctx context.Context - *mock.Runtime - *initActorHarness - newActor, newActorID, caller address.Address - result MsgResult - msgSender sender - msgWaiter waiter -} - -// NewFakeInitActorUtil initializes a FakeInitActorUtil and constructs -// the InitActor. -func NewFakeInitActorUtil(ctx context.Context, t *testing.T, balance abi.TokenAmount) *FakeInitActorUtil { - - builder := mock.NewBuilder(context.Background(), builtin.InitActorAddr). 
- WithBalance(balance, abi.NewTokenAmount(0)) - - fai := &FakeInitActorUtil{ - ctx: ctx, - Runtime: builder.Build(t), - initActorHarness: new(initActorHarness), - t: t, - } - fai.constructInitActor() - fai.Runtime.Verify() - fai.msgSender = fai.defaultSend - fai.msgWaiter = fai.defaultWait - return fai -} - -type sender func(ctx context.Context, - from, to address.Address, - value types.AttoFIL, - gasPrice types.AttoFIL, - gasLimit gas.Unit, - bcast bool, - method abi.MethodNum, - params interface{}) (out cid.Cid, pubErrCh chan error, err error) - -type waiter func(_ context.Context, msgCid cid.Cid, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error - -// Send simulates posting to chain but calls actor code directly -func (fai *FakeInitActorUtil) Send(ctx context.Context, - from, to address.Address, - value types.AttoFIL, - gasPrice types.AttoFIL, - gasLimit gas.Unit, - bcast bool, - method abi.MethodNum, - params interface{}) (out cid.Cid, pubErrCh chan error, err error) { - - return fai.msgSender(ctx, from, to, value, gasPrice, gasLimit, bcast, method, params) -} -func (fai *FakeInitActorUtil) defaultSend(ctx context.Context, - from, to address.Address, - value types.AttoFIL, - gasPrice types.AttoFIL, - gasLimit gas.Unit, - bcast bool, - method abi.MethodNum, - params interface{}) (out cid.Cid, pubErrCh chan error, err error) { - execParams, ok := params.(*init_.ExecParams) - require.True(fai.t, ok) - fai.ExecAndVerify(from, value, execParams) - return fai.result.MsgCid, nil, nil -} - -// Wait simulates waiting for the result of a message and calls the callback `cb` -func (fai *FakeInitActorUtil) Wait(ctx context.Context, msgCid cid.Cid, lookback uint64, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error { - return fai.msgWaiter(ctx, msgCid, cb) -} - -func (fai *FakeInitActorUtil) defaultWait(_ context.Context, msgCid cid.Cid, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error { - 
require.Equal(fai.t, msgCid, fai.result.MsgCid) - res := fai.result - return cb(res.Block, res.Msg, res.Rcpt) -} - -// DelegateSender allows test to delegate a sender function -func (fai *FakeInitActorUtil) DelegateSender(delegate sender) { - fai.msgSender = delegate -} - -// DelegateWaiter allows test to deletate a waiter function -func (fai *FakeInitActorUtil) DelegateWaiter(delegate waiter) { - fai.msgWaiter = delegate -} - -// StubCtorSendResponse sets up addresses for the initActor and generates -// message responses from the call to create a new payment channel -func (fai *FakeInitActorUtil) StubCtorSendResponse(msgVal abi.TokenAmount) (msgCid cid.Cid, client, miner, idaddr, uniqueaddr address.Address) { - fai.caller = spect.NewActorAddr(fai.t, "client account addr") - fai.newActor = spect.NewActorAddr(fai.t, "new paych actor addr") - fai.newActorID = spect.NewIDAddr(fai.t, 100) - miner = spect.NewActorAddr(fai.t, "miner account addr") - fai.Runtime.SetNewActorAddress(fai.newActor) - - msgCid, msgRes := GenCreatePaychActorMessage(fai.t, fai.caller, miner, fai.newActor, msgVal, exitcode.Ok, 42) - fai.result = msgRes - return msgCid, fai.caller, miner, fai.newActorID, fai.newActor -} - -// ExecAndVerify sets up init actor to execute a constructor given a caller and value -func (fai *FakeInitActorUtil) ExecAndVerify(caller address.Address, value abi.TokenAmount, params *init_.ExecParams) { - a := fai.initActorHarness - expParams := runtime.CBORBytes(params.ConstructorParams) - - fai.Runtime.SetReceived(value) - fai.Runtime.SetCaller(caller, builtin.AccountActorCodeID) - fai.Runtime.ExpectCreateActor(builtin.PaymentChannelActorCodeID, fai.newActorID) - - fai.Runtime.ExpectSend(fai.newActorID, builtin.MethodConstructor, expParams, value, nil, exitcode.Ok) - exret := a.execAndVerify(fai.Runtime, params) - require.Equal(fai.t, fai.newActor, exret.RobustAddress) - require.Equal(fai.t, fai.newActorID, exret.IDAddress) -} - -// constructInitActor constructs an 
initActor harness with the fai mock runtime, so that initActor exports -// can be tested in go-filecoin. -func (fai *FakeInitActorUtil) constructInitActor() { - fai.Runtime.SetCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) - fai.Runtime.ExpectValidateCallerAddr(builtin.SystemActorAddr) - h := &initActorHarness{} - ret := fai.Runtime.Call(h.Constructor, &init_.ConstructorParams{NetworkName: "mock"}) - require.Nil(fai.t, ret) - fai.initActorHarness = h -} - -// actor harnesses should be very lightweight. -type initActorHarness struct { - init_.Actor - t testing.TB -} - -func (h *initActorHarness) execAndVerify(rt *mock.Runtime, params *init_.ExecParams) *init_.ExecReturn { - rt.ExpectValidateCallerAny() - ret := rt.Call(h.Exec, params).(*init_.ExecReturn) - require.NotNil(h.t, ret) - rt.Verify() - return ret -} diff --git a/internal/app/go-filecoin/paymentchannel/testing/fake_mgr_api.go b/internal/app/go-filecoin/paymentchannel/testing/fake_mgr_api.go deleted file mode 100644 index 1d18f5b58b..0000000000 --- a/internal/app/go-filecoin/paymentchannel/testing/fake_mgr_api.go +++ /dev/null @@ -1,172 +0,0 @@ -package testing - -import ( - "context" - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - initActor "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - spect "github.com/filecoin-project/specs-actors/support/testing" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// FakePaymentChannelAPI mocks some needed APIs for a payment channel manager -type FakePaymentChannelAPI struct { - t *testing.T - ctx context.Context - - Balance types.AttoFIL - - ActualWaitCid cid.Cid - ExpectedMsgCid cid.Cid - ExpectedResult MsgResult - ActualResult MsgResult - - MsgSendErr error - MsgWaitErr error -} - -// MsgResult stores test message receipts -type MsgResult struct { - Block *block.Block - Msg *types.SignedMessage - DecodedParams interface{} - MsgCid cid.Cid - Rcpt *vm.MessageReceipt -} - -var msgRcptsUndef = MsgResult{} - -// NewFakePaymentChannelAPI creates a new mock payment channel API -func NewFakePaymentChannelAPI(ctx context.Context, t *testing.T) *FakePaymentChannelAPI { - return &FakePaymentChannelAPI{ - t: t, - ctx: ctx, - } -} - -// API methods - -// Wait mocks waiting for a message to be mined -func (f *FakePaymentChannelAPI) Wait(_ context.Context, msgCid cid.Cid, lookback uint64, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error { - if f.MsgWaitErr != nil { - return f.MsgWaitErr - } - f.ActualWaitCid = msgCid - return cb(f.ExpectedResult.Block, f.ExpectedResult.Msg, f.ExpectedResult.Rcpt) -} - -// Send mocks sending a message on chain -func (f *FakePaymentChannelAPI) Send(_ context.Context, - from, to address.Address, - value types.AttoFIL, - gasPrice types.AttoFIL, - gasLimit gas.Unit, - bcast bool, - method abi.MethodNum, - params interface{}) (out cid.Cid, pubErrCh chan error, err error) { - - if f.MsgSendErr != nil { - return cid.Undef, nil, f.MsgSendErr - } - if f.ExpectedResult == msgRcptsUndef || f.ExpectedMsgCid == cid.Undef { - f.t.Fatal("no message or no cid registered") - } - - expMessage := f.ExpectedResult.Msg.Message - require.Equal(f.t, f.ExpectedResult.DecodedParams, params) - require.Equal(f.t, 
expMessage.GasLimit, gasLimit) - require.Equal(f.t, expMessage.GasPrice, gasPrice) - require.Equal(f.t, expMessage.From, from) - require.Equal(f.t, expMessage.To, to) - require.Equal(f.t, expMessage.Value, value) - require.Equal(f.t, expMessage.Method, method) - require.True(f.t, bcast) - return f.ExpectedMsgCid, nil, nil -} - -// testing methods - -// GenCreatePaychActorMessage sets up a message response, with desired exit code and block height -// for creating a payment channel actor -func GenCreatePaychActorMessage( - t *testing.T, - clientAccountAddr, minerAccountAddr, paychUniqueAddr address.Address, - amt abi.TokenAmount, - code exitcode.ExitCode, - height uint64) (cid.Cid, MsgResult) { - - newcid := shared_testutil.GenerateCids(1)[0] - - msg := types.NewUnsignedMessage(clientAccountAddr, builtin.InitActorAddr, 1, - types.NewAttoFIL(amt.Int), builtin.MethodsInit.Exec, []byte{}) - msg.GasPrice = types.NewAttoFILFromFIL(100) - msg.GasLimit = gas.NewGas(5000) - - params, err := paymentchannel.PaychActorCtorExecParamsFor(clientAccountAddr, minerAccountAddr) - if err != nil { - t.Fatal("could not construct send params") - } - msg.Params = requireEncode(t, ¶ms) - - retVal := initActor.ExecReturn{ - IDAddress: spect.NewIDAddr(t, rand.Uint64()), // IDAddress is currently unused - RobustAddress: paychUniqueAddr, - } - - emptySig := crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte{'0'}} - return newcid, MsgResult{ - Block: &block.Block{Height: abi.ChainEpoch(height)}, - Msg: &types.SignedMessage{Message: *msg, Signature: emptySig}, - MsgCid: newcid, - Rcpt: &vm.MessageReceipt{ExitCode: code, ReturnValue: requireEncode(t, &retVal)}, - DecodedParams: ¶ms, - } -} - -// GenSendFundsMessage sets up a message response, with desired exit code and block height -// for a message that just sends funds between two addresses -func GenSendFundsMessage( - from, to address.Address, - amt abi.TokenAmount, - code exitcode.ExitCode, - height uint64) (cid.Cid, MsgResult) { - newcid 
:= shared_testutil.GenerateCids(1)[0] - - msg := types.NewUnsignedMessage(from, to, 2, - types.NewAttoFIL(amt.Int), builtin.MethodSend, []byte{}) - msg.GasPrice = types.NewAttoFILFromFIL(100) - msg.GasLimit = gas.NewGas(5000) - emptySig := crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte{'0'}} - return newcid, MsgResult{ - Block: &block.Block{Height: abi.ChainEpoch(height)}, - Msg: &types.SignedMessage{Message: *msg, Signature: emptySig}, - MsgCid: newcid, - Rcpt: &vm.MessageReceipt{ExitCode: code}, - } -} - -func requireEncode(t *testing.T, params interface{}) []byte { - encodedParams, err := encoding.Encode(params) - if err != nil { - t.Fatal(err.Error()) - } - return encodedParams -} - -var _ paymentchannel.MsgSender = &FakePaymentChannelAPI{} -var _ paymentchannel.MsgWaiter = &FakePaymentChannelAPI{} diff --git a/internal/app/go-filecoin/paymentchannel/testing/fake_paych_actor_util.go b/internal/app/go-filecoin/paymentchannel/testing/fake_paych_actor_util.go deleted file mode 100644 index e5b37edc61..0000000000 --- a/internal/app/go-filecoin/paymentchannel/testing/fake_paych_actor_util.go +++ /dev/null @@ -1,80 +0,0 @@ -package testing - -import ( - "context" - "math/big" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// FakePaychActorUtil fulfils the MsgSender and MsgWaiter interfaces for a Manager -// via the 
specs_actors mock runtime. It executes paych.Actor exports directly. -type FakePaychActorUtil struct { - *testing.T - Balance types.AttoFIL - PaychAddr, PaychIDAddr, Client, ClientID, Miner address.Address - SendErr error - result MsgResult -} - -// Send stubs a message Sender -func (fai *FakePaychActorUtil) Send(ctx context.Context, - from, to address.Address, - value types.AttoFIL, - gasPrice types.AttoFIL, - gasLimit gas.Unit, - bcast bool, - method abi.MethodNum, - params interface{}) (mcid cid.Cid, pubErrCh chan error, err error) { - - if fai.result != msgRcptsUndef { - mcid = fai.result.MsgCid - } - - fai.doSend(value) - return mcid, pubErrCh, fai.SendErr -} - -// Wait stubs a message Waiter -func (fai *FakePaychActorUtil) Wait(_ context.Context, _ cid.Cid, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error { - res := fai.result - return cb(res.Block, res.Msg, res.Rcpt) -} - -// StubSendFundsMessage sets expectations for a message that just sends funds to the actor -func (fai *FakePaychActorUtil) StubSendFundsResponse(from address.Address, amt abi.TokenAmount, code exitcode.ExitCode, height int64) cid.Cid { - newCID := shared_testutil.GenerateCids(1)[0] - - msg := types.NewUnsignedMessage(from, fai.PaychAddr, 1, amt, builtin.MethodSend, []byte{}) - msg.GasPrice = abi.NewTokenAmount(100) - msg.GasLimit = gas.NewGas(5000) - - emptySig := crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte{'0'}} - fai.result = MsgResult{ - Block: &block.Block{Height: abi.ChainEpoch(height)}, - Msg: &types.SignedMessage{Message: *msg, Signature: emptySig}, - DecodedParams: nil, - MsgCid: newCID, - Rcpt: &vm.MessageReceipt{ExitCode: code}, - } - return newCID -} - -func (fai *FakePaychActorUtil) doSend(amt abi.TokenAmount) { - require.Equal(fai, amt, fai.result.Msg.Message.Value) - nb := big.NewInt(0).Add(fai.Balance.Int, amt.Int) - fai.Balance = types.NewAttoFIL(nb) -} diff --git a/internal/app/go-filecoin/paymentchannel/testing/fake_state_viewer.go 
b/internal/app/go-filecoin/paymentchannel/testing/fake_state_viewer.go deleted file mode 100644 index 807c6f4228..0000000000 --- a/internal/app/go-filecoin/paymentchannel/testing/fake_state_viewer.go +++ /dev/null @@ -1,78 +0,0 @@ -package testing - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" -) - -type FakeStateViewer struct { - view *FakeStateView -} - -// NewFakeStateViewer initializes a new FakeStateViewer -func NewFakeStateViewer(t *testing.T) *FakeStateViewer { - view := &FakeStateView{ - t: t, - actors: make(map[address.Address]*FakeActorState), - } - return &FakeStateViewer{view: view} -} - -// GetStateView returns the fake state view as a paymentchannel.ManagerStateView interface -func (f FakeStateViewer) GetStateView(ctx context.Context, tok shared.TipSetToken) (paymentchannel.ManagerStateView, error) { - return f.view, nil -} - -// GetFakeStateView returns the FakeStateView as itself so test setup can be done. 
-func (f FakeStateViewer) GetFakeStateView() *FakeStateView { - return f.view -} - -// FakeStateView mocks a state view for payment channel actor testing -type FakeStateView struct { - t *testing.T - actors map[address.Address]*FakeActorState - PaychActorPartiesErr, ResolveAddressAtErr, MinerControlErr error -} - -var _ paymentchannel.ManagerStateView = new(FakeStateView) - -// FakeActorState is a mock actor state containing test info -type FakeActorState struct { - To, From, IDAddr, MinerWorker address.Address -} - -// MinerControlAddresses mocks returning miner worker and miner actor address -func (f *FakeStateView) MinerControlAddresses(_ context.Context, addr address.Address) (owner, worker address.Address, err error) { - actorState, ok := f.actors[addr] - if !ok { - f.t.Fatalf("actor doesn't exist: %s", addr.String()) - } - return address.Undef, actorState.MinerWorker, f.MinerControlErr -} - -// PaychActorParties mocks returning the From and To addrs of a paych.Actor -func (f *FakeStateView) PaychActorParties(_ context.Context, paychAddr address.Address) (from, to address.Address, err error) { - st, ok := f.actors[paychAddr] - if !ok { - f.t.Fatalf("actor does not exist %s", paychAddr.String()) - } - return st.From, st.To, f.PaychActorPartiesErr -} - -// AddActorWithState sets up a mock state for actorAddr -func (f *FakeStateView) AddActorWithState(actorAddr, from, to, id address.Address) { - f.actors[actorAddr] = &FakeActorState{to, from, id, address.Undef} -} - -// AddMinerWithState sets up a mock state for a miner actor with a worker address -func (f *FakeStateView) AddMinerWithState(minerActor, minerWorker address.Address) { - f.actors[minerActor] = &FakeActorState{MinerWorker: minerWorker} -} - -var _ paymentchannel.ActorStateViewer = &FakeStateViewer{} diff --git a/internal/app/go-filecoin/paymentchannel/types.go b/internal/app/go-filecoin/paymentchannel/types.go deleted file mode 100644 index 254e75243c..0000000000 --- 
a/internal/app/go-filecoin/paymentchannel/types.go +++ /dev/null @@ -1,56 +0,0 @@ -package paymentchannel - -import ( - "reflect" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" -) - -//go:generate cbor-gen-for ChannelInfo VoucherInfo - -// The key for the store is the payment channel's "robust" or "unique" address - -// ChannelInfo is the primary payment channel record -// UniqueAddr: aka RobustAddr, used externally to refer to payment channel -// Duplicated in data due to need to check for existing payment channel since you can't -// iterate over key/value pairs in statestore at present -type ChannelInfo struct { - UniqueAddr, From, To address.Address - NextLane, NextNonce uint64 - Vouchers []*VoucherInfo // All vouchers submitted for this channel -} - -// IsZero returns whether it is a zeroed/blank ChannelInfo -func (ci *ChannelInfo) IsZero() bool { - return ci.UniqueAddr.Empty() && ci.To.Empty() && ci.From.Empty() && - ci.NextLane == 0 && len(ci.Vouchers) == 0 -} - -// HasVoucher returns true if `voucher` is already in `info` -func (ci *ChannelInfo) HasVoucher(voucher *paych.SignedVoucher) bool { - for _, v := range ci.Vouchers { - if reflect.DeepEqual(*v.Voucher, *voucher) { - return true - } - } - return false -} - -// LargestVoucherAmount returns the largest stored voucher amount -func (ci *ChannelInfo) LargestVoucherAmount() abi.TokenAmount { - res := abi.NewTokenAmount(0) - for _, v := range ci.Vouchers { - if v.Voucher.Amount.GreaterThan(res) { - res = v.Voucher.Amount - } - } - return res -} - -// VoucherInfo is a record of a voucher submitted for a payment channel -type VoucherInfo struct { - Voucher *paych.SignedVoucher - Proof []byte -} diff --git a/internal/app/go-filecoin/paymentchannel/types_cbor_gen.go b/internal/app/go-filecoin/paymentchannel/types_cbor_gen.go deleted file mode 100644 index 9ace18622f..0000000000 --- 
a/internal/app/go-filecoin/paymentchannel/types_cbor_gen.go +++ /dev/null @@ -1,240 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package paymentchannel - -import ( - "fmt" - "io" - - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{134}); err != nil { - return err - } - - // t.UniqueAddr (address.Address) (struct) - if err := t.UniqueAddr.MarshalCBOR(w); err != nil { - return err - } - - // t.From (address.Address) (struct) - if err := t.From.MarshalCBOR(w); err != nil { - return err - } - - // t.To (address.Address) (struct) - if err := t.To.MarshalCBOR(w); err != nil { - return err - } - - // t.NextLane (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.NextLane))); err != nil { - return err - } - - // t.NextNonce (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.NextNonce))); err != nil { - return err - } - - // t.Vouchers ([]*paymentchannel.VoucherInfo) (slice) - if len(t.Vouchers) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Vouchers was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Vouchers)))); err != nil { - return err - } - for _, v := range t.Vouchers { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 6 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.UniqueAddr 
(address.Address) (struct) - - { - - if err := t.UniqueAddr.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.From (address.Address) (struct) - - { - - if err := t.From.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.To (address.Address) (struct) - - { - - if err := t.To.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.NextLane (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.NextLane = uint64(extra) - // t.NextNonce (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.NextNonce = uint64(extra) - // t.Vouchers ([]*paymentchannel.VoucherInfo) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Vouchers: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Vouchers = make([]*VoucherInfo, extra) - } - for i := 0; i < int(extra); i++ { - - var v VoucherInfo - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Vouchers[i] = &v - } - - return nil -} - -func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Voucher (paych.SignedVoucher) (struct) - if err := t.Voucher.MarshalCBOR(w); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Proof)))); err != nil { - return err - } - if _, err := w.Write(t.Proof); err != nil { - return err - } - 
return nil -} - -func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Voucher (paych.SignedVoucher) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Voucher = new(paych.SignedVoucher) - if err := t.Voucher.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - // t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Proof = make([]byte, extra) - if _, err := io.ReadFull(br, t.Proof); err != nil { - return err - } - return nil -} diff --git a/internal/app/go-filecoin/plumbing/api.go b/internal/app/go-filecoin/plumbing/api.go deleted file mode 100644 index cc2ef2157d..0000000000 --- a/internal/app/go-filecoin/plumbing/api.go +++ /dev/null @@ -1,351 +0,0 @@ -package plumbing - -import ( - "context" - "io" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/metrics" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" - ma "github.com/multiformats/go-multiaddr" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cfg" - 
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cst" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/dag" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/status" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/net" - "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" -) - -// API is the plumbing implementation, the irreducible set of calls required -// to implement protocols and user/network-facing features. You probably should -// depend on the higher level porcelain.API instead of this api, as it includes -// these calls in addition to higher level convenience calls to make them more -// ergonomic. 
-type API struct { - logger logging.EventLogger - - chain *cst.ChainStateReadWriter - syncer *cst.ChainSyncProvider - config *cfg.Config - dag *dag.DAG - expected consensus.Protocol - msgPool *message.Pool - msgPreviewer *msg.Previewer - msgWaiter *msg.Waiter - network *net.Network - outbox *message.Outbox - pieceManager func() piecemanager.PieceManager - wallet *wallet.Wallet -} - -// APIDeps contains all the API's dependencies -type APIDeps struct { - Chain *cst.ChainStateReadWriter - Sync *cst.ChainSyncProvider - Config *cfg.Config - DAG *dag.DAG - Expected consensus.Protocol - MsgPool *message.Pool - MsgPreviewer *msg.Previewer - MsgWaiter *msg.Waiter - Network *net.Network - Outbox *message.Outbox - PieceManager func() piecemanager.PieceManager - Wallet *wallet.Wallet -} - -// New constructs a new instance of the API. -func New(deps *APIDeps) *API { - return &API{ - logger: logging.Logger("porcelain"), - chain: deps.Chain, - syncer: deps.Sync, - config: deps.Config, - dag: deps.DAG, - expected: deps.Expected, - msgPool: deps.MsgPool, - msgPreviewer: deps.MsgPreviewer, - msgWaiter: deps.MsgWaiter, - network: deps.Network, - outbox: deps.Outbox, - pieceManager: deps.PieceManager, - wallet: deps.Wallet, - } -} - -// ActorGet returns an actor from the latest state on the chain -func (api *API) ActorGet(ctx context.Context, addr address.Address) (*actor.Actor, error) { - return api.chain.GetActor(ctx, addr) -} - -// ActorGetSignature returns the signature of the given actor's given method. -// The function signature is typically used to enable a caller to decode the -// output of an actor method call (message). 
-func (api *API) ActorGetSignature(ctx context.Context, actorAddr address.Address, method abi.MethodNum) (_ vm.ActorMethodSignature, err error) { - return api.chain.GetActorSignature(ctx, actorAddr, method) -} - -// ActorLs returns a channel with actors from the latest state on the chain -func (api *API) ActorLs(ctx context.Context) (<-chan state.GetAllActorsResult, error) { - return api.chain.LsActors(ctx) -} - -// BlockTime returns the block time used by the consensus protocol. -func (api *API) BlockTime() time.Duration { - return api.expected.BlockTime() -} - -// ConfigSet sets the given parameters at the given path in the local config. -// The given path may be either a single field name, or a dotted path to a field. -// The JSON value may be either a single value or a whole data structure to be replace. -// For example: -// ConfigSet("datastore.path", "dev/null") and ConfigSet("datastore", "{\"path\":\"dev/null\"}") -// are the same operation. -func (api *API) ConfigSet(dottedPath string, paramJSON string) error { - return api.config.Set(dottedPath, paramJSON) -} - -// ConfigGet gets config parameters from the given path. -// The path may be either a single field name, or a dotted path to a field. 
-func (api *API) ConfigGet(dottedPath string) (interface{}, error) { - return api.config.Get(dottedPath) -} - -// ChainGetBlock gets a block by CID -func (api *API) ChainGetBlock(ctx context.Context, id cid.Cid) (*block.Block, error) { - return api.chain.GetBlock(ctx, id) -} - -// ChainGetMessages gets a message collection by CID -func (api *API) ChainGetMessages(ctx context.Context, metaCid cid.Cid) ([]*types.UnsignedMessage, []*types.SignedMessage, error) { - return api.chain.GetMessages(ctx, metaCid) -} - -// ChainGetReceipts gets a receipt collection by CID -func (api *API) ChainGetReceipts(ctx context.Context, id cid.Cid) ([]vm.MessageReceipt, error) { - return api.chain.GetReceipts(ctx, id) -} - -// ChainHeadKey returns the head tipset key -func (api *API) ChainHeadKey() block.TipSetKey { - return api.chain.Head() -} - -// ChainSetHead sets `key` as the new head of this chain iff it exists in the nodes chain store. -func (api *API) ChainSetHead(ctx context.Context, key block.TipSetKey) error { - return api.chain.SetHead(ctx, key) -} - -// ChainTipSet returns the tipset at the given key -func (api *API) ChainTipSet(key block.TipSetKey) (block.TipSet, error) { - return api.chain.GetTipSet(key) -} - -// ChainLs returns an iterator of tipsets from head to genesis -func (api *API) ChainLs(ctx context.Context) (*chain.TipsetIterator, error) { - return api.chain.Ls(ctx) -} - -func (api *API) SampleChainRandomness(ctx context.Context, head block.TipSetKey, tag acrypto.DomainSeparationTag, - epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - return api.chain.SampleChainRandomness(ctx, head, tag, epoch, entropy) -} - -// SyncerStatus returns the current status of the active or last active chain sync operation. -func (api *API) SyncerStatus() status.Status { - return api.syncer.Status() -} - -// ChainSyncHandleNewTipSet submits a chain head to the syncer for processing. 
-func (api *API) ChainSyncHandleNewTipSet(ci *block.ChainInfo) error { - return api.syncer.HandleNewTipSet(ci) -} - -// ChainExport exports the chain from `head` up to and including the genesis block to `out` -func (api *API) ChainExport(ctx context.Context, head block.TipSetKey, out io.Writer) error { - return api.chain.ChainExport(ctx, head, out) -} - -// ChainImport imports a chain from `in`. -func (api *API) ChainImport(ctx context.Context, in io.Reader) (block.TipSetKey, error) { - return api.chain.ChainImport(ctx, in) -} - -// OutboxQueues lists addresses with non-empty outbox queues (in no particular order). -func (api *API) OutboxQueues() []address.Address { - return api.outbox.Queue().Queues() -} - -// OutboxQueueLs lists messages in the queue for an address. -func (api *API) OutboxQueueLs(sender address.Address) []*message.Queued { - return api.outbox.Queue().List(sender) -} - -// OutboxQueueClear clears messages in the queue for an address/ -func (api *API) OutboxQueueClear(ctx context.Context, sender address.Address) { - api.outbox.Queue().Clear(ctx, sender) -} - -// MessagePoolPending lists messages un-mined in the pool -func (api *API) MessagePoolPending() []*types.SignedMessage { - return api.msgPool.Pending() -} - -// MessagePoolGet fetches a message from the pool. -func (api *API) MessagePoolGet(cid cid.Cid) (value *types.SignedMessage, ok bool) { - return api.msgPool.Get(cid) -} - -// MessagePoolRemove removes a message from the message pool. -func (api *API) MessagePoolRemove(cid cid.Cid) { - api.msgPool.Remove(cid) -} - -// MessagePreview previews the Gas cost of a message by running it locally on the client and -// recording the amount of Gas used. -func (api *API) MessagePreview(ctx context.Context, from, to address.Address, method abi.MethodNum, params ...interface{}) (gas.Unit, error) { - return api.msgPreviewer.Preview(ctx, from, to, method, params...) -} - -// StateView loads the state view for a tipset, i.e. 
the state *after* the application of the tipset's messages. -func (api *API) StateView(baseKey block.TipSetKey) (*appstate.View, error) { - return api.chain.StateView(baseKey) -} - -// MessageSend sends a message. It uses the default from address if none is given and signs the -// message using the wallet. This call "sends" in the sense that it enqueues the -// message in the msg pool and broadcasts it to the network; it does not wait for the -// message to go on chain. Note that no default from address is provided. The error -// channel returned receives either nil or an error and is immediately closed after -// the message is published to the network to signal that the publish is complete. -func (api *API) MessageSend(ctx context.Context, from, to address.Address, value types.AttoFIL, gasPrice types.AttoFIL, gasLimit gas.Unit, method abi.MethodNum, params interface{}) (cid.Cid, chan error, error) { - return api.outbox.Send(ctx, from, to, value, gasPrice, gasLimit, true, method, params) -} - -//SignedMessageSend sends a siged message. -func (api *API) SignedMessageSend(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, chan error, error) { - return api.outbox.SignedSend(ctx, smsg, true) -} - -// MessageWait invokes the callback when a message with the given cid appears on chain. -// It will find the message in both the case that it is already on chain and -// the case that it appears in a newly mined block. An error is returned if one is -// encountered or if the context is canceled. Otherwise, it waits forever for the message -// to appear on chain. 
-func (api *API) MessageWait(ctx context.Context, msgCid cid.Cid, lookback uint64, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error { - return api.msgWaiter.Wait(ctx, msgCid, lookback, cb) -} - -// NetworkGetBandwidthStats gets stats on the current bandwidth usage of the network -func (api *API) NetworkGetBandwidthStats() metrics.Stats { - return api.network.GetBandwidthStats() -} - -// NetworkGetPeerAddresses gets the current addresses of the node -func (api *API) NetworkGetPeerAddresses() []ma.Multiaddr { - return api.network.GetPeerAddresses() -} - -// NetworkGetPeerID gets the current peer id of the node -func (api *API) NetworkGetPeerID() peer.ID { - return api.network.GetPeerID() -} - -// NetworkFindProvidersAsync issues a findProviders query to the filecoin network content router. -func (api *API) NetworkFindProvidersAsync(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo { - return api.network.Router.FindProvidersAsync(ctx, key, count) -} - -// NetworkGetClosestPeers issues a getClosestPeers query to the filecoin network. -func (api *API) NetworkGetClosestPeers(ctx context.Context, key string) (<-chan peer.ID, error) { - return api.network.GetClosestPeers(ctx, key) -} - -// NetworkPing sends echo request packets over the network. 
-func (api *API) NetworkPing(ctx context.Context, pid peer.ID) (<-chan ping.Result, error) { - return api.network.Pinger.Ping(ctx, pid) -} - -// NetworkFindPeer searches the libp2p router for a given peer id -func (api *API) NetworkFindPeer(ctx context.Context, peerID peer.ID) (peer.AddrInfo, error) { - return api.network.FindPeer(ctx, peerID) -} - -// NetworkConnect connects to peers at the given addresses -func (api *API) NetworkConnect(ctx context.Context, addrs []string) (<-chan net.ConnectionResult, error) { - return api.network.Connect(ctx, addrs) -} - -// NetworkPeers lists peers currently available on the network -func (api *API) NetworkPeers(ctx context.Context, verbose, latency, streams bool) (*net.SwarmConnInfos, error) { - return api.network.Peers(ctx, verbose, latency, streams) -} - -// WalletAddresses gets addresses from the wallet -func (api *API) WalletAddresses() []address.Address { - return api.wallet.Addresses() -} - -// WalletNewAddress generates a new wallet address -func (api *API) WalletNewAddress(protocol address.Protocol) (address.Address, error) { - return wallet.NewAddress(api.wallet, protocol) -} - -// WalletImport adds a given set of KeyInfos to the wallet -func (api *API) WalletImport(kinfos ...*crypto.KeyInfo) ([]address.Address, error) { - return api.wallet.Import(kinfos...) -} - -// WalletExport returns the KeyInfos for the given wallet addresses -func (api *API) WalletExport(addrs []address.Address) ([]*crypto.KeyInfo, error) { - return api.wallet.Export(addrs) -} - -// DAGGetNode returns the associated DAG node for the passed in CID. -func (api *API) DAGGetNode(ctx context.Context, ref string) (interface{}, error) { - return api.dag.GetNode(ctx, ref) -} - -// DAGGetFileSize returns the file size for a given Cid -func (api *API) DAGGetFileSize(ctx context.Context, c cid.Cid) (uint64, error) { - return api.dag.GetFileSize(ctx, c) -} - -// DAGCat returns an iostream with a piece of data stored on the merkeldag with -// the given cid. 
-func (api *API) DAGCat(ctx context.Context, c cid.Cid) (io.Reader, error) { - return api.dag.Cat(ctx, c) -} - -// DAGImportData adds data from an io reader to the merkledag and returns the -// Cid of the given data. Once the data is in the DAG, it can fetched from the -// node via Bitswap and a copy will be kept in the blockstore. -func (api *API) DAGImportData(ctx context.Context, data io.Reader) (ipld.Node, error) { - return api.dag.ImportData(ctx, data) -} - -// PieceManager returns the piece manager -func (api *API) PieceManager() piecemanager.PieceManager { - return api.pieceManager() -} diff --git a/internal/app/go-filecoin/plumbing/cfg/config.go b/internal/app/go-filecoin/plumbing/cfg/config.go deleted file mode 100644 index 21533cabd6..0000000000 --- a/internal/app/go-filecoin/plumbing/cfg/config.go +++ /dev/null @@ -1,35 +0,0 @@ -package cfg - -import ( - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "sync" -) - -// Config is plumbing implementation for setting and retrieving values from local config. -type Config struct { - repo repo.Repo - lock sync.Mutex -} - -// NewConfig returns a new Config. 
-func NewConfig(repo repo.Repo) *Config { - return &Config{repo: repo} -} - -// Set sets a value in config -func (s *Config) Set(dottedKey string, jsonString string) error { - s.lock.Lock() - defer s.lock.Unlock() - - cfg := s.repo.Config() - if err := cfg.Set(dottedKey, jsonString); err != nil { - return err - } - - return s.repo.ReplaceConfig(cfg) -} - -// Get gets a value from config -func (s *Config) Get(dottedKey string) (interface{}, error) { - return s.repo.Config().Get(dottedKey) -} diff --git a/internal/app/go-filecoin/plumbing/cfg/config_test.go b/internal/app/go-filecoin/plumbing/cfg/config_test.go deleted file mode 100644 index 5dc76b7540..0000000000 --- a/internal/app/go-filecoin/plumbing/cfg/config_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package cfg - -import ( - "testing" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestConfigGet(t *testing.T) { - tf.UnitTest(t) - - t.Run("emits the referenced config value", func(t *testing.T) { - repo := repo.NewInMemoryRepo() - cfgAPI := NewConfig(repo) - - out, err := cfgAPI.Get("bootstrap") - - require.NoError(t, err) - expected := config.NewDefaultConfig().Bootstrap - assert.Equal(t, expected, out) - }) - - t.Run("failure cases fail", func(t *testing.T) { - repo := repo.NewInMemoryRepo() - cfgAPI := NewConfig(repo) - - _, err := cfgAPI.Get("nonexistantkey") - assert.EqualError(t, err, "key: nonexistantkey invalid for config") - - _, err = cfgAPI.Get("bootstrap.nope") - assert.EqualError(t, err, "key: bootstrap.nope invalid for config") - - _, err = cfgAPI.Get(".inval.id-key") - assert.EqualError(t, err, "key: .inval.id-key invalid for config") 
- }) -} - -func TestConfigSet(t *testing.T) { - tf.UnitTest(t) - - t.Run("sets the config value", func(t *testing.T) { - defaultCfg := config.NewDefaultConfig() - - repo := repo.NewInMemoryRepo() - cfgAPI := NewConfig(repo) - - jsonBlob := `{"addresses": ["bootup1", "bootup2"]}` - - err := cfgAPI.Set("bootstrap", jsonBlob) - require.NoError(t, err) - out, err := cfgAPI.Get("bootstrap") - require.NoError(t, err) - - // validate output - expected := config.NewDefaultConfig().Bootstrap - expected.Addresses = []string{"bootup1", "bootup2"} - assert.Equal(t, expected, out) - - // validate config write - cfg := repo.Config() - assert.Equal(t, expected, cfg.Bootstrap) - assert.Equal(t, defaultCfg.Datastore, cfg.Datastore) - - err = cfgAPI.Set("api.address", ":1234") - require.NoError(t, err) - assert.Equal(t, ":1234", cfg.API.Address) - - testAddr := vmaddr.RequireIDAddress(t, 100).String() - err = cfgAPI.Set("mining.minerAddress", testAddr) - require.NoError(t, err) - assert.Equal(t, testAddr, cfg.Mining.MinerAddress.String()) - - err = cfgAPI.Set("wallet.defaultAddress", testAddr) - require.NoError(t, err) - assert.Equal(t, testAddr, cfg.Wallet.DefaultAddress.String()) - - testSwarmAddr := "/ip4/0.0.0.0/tcp/0" - err = cfgAPI.Set("swarm.address", testSwarmAddr) - require.NoError(t, err) - assert.Equal(t, testSwarmAddr, cfg.Swarm.Address) - - err = cfgAPI.Set("datastore.path", "/dev/null") - require.NoError(t, err) - assert.Equal(t, "/dev/null", cfg.Datastore.Path) - }) - - t.Run("failure cases fail", func(t *testing.T) { - repo := repo.NewInMemoryRepo() - cfgAPI := NewConfig(repo) - - // bad key - jsonBlob := `{"addresses": ["bootup1", "bootup2"]}` - - err := cfgAPI.Set("botstrap", jsonBlob) - assert.EqualError(t, err, "json: unknown field \"botstrap\"") - - // bad value type (bootstrap is a struct not a list) - jsonBlobBadType := `["bootup1", "bootup2"]` - err = cfgAPI.Set("bootstrap", jsonBlobBadType) - assert.Error(t, err) - - // bad JSON - jsonBlobInvalid := 
`{"addresses": [bootup1, "bootup2"]}` - - err = cfgAPI.Set("bootstrap", jsonBlobInvalid) - assert.EqualError(t, err, "json: cannot unmarshal string into Go struct field Config.bootstrap of type config.BootstrapConfig") - - // bad address - jsonBlobBadAddr := "f4cqnyc0muxjajygqavu645m8ja04vckk2kcorrupt" - err = cfgAPI.Set("wallet.defaultAddress", jsonBlobBadAddr) - assert.EqualError(t, err, address.ErrUnknownProtocol.Error()) - }) - -} diff --git a/internal/app/go-filecoin/plumbing/cst/chain_state.go b/internal/app/go-filecoin/plumbing/cst/chain_state.go deleted file mode 100644 index 5c190c6be4..0000000000 --- a/internal/app/go-filecoin/plumbing/cst/chain_state.go +++ /dev/null @@ -1,349 +0,0 @@ -package cst - -import ( - "context" - "fmt" - "io" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - initactor "github.com/filecoin-project/specs-actors/actors/builtin/init" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - offline "github.com/ipfs/go-ipfs-exchange-offline" - format "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log/v2" - merkdag "github.com/ipfs/go-merkledag" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/dag" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/slashing" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - vmstate "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -var logStore = logging.Logger("plumbing/chain_store") - -type chainReadWriter interface { - GetHead() block.TipSetKey - GetGenesisBlock(ctx context.Context) (*block.Block, error) - GetTipSet(block.TipSetKey) (block.TipSet, error) - GetTipSetState(context.Context, block.TipSetKey) (vmstate.Tree, error) - GetTipSetStateRoot(block.TipSetKey) (cid.Cid, error) - SetHead(context.Context, block.TipSet) error - ReadOnlyStateStore() cborutil.ReadOnlyIpldStore -} - -// ChainStateReadWriter composes a: -// ChainReader providing read access to the chain and its associated state. -// ChainWriter providing write access to the chain head. -type ChainStateReadWriter struct { - readWriter chainReadWriter - bstore blockstore.Blockstore // Provides chain blocks. - messageProvider chain.MessageProvider - actors vm.ActorCodeLoader - cborutil.ReadOnlyIpldStore -} - -type actorStore struct { - ctx context.Context - cborutil.ReadOnlyIpldStore -} - -func (as *actorStore) Context() context.Context { - return as.ctx -} - -type carStore struct { - store blockstore.Blockstore -} - -func newCarStore(bs blockstore.Blockstore) *carStore { - return &carStore{bs} -} - -func (cs *carStore) Put(b blocks.Block) error { - return cs.store.Put(b) -} - -type actorNotRegisteredError struct{} - -func (e actorNotRegisteredError) Error() string { - return "actor not registered" -} - -func (e actorNotRegisteredError) ActorNotFound() bool { - return true -} - -var ( - // ErrNoMethod is returned by Get when there is no method signature (eg, transfer). 
- ErrNoMethod = errors.New("no method") - // ErrNoActorImpl is returned by Get when the actor implementation doesn't exist, eg - // the actor address is an empty actor, an address that has received a transfer of FIL - // but hasn't yet been upgraded to an account actor. (The actor implementation might - // also genuinely be missing, which is not expected.) - ErrNoActorImpl = errors.New("no actor implementation") -) - -// NewChainStateReadWriter returns a new ChainStateReadWriter. -func NewChainStateReadWriter(crw chainReadWriter, messages chain.MessageProvider, bs blockstore.Blockstore, ba vm.ActorCodeLoader) *ChainStateReadWriter { - return &ChainStateReadWriter{ - readWriter: crw, - bstore: bs, - messageProvider: messages, - actors: ba, - ReadOnlyIpldStore: crw.ReadOnlyStateStore(), - } -} - -// Head returns the head tipset -func (chn *ChainStateReadWriter) Head() block.TipSetKey { - return chn.readWriter.GetHead() -} - -// GetTipSet returns the tipset at the given key -func (chn *ChainStateReadWriter) GetTipSet(key block.TipSetKey) (block.TipSet, error) { - return chn.readWriter.GetTipSet(key) -} - -// Ls returns an iterator over tipsets from head to genesis. 
-func (chn *ChainStateReadWriter) Ls(ctx context.Context) (*chain.TipsetIterator, error) { - ts, err := chn.readWriter.GetTipSet(chn.readWriter.GetHead()) - if err != nil { - return nil, err - } - return chain.IterAncestors(ctx, chn.readWriter, ts), nil -} - -// GetBlock gets a block by CID -func (chn *ChainStateReadWriter) GetBlock(ctx context.Context, id cid.Cid) (*block.Block, error) { - bsblk, err := chn.bstore.Get(id) - if err != nil { - return nil, err - } - return block.DecodeBlock(bsblk.RawData()) -} - -func (chn *ChainStateReadWriter) ReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - blk, err := chn.bstore.Get(obj) - if err != nil { - return nil, err - } - - return blk.RawData(), nil -} - -// GetMessages gets a message collection by CID returned as unsigned bls and signed secp -func (chn *ChainStateReadWriter) GetMessages(ctx context.Context, metaCid cid.Cid) ([]*types.UnsignedMessage, []*types.SignedMessage, error) { - secp, bls, err := chn.messageProvider.LoadMessages(ctx, metaCid) - if err != nil { - return []*types.UnsignedMessage{}, []*types.SignedMessage{}, err - } - return bls, secp, nil -} - -// GetReceipts gets a receipt collection by CID. -func (chn *ChainStateReadWriter) GetReceipts(ctx context.Context, id cid.Cid) ([]vm.MessageReceipt, error) { - return chn.messageProvider.LoadReceipts(ctx, id) -} - -// SampleChainRandomness computes randomness seeded by a ticket from the chain `head` at `sampleHeight`. 
-func (chn *ChainStateReadWriter) SampleChainRandomness(ctx context.Context, head block.TipSetKey, tag acrypto.DomainSeparationTag, - epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - genBlk, err := chn.readWriter.GetGenesisBlock(ctx) - if err != nil { - return nil, err - } - rnd := crypto.ChainRandomnessSource{Sampler: chain.NewRandomnessSamplerAtHead(chn.readWriter, genBlk.Ticket, head)} - return rnd.Randomness(ctx, tag, epoch, entropy) -} - -// GetActor returns an actor from the latest state on the chain -func (chn *ChainStateReadWriter) GetActor(ctx context.Context, addr address.Address) (*actor.Actor, error) { - return chn.GetActorAt(ctx, chn.readWriter.GetHead(), addr) -} - -// GetTipSetStateRoot produces the state root for the provided tipset key. -func (chn *ChainStateReadWriter) GetTipSetStateRoot(ctx context.Context, tipKey block.TipSetKey) (cid.Cid, error) { - return chn.readWriter.GetTipSetStateRoot(tipKey) -} - -// GetActorAt returns an actor at a specified tipset key. 
-func (chn *ChainStateReadWriter) GetActorAt(ctx context.Context, tipKey block.TipSetKey, addr address.Address) (*actor.Actor, error) { - st, err := chn.readWriter.GetTipSetState(ctx, tipKey) - if err != nil { - return nil, errors.Wrap(err, "failed to load latest state") - } - - idAddr, err := chn.ResolveAddressAt(ctx, tipKey, addr) - if err != nil { - return nil, err - } - - actr, found, err := st.GetActor(ctx, idAddr) - if err != nil { - return nil, err - } - if !found { - return nil, types.ErrNotFound - } - return actr, nil -} - -// GetActorStateAt returns the root state of an actor at a given point in the chain (specified by tipset key) -func (chn *ChainStateReadWriter) GetActorStateAt(ctx context.Context, tipKey block.TipSetKey, addr address.Address, out interface{}) error { - act, err := chn.GetActorAt(ctx, tipKey, addr) - if err != nil { - return err - } - - blk, err := chn.bstore.Get(act.Head.Cid) - if err != nil { - return err - } - - return encoding.Decode(blk.RawData(), out) -} - -// ResolveAddressAt resolves ID address for actor -func (chn *ChainStateReadWriter) ResolveAddressAt(ctx context.Context, tipKey block.TipSetKey, addr address.Address) (address.Address, error) { - st, err := chn.readWriter.GetTipSetState(ctx, tipKey) - if err != nil { - return address.Undef, errors.Wrap(err, "failed to load latest state") - } - - init, found, err := st.GetActor(ctx, builtin.InitActorAddr) - if err != nil { - return address.Undef, err - } - if !found { - return address.Undef, errors.Wrapf(err, "no actor at address %s", addr) - } - - blk, err := chn.bstore.Get(init.Head.Cid) - if err != nil { - return address.Undef, err - } - - var state initactor.State - err = encoding.Decode(blk.RawData(), &state) - if err != nil { - return address.Undef, err - } - - return state.ResolveAddress(&actorStore{ctx, chn.ReadOnlyIpldStore}, addr) -} - -// LsActors returns a channel with actors from the latest state on the chain -func (chn *ChainStateReadWriter) LsActors(ctx 
context.Context) (<-chan vmstate.GetAllActorsResult, error) { - st, err := chn.readWriter.GetTipSetState(ctx, chn.readWriter.GetHead()) - if err != nil { - return nil, err - } - return st.GetAllActors(ctx), nil -} - -// GetActorSignature returns the signature of the given actor's given method. -// The function signature is typically used to enable a caller to decode the -// output of an actor method call (message). -func (chn *ChainStateReadWriter) GetActorSignature(ctx context.Context, actorAddr address.Address, method abi.MethodNum) (vm.ActorMethodSignature, error) { - if method == builtin.MethodSend { - return nil, ErrNoMethod - } - - actor, err := chn.GetActor(ctx, actorAddr) - if err != nil { - return nil, errors.Wrap(err, "failed to get actor") - } else if actor.Empty() { - return nil, ErrNoActorImpl - } - - // Dragons: this is broken, we need to ask the VM for the impl, it might need to apply migrations based on epoch - executable, err := chn.actors.GetActorImpl(actor.Code.Cid) - if err != nil { - return nil, errors.Wrap(err, "failed to load actor code") - } - - signature, err := executable.Signature(method) - if err != nil { - return nil, fmt.Errorf("missing export: %d", method) - } - - return signature, nil -} - -// SetHead sets `key` as the new head of this chain iff it exists in the nodes chain store. -func (chn *ChainStateReadWriter) SetHead(ctx context.Context, key block.TipSetKey) error { - headTs, err := chn.readWriter.GetTipSet(key) - if err != nil { - return err - } - return chn.readWriter.SetHead(ctx, headTs) -} - -// ReadOnlyStateStore returns a read-only state store. 
-func (chn *ChainStateReadWriter) ReadOnlyStateStore() cborutil.ReadOnlyIpldStore { - return chn.readWriter.ReadOnlyStateStore() -} - -// ChainExport exports the chain from `head` up to and including the genesis block to `out` -func (chn *ChainStateReadWriter) ChainExport(ctx context.Context, head block.TipSetKey, out io.Writer) error { - headTS, err := chn.GetTipSet(head) - if err != nil { - return err - } - logStore.Infof("starting CAR file export: %s", head.String()) - if err := chain.Export(ctx, headTS, chn.readWriter, chn.messageProvider, chn, out); err != nil { - return err - } - logStore.Infof("exported CAR file with head: %s", head.String()) - return nil -} - -// ChainImport imports a chain from `in`. -func (chn *ChainStateReadWriter) ChainImport(ctx context.Context, in io.Reader) (block.TipSetKey, error) { - logStore.Info("starting CAR file import") - headKey, err := chain.Import(ctx, newCarStore(chn.bstore), in) - if err != nil { - return block.UndefTipSet.Key(), err - } - logStore.Infof("imported CAR file with head: %s", headKey) - return headKey, nil -} - -// ChainStateTree returns the state tree as a slice of IPLD nodes at the passed stateroot cid `c`. 
-func (chn *ChainStateReadWriter) ChainStateTree(ctx context.Context, c cid.Cid) ([]format.Node, error) { - offl := offline.Exchange(chn.bstore) - blkserv := blockservice.New(chn.bstore, offl) - dserv := merkdag.NewDAGService(blkserv) - return dag.NewDAG(dserv).RecursiveGet(ctx, c) -} - -func (chn *ChainStateReadWriter) StateView(key block.TipSetKey) (*state.View, error) { - root, err := chn.readWriter.GetTipSetStateRoot(key) - if err != nil { - return nil, errors.Wrapf(err, "failed to get state root for %s", key.String()) - } - return state.NewView(chn, root), nil -} - -func (chn *ChainStateReadWriter) AccountStateView(key block.TipSetKey) (state.AccountStateView, error) { - return chn.StateView(key) -} - -func (chn *ChainStateReadWriter) FaultStateView(key block.TipSetKey) (slashing.FaultStateView, error) { - return chn.StateView(key) -} diff --git a/internal/app/go-filecoin/plumbing/cst/chain_sync.go b/internal/app/go-filecoin/plumbing/cst/chain_sync.go deleted file mode 100644 index a7019b7a75..0000000000 --- a/internal/app/go-filecoin/plumbing/cst/chain_sync.go +++ /dev/null @@ -1,38 +0,0 @@ -package cst - -import ( - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/status" -) - -type chainSync interface { - BlockProposer() chainsync.BlockProposer - Status() status.Status -} - -// ChainSyncProvider provides access to chain sync operations and their status. -type ChainSyncProvider struct { - sync chainSync -} - -// NewChainSyncProvider returns a new ChainSyncProvider. -func NewChainSyncProvider(chainSyncer chainSync) *ChainSyncProvider { - return &ChainSyncProvider{ - sync: chainSyncer, - } -} - -// Status returns the chains current status, this includes whether or not the syncer is currently -// running, the chain being synced, and the time it started processing said chain. 
-func (chs *ChainSyncProvider) Status() status.Status { - return chs.sync.Status() -} - -// HandleNewTipSet extends the Syncer's chain store with the given tipset if they -// represent a valid extension. It limits the length of new chains it will -// attempt to validate and caches invalid blocks it has encountered to -// help prevent DOS. -func (chs *ChainSyncProvider) HandleNewTipSet(ci *block.ChainInfo) error { - return chs.sync.BlockProposer().SendOwnBlock(ci) -} diff --git a/internal/app/go-filecoin/plumbing/dag/dag.go b/internal/app/go-filecoin/plumbing/dag/dag.go deleted file mode 100644 index 9cbd45276e..0000000000 --- a/internal/app/go-filecoin/plumbing/dag/dag.go +++ /dev/null @@ -1,156 +0,0 @@ -package dag - -import ( - "context" - "fmt" - "io" - - "github.com/ipfs/go-cid" - chunk "github.com/ipfs/go-ipfs-chunker" - format "github.com/ipfs/go-ipld-format" - ipld "github.com/ipfs/go-ipld-format" - "github.com/ipfs/go-merkledag" - dag "github.com/ipfs/go-merkledag" - "github.com/ipfs/go-path" - "github.com/ipfs/go-path/resolver" - "github.com/ipfs/go-unixfs" - imp "github.com/ipfs/go-unixfs/importer" - uio "github.com/ipfs/go-unixfs/io" - "github.com/pkg/errors" -) - -// DAG is a service for accessing the merkledag -type DAG struct { - dserv format.DAGService // Provides access to state tree. -} - -// NewDAG creates a DAG with a given DAGService -func NewDAG(dserv ipld.DAGService) *DAG { - return &DAG{ - dserv: dserv, - } -} - -// GetNode returns the associated DAG node for the passed in CID. 
-func (dag *DAG) GetNode(ctx context.Context, ref string) (interface{}, error) { - parsedRef, err := path.ParsePath(ref) - if err != nil { - return nil, err - } - - resolver := resolver.NewBasicResolver(dag.dserv) - - objc, rem, err := resolver.ResolveToLastNode(ctx, parsedRef) - if err != nil { - return nil, err - } - - obj, err := dag.dserv.Get(ctx, objc) - if err != nil { - return nil, err - } - - var out interface{} = obj - if len(rem) > 0 { - final, _, err := obj.Resolve(rem) - if err != nil { - return nil, err - } - out = final - } - - return out, nil -} - -// GetFileSize returns the file size for a given Cid -func (dag *DAG) GetFileSize(ctx context.Context, c cid.Cid) (uint64, error) { - fnode, err := dag.dserv.Get(ctx, c) - if err != nil { - return 0, err - } - switch n := fnode.(type) { - case *merkledag.ProtoNode: - return unixfs.DataSize(n.Data()) - case *merkledag.RawNode: - return n.Size() - default: - return 0, fmt.Errorf("unrecognized node type: %T", fnode) - } -} - -// Cat returns an iostream with a piece of data stored on the merkeldag with -// the given cid. -// -// TODO: this goes back to 'how is data stored and referenced' -// For now, lets just do things the ipfs way. -// https://github.com/filecoin-project/specs/issues/136 -func (dag *DAG) Cat(ctx context.Context, c cid.Cid) (uio.DagReader, error) { - data, err := dag.dserv.Get(ctx, c) - if err != nil { - return nil, err - } - return uio.NewDagReader(ctx, data, dag.dserv) -} - -// ImportData adds data from an io stream to the merkledag and returns the Cid -// of the given data -func (dag *DAG) ImportData(ctx context.Context, data io.Reader) (ipld.Node, error) { - bufds := ipld.NewBufferedDAG(ctx, dag.dserv) - - spl := chunk.DefaultSplitter(data) - - nd, err := imp.BuildDagFromReader(bufds, spl) - if err != nil { - return nil, err - } - return nd, bufds.Commit() -} - -// RecursiveGet will walk the dag in order (depth first) starting at the given root `c`. 
-func (dag *DAG) RecursiveGet(ctx context.Context, c cid.Cid) ([]ipld.Node, error) { - collector := dagCollector{ - dagserv: dag.dserv, - } - return collector.collectState(ctx, c) -} - -// -// Helpers for recursive dag get. -// - -type dagCollector struct { - dagserv format.DAGService - state []format.Node -} - -// collectState recursively walks the state tree starting with `stateRoot` and returns it as a slice of IPLD nodes. -// Calling this method does not have any side effects. -func (dc *dagCollector) collectState(ctx context.Context, stateRoot cid.Cid) ([]format.Node, error) { - dagNd, err := dc.dagserv.Get(ctx, stateRoot) - if err != nil { - return nil, errors.Wrapf(err, "failed to load stateroot from dagservice %s", stateRoot) - } - dc.addState(dagNd) - seen := cid.NewSet() - for _, l := range dagNd.Links() { - if err := dag.Walk(ctx, dc.getLinks, l.Cid, seen.Visit); err != nil { - return nil, errors.Wrapf(err, "dag service failed walking stateroot %s", stateRoot) - } - } - return dc.state, nil - -} - -func (dc *dagCollector) getLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) { - nd, err := dc.dagserv.Get(ctx, c) - if err != nil { - return nil, errors.Wrapf(err, "failed to load link from dagservice %s", c) - } - dc.addState(nd) - return nd.Links(), nil - -} - -func (dc *dagCollector) addState(nd format.Node) { - dc.state = append(dc.state, nd) -} diff --git a/internal/app/go-filecoin/plumbing/msg/previewer.go b/internal/app/go-filecoin/plumbing/msg/previewer.go deleted file mode 100644 index e93d2c11df..0000000000 --- a/internal/app/go-filecoin/plumbing/msg/previewer.go +++ /dev/null @@ -1,47 +0,0 @@ -package msg - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" 
- "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -// Abstracts over a store of blockchain state. -type previewerChainReader interface { - GetHead() block.TipSetKey - GetTipSetState(context.Context, block.TipSetKey) (state.Tree, error) - GetTipSet(block.TipSetKey) (block.TipSet, error) -} - -// Dragons: delete -type messagePreviewer interface { -} - -// Previewer calculates the amount of Gas needed for a command -type Previewer struct { - // To get the head tipset state root. - chainReader previewerChainReader - // To load the tree for the head tipset state root. - cst cbor.IpldStore - // For vm storage. - bs bstore.Blockstore - // To to preview messages - processor messagePreviewer -} - -// NewPreviewer constructs a Previewer. -func NewPreviewer(chainReader previewerChainReader, cst cbor.IpldStore, bs bstore.Blockstore, processor messagePreviewer) *Previewer { - return &Previewer{chainReader, cst, bs, processor} -} - -// Preview sends a read-only message to an actor. 
-func (p *Previewer) Preview(ctx context.Context, optFrom, to address.Address, method abi.MethodNum, params ...interface{}) (gas.Unit, error) { - panic("unimplemented") -} diff --git a/internal/app/go-filecoin/plumbing/msg/testing.go b/internal/app/go-filecoin/plumbing/msg/testing.go deleted file mode 100644 index 698b7b8412..0000000000 --- a/internal/app/go-filecoin/plumbing/msg/testing.go +++ /dev/null @@ -1,53 +0,0 @@ -package msg - -import ( - "context" - "testing" - - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/genesis" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" -) - -type commonDeps struct { - repo repo.Repo - wallet *wallet.Wallet - chainStore *chain.Store - messages *chain.MessageStore - blockstore bstore.Blockstore - cst cbor.IpldStore -} - -func requiredCommonDeps(t *testing.T, gif genesis.InitFunc) *commonDeps { // nolint: deadcode - r := repo.NewInMemoryRepo() - bs := bstore.NewBlockstore(r.Datastore()) - return requireCommonDepsWithGifAndBlockstore(t, gif, r, bs) -} - -// This version is useful if you are installing actors with consensus.AddActor and you -// need to set some actor state up ahead of time (actor state is ultimately found in the -// block store). 
-func requireCommonDepsWithGifAndBlockstore(t *testing.T, gif genesis.InitFunc, r repo.Repo, bs bstore.Blockstore) *commonDeps { - cst := cborutil.NewIpldStore(bs) - chainStore, err := chain.Init(context.Background(), r, bs, cst, gif) - require.NoError(t, err) - messageStore := chain.NewMessageStore(bs) - backend, err := wallet.NewDSBackend(r.WalletDatastore()) - require.NoError(t, err) - wallet := wallet.New(backend) - - return &commonDeps{ - repo: r, - wallet: wallet, - chainStore: chainStore, - messages: messageStore, - blockstore: bs, - cst: cst, - } -} diff --git a/internal/app/go-filecoin/plumbing/msg/waiter.go b/internal/app/go-filecoin/plumbing/msg/waiter.go deleted file mode 100644 index 677a1f2592..0000000000 --- a/internal/app/go-filecoin/plumbing/msg/waiter.go +++ /dev/null @@ -1,323 +0,0 @@ -package msg - -import ( - "context" - "fmt" - - "github.com/cskr/pubsub" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/ipfs/go-cid" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -var log = logging.Logger("messageimpl") - -var DefaultMessageWaitLookback uint64 = 2 // in most cases, this should be enough to avoid races. - -// Abstracts over a store of blockchain state. -type waiterChainReader interface { - GetHead() block.TipSetKey - GetTipSet(block.TipSetKey) (block.TipSet, error) - GetTipSetState(context.Context, block.TipSetKey) (state.Tree, error) - GetTipSetReceiptsRoot(block.TipSetKey) (cid.Cid, error) - HeadEvents() *pubsub.PubSub -} - -// Waiter waits for a message to appear on chain. 
-type Waiter struct { - chainReader waiterChainReader - messageProvider chain.MessageProvider - cst cbor.IpldStore - bs bstore.Blockstore -} - -// ChainMessage is an on-chain message with its block and receipt. -type ChainMessage struct { - Message *types.SignedMessage - Block *block.Block - Receipt *vm.MessageReceipt -} - -// WaitPredicate is a function that identifies a message and returns true when found. -type WaitPredicate func(msg *types.SignedMessage, msgCid cid.Cid) bool - -// NewWaiter returns a new Waiter. -func NewWaiter(chainStore waiterChainReader, messages chain.MessageProvider, bs bstore.Blockstore, cst cbor.IpldStore) *Waiter { - return &Waiter{ - chainReader: chainStore, - cst: cst, - bs: bs, - messageProvider: messages, - } -} - -// Find searches the blockchain history (but doesn't wait). -func (w *Waiter) Find(ctx context.Context, lookback uint64, pred WaitPredicate) (*ChainMessage, bool, error) { - headTipSet, err := w.chainReader.GetTipSet(w.chainReader.GetHead()) - if err != nil { - return nil, false, err - } - return w.findMessage(ctx, headTipSet, lookback, pred) -} - -// WaitPredicate invokes the callback when the passed predicate succeeds. -// See api description. -// -// Note: this method does too much -- the callback should just receive the tipset -// containing the message and the caller should pull the receipt out of the block -// if in fact that's what it wants to do, using something like receiptFromTipset. -// Something like receiptFromTipset is necessary because not every message in -// a block will have a receipt in the tipset: it might be a duplicate message. -// This method will always check for the message in the current head tipset. -// A lookback parameter > 1 will cause this method to check for the message in -// up to that many previous tipsets on the chain of the current head. 
-func (w *Waiter) WaitPredicate(ctx context.Context, lookback uint64, pred WaitPredicate, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error { - ch := w.chainReader.HeadEvents().Sub(chain.NewHeadTopic) - defer func() { - w.chainReader.HeadEvents().Unsub(ch, chain.NewHeadTopic) - }() - - head, err := w.chainReader.GetTipSet(w.chainReader.GetHead()) - if err != nil { - return err - } - - chainMsg, found, err := w.findMessage(ctx, head, lookback, pred) - if err != nil { - return err - } - if found { - return cb(chainMsg.Block, chainMsg.Message, chainMsg.Receipt) - } - - chainMsg, found, err = w.waitForMessage(ctx, ch, head, pred) - if err != nil { - return err - } - if found { - return cb(chainMsg.Block, chainMsg.Message, chainMsg.Receipt) - } - return err -} - -// Wait uses WaitPredicate to invoke the callback when a message with the given cid appears on chain. -func (w *Waiter) Wait(ctx context.Context, msgCid cid.Cid, lookback uint64, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error { - log.Infof("Calling Waiter.Wait CID: %s", msgCid.String()) - - pred := func(msg *types.SignedMessage, c cid.Cid) bool { - return c.Equals(msgCid) - } - - return w.WaitPredicate(ctx, lookback, pred, cb) -} - -// findMessage looks for a matching in the chain and returns the message, -// block and receipt, when it is found. Returns the found message/block or nil -// if now block with the given CID exists in the chain. -// The lookback parameter is the number of tipsets in the past this method will check before giving up. 
-func (w *Waiter) findMessage(ctx context.Context, head block.TipSet, lookback uint64, pred WaitPredicate) (*ChainMessage, bool, error) { - var err error - for iterator := chain.IterAncestors(ctx, w.chainReader, head); err == nil && !iterator.Complete(); err = iterator.Next() { - msg, found, err := w.receiptForTipset(ctx, iterator.Value(), pred) - if err != nil { - log.Errorf("Waiter.Wait: %s", err) - return nil, false, err - } - if found { - return msg, true, nil - } - - lookback-- - if lookback <= 0 { - break - } - } - return nil, false, err -} - -// waitForMessage looks for a matching message in a channel of tipsets and returns -// the message, block and receipt, when it is found. Reads until the channel is -// closed or the context done. Returns the found message/block (or nil if the -// channel closed without finding it), whether it was found, or an error. -func (w *Waiter) waitForMessage(ctx context.Context, ch <-chan interface{}, head block.TipSet, pred WaitPredicate) (*ChainMessage, bool, error) { - lastHead := head - for { - select { - case <-ctx.Done(): - return nil, false, ctx.Err() - case raw, more := <-ch: - if !more { - return nil, false, nil - } - switch raw := raw.(type) { - case error: - e := raw.(error) - log.Errorf("Waiter.Wait: %s", e) - return nil, false, e - case block.TipSet: - msg, found, err := w.receiptForChain(ctx, raw, lastHead, pred) - if err != nil { - return nil, false, err - } - if found { - return msg, found, nil - } - lastHead = raw - // otherwise continue waiting - default: - return nil, false, fmt.Errorf("unexpected type in channel: %T", raw) - } - } - } -} - -func (w *Waiter) receiptForChain(ctx context.Context, ts block.TipSet, prevTs block.TipSet, pred WaitPredicate) (*ChainMessage, bool, error) { - // New tipsets typically have the previous head as a parent, so handle this cheap case - parents, err := ts.Parents() - if err != nil { - return nil, false, err - } - - if parents.Equals(prevTs.Key()) { - return 
w.receiptForTipset(ctx, ts, pred) - } - - // check all tipsets up to the last common ancestor of the last tipset we have seen - _, newChain, err := chain.CollectTipsToCommonAncestor(ctx, w.chainReader, prevTs, ts) - if err != nil { - return nil, false, err - } - - for _, ts := range newChain { - msg, found, err := w.receiptForTipset(ctx, ts, pred) - if err != nil { - return nil, false, err - } - if found { - return msg, found, nil - } - } - return nil, false, nil -} - -func (w *Waiter) receiptForTipset(ctx context.Context, ts block.TipSet, pred WaitPredicate) (*ChainMessage, bool, error) { - // The targetMsg might be the CID of either a signed SECP message or an unsigned - // BLS message. - // This accumulates the CIDs of the messages as they appear on chain (signed or unsigned) - // but then unwraps them all, obtaining the CID of the unwrapped SECP message body if - // applicable. This unwrapped message CID is then used to find the target message in the - // unwrapped de-duplicated tipset messages, and thence the corresponding receipt by index. 
- tsMessages := make([][]*types.UnsignedMessage, ts.Len()) - for i := 0; i < ts.Len(); i++ { - blk := ts.At(i) - secpMsgs, blsMsgs, err := w.messageProvider.LoadMessages(ctx, blk.Messages.Cid) - if err != nil { - return nil, false, err - } - - originalCids := make([]cid.Cid, len(blsMsgs)+len(secpMsgs)) - unwrappedMsgs := make([]*types.UnsignedMessage, len(blsMsgs)+len(secpMsgs)) - wrappedMsgs := make([]*types.SignedMessage, len(blsMsgs)+len(secpMsgs)) - for j, msg := range blsMsgs { - c, err := msg.Cid() - if err != nil { - return nil, false, err - } - originalCids[j] = c - unwrappedMsgs[j] = msg - wrappedMsgs[j] = &types.SignedMessage{Message: *msg} - } - for j, msg := range secpMsgs { - c, err := msg.Cid() - if err != nil { - return nil, false, err - } - originalCids[len(blsMsgs)+j] = c - unwrappedMsgs[len(blsMsgs)+j] = &msg.Message // Unwrap - wrappedMsgs[len(blsMsgs)+j] = msg - } - tsMessages[i] = unwrappedMsgs - - for k, wrapped := range wrappedMsgs { - if pred(wrapped, originalCids[k]) { - // Take CID of the unwrapped message, which might be different from the original. 
- unwrappedTarget, err := wrapped.Message.Cid() - if err != nil { - return nil, false, err - } - - recpt, err := w.receiptByIndex(ctx, ts.Key(), unwrappedTarget, tsMessages) - if err != nil { - return nil, false, errors.Wrap(err, "error retrieving receipt from tipset") - } - return &ChainMessage{wrappedMsgs[k], blk, recpt}, true, nil - } - } - } - return nil, false, nil -} - -func (w *Waiter) receiptByIndex(ctx context.Context, tsKey block.TipSetKey, targetCid cid.Cid, messages [][]*types.UnsignedMessage) (*vm.MessageReceipt, error) { - receiptCid, err := w.chainReader.GetTipSetReceiptsRoot(tsKey) - if err != nil { - return nil, err - } - - receipts, err := w.messageProvider.LoadReceipts(ctx, receiptCid) - if err != nil { - return nil, err - } - - deduped, err := deduppedMessages(messages) - if err != nil { - return nil, err - } - - receiptIndex := 0 - for _, blkMessages := range deduped { - for _, msg := range blkMessages { - msgCid, err := msg.Cid() - if err != nil { - return nil, err - } - - if msgCid.Equals(targetCid) { - if receiptIndex >= len(receipts) { - return nil, errors.Errorf("could not find message receipt at index %d", receiptIndex) - } - return &receipts[receiptIndex], nil - } - receiptIndex++ - } - } - return nil, errors.Errorf("could not find message cid %s in dedupped messages", targetCid.String()) -} - -func deduppedMessages(tsMessages [][]*types.UnsignedMessage) ([][]*types.UnsignedMessage, error) { - allMessages := make([][]*types.UnsignedMessage, len(tsMessages)) - msgFilter := make(map[cid.Cid]struct{}) - - for i, blkMessages := range tsMessages { - for _, msg := range blkMessages { - mCid, err := msg.Cid() - if err != nil { - return nil, err - } - - _, found := msgFilter[mCid] - if !found { - allMessages[i] = append(allMessages[i], msg) - msgFilter[mCid] = struct{}{} - } - } - } - return allMessages, nil -} diff --git a/internal/app/go-filecoin/plumbing/msg/waiter_test.go b/internal/app/go-filecoin/plumbing/msg/waiter_test.go deleted file 
mode 100644 index b0848e4897..0000000000 --- a/internal/app/go-filecoin/plumbing/msg/waiter_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package msg - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -var mockSigner, _ = types.NewMockSignersAndKeyInfo(10) - -var newSignedMessage = types.NewSignedMessageForTestGetter(mockSigner) - -func testWaitHelp(wg *sync.WaitGroup, t *testing.T, waiter *Waiter, expectMsg *types.SignedMessage, expectError bool, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) { - expectCid, err := expectMsg.Cid() - if cb == nil { - cb = func(b *block.Block, msg *types.SignedMessage, - rcp *vm.MessageReceipt) error { - assert.True(t, types.SmsgCidsEqual(expectMsg, msg)) - if wg != nil { - wg.Done() - } - - return nil - } - } - assert.NoError(t, err) - - err = waiter.Wait(context.Background(), expectCid, DefaultMessageWaitLookback, cb) - assert.Equal(t, expectError, err != nil) -} - -type smsgs []*types.SignedMessage -type smsgsSet [][]*types.SignedMessage - -func setupTest(t *testing.T) (cbor.IpldStore, *chain.Store, *chain.MessageStore, *Waiter) { - d := requiredCommonDeps(t, gengen.DefaultGenesis) - return d.cst, d.chainStore, d.messages, NewWaiter(d.chainStore, d.messages, d.blockstore, d.cst) -} 
- -func TestWait(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - cst, chainStore, msgStore, waiter := setupTest(t) - - testWaitExisting(ctx, t, cst, chainStore, msgStore, waiter) - testWaitNew(ctx, t, cst, chainStore, msgStore, waiter) -} - -func testWaitExisting(ctx context.Context, t *testing.T, cst cbor.IpldStore, chainStore *chain.Store, msgStore *chain.MessageStore, waiter *Waiter) { - m1, m2 := newSignedMessage(), newSignedMessage() - head := chainStore.GetHead() - headTipSet, err := chainStore.GetTipSet(head) - require.NoError(t, err) - chainWithMsgs := newChainWithMessages(cst, msgStore, headTipSet, smsgsSet{smsgs{m1, m2}}) - ts := chainWithMsgs[len(chainWithMsgs)-1] - require.Equal(t, 1, ts.Len()) - require.NoError(t, chainStore.PutTipSetMetadata(ctx, &chain.TipSetMetadata{ - TipSet: ts, - TipSetStateRoot: ts.ToSlice()[0].StateRoot.Cid, - TipSetReceipts: ts.ToSlice()[0].MessageReceipts.Cid, - })) - require.NoError(t, chainStore.SetHead(ctx, ts)) - - testWaitHelp(nil, t, waiter, m1, false, nil) - testWaitHelp(nil, t, waiter, m2, false, nil) -} - -func testWaitNew(ctx context.Context, t *testing.T, cst cbor.IpldStore, chainStore *chain.Store, msgStore *chain.MessageStore, waiter *Waiter) { - var wg sync.WaitGroup - - _, _ = newSignedMessage(), newSignedMessage() // flush out so we get distinct messages from testWaitExisting - m3, m4 := newSignedMessage(), newSignedMessage() - head := chainStore.GetHead() - headTipSet, err := chainStore.GetTipSet(head) - require.NoError(t, err) - chainWithMsgs := newChainWithMessages(cst, msgStore, headTipSet, smsgsSet{smsgs{m3, m4}}) - - wg.Add(2) - go testWaitHelp(&wg, t, waiter, m3, false, nil) - go testWaitHelp(&wg, t, waiter, m4, false, nil) - time.Sleep(10 * time.Millisecond) - - ts := chainWithMsgs[len(chainWithMsgs)-1] - require.Equal(t, 1, ts.Len()) - require.NoError(t, chainStore.PutTipSetMetadata(ctx, &chain.TipSetMetadata{ - TipSet: ts, - TipSetStateRoot: ts.ToSlice()[0].StateRoot.Cid, - 
TipSetReceipts: ts.ToSlice()[0].MessageReceipts.Cid, - })) - require.NoError(t, chainStore.SetHead(ctx, ts)) - - wg.Wait() -} - -func TestWaitError(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - cst, chainStore, msgStore, waiter := setupTest(t) - - testWaitError(ctx, t, cst, chainStore, msgStore, waiter) -} - -func testWaitError(ctx context.Context, t *testing.T, cst cbor.IpldStore, chainStore *chain.Store, msgStore *chain.MessageStore, waiter *Waiter) { - m1, m2, m3, m4 := newSignedMessage(), newSignedMessage(), newSignedMessage(), newSignedMessage() - head := chainStore.GetHead() - headTipSet, err := chainStore.GetTipSet(head) - require.NoError(t, err) - chain := newChainWithMessages(cst, msgStore, headTipSet, smsgsSet{smsgs{m1, m2}}, smsgsSet{smsgs{m3, m4}}) - // set the head without putting the ancestor block in the chainStore. - err = chainStore.SetHead(ctx, chain[len(chain)-1]) - assert.Nil(t, err) - - testWaitHelp(nil, t, waiter, m2, true, nil) -} - -func TestWaitRespectsContextCancel(t *testing.T) { - tf.UnitTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - _, _, _, waiter := setupTest(t) - - failIfCalledCb := func(b *block.Block, msg *types.SignedMessage, - rcp *vm.MessageReceipt) error { - assert.Fail(t, "Should not be called -- message doesnt exist") - return nil - } - - var err error - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - err = waiter.Wait(ctx, types.CidFromString(t, "somecid"), DefaultMessageWaitLookback, failIfCalledCb) - }() - - cancel() - - select { - case <-doneCh: - assert.Error(t, err) - case <-time.After(2 * time.Second): - assert.Fail(t, "Wait should have returned when context was canceled") - } -} - -// NewChainWithMessages creates a chain of tipsets containing the given messages -// and stores them in the given store. 
Note the msg arguments are slices of -// slices of messages -- each slice of slices goes into a successive tipset, -// and each slice within this slice goes into a block of that tipset -func newChainWithMessages(store cbor.IpldStore, msgStore *chain.MessageStore, root block.TipSet, msgSets ...[][]*types.SignedMessage) []block.TipSet { - var tipSets []block.TipSet - parents := root - height := abi.ChainEpoch(0) - stateRootCidGetter := types.NewCidForTestGetter() - - // only add root to the chain if it is not the zero-valued-tipset - if parents.Defined() { - for i := 0; i < parents.Len(); i++ { - mustPut(store, parents.At(i)) - } - tipSets = append(tipSets, parents) - height, _ = parents.Height() - height++ - } - emptyTxMeta, err := msgStore.StoreMessages(context.Background(), []*types.SignedMessage{}, []*types.UnsignedMessage{}) - if err != nil { - panic(err) - } - emptyReceiptsCid, err := msgStore.StoreReceipts(context.Background(), []vm.MessageReceipt{}) - if err != nil { - panic(err) - } - - for _, tsMsgs := range msgSets { - var blocks []*block.Block - receipts := []vm.MessageReceipt{} - // If a message set does not contain a slice of messages then - // add a tipset with no messages and a single block to the chain - if len(tsMsgs) == 0 { - child := &block.Block{ - Height: height, - Parents: parents.Key(), - Messages: e.NewCid(emptyTxMeta), - MessageReceipts: e.NewCid(emptyReceiptsCid), - } - mustPut(store, child) - blocks = append(blocks, child) - } - for _, msgs := range tsMsgs { - for _, msg := range msgs { - c, err := msg.Cid() - if err != nil { - panic(err) - } - receipts = append(receipts, vm.MessageReceipt{ExitCode: 0, ReturnValue: c.Bytes(), GasUsed: gas.Zero}) - } - txMeta, err := msgStore.StoreMessages(context.Background(), msgs, []*types.UnsignedMessage{}) - if err != nil { - panic(err) - } - - child := &block.Block{ - Messages: e.NewCid(txMeta), - Parents: parents.Key(), - Height: height, - StateRoot: e.NewCid(stateRootCidGetter()), // Differentiate 
all blocks - } - blocks = append(blocks, child) - } - receiptCid, err := msgStore.StoreReceipts(context.TODO(), receipts) - if err != nil { - panic(err) - } - - for _, blk := range blocks { - blk.MessageReceipts = e.NewCid(receiptCid) - mustPut(store, blk) - } - - ts, err := block.NewTipSet(blocks...) - if err != nil { - panic(err) - } - tipSets = append(tipSets, ts) - parents = ts - height++ - } - - return tipSets -} - -// mustPut stores the thingy in the store or panics if it cannot. -func mustPut(store cbor.IpldStore, thingy interface{}) cid.Cid { - cid, err := store.Put(context.Background(), thingy) - if err != nil { - panic(err) - } - return cid -} diff --git a/internal/app/go-filecoin/porcelain/api.go b/internal/app/go-filecoin/porcelain/api.go deleted file mode 100644 index a5965de54d..0000000000 --- a/internal/app/go-filecoin/porcelain/api.go +++ /dev/null @@ -1,148 +0,0 @@ -package porcelain - -import ( - "context" - "io" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// API is the porcelain implementation, a set of convenience calls written on the -// plumbing api, to be used to build user facing features and protocols. -// -// The porcelain.API provides porcelain calls **as well as the plumbing calls**. -// This is because most consumers depend on a combination of porcelain and plumbing -// calls. 
Flattening both apis into a single implementation enables consumers to take -// a single dependency and not have to know which api a call comes from. The mechanism -// is embedding: the plumbing implementation is embedded in the porcelain implementation, making -// all the embedded type (plumbing) calls available on the embedder type (porcelain). -// Providing a single implementation on which to depend also enables consumers to choose -// at what level to mock out their dependencies: low (plumbing) or high (porcelain). -// We ensure that porcelain calls only depend on the narrow subset of the plumbing api -// on which they depend by implementing them in free functions that take their specific -// subset of the plumbing.api. The porcelain.API delegates porcelain calls to these -// free functions. -// -// If you are implementing a user facing feature or a protocol this is probably the implementation -// you should depend on. Define the subset of it that you use in an interface in your package -// take this implementation as a dependency. -type API struct { - *plumbing.API -} - -// New returns a new porcelain.API. -func New(plumbing *plumbing.API) *API { - return &API{plumbing} -} - -// ChainHead returns the current head tipset -func (a *API) ChainHead() (block.TipSet, error) { - return ChainHead(a) -} - -// ChainGetFullBlock returns the full block given the header cid -func (a *API) ChainGetFullBlock(ctx context.Context, id cid.Cid) (*block.FullBlock, error) { - return GetFullBlock(ctx, a, id) -} - -// MessagePoolWait waits for the message pool to have at least messageCount unmined messages. -// It's useful for integration testing. 
-func (a *API) MessagePoolWait(ctx context.Context, messageCount uint) ([]*types.SignedMessage, error) { - return MessagePoolWait(ctx, a, messageCount) -} - -// MinerCreate creates a miner -func (a *API) MinerCreate( - ctx context.Context, - accountAddr address.Address, - gasPrice types.AttoFIL, - gasLimit gas.Unit, - sealProofType abi.RegisteredProof, - pid peer.ID, - collateral types.AttoFIL, -) (_ address.Address, err error) { - return MinerCreate(ctx, a, accountAddr, gasPrice, gasLimit, sealProofType, pid, collateral) -} - -// MinerPreviewCreate previews the Gas cost of creating a miner -func (a *API) MinerPreviewCreate( - ctx context.Context, - fromAddr address.Address, - sectorSize abi.SectorSize, - pid peer.ID, -) (usedGas gas.Unit, err error) { - return MinerPreviewCreate(ctx, a, fromAddr, sectorSize, pid) -} - -// MinerGetStatus queries for status of a miner. -func (a *API) MinerGetStatus(ctx context.Context, minerAddr address.Address, baseKey block.TipSetKey) (MinerStatus, error) { - return MinerGetStatus(ctx, a, minerAddr, baseKey) -} - -// ProtocolParameters fetches the current protocol configuration parameters. -func (a *API) ProtocolParameters(ctx context.Context) (*ProtocolParams, error) { - return ProtocolParameters(ctx, a) -} - -// WalletBalance returns the current balance of the given wallet address. -func (a *API) WalletBalance(ctx context.Context, address address.Address) (abi.TokenAmount, error) { - return WalletBalance(ctx, a, address) -} - -// WalletDefaultAddress returns a default wallet address from the config. -// If none is set it picks the first address in the wallet and sets it as the default in the config. 
-func (a *API) WalletDefaultAddress() (address.Address, error) { - return WalletDefaultAddress(a) -} - -// SealPieceIntoNewSector writes the provided piece into a new sector -func (a *API) SealPieceIntoNewSector(ctx context.Context, dealID abi.DealID, dealStart, dealEnd abi.ChainEpoch, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) error { - return SealPieceIntoNewSector(ctx, a, dealID, dealStart, dealEnd, pieceSize, pieceReader) -} - -// PingMinerWithTimeout pings a storage or retrieval miner, waiting the given -// timeout and returning desciptive errors. -func (a *API) PingMinerWithTimeout( - ctx context.Context, - minerPID peer.ID, - timeout time.Duration, -) error { - return PingMinerWithTimeout(ctx, minerPID, timeout, a) -} - -// MinerSetWorkerAddress sets the miner worker address to the provided address -func (a *API) MinerSetWorkerAddress(ctx context.Context, toAddr address.Address, gasPrice types.AttoFIL, gasLimit gas.Unit) (cid.Cid, error) { - return MinerSetWorkerAddress(ctx, a, toAddr, gasPrice, gasLimit) -} - -// MessageWaitDone blocks until the message is on chain -func (a *API) MessageWaitDone(ctx context.Context, msgCid cid.Cid) (*vm.MessageReceipt, error) { - return MessageWaitDone(ctx, a, msgCid) -} - -func (a *API) PowerStateView(baseKey block.TipSetKey) (consensus.PowerStateView, error) { - return a.StateView(baseKey) -} - -func (a *API) MinerStateView(baseKey block.TipSetKey) (MinerStateView, error) { - return a.StateView(baseKey) -} - -func (a *API) FaultsStateView(baseKey block.TipSetKey) (consensus.FaultStateView, error) { - return a.StateView(baseKey) -} - -func (a *API) ProtocolStateView(baseKey block.TipSetKey) (ProtocolStateView, error) { - return a.StateView(baseKey) -} diff --git a/internal/app/go-filecoin/porcelain/chain.go b/internal/app/go-filecoin/porcelain/chain.go deleted file mode 100644 index e7b95e53b3..0000000000 --- a/internal/app/go-filecoin/porcelain/chain.go +++ /dev/null @@ -1,43 +0,0 @@ -package porcelain - 
-import ( - "context" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -type chainHeadPlumbing interface { - ChainHeadKey() block.TipSetKey - ChainTipSet(key block.TipSetKey) (block.TipSet, error) -} - -// ChainHead gets the current head tipset from plumbing. -func ChainHead(plumbing chainHeadPlumbing) (block.TipSet, error) { - return plumbing.ChainTipSet(plumbing.ChainHeadKey()) -} - -type fullBlockPlumbing interface { - ChainGetBlock(context.Context, cid.Cid) (*block.Block, error) - ChainGetMessages(context.Context, cid.Cid) ([]*types.UnsignedMessage, []*types.SignedMessage, error) -} - -// GetFullBlock returns a full block: header, messages, receipts. -func GetFullBlock(ctx context.Context, plumbing fullBlockPlumbing, id cid.Cid) (*block.FullBlock, error) { - var out block.FullBlock - var err error - - out.Header, err = plumbing.ChainGetBlock(ctx, id) - if err != nil { - return nil, err - } - - out.BLSMessages, out.SECPMessages, err = plumbing.ChainGetMessages(ctx, out.Header.Messages.Cid) - if err != nil { - return nil, err - } - - return &out, nil -} diff --git a/internal/app/go-filecoin/porcelain/client.go b/internal/app/go-filecoin/porcelain/client.go deleted file mode 100644 index 9b56e4d52e..0000000000 --- a/internal/app/go-filecoin/porcelain/client.go +++ /dev/null @@ -1,18 +0,0 @@ -package porcelain - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// Ask is a result of querying for an ask, it may contain an error -type Ask struct { - Miner address.Address - Price types.AttoFIL - Expiry abi.ChainEpoch - ID uint64 - - Error error -} diff --git a/internal/app/go-filecoin/porcelain/message.go b/internal/app/go-filecoin/porcelain/message.go deleted file mode 100644 index c5206bf657..0000000000 --- 
a/internal/app/go-filecoin/porcelain/message.go +++ /dev/null @@ -1,34 +0,0 @@ -package porcelain - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/util/moresync" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -type waitPlumbing interface { - MessageWait(context.Context, cid.Cid, uint64, func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error -} - -// MessageWaitDone blocks until the given message cid appears on chain -func MessageWaitDone(ctx context.Context, plumbing waitPlumbing, msgCid cid.Cid) (*vm.MessageReceipt, error) { - l := moresync.NewLatch(1) - var ret *vm.MessageReceipt - err := plumbing.MessageWait(ctx, msgCid, msg.DefaultMessageWaitLookback, func(_ *block.Block, _ *types.SignedMessage, rcpt *vm.MessageReceipt) error { - ret = rcpt - l.Done() - return nil - }) - if err != nil { - return nil, err - } - l.Wait() - return ret, nil -} diff --git a/internal/app/go-filecoin/porcelain/miner.go b/internal/app/go-filecoin/porcelain/miner.go deleted file mode 100644 index c918605a93..0000000000 --- a/internal/app/go-filecoin/porcelain/miner.go +++ /dev/null @@ -1,290 +0,0 @@ -package porcelain - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - cid "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" - 
"github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// mcAPI is the subset of the plumbing.API that MinerCreate uses. -type mcAPI interface { - ConfigGet(dottedPath string) (interface{}, error) - ConfigSet(dottedPath string, paramJSON string) error - MessageSend(ctx context.Context, from, to address.Address, value types.AttoFIL, gasPrice types.AttoFIL, gasLimit gas.Unit, method abi.MethodNum, params interface{}) (cid.Cid, chan error, error) - MessageWait(ctx context.Context, msgCid cid.Cid, lookback uint64, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error - WalletDefaultAddress() (address.Address, error) -} - -type MinerStateView interface { - MinerControlAddresses(ctx context.Context, maddr address.Address) (owner, worker address.Address, err error) - MinerPeerID(ctx context.Context, maddr address.Address) (peer.ID, error) - MinerSectorConfiguration(ctx context.Context, maddr address.Address) (*state.MinerSectorConfiguration, error) - MinerSectorCount(ctx context.Context, maddr address.Address) (uint64, error) - MinerDeadlines(ctx context.Context, maddr address.Address) (*miner.Deadlines, error) - PowerNetworkTotal(ctx context.Context) (*state.NetworkPower, error) - MinerClaimedPower(ctx context.Context, miner address.Address) (raw, qa abi.StoragePower, err error) - MinerInfo(ctx context.Context, maddr address.Address) (miner.MinerInfo, error) -} - -// MinerCreate creates a new miner actor for the given account and returns its address. -// It will wait for the the actor to appear on-chain and add set the address to mining.minerAddress in the config. 
-// TODO: add ability to pass in a KeyInfo to store for signing blocks. -// See https://github.com/filecoin-project/go-filecoin/issues/1843 -func MinerCreate( - ctx context.Context, - plumbing mcAPI, - minerOwnerAddr address.Address, - gasPrice types.AttoFIL, - gasLimit gas.Unit, - sealProofType abi.RegisteredProof, - pid peer.ID, - collateral types.AttoFIL, -) (_ address.Address, err error) { - if minerOwnerAddr == (address.Address{}) { - minerOwnerAddr, err = plumbing.WalletDefaultAddress() - if err != nil { - return address.Undef, err - } - } - - addr, err := plumbing.ConfigGet("mining.minerAddress") - if err != nil { - return address.Undef, err - } - if addr != address.Undef { - return address.Undef, fmt.Errorf("can only have one miner per node") - } - - params := power.CreateMinerParams{ - Worker: minerOwnerAddr, - Owner: minerOwnerAddr, - Peer: pid, - SealProofType: sealProofType, - } - - smsgCid, _, err := plumbing.MessageSend( - ctx, - minerOwnerAddr, - builtin.StoragePowerActorAddr, - collateral, - gasPrice, - gasLimit, - builtin.MethodsPower.CreateMiner, - ¶ms, - ) - if err != nil { - return address.Undef, err - } - - var result power.CreateMinerReturn - err = plumbing.MessageWait(ctx, smsgCid, msg.DefaultMessageWaitLookback, func(blk *block.Block, smsg *types.SignedMessage, receipt *vm.MessageReceipt) (err error) { - if receipt.ExitCode != exitcode.Ok { - // Dragons: do we want to have this back? - return fmt.Errorf("Error executing actor code (exitcode: %d)", receipt.ExitCode) - } - return encoding.Decode(receipt.ReturnValue, &result) - }) - if err != nil { - return address.Undef, err - } - - if err = plumbing.ConfigSet("mining.minerAddress", result.RobustAddress.String()); err != nil { - return address.Undef, err - } - - return result.RobustAddress, nil -} - -// mpcAPI is the subset of the plumbing.API that MinerPreviewCreate uses. 
-type mpcAPI interface { - ConfigGet(dottedPath string) (interface{}, error) - MessagePreview(ctx context.Context, from, to address.Address, method abi.MethodNum, params ...interface{}) (gas.Unit, error) - NetworkGetPeerID() peer.ID - WalletDefaultAddress() (address.Address, error) -} - -// MinerPreviewCreate previews the Gas cost of creating a miner -func MinerPreviewCreate( - ctx context.Context, - plumbing mpcAPI, - fromAddr address.Address, - sectorSize abi.SectorSize, - pid peer.ID, -) (usedGas gas.Unit, err error) { - if fromAddr.Empty() { - fromAddr, err = plumbing.WalletDefaultAddress() - if err != nil { - return gas.NewGas(0), err - } - } - - if pid == "" { - pid = plumbing.NetworkGetPeerID() - } - - if _, err := plumbing.ConfigGet("mining.minerAddress"); err != nil { - return gas.NewGas(0), fmt.Errorf("can only have one miner per node") - } - - usedGas, err = plumbing.MessagePreview( - ctx, - fromAddr, - builtin.StorageMarketActorAddr, - builtin.MethodsPower.CreateMiner, - sectorSize, - pid, - ) - if err != nil { - return gas.NewGas(0), errors.Wrap(err, "Could not create miner. Please consult the documentation to setup your wallet and genesis block correctly") - } - - return usedGas, nil -} - -// MinerSetPriceResponse collects relevant stats from the set price process -type MinerSetPriceResponse struct { - MinerAddr address.Address - Price types.AttoFIL -} - -type minerStatusPlumbing interface { - MinerStateView(baseKey block.TipSetKey) (MinerStateView, error) - ChainTipSet(key block.TipSetKey) (block.TipSet, error) -} - -// MinerProvingWindow contains a miners proving period start and end as well -// as a set of their proving set. 
-type MinerProvingWindow struct { - Start abi.ChainEpoch - End abi.ChainEpoch - ProvingSet map[string]types.Commitments -} - -// MinerStatus contains a miners power and the total power of the network -type MinerStatus struct { - ActorAddress address.Address - OwnerAddress address.Address - WorkerAddress address.Address - PeerID peer.ID - - SealProofType abi.RegisteredProof - SectorSize abi.SectorSize - WindowPoStPartitionSectors uint64 - SectorCount uint64 - PoStFailureCount int - - RawPower abi.StoragePower - NetworkRawPower abi.StoragePower - NetworkQualityAdjustedPower abi.StoragePower - QualityAdjustedPower abi.StoragePower -} - -// MinerGetStatus queries the power of a given miner. -func MinerGetStatus(ctx context.Context, plumbing minerStatusPlumbing, minerAddr address.Address, key block.TipSetKey) (MinerStatus, error) { - view, err := plumbing.MinerStateView(key) - if err != nil { - return MinerStatus{}, err - } - sectorCount, err := view.MinerSectorCount(ctx, minerAddr) - if err != nil { - return MinerStatus{}, err - } - minerInfo, err := view.MinerInfo(ctx, minerAddr) - if err != nil { - return MinerStatus{}, err - } - rawPower, qaPower, err := view.MinerClaimedPower(ctx, minerAddr) - if err != nil { - return MinerStatus{}, err - } - totalPower, err := view.PowerNetworkTotal(ctx) - if err != nil { - return MinerStatus{}, err - } - - return MinerStatus{ - ActorAddress: minerAddr, - OwnerAddress: minerInfo.Owner, - WorkerAddress: minerInfo.Worker, - PeerID: minerInfo.PeerId, - - SealProofType: minerInfo.SealProofType, - SectorSize: minerInfo.SectorSize, - WindowPoStPartitionSectors: minerInfo.WindowPoStPartitionSectors, - SectorCount: sectorCount, - - RawPower: rawPower, - QualityAdjustedPower: qaPower, - NetworkRawPower: totalPower.RawBytePower, - NetworkQualityAdjustedPower: totalPower.QualityAdjustedPower, - }, nil -} - -// mwapi is the subset of the plumbing.API that MinerSetWorkerAddress use. 
-type mwapi interface { - ConfigGet(dottedPath string) (interface{}, error) - ChainHeadKey() block.TipSetKey - MinerStateView(baseKey block.TipSetKey) (MinerStateView, error) - MessageSend(ctx context.Context, from, to address.Address, value types.AttoFIL, gasPrice types.AttoFIL, gasLimit gas.Unit, method abi.MethodNum, params interface{}) (cid.Cid, chan error, error) -} - -// MinerSetWorkerAddress sets the worker address of the miner actor to the provided new address, -// waits for the message to appear on chain and then sets miner.workerAddr config to the new address. -func MinerSetWorkerAddress( - ctx context.Context, - plumbing mwapi, - workerAddr address.Address, - gasPrice types.AttoFIL, - gasLimit gas.Unit, -) (cid.Cid, error) { - - retVal, err := plumbing.ConfigGet("mining.minerAddress") - if err != nil { - return cid.Undef, err - } - minerAddr, ok := retVal.(address.Address) - if !ok { - return cid.Undef, errors.New("problem converting miner address") - } - - head := plumbing.ChainHeadKey() - state, err := plumbing.MinerStateView(head) - if err != nil { - return cid.Undef, errors.Wrap(err, "could not get miner owner address") - } - - owner, _, err := state.MinerControlAddresses(ctx, minerAddr) - if err != nil { - return cid.Undef, errors.Wrap(err, "could not get miner owner address") - } - - c, _, err := plumbing.MessageSend( - ctx, - owner, - minerAddr, - types.ZeroAttoFIL, - gasPrice, - gasLimit, - builtin.MethodsMiner.ChangeWorkerAddress, - &workerAddr) - return c, err -} diff --git a/internal/app/go-filecoin/porcelain/miner_test.go b/internal/app/go-filecoin/porcelain/miner_test.go deleted file mode 100644 index 5924103c57..0000000000 --- a/internal/app/go-filecoin/porcelain/miner_test.go +++ /dev/null @@ -1,290 +0,0 @@ -package porcelain_test - -import ( - "context" - "errors" - "fmt" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - 
"github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cfg" - . "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" -) - -type minerCreate struct { - testing *testing.T - address address.Address - config *cfg.Config - wallet *wallet.Wallet - msgCid cid.Cid - msgFail bool -} - -func newMinerCreate(t *testing.T, msgFail bool, address address.Address) *minerCreate { - testRepo := repo.NewInMemoryRepo() - backend, err := wallet.NewDSBackend(testRepo.WalletDatastore()) - require.NoError(t, err) - return &minerCreate{ - testing: t, - address: address, - config: cfg.NewConfig(testRepo), - wallet: wallet.New(backend), - msgFail: msgFail, - } -} - -func (mpc *minerCreate) ConfigGet(dottedPath string) (interface{}, error) { - return mpc.config.Get(dottedPath) -} - -func (mpc *minerCreate) ConfigSet(dottedPath string, paramJSON string) error { - return mpc.config.Set(dottedPath, paramJSON) -} - -func (mpc *minerCreate) 
MessageSend(ctx context.Context, from, to address.Address, value types.AttoFIL, gasPrice types.AttoFIL, gasLimit gas.Unit, method abi.MethodNum, params interface{}) (cid.Cid, chan error, error) { - if mpc.msgFail { - return cid.Cid{}, nil, errors.New("test Error") - } - mpc.msgCid = types.CidFromString(mpc.testing, "somecid") - - return mpc.msgCid, nil, nil -} - -func (mpc *minerCreate) MessageWait(ctx context.Context, msgCid cid.Cid, lookback uint64, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error { - assert.Equal(mpc.testing, msgCid, msgCid) - midAddr, err := address.NewIDAddress(100) - if err != nil { - return err - } - - value, err := encoding.Encode(&power.CreateMinerReturn{ - IDAddress: midAddr, - RobustAddress: mpc.address, - }) - if err != nil { - return err - } - - receipt := vm.MessageReceipt{ - ReturnValue: value, - ExitCode: exitcode.Ok, - } - return cb(nil, nil, &receipt) -} - -func (mpc *minerCreate) WalletDefaultAddress() (address.Address, error) { - return wallet.NewAddress(mpc.wallet, address.SECP256K1) -} - -func TestMinerCreate(t *testing.T) { - tf.UnitTest(t) - - t.Run("success", func(t *testing.T) { - ctx := context.Background() - expectedAddress := vmaddr.NewForTestGetter()() - plumbing := newMinerCreate(t, false, expectedAddress) - collateral := types.NewAttoFILFromFIL(1) - - addr, err := MinerCreate( - ctx, - plumbing, - address.Address{}, - types.NewGasPrice(0), - gas.NewGas(100), - constants.DevSealProofType, - "", - collateral, - ) - require.NoError(t, err) - assert.Equal(t, expectedAddress, addr) - }) - - t.Run("failure to send", func(t *testing.T) { - ctx := context.Background() - plumbing := newMinerCreate(t, true, address.Address{}) - collateral := types.NewAttoFILFromFIL(1) - - _, err := MinerCreate( - ctx, - plumbing, - address.Address{}, - types.NewGasPrice(0), - gas.NewGas(100), - constants.DevSealProofType, - "", - collateral, - ) - assert.Error(t, err, "Test Error") - }) -} - -type mStatusPlumbing 
struct { - ts block.TipSet - head block.TipSetKey - miner, owner, worker address.Address -} - -func (p *mStatusPlumbing) ChainHeadKey() block.TipSetKey { - return p.head -} - -func (p *mStatusPlumbing) ChainTipSet(_ block.TipSetKey) (block.TipSet, error) { - return p.ts, nil -} - -func (p *mStatusPlumbing) MinerStateView(baseKey block.TipSetKey) (MinerStateView, error) { - return &state.FakeStateView{ - Power: &state.NetworkPower{ - RawBytePower: big.NewInt(4), - QualityAdjustedPower: big.NewInt(4), - MinerCount: 0, - MinPowerMinerCount: 0, - }, - Miners: map[address.Address]*state.FakeMinerState{ - p.miner: { - Owner: p.owner, - Worker: p.worker, - ClaimedRawPower: abi.NewStoragePower(2), - ClaimedQAPower: abi.NewStoragePower(2), - }, - }, - }, nil -} - -func TestMinerGetStatus(t *testing.T) { - tf.UnitTest(t) - key := block.NewTipSetKey(types.NewCidForTestGetter()()) - ts, err := block.NewTipSet(&block.Block{}) - require.NoError(t, err) - - plumbing := mStatusPlumbing{ - ts, key, vmaddr.RequireIDAddress(t, 1), vmaddr.RequireIDAddress(t, 2), vmaddr.RequireIDAddress(t, 3), - } - status, err := MinerGetStatus(context.Background(), &plumbing, plumbing.miner, key) - assert.NoError(t, err) - assert.Equal(t, plumbing.owner, status.OwnerAddress) - assert.Equal(t, plumbing.worker, status.WorkerAddress) - assert.Equal(t, "4", status.NetworkQualityAdjustedPower.String()) - assert.Equal(t, "2", status.QualityAdjustedPower.String()) -} - -type mSetWorkerPlumbing struct { - head block.TipSetKey - getStatusFail, msgFail, msgWaitFail, cfgFail bool - minerAddr, ownerAddr, workerAddr address.Address -} - -func (p *mSetWorkerPlumbing) ChainHeadKey() block.TipSetKey { - return p.head -} - -func (p *mSetWorkerPlumbing) MinerStateView(baseKey block.TipSetKey) (MinerStateView, error) { - if p.getStatusFail { - return &state.FakeStateView{}, errors.New("for testing") - } - - return &state.FakeStateView{ - Miners: map[address.Address]*state.FakeMinerState{ - p.minerAddr: { - Owner: 
p.ownerAddr, - Worker: p.workerAddr, - }, - }, - }, nil -} - -func (p *mSetWorkerPlumbing) MessageSend(ctx context.Context, from, to address.Address, value types.AttoFIL, gasPrice types.AttoFIL, gasLimit gas.Unit, method abi.MethodNum, params interface{}) (cid.Cid, chan error, error) { - - if p.msgFail { - return cid.Cid{}, nil, errors.New("MsgFail") - } - return types.EmptyMessagesCID, nil, nil -} - -func (p *mSetWorkerPlumbing) MessageWait(ctx context.Context, msgCid cid.Cid, cb func(*block.Block, *types.SignedMessage, *vm.MessageReceipt) error) error { - if p.msgWaitFail { - return errors.New("MsgWaitFail") - } - return nil -} - -func (p *mSetWorkerPlumbing) ConfigGet(dottedKey string) (interface{}, error) { - if p.cfgFail { - return address.Undef, errors.New("ConfigGet failed") - } - if dottedKey == "mining.minerAddress" { - return p.minerAddr, nil - } - return address.Undef, fmt.Errorf("unknown config %s", dottedKey) -} - -func TestMinerSetWorkerAddress(t *testing.T) { - tf.UnitTest(t) - - minerOwner := vmaddr.RequireIDAddress(t, 100) - minerAddr := vmaddr.RequireIDAddress(t, 101) - workerAddr := vmaddr.RequireIDAddress(t, 102) - gprice := types.ZeroAttoFIL - glimit := gas.NewGas(0) - - t.Run("Calling set worker address sets address", func(t *testing.T) { - plumbing := &mSetWorkerPlumbing{ - workerAddr: workerAddr, - ownerAddr: minerOwner, - minerAddr: minerAddr, - } - - _, err := MinerSetWorkerAddress(context.Background(), plumbing, workerAddr, gprice, glimit) - assert.NoError(t, err) - assert.Equal(t, workerAddr, plumbing.workerAddr) - }) - - testCases := []struct { - name string - plumbing *mSetWorkerPlumbing - error string - }{ - { - name: "When MessageSend fails, returns the error and does not set worker address", - plumbing: &mSetWorkerPlumbing{msgFail: true}, - error: "MsgFail", - }, - { - name: "When ConfigGet fails, returns the error and does not set worker address", - plumbing: &mSetWorkerPlumbing{cfgFail: true}, - error: "CfgFail", - }, - { - name: 
"When MinerGetStatus fails, returns the error and does not set worker address", - plumbing: &mSetWorkerPlumbing{getStatusFail: true}, - error: "CfgFail", - }, - } - - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - _, err := MinerSetWorkerAddress(context.Background(), test.plumbing, workerAddr, gprice, glimit) - assert.Error(t, err, test.error) - assert.Empty(t, test.plumbing.workerAddr) - }) - } -} diff --git a/internal/app/go-filecoin/porcelain/mpool.go b/internal/app/go-filecoin/porcelain/mpool.go deleted file mode 100644 index fdeb0e1d83..0000000000 --- a/internal/app/go-filecoin/porcelain/mpool.go +++ /dev/null @@ -1,25 +0,0 @@ -package porcelain - -import ( - "context" - "time" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// The subset of plumbing used by MessagePoolWait -type mpwPlumbing interface { - MessagePoolPending() []*types.SignedMessage -} - -// MessagePoolWait waits until the message pool contains at least messageCount unmined messages. -func MessagePoolWait(ctx context.Context, plumbing mpwPlumbing, messageCount uint) ([]*types.SignedMessage, error) { - pending := plumbing.MessagePoolPending() - for len(pending) < int(messageCount) { - // Poll pending again after subscribing in case a message arrived since. 
- pending = plumbing.MessagePoolPending() - time.Sleep(200 * time.Millisecond) - } - - return pending, nil -} diff --git a/internal/app/go-filecoin/porcelain/mpool_test.go b/internal/app/go-filecoin/porcelain/mpool_test.go deleted file mode 100644 index 58ba94a9a3..0000000000 --- a/internal/app/go-filecoin/porcelain/mpool_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package porcelain_test - -import ( - "context" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -type fakeMpoolWaitPlumbing struct { - pending []*types.SignedMessage - afterPendingCalled func() // Invoked after each call to MessagePoolPending -} - -func newFakeMpoolWaitPlumbing(onPendingCalled func()) *fakeMpoolWaitPlumbing { - return &fakeMpoolWaitPlumbing{ - afterPendingCalled: onPendingCalled, - } -} - -func (plumbing *fakeMpoolWaitPlumbing) MessagePoolPending() []*types.SignedMessage { - if plumbing.afterPendingCalled != nil { - defer plumbing.afterPendingCalled() - } - return plumbing.pending -} - -func TestMessagePoolWait(t *testing.T) { - tf.UnitTest(t) - - ki := types.MustGenerateKeyInfo(1, 42) - signer := types.NewMockSigner(ki) - - t.Run("empty", func(t *testing.T) { - - plumbing := newFakeMpoolWaitPlumbing(nil) - msgs, e := porcelain.MessagePoolWait(context.Background(), plumbing, 0) - require.NoError(t, e) - assert.Equal(t, 0, len(msgs)) - }) - - t.Run("returns immediates", func(t *testing.T) { - - plumbing := newFakeMpoolWaitPlumbing(nil) - plumbing.pending = types.NewSignedMsgs(3, signer) - - msgs, e := porcelain.MessagePoolWait(context.Background(), plumbing, 3) - require.NoError(t, e) - assert.Equal(t, 3, len(msgs)) - }) - - t.Run("waits", func(t *testing.T) { - - var plumbing *fakeMpoolWaitPlumbing - callCount := 
0 - - // This callback to the MessagePoolPending plumbing orchestrates the appearance of - // pending messages and notifications on the pubsub subscription. - handlePendingCalled := func() { - if callCount == 0 { - // The first call is checking for the fast path; do nothing. - } else if callCount == 2 { - // Add a message to the pool. - plumbing.pending = types.NewSignedMsgs(1, signer) - } - callCount++ - } - - plumbing = newFakeMpoolWaitPlumbing(handlePendingCalled) - finished := assertMessagePoolWaitAsync(plumbing, 1, t) - - finished.Wait() - }) -} - -// assertMessagePoolWaitAsync waits for msgCount messages asynchronously -func assertMessagePoolWaitAsync(plumbing *fakeMpoolWaitPlumbing, msgCount uint, t *testing.T) *sync.WaitGroup { - finished := sync.WaitGroup{} - finished.Add(1) - - go func() { - msgs, e := porcelain.MessagePoolWait(context.Background(), plumbing, msgCount) - require.NoError(t, e) - assert.Equal(t, msgCount, uint(len(msgs))) - defer finished.Done() - }() - - return &finished -} diff --git a/internal/app/go-filecoin/porcelain/network.go b/internal/app/go-filecoin/porcelain/network.go deleted file mode 100644 index c863ff606f..0000000000 --- a/internal/app/go-filecoin/porcelain/network.go +++ /dev/null @@ -1,37 +0,0 @@ -package porcelain - -import ( - "context" - "fmt" - "time" - - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" - "github.com/pkg/errors" -) - -type netPlumbing interface { - NetworkPing(ctx context.Context, pid peer.ID) (<-chan ping.Result, error) -} - -// PingMinerWithTimeout pings a storage or retrieval miner, waiting the given -// timeout and returning descriptive errors. 
-func PingMinerWithTimeout(ctx context.Context, minerPID peer.ID, timeout time.Duration, plumbing netPlumbing) error { - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - res, err := netPlumbing.NetworkPing(plumbing, ctx, minerPID) - if err != nil { - return err - } - - select { - case _, ok := <-res: - if !ok { - return errors.New("couldn't establish connection to miner: ping channel closed") - } - return nil - case <-ctx.Done(): - return fmt.Errorf("couldn't establish connection to miner: %s, timed out after %s", ctx.Err(), timeout.String()) - } -} diff --git a/internal/app/go-filecoin/porcelain/network_test.go b/internal/app/go-filecoin/porcelain/network_test.go deleted file mode 100644 index 920234050f..0000000000 --- a/internal/app/go-filecoin/porcelain/network_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package porcelain_test - -import ( - "context" - "testing" - "time" - - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - - . "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/internal/pkg/net" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" -) - -type ntwkPingPlumbing struct { - self peer.ID // pinging this will fail immediately - rtt time.Duration // pinging all other ids will resolve after rtt -} - -func (npp *ntwkPingPlumbing) NetworkPing(ctx context.Context, pid peer.ID) (<-chan ping.Result, error) { - if pid == npp.self { - return nil, net.ErrPingSelf - } - c := make(chan ping.Result) - - go func() { - <-time.After(npp.rtt) - c <- ping.Result{ - RTT: npp.rtt, - Error: nil, - } - }() - return c, nil -} - -func newNtwkPingPlumbing(rtt time.Duration, self peer.ID) *ntwkPingPlumbing { - return &ntwkPingPlumbing{ - rtt: rtt, - self: self, - } -} - -func TestPingSuccess(t *testing.T) { - self := th.RequireRandomPeerID(t) - plumbing := 
newNtwkPingPlumbing(100*time.Millisecond, self) - pid := th.RequireRandomPeerID(t) - ctx := context.Background() - - assert.NoError(t, PingMinerWithTimeout(ctx, pid, time.Second, plumbing)) -} - -func TestPingSelfFails(t *testing.T) { - self := th.RequireRandomPeerID(t) - plumbing := newNtwkPingPlumbing(100*time.Millisecond, self) - ctx := context.Background() - - assert.Error(t, PingMinerWithTimeout(ctx, self, time.Second, plumbing)) -} - -func TestPingTimeout(t *testing.T) { - self := th.RequireRandomPeerID(t) - plumbing := newNtwkPingPlumbing(300*time.Millisecond, self) - pid := th.RequireRandomPeerID(t) - ctx := context.Background() - - assert.Error(t, PingMinerWithTimeout(ctx, pid, 100*time.Millisecond, plumbing)) -} diff --git a/internal/app/go-filecoin/porcelain/piecemanager.go b/internal/app/go-filecoin/porcelain/piecemanager.go deleted file mode 100644 index 7c31a1225a..0000000000 --- a/internal/app/go-filecoin/porcelain/piecemanager.go +++ /dev/null @@ -1,24 +0,0 @@ -package porcelain - -import ( - "context" - "io" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" -) - -type pmPlumbing interface { - PieceManager() piecemanager.PieceManager -} - -// SealPieceIntoNewSector writes the provided piece-bytes into a new sector. 
-func SealPieceIntoNewSector(ctx context.Context, p pmPlumbing, dealID abi.DealID, dealStart, dealEnd abi.ChainEpoch, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) error { - if p.PieceManager() == nil { - return errors.New("must be mining to add piece") - } - - return p.PieceManager().SealPieceIntoNewSector(ctx, dealID, dealStart, dealEnd, pieceSize, pieceReader) -} diff --git a/internal/app/go-filecoin/porcelain/protocol.go b/internal/app/go-filecoin/porcelain/protocol.go deleted file mode 100644 index 23cff98eb3..0000000000 --- a/internal/app/go-filecoin/porcelain/protocol.go +++ /dev/null @@ -1,79 +0,0 @@ -package porcelain - -import ( - "context" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" -) - -// SectorInfo provides information about a sector construction -type SectorInfo struct { - Size abi.SectorSize - MaxPieceSize abi.UnpaddedPieceSize -} - -// ProtocolParams contains parameters that modify the filecoin nodes protocol -type ProtocolParams struct { - Network string - AutoSealInterval uint - BlockTime time.Duration - SupportedSectors []SectorInfo -} - -type protocolParamsPlumbing interface { - ConfigGet(string) (interface{}, error) - ChainHeadKey() block.TipSetKey - ProtocolStateView(baseKey block.TipSetKey) (ProtocolStateView, error) - BlockTime() time.Duration -} - -type ProtocolStateView interface { - InitNetworkName(ctx context.Context) (string, error) -} - -// ProtocolParameters returns protocol parameter information about the node -func ProtocolParameters(ctx context.Context, plumbing protocolParamsPlumbing) (*ProtocolParams, error) { - autoSealIntervalInterface, err := plumbing.ConfigGet("mining.autoSealIntervalSeconds") - if err != nil { - return nil, err - } - - autoSealInterval, ok := autoSealIntervalInterface.(uint) - if !ok { - return nil, 
errors.New("Failed to read autoSealInterval from config") - } - - networkName, err := getNetworkName(ctx, plumbing) - if err != nil { - return nil, errors.Wrap(err, "could not retrieve network name") - } - - sectorSizes := []abi.SectorSize{constants.DevSectorSize, constants.FiveHundredTwelveMiBSectorSize} - - var supportedSectors []SectorInfo - for _, sectorSize := range sectorSizes { - maxUserBytes := abi.PaddedPieceSize(sectorSize).Unpadded() - supportedSectors = append(supportedSectors, SectorInfo{sectorSize, maxUserBytes}) - } - - return &ProtocolParams{ - Network: networkName, - AutoSealInterval: autoSealInterval, - BlockTime: plumbing.BlockTime(), - SupportedSectors: supportedSectors, - }, nil -} - -func getNetworkName(ctx context.Context, plumbing protocolParamsPlumbing) (string, error) { - view, err := plumbing.ProtocolStateView(plumbing.ChainHeadKey()) - if err != nil { - return "", errors.Wrap(err, "failed to query state") - } - return view.InitNetworkName(ctx) -} diff --git a/internal/app/go-filecoin/porcelain/protocol_test.go b/internal/app/go-filecoin/porcelain/protocol_test.go deleted file mode 100644 index f812c8ce37..0000000000 --- a/internal/app/go-filecoin/porcelain/protocol_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package porcelain_test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" -) - -const protocolTestParamBlockTime = time.Second - -type testProtocolParamsPlumbing struct { - testing *testing.T - autoSealInterval uint -} - -func (tppp *testProtocolParamsPlumbing) ConfigGet(path string) (interface{}, error) { - assert.Equal(tppp.testing, 
"mining.autoSealIntervalSeconds", path) - return tppp.autoSealInterval, nil -} - -func (tppp *testProtocolParamsPlumbing) ChainHeadKey() block.TipSetKey { - return block.NewTipSetKey() -} - -func (tppp *testProtocolParamsPlumbing) BlockTime() time.Duration { - return protocolTestParamBlockTime -} - -func (tppp *testProtocolParamsPlumbing) ProtocolStateView(_ block.TipSetKey) (porcelain.ProtocolStateView, error) { - return &state.FakeStateView{ - NetworkName: "protocolTest", - }, nil -} - -func TestProtocolParams(t *testing.T) { - t.Parallel() - - t.Run("emits the a ProtocolParams object with the correct values", func(t *testing.T) { - t.Parallel() - - plumbing := &testProtocolParamsPlumbing{ - testing: t, - autoSealInterval: 120, - } - - expected := &porcelain.ProtocolParams{ - AutoSealInterval: 120, - Network: "protocolTest", - SupportedSectors: []porcelain.SectorInfo{ - {constants.DevSectorSize, abi.PaddedPieceSize(constants.DevSectorSize).Unpadded()}, - {constants.FiveHundredTwelveMiBSectorSize, abi.PaddedPieceSize(constants.FiveHundredTwelveMiBSectorSize).Unpadded()}, - }, - BlockTime: protocolTestParamBlockTime, - } - - out, err := porcelain.ProtocolParameters(context.TODO(), plumbing) - require.NoError(t, err) - - assert.Equal(t, expected, out) - }) -} diff --git a/internal/app/go-filecoin/porcelain/wallet.go b/internal/app/go-filecoin/porcelain/wallet.go deleted file mode 100644 index 196f37f6ac..0000000000 --- a/internal/app/go-filecoin/porcelain/wallet.go +++ /dev/null @@ -1,64 +0,0 @@ -package porcelain - -import ( - "context" - - initact "github.com/filecoin-project/specs-actors/actors/builtin/init" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" -) - -// ErrNoDefaultFromAddress is returned when a default wallet address couldn't be determined (eg, 
there are zero addresses in the wallet). -var ErrNoDefaultFromAddress = errors.New("unable to determine a default wallet address") - -type wbPlumbing interface { - ActorGet(ctx context.Context, addr address.Address) (*actor.Actor, error) -} - -// WalletBalance gets the current balance associated with an address -func WalletBalance(ctx context.Context, plumbing wbPlumbing, addr address.Address) (abi.TokenAmount, error) { - act, err := plumbing.ActorGet(ctx, addr) - if err == types.ErrNotFound || err == initact.ErrAddressNotFound { - // if the account doesn't exit, the balance should be zero - return abi.NewTokenAmount(0), nil - } - if err != nil { - return abi.NewTokenAmount(0), err - } - return act.Balance, nil -} - -type wdaPlumbing interface { - ConfigGet(dottedPath string) (interface{}, error) - ConfigSet(dottedPath string, paramJSON string) error - WalletAddresses() []address.Address -} - -// WalletDefaultAddress returns a default wallet address from the config. -// If none is set it picks the first address in the wallet and -// sets it as the default in the config. -func WalletDefaultAddress(plumbing wdaPlumbing) (address.Address, error) { - ret, err := plumbing.ConfigGet("wallet.defaultAddress") - addr := ret.(address.Address) - if err != nil || !addr.Empty() { - return addr, err - } - - // No default is set; pick the 0th and make it the default. 
- if len(plumbing.WalletAddresses()) > 0 { - addr := plumbing.WalletAddresses()[0] - err := plumbing.ConfigSet("wallet.defaultAddress", addr.String()) - if err != nil { - return address.Undef, err - } - - return addr, nil - } - - return address.Undef, ErrNoDefaultFromAddress -} diff --git a/internal/app/go-filecoin/porcelain/wallet_test.go b/internal/app/go-filecoin/porcelain/wallet_test.go deleted file mode 100644 index 2a809776bd..0000000000 --- a/internal/app/go-filecoin/porcelain/wallet_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package porcelain_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cfg" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type wbTestPlumbing struct { - balance types.AttoFIL -} - -type wdaTestPlumbing struct { - config *cfg.Config - wallet *wallet.Wallet -} - -func newWdaTestPlumbing(t *testing.T) *wdaTestPlumbing { - repo := repo.NewInMemoryRepo() - backend, err := wallet.NewDSBackend(repo.WalletDatastore()) - require.NoError(t, err) - return &wdaTestPlumbing{ - config: cfg.NewConfig(repo), - wallet: wallet.New(backend), - } -} - -func (wbtp *wbTestPlumbing) ActorGet(ctx context.Context, addr address.Address) (*actor.Actor, error) { - aux := abi.NewTokenAmount(0) - aux.SetBits(wbtp.balance.Int.Bits()) - testActor := actor.NewActor(cid.Undef, aux, cid.Undef) - return testActor, nil -} - -func (wdatp 
*wdaTestPlumbing) ConfigGet(dottedPath string) (interface{}, error) { - return wdatp.config.Get(dottedPath) -} - -func (wdatp *wdaTestPlumbing) ConfigSet(dottedPath string, paramJSON string) error { - return wdatp.config.Set(dottedPath, paramJSON) -} - -func (wdatp *wdaTestPlumbing) WalletAddresses() []address.Address { - return wdatp.wallet.Addresses() -} - -func (wdatp *wdaTestPlumbing) WalletNewAddress() (address.Address, error) { - return wallet.NewAddress(wdatp.wallet, address.SECP256K1) -} - -func TestWalletBalance(t *testing.T) { - tf.UnitTest(t) - - t.Run("Returns the correct value for wallet balance", func(t *testing.T) { - ctx := context.Background() - - plumbing := &wbTestPlumbing{ - balance: types.NewAttoFILFromFIL(20), - } - balance, err := porcelain.WalletBalance(ctx, plumbing, address.Undef) - require.NoError(t, err) - - assert.Equal(t, types.NewAttoTokenFromToken(20), balance) - }) -} - -func TestWalletDefaultAddress(t *testing.T) { - tf.UnitTest(t) - - t.Run("it returns the configured wallet default if it exists", func(t *testing.T) { - wdatp := newWdaTestPlumbing(t) - - addr, err := wdatp.WalletNewAddress() - require.NoError(t, err) - err = wdatp.ConfigSet("wallet.defaultAddress", addr.String()) - require.NoError(t, err) - - _, err = porcelain.WalletDefaultAddress(wdatp) - require.NoError(t, err) - }) - - t.Run("default is consistent if none configured", func(t *testing.T) { - wdatp := newWdaTestPlumbing(t) - - addresses := []address.Address{} - for i := 0; i < 10; i++ { - a, err := wdatp.WalletNewAddress() - require.NoError(t, err) - addresses = append(addresses, a) - } - - expected, err := porcelain.WalletDefaultAddress(wdatp) - require.NoError(t, err) - require.True(t, isInList(expected, addresses)) - for i := 0; i < 30; i++ { - got, err := porcelain.WalletDefaultAddress(wdatp) - require.NoError(t, err) - assert.Equal(t, expected, got) - } - }) -} - -func isInList(needle address.Address, haystack []address.Address) bool { - for _, a := range 
haystack { - if a == needle { - return true - } - } - return false -} diff --git a/internal/pkg/block/block.go b/internal/pkg/block/block.go deleted file mode 100644 index 3436fd63d8..0000000000 --- a/internal/pkg/block/block.go +++ /dev/null @@ -1,183 +0,0 @@ -package block - -import ( - "encoding/json" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - node "github.com/ipfs/go-ipld-format" - - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" -) - -// BlockMessageLimit is the maximum number of messages in a block -const BlockMessageLimit = 512 - -// Block is a block in the blockchain. -type Block struct { - // control field for encoding struct as an array - _ struct{} `cbor:",toarray"` - - // Miner is the address of the miner actor that mined this block. - Miner address.Address `json:"miner"` - - // Ticket is the ticket submitted with this block. - Ticket Ticket `json:"ticket"` - - // ElectionProof is the vrf proof giving this block's miner authoring rights - ElectionProof *crypto.ElectionProof - - // BeaconEntries contain the verifiable oracle randomness used to elect - // this block's author leader - BeaconEntries []*drand.Entry - - // PoStProofs are the winning post proofs - PoStProofs []PoStProof `json:"PoStProofs"` - - // Parents is the set of parents this block was based on. Typically one, - // but can be several in the case where there were multiple winning ticket- - // holders for an epoch. 
- Parents TipSetKey `json:"parents"` - - // ParentWeight is the aggregate chain weight of the parent set. - ParentWeight fbig.Int `json:"parentWeight"` - - // Height is the chain height of this block. - Height abi.ChainEpoch `json:"height"` - - // StateRoot is the CID of the root of the state tree after application of the messages in the parent tipset - // to the parent tipset's state root. - StateRoot e.Cid `json:"stateRoot,omitempty"` - - // MessageReceipts is a list of receipts corresponding to the application of the messages in the parent tipset - // to the parent tipset's state root (corresponding to this block's StateRoot). - MessageReceipts e.Cid `json:"messageReceipts,omitempty"` - - // Messages is the set of messages included in this block - Messages e.Cid `json:"messages,omitempty"` - - // The aggregate signature of all BLS signed messages in the block - BLSAggregateSig *crypto.Signature `json:"blsAggregateSig"` - - // The timestamp, in seconds since the Unix epoch, at which this block was created. - Timestamp uint64 `json:"timestamp"` - - // The signature of the miner's worker key over the block - BlockSig *crypto.Signature `json:"blocksig"` - - // ForkSignaling is extra data used by miners to communicate - ForkSignaling uint64 - - cachedCid cid.Cid - - cachedBytes []byte -} - -// IndexMessagesField is the message field position in the encoded block -const IndexMessagesField = 10 - -// IndexParentsField is the parents field position in the encoded block -const IndexParentsField = 5 - -// Cid returns the content id of this block. -func (b *Block) Cid() cid.Cid { - if b.cachedCid == cid.Undef { - if b.cachedBytes == nil { - bytes, err := encoding.Encode(b) - if err != nil { - panic(err) - } - b.cachedBytes = bytes - } - c, err := constants.DefaultCidBuilder.Sum(b.cachedBytes) - if err != nil { - panic(err) - } - - b.cachedCid = c - } - - return b.cachedCid -} - -// ToNode converts the Block to an IPLD node. 
-func (b *Block) ToNode() node.Node { - data, err := encoding.Encode(b) - if err != nil { - panic(err) - } - c, err := constants.DefaultCidBuilder.Sum(data) - if err != nil { - panic(err) - } - - blk, err := blocks.NewBlockWithCid(data, c) - if err != nil { - panic(err) - } - node, err := cbor.DecodeBlock(blk) - if err != nil { - panic(err) - } - return node -} - -func (b *Block) String() string { - errStr := "(error encoding Block)" - cid := b.Cid() - js, err := json.MarshalIndent(b, "", " ") - if err != nil { - return errStr - } - return fmt.Sprintf("Block cid=[%v]: %s", cid, string(js)) -} - -// DecodeBlock decodes raw cbor bytes into a Block. -func DecodeBlock(b []byte) (*Block, error) { - var out Block - if err := encoding.Decode(b, &out); err != nil { - return nil, err - } - - out.cachedBytes = b - - return &out, nil -} - -// Equals returns true if the Block is equal to other. -func (b *Block) Equals(other *Block) bool { - return b.Cid().Equals(other.Cid()) -} - -// SignatureData returns the block's bytes with a null signature field for -// signature creation and verification -func (b *Block) SignatureData() []byte { - tmp := &Block{ - Miner: b.Miner, - Ticket: b.Ticket, - ElectionProof: b.ElectionProof, - Parents: b.Parents, - ParentWeight: b.ParentWeight, - Height: b.Height, - Messages: b.Messages, - StateRoot: b.StateRoot, - MessageReceipts: b.MessageReceipts, - PoStProofs: b.PoStProofs, - BeaconEntries: b.BeaconEntries, - Timestamp: b.Timestamp, - BLSAggregateSig: b.BLSAggregateSig, - ForkSignaling: b.ForkSignaling, - // BlockSig omitted - } - - return tmp.ToNode().RawData() -} diff --git a/internal/pkg/block/block_test.go b/internal/pkg/block/block_test.go deleted file mode 100644 index 430a4762f7..0000000000 --- a/internal/pkg/block/block_test.go +++ /dev/null @@ -1,443 +0,0 @@ -package block_test - -import ( - "bytes" - "encoding/json" - "reflect" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - fbig 
"github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - blk "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" -) - -func TestTriangleEncoding(t *testing.T) { - tf.UnitTest(t) - - // We want to be sure that: - // Block => json => Block - // yields exactly the same thing as: - // Block => IPLD node => CBOR => IPLD node => json => IPLD node => Block (!) - // because we want the output encoding of a Block directly from memory - // (first case) to be exactly the same as the output encoding of a Block from - // storage (second case). WTF you might say, and you would not be wrong. The - // use case is machine-parsing command output. For example dag_daemon_test - // dumps the block from memory as json (first case). It then dag gets - // the block by cid which yeilds a json-encoded ipld node (first half of - // the second case). It json decodes this ipld node and then decodes the node - // into a block (second half of the second case). 
I don't claim this is ideal, - // see: https://github.com/filecoin-project/go-filecoin/issues/599 - - newAddress := vmaddr.NewForTestGetter() - - testRoundTrip := func(t *testing.T, exp *blk.Block) { - jb, err := json.Marshal(exp) - require.NoError(t, err) - var jsonRoundTrip blk.Block - err = json.Unmarshal(jb, &jsonRoundTrip) - require.NoError(t, err) - - ipldNodeOrig, err := encoding.Encode(jsonRoundTrip) - assert.NoError(t, err) - var cborJSONRoundTrip blk.Block - err = encoding.Decode(ipldNodeOrig, &cborJSONRoundTrip) - assert.NoError(t, err) - types.AssertHaveSameCid(t, exp, &cborJSONRoundTrip) - } - t.Run("encoding block with zero fields works", func(t *testing.T) { - testRoundTrip(t, &blk.Block{}) - }) - - t.Run("encoding block with nonzero fields works", func(t *testing.T) { - // We should ensure that every field is set -- zero values might - // pass when non-zero values do not due to nil/null encoding. - posts := []blk.PoStProof{blk.NewPoStProof(constants.DevRegisteredWinningPoStProof, []byte{0x07})} - b := &blk.Block{ - Miner: newAddress(), - Ticket: blk.Ticket{VRFProof: []byte{0x01, 0x02, 0x03}}, - ElectionProof: &crypto.ElectionProof{VRFProof: []byte{0x0a, 0x0b}}, - Height: 2, - BeaconEntries: []*drand.Entry{ - { - Round: drand.Round(1), - Data: []byte{0x3}, - }, - }, - Messages: e.NewCid(types.CidFromString(t, "somecid")), - MessageReceipts: e.NewCid(types.CidFromString(t, "somecid")), - Parents: blk.NewTipSetKey(types.CidFromString(t, "somecid")), - ParentWeight: fbig.NewInt(1000), - StateRoot: e.NewCid(types.CidFromString(t, "somecid")), - Timestamp: 1, - BlockSig: &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: []byte{0x3}, - }, - BLSAggregateSig: &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: []byte{0x3}, - }, - PoStProofs: posts, - ForkSignaling: 6, - } - s := reflect.TypeOf(*b) - // This check is here to request that you add a non-zero value for new fields - // to the above (and update the field count below). 
- // Also please add non zero fields to "b" and "diff" in TestSignatureData - // and add a new check that different values of the new field result in - // different output data. - require.Equal(t, 18, s.NumField()) // Note: this also counts private fields - testRoundTrip(t, b) - }) -} - -func TestBlockString(t *testing.T) { - tf.UnitTest(t) - - var b blk.Block - cid := b.Cid() - - got := b.String() - assert.Contains(t, got, cid.String()) -} - -func TestDecodeBlock(t *testing.T) { - tf.UnitTest(t) - - t.Run("successfully decodes raw bytes to a Filecoin block", func(t *testing.T) { - addrGetter := vmaddr.NewForTestGetter() - - c1 := types.CidFromString(t, "a") - c2 := types.CidFromString(t, "b") - cM := types.CidFromString(t, "messages") - cR := types.CidFromString(t, "receipts") - - before := &blk.Block{ - Miner: addrGetter(), - Ticket: blk.Ticket{VRFProof: []uint8{}}, - Parents: blk.NewTipSetKey(c1), - Height: 2, - ParentWeight: fbig.Zero(), - Messages: e.NewCid(cM), - StateRoot: e.NewCid(c2), - MessageReceipts: e.NewCid(cR), - BlockSig: &crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: []byte{}}, - BLSAggregateSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte{}}, - } - - after, err := blk.DecodeBlock(before.ToNode().RawData()) - require.NoError(t, err) - assert.Equal(t, after.Cid(), before.Cid()) - assert.Equal(t, before, after) - }) - - t.Run("decode failure results in an error", func(t *testing.T) { - _, err := blk.DecodeBlock([]byte{1, 2, 3}) - assert.Error(t, err) - assert.Contains(t, err.Error(), "cbor: cannot unmarshal") - }) -} - -func TestEquals(t *testing.T) { - tf.UnitTest(t) - - c1 := types.CidFromString(t, "a") - c2 := types.CidFromString(t, "b") - - s1 := types.CidFromString(t, "state1") - s2 := types.CidFromString(t, "state2") - - var h1 abi.ChainEpoch = 1 - var h2 abi.ChainEpoch = 2 - - b1 := &blk.Block{Parents: blk.NewTipSetKey(c1), StateRoot: e.NewCid(s1), Height: h1} - b2 := &blk.Block{Parents: blk.NewTipSetKey(c1), StateRoot: 
e.NewCid(s1), Height: h1} - b3 := &blk.Block{Parents: blk.NewTipSetKey(c1), StateRoot: e.NewCid(s2), Height: h1} - b4 := &blk.Block{Parents: blk.NewTipSetKey(c2), StateRoot: e.NewCid(s1), Height: h1} - b5 := &blk.Block{Parents: blk.NewTipSetKey(c1), StateRoot: e.NewCid(s1), Height: h2} - b6 := &blk.Block{Parents: blk.NewTipSetKey(c2), StateRoot: e.NewCid(s1), Height: h2} - b7 := &blk.Block{Parents: blk.NewTipSetKey(c1), StateRoot: e.NewCid(s2), Height: h2} - b8 := &blk.Block{Parents: blk.NewTipSetKey(c2), StateRoot: e.NewCid(s2), Height: h1} - b9 := &blk.Block{Parents: blk.NewTipSetKey(c2), StateRoot: e.NewCid(s2), Height: h2} - assert.True(t, b1.Equals(b1)) - assert.True(t, b1.Equals(b2)) - assert.False(t, b1.Equals(b3)) - assert.False(t, b1.Equals(b4)) - assert.False(t, b1.Equals(b5)) - assert.False(t, b1.Equals(b6)) - assert.False(t, b1.Equals(b7)) - assert.False(t, b1.Equals(b8)) - assert.False(t, b1.Equals(b9)) - assert.True(t, b3.Equals(b3)) - assert.False(t, b3.Equals(b4)) - assert.False(t, b3.Equals(b6)) - assert.False(t, b3.Equals(b9)) - assert.False(t, b4.Equals(b5)) - assert.False(t, b5.Equals(b6)) - assert.False(t, b6.Equals(b7)) - assert.False(t, b7.Equals(b8)) - assert.False(t, b8.Equals(b9)) - assert.True(t, b9.Equals(b9)) -} - -func TestBlockJsonMarshal(t *testing.T) { - tf.UnitTest(t) - - var parent, child blk.Block - child.Miner = vmaddr.NewForTestGetter()() - child.Height = 1 - child.ParentWeight = fbig.Zero() - child.Parents = blk.NewTipSetKey(parent.Cid()) - child.StateRoot = e.NewCid(parent.Cid()) - - child.Messages = e.NewCid(types.CidFromString(t, "somecid")) - child.MessageReceipts = e.NewCid(types.CidFromString(t, "somecid")) - - marshalled, e1 := json.Marshal(&child) - assert.NoError(t, e1) - str := string(marshalled) - - assert.Contains(t, str, child.Miner.String()) - assert.Contains(t, str, parent.Cid().String()) - assert.Contains(t, str, child.Messages.String()) - assert.Contains(t, str, child.MessageReceipts.String()) - - // 
marshal/unmarshal symmetry - var unmarshalled blk.Block - e2 := json.Unmarshal(marshalled, &unmarshalled) - assert.NoError(t, e2) - - assert.Equal(t, child, unmarshalled) - types.AssertHaveSameCid(t, &child, &unmarshalled) - assert.True(t, child.Equals(&unmarshalled)) -} - -func TestSignatureData(t *testing.T) { - tf.UnitTest(t) - newAddress := vmaddr.NewForTestGetter() - posts := []blk.PoStProof{blk.NewPoStProof(constants.DevRegisteredWinningPoStProof, []byte{0x07})} - - b := &blk.Block{ - Miner: newAddress(), - Ticket: blk.Ticket{VRFProof: []byte{0x01, 0x02, 0x03}}, - ElectionProof: &crypto.ElectionProof{VRFProof: []byte{0x0a, 0x0b}}, - BeaconEntries: []*drand.Entry{ - { - Round: drand.Round(5), - Data: []byte{0x0c}, - }, - }, - Height: 2, - Messages: e.NewCid(types.CidFromString(t, "somecid")), - MessageReceipts: e.NewCid(types.CidFromString(t, "somecid")), - Parents: blk.NewTipSetKey(types.CidFromString(t, "somecid")), - ParentWeight: fbig.NewInt(1000), - ForkSignaling: 3, - StateRoot: e.NewCid(types.CidFromString(t, "somecid")), - Timestamp: 1, - PoStProofs: posts, - BlockSig: &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: []byte{0x3}, - }, - } - - diffPoSts := []blk.PoStProof{blk.NewPoStProof(constants.DevRegisteredWinningPoStProof, []byte{0x17})} - - diff := &blk.Block{ - Miner: newAddress(), - Ticket: blk.Ticket{VRFProof: []byte{0x03, 0x01, 0x02}}, - ElectionProof: &crypto.ElectionProof{VRFProof: []byte{0x0c, 0x0d}}, - BeaconEntries: []*drand.Entry{ - { - Round: drand.Round(44), - Data: []byte{0xc0}, - }, - }, - Height: 3, - Messages: e.NewCid(types.CidFromString(t, "someothercid")), - MessageReceipts: e.NewCid(types.CidFromString(t, "someothercid")), - Parents: blk.NewTipSetKey(types.CidFromString(t, "someothercid")), - ParentWeight: fbig.NewInt(1001), - ForkSignaling: 2, - StateRoot: e.NewCid(types.CidFromString(t, "someothercid")), - Timestamp: 4, - PoStProofs: diffPoSts, - BlockSig: &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: 
[]byte{0x4}, - }, - } - - // Changing BlockSig does not affect output - func() { - before := b.SignatureData() - - cpy := b.BlockSig - defer func() { b.BlockSig = cpy }() - - b.BlockSig = diff.BlockSig - after := b.SignatureData() - assert.True(t, bytes.Equal(before, after)) - }() - - // Changing all other fields does affect output - // Note: using reflectors doesn't seem to make this much less tedious - // because it appears that there is no generic field setting function. - func() { - before := b.SignatureData() - - cpy := b.Miner - defer func() { b.Miner = cpy }() - - b.Miner = diff.Miner - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.Ticket - defer func() { b.Ticket = cpy }() - - b.Ticket = diff.Ticket - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.ElectionProof - defer func() { b.ElectionProof = cpy }() - - b.ElectionProof = diff.ElectionProof - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.Height - defer func() { b.Height = cpy }() - - b.Height = diff.Height - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.Messages - defer func() { b.Messages = cpy }() - - b.Messages = diff.Messages - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.MessageReceipts - defer func() { b.MessageReceipts = cpy }() - - b.MessageReceipts = diff.MessageReceipts - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.Parents - defer func() { b.Parents = cpy }() - - b.Parents = diff.Parents - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - 
- func() { - before := b.SignatureData() - - cpy := b.ParentWeight - defer func() { b.ParentWeight = cpy }() - - b.ParentWeight = diff.ParentWeight - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.ForkSignaling - defer func() { b.ForkSignaling = cpy }() - - b.ForkSignaling = diff.ForkSignaling - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.StateRoot - defer func() { b.StateRoot = cpy }() - - b.StateRoot = diff.StateRoot - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.Timestamp - defer func() { b.Timestamp = cpy }() - - b.Timestamp = diff.Timestamp - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - - cpy := b.PoStProofs - defer func() { b.PoStProofs = cpy }() - - b.PoStProofs = diff.PoStProofs - after := b.SignatureData() - assert.False(t, bytes.Equal(before, after)) - }() - - func() { - before := b.SignatureData() - cpy := b.BeaconEntries - defer func() { - b.BeaconEntries = cpy - }() - - b.BeaconEntries = diff.BeaconEntries - after := b.SignatureData() - - assert.False(t, bytes.Equal(before, after)) - }() - -} diff --git a/internal/pkg/block/chain_info.go b/internal/pkg/block/chain_info.go deleted file mode 100644 index 2953cd760b..0000000000 --- a/internal/pkg/block/chain_info.go +++ /dev/null @@ -1,47 +0,0 @@ -package block - -import ( - "fmt" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/libp2p/go-libp2p-core/peer" -) - -// ChainInfo is used to track metadata about a peer and its chain. -type ChainInfo struct { - // The originator of the TipSetKey propagation wave. - Source peer.ID - // The peer that sent us the TipSetKey message. 
- Sender peer.ID - Head TipSetKey - Height abi.ChainEpoch -} - -// NewChainInfo creates a chain info from a peer id a head tipset key and a -// chain height. -func NewChainInfo(source peer.ID, sender peer.ID, head TipSetKey, height abi.ChainEpoch) *ChainInfo { - return &ChainInfo{ - Source: source, - Sender: sender, - Head: head, - Height: height, - } -} - -// Returns a human-readable string representation of a chain info -func (i *ChainInfo) String() string { - return fmt.Sprintf("{source=%s sender:%s height=%d head=%s}", i.Source, i.Sender, i.Height, i.Head) -} - -// CISlice is for sorting chain infos -type CISlice []*ChainInfo - -// Len returns the number of chain infos in the slice. -func (cis CISlice) Len() int { return len(cis) } - -// Swap swaps chain infos. -func (cis CISlice) Swap(i, j int) { cis[i], cis[j] = cis[j], cis[i] } - -// Less compares chain infos on peer ID. There should only ever be one chain -// info per peer in a CISlice. -func (cis CISlice) Less(i, j int) bool { return string(cis[i].Source) < string(cis[j].Source) } diff --git a/internal/pkg/block/epost_info.go b/internal/pkg/block/epost_info.go deleted file mode 100644 index 91c8bf50b3..0000000000 --- a/internal/pkg/block/epost_info.go +++ /dev/null @@ -1,31 +0,0 @@ -package block - -import ( - "github.com/filecoin-project/specs-actors/actors/abi" -) - -// PoStProof is a winning post proof included in a block header -type PoStProof struct { - _ struct{} `cbor:",toarray"` - RegisteredProof abi.RegisteredProof - ProofBytes []byte -} - -// NewPoStProof constructs an epost proof from registered proof and bytes -func NewPoStProof(rpp abi.RegisteredProof, bs []byte) PoStProof { - return PoStProof{ - RegisteredProof: rpp, - ProofBytes: bs, - } -} - -// FromABIPoStProofs converts the abi post proof type to a local type for -// serialization purposes -func FromABIPoStProofs(postProofs ...abi.PoStProof) []PoStProof { - out := make([]PoStProof, len(postProofs)) - for i, p := range postProofs { - 
out[i] = PoStProof{RegisteredProof: p.RegisteredProof, ProofBytes: p.ProofBytes} - } - - return out -} diff --git a/internal/pkg/block/full_block.go b/internal/pkg/block/full_block.go deleted file mode 100644 index 18781ea646..0000000000 --- a/internal/pkg/block/full_block.go +++ /dev/null @@ -1,20 +0,0 @@ -package block - -import "github.com/filecoin-project/go-filecoin/internal/pkg/types" - -// FullBlock carries a block header and the message and receipt collections -// referenced from the header. -type FullBlock struct { - Header *Block - SECPMessages []*types.SignedMessage - BLSMessages []*types.UnsignedMessage -} - -// NewFullBlock constructs a new full block. -func NewFullBlock(header *Block, secp []*types.SignedMessage, bls []*types.UnsignedMessage) *FullBlock { - return &FullBlock{ - Header: header, - SECPMessages: secp, - BLSMessages: bls, - } -} diff --git a/internal/pkg/block/testing.go b/internal/pkg/block/testing.go deleted file mode 100644 index d7da49dbd9..0000000000 --- a/internal/pkg/block/testing.go +++ /dev/null @@ -1,15 +0,0 @@ -package block - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -// RequireNewTipSet instantiates and returns a new tipset of the given blocks -// and requires that the setup validation succeed. -func RequireNewTipSet(t *testing.T, blks ...*Block) TipSet { - ts, err := NewTipSet(blks...) - require.NoError(t, err) - return ts -} diff --git a/internal/pkg/block/ticket.go b/internal/pkg/block/ticket.go deleted file mode 100644 index e6ca45ce4a..0000000000 --- a/internal/pkg/block/ticket.go +++ /dev/null @@ -1,28 +0,0 @@ -package block - -import ( - "bytes" - "fmt" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" -) - -// A Ticket is a marker of a tick of the blockchain's clock. It is the source -// of randomness for proofs of storage and leader election. It is generated -// by the miner of a block using a VRF. 
-type Ticket struct { - _ struct{} `cbor:",toarray"` - // A proof output by running a VRF on the VRFProof of the parent ticket - VRFProof crypto.VRFPi -} - -// String returns the string representation of the VRFProof of the ticket -func (t Ticket) String() string { - return fmt.Sprintf("%x", t.VRFProof) -} - -func (t *Ticket) Compare(o *Ticket) int { - tDigest := t.VRFProof.Digest() - oDigest := o.VRFProof.Digest() - return bytes.Compare(tDigest[:], oDigest[:]) -} diff --git a/internal/pkg/block/tipset.go b/internal/pkg/block/tipset.go deleted file mode 100644 index 05e0f21ee2..0000000000 --- a/internal/pkg/block/tipset.go +++ /dev/null @@ -1,159 +0,0 @@ -package block - -import ( - "bytes" - "sort" - - "github.com/filecoin-project/specs-actors/actors/abi" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - "github.com/pkg/errors" -) - -// TipSet is a non-empty, immutable set of blocks at the same height with the same parent set. -// Blocks in a tipset are canonically ordered by ticket. Blocks may be iterated either via -// ToSlice() (which involves a shallow copy) or efficiently by index with At(). -// TipSet is a lightweight value type; passing by pointer is usually unnecessary. -// -// Canonical tipset block ordering does not match the order of CIDs in a TipSetKey used as -// a tipset "key". -type TipSet struct { - // This slice is wrapped in a struct to enforce immutability. - blocks []*Block - // Key is computed at construction and cached. - key TipSetKey -} - -var ( - // errNoBlocks is returned from the tipset constructor when given no blocks. - errNoBlocks = errors.New("no blocks for tipset") - // errUndefTipSet is returned from tipset methods invoked on an undefined tipset. - errUndefTipSet = errors.New("undefined tipset") -) - -// UndefTipSet is a singleton representing a nil or undefined tipset. -var UndefTipSet = TipSet{} - -// NewTipSet builds a new TipSet from a collection of blocks. 
-// The blocks must be distinct (different CIDs), have the same height, and same parent set. -func NewTipSet(blocks ...*Block) (TipSet, error) { - if len(blocks) == 0 { - return UndefTipSet, errNoBlocks - } - - first := blocks[0] - height := first.Height - parents := first.Parents - weight := first.ParentWeight - cids := make([]cid.Cid, len(blocks)) - - sorted := make([]*Block, len(blocks)) - for i, blk := range blocks { - if i > 0 { // Skip redundant checks for first block - if blk.Height != height { - return UndefTipSet, errors.Errorf("Inconsistent block heights %d and %d", height, blk.Height) - } - if !blk.Parents.Equals(parents) { - return UndefTipSet, errors.Errorf("Inconsistent block parents %s and %s", parents.String(), blk.Parents.String()) - } - if !blk.ParentWeight.Equals(weight) { - return UndefTipSet, errors.Errorf("Inconsistent block parent weights %d and %d", weight, blk.ParentWeight) - } - } - sorted[i] = blk - } - - // Sort blocks by ticket - sort.Slice(sorted, func(i, j int) bool { - cmp := sorted[i].Ticket.Compare(&sorted[j].Ticket) - if cmp == 0 { - // Break ticket ties with the block CIDs, which are distinct. - cmp = bytes.Compare(sorted[i].Cid().Bytes(), sorted[j].Cid().Bytes()) - } - return cmp < 0 - }) - for i, blk := range sorted { - cids[i] = blk.Cid() - } - // Duplicate blocks (CIDs) are rejected here, pass that error through. - key, err := NewTipSetKeyFromUnique(cids...) - if err != nil { - return UndefTipSet, err - } - return TipSet{sorted, key}, nil -} - -// Defined checks whether the tipset is defined. -// Invoking any other methods on an undefined tipset will result in undefined behaviour (c.f. cid.Undef) -func (ts TipSet) Defined() bool { - return len(ts.blocks) > 0 -} - -// Len returns the number of blocks in the tipset. -func (ts TipSet) Len() int { - return len(ts.blocks) -} - -// At returns the i'th block in the tipset. -// An index outside the half-open range [0, Len()) will panic. 
-func (ts TipSet) At(i int) *Block { - return ts.blocks[i] -} - -// Key returns a key for the tipset. -func (ts TipSet) Key() TipSetKey { - return ts.key -} - -// ToSlice returns an ordered slice of pointers to the tipset's blocks. -func (ts TipSet) ToSlice() []*Block { - slice := make([]*Block, len(ts.blocks)) - copy(slice, ts.blocks) - return slice -} - -// MinTicket returns the smallest ticket of all blocks in the tipset. -func (ts TipSet) MinTicket() (Ticket, error) { - if len(ts.blocks) == 0 { - return Ticket{}, errUndefTipSet - } - return ts.blocks[0].Ticket, nil -} - -// Height returns the height of a tipset. -func (ts TipSet) Height() (abi.ChainEpoch, error) { - if len(ts.blocks) == 0 { - return 0, errUndefTipSet - } - return ts.blocks[0].Height, nil -} - -// Parents returns the CIDs of the parents of the blocks in the tipset. -func (ts TipSet) Parents() (TipSetKey, error) { - if len(ts.blocks) == 0 { - return TipSetKey{}, errUndefTipSet - } - return ts.blocks[0].Parents, nil -} - -// ParentWeight returns the tipset's ParentWeight in fixed point form. -func (ts TipSet) ParentWeight() (fbig.Int, error) { - if len(ts.blocks) == 0 { - return fbig.Zero(), errUndefTipSet - } - return ts.blocks[0].ParentWeight, nil -} - -// Equals tests whether the tipset contains the same blocks as another. -// Equality is not tested deeply: two tipsets are considered equal if their keys (ordered block CIDs) are equal. -func (ts TipSet) Equals(ts2 TipSet) bool { - return ts.Key().Equals(ts2.Key()) -} - -// String returns a formatted string of the CIDs in the TipSet. -// "{ }" -// Note: existing callers use this as a unique key for the tipset. 
We should change them -// to use the TipSetKey explicitly -func (ts TipSet) String() string { - return ts.Key().String() -} diff --git a/internal/pkg/block/tipset_key.go b/internal/pkg/block/tipset_key.go deleted file mode 100644 index e549126616..0000000000 --- a/internal/pkg/block/tipset_key.go +++ /dev/null @@ -1,225 +0,0 @@ -package block - -import ( - "encoding/json" - "fmt" - "sort" - - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/ipfs/go-cid" - "github.com/pkg/errors" -) - -// TipSetKey is an immutable set of CIDs forming a unique key for a TipSet. -// Equal keys will have equivalent iteration order. CIDs are maintained in -// the same order as the canonical iteration order of blocks in a tipset (which is by ticket). -// This convention is maintained by the caller. The order of input cids to the constructor -// must be the same as this canonical order. It is the caller's responsibility to not -// construct a key with duplicate ids -// TipSetKey is a lightweight value type; passing by pointer is usually unnecessary. -type TipSetKey struct { - // The slice is wrapped in a struct to enforce immutability. - cids []e.Cid -} - -// NewTipSetKey initialises a new TipSetKey. -// Duplicate CIDs are silently ignored. -func NewTipSetKey(ids ...cid.Cid) TipSetKey { - if len(ids) == 0 { - // Empty set is canonically represented by a nil slice rather than zero-length slice - // so that a zero-value exactly matches an empty one. - return TipSetKey{} - } - - cids := make([]e.Cid, len(ids)) - for i := 0; i < len(ids); i++ { - cids[i] = e.NewCid(ids[i]) - } - return TipSetKey{cids} -} - -// NewTipSetKeyFromUnique initialises a set with CIDs that are expected to be unique. -func NewTipSetKeyFromUnique(ids ...cid.Cid) (TipSetKey, error) { - s := NewTipSetKey(ids...) 
- if s.Len() != len(AsSet(ids)) { - return TipSetKey{}, errors.Errorf("Duplicate CID in %s", ids) - } - return s, nil -} - -// Empty checks whether the set is empty. -func (s TipSetKey) Empty() bool { - return s.Len() == 0 -} - -// Has checks whether the set contains `id`. -func (s TipSetKey) Has(id cid.Cid) bool { - // Find index of the first CID not less than id. - idx := sort.Search(len(s.cids), func(i int) bool { - return !cidLess(s.cids[i].Cid, id) - }) - return idx < len(s.cids) && s.cids[idx].Cid.Equals(id) -} - -// Len returns the number of items in the set. -func (s TipSetKey) Len() int { - return len(s.cids) -} - -// ToSlice returns a slice listing the cids in the set. -func (s TipSetKey) ToSlice() []cid.Cid { - return unwrap(s.cids) -} - -// Iter returns an iterator that allows the caller to iterate the set in its sort order. -func (s TipSetKey) Iter() TipSetKeyIterator { - return TipSetKeyIterator{ - s: s.ToSlice(), - i: 0, - } -} - -// Equals checks whether the set contains exactly the same CIDs as another. -func (s TipSetKey) Equals(other TipSetKey) bool { - if len(s.cids) != len(other.cids) { - return false - } - for i := 0; i < len(s.cids); i++ { - if !s.cids[i].Cid.Equals(other.cids[i].Cid) { - return false - } - } - return true -} - -// ContainsAll checks if another set is a subset of this one. -// We can assume that the relative order of members of one key is -// maintained in the other since we assume that all ids are sorted -// by corresponding block ticket value. -func (s *TipSetKey) ContainsAll(other TipSetKey) bool { - // Since we assume the ids must have the same relative sorting we can - // perform one pass over this set, advancing the other index whenever the - // values match. - otherIdx := 0 - for i := 0; i < s.Len() && otherIdx < other.Len(); i++ { - if s.cids[i].Cid.Equals(other.cids[otherIdx].Cid) { - otherIdx++ - } - } - // otherIdx is advanced the full length only if every element was found in this set. 
- return otherIdx == other.Len() -} - -// String returns a string listing the cids in the set. -func (s TipSetKey) String() string { - out := "{" - for it := s.Iter(); !it.Complete(); it.Next() { - out = fmt.Sprintf("%s %s", out, it.Value().String()) - } - return out + " }" -} - -// MarshalJSON serializes the key to JSON. -func (s TipSetKey) MarshalJSON() ([]byte, error) { - return json.Marshal(s.cids) -} - -// UnmarshalJSON parses JSON into the key. -// Note that this pattern technically violates the immutability. -func (s *TipSetKey) UnmarshalJSON(b []byte) error { - var cids []cid.Cid - if err := json.Unmarshal(b, &cids); err != nil { - return err - } - - k, err := NewTipSetKeyFromUnique(cids...) - if err != nil { - return err - } - s.cids = k.cids - return nil -} - -// MarshalCBOR marshals the tipset key as an array of cids -func (s TipSetKey) MarshalCBOR() ([]byte, error) { - // encode the zero value as length zero slice instead of nil per spec - if s.cids == nil { - encodableZero := make([]e.Cid, 0) - return encoding.Encode(encodableZero) - } - return encoding.Encode(s.cids) -} - -// UnmarshalCBOR unmarshals a cbor array of cids to a tipset key -func (s *TipSetKey) UnmarshalCBOR(data []byte) error { - var sortedEncCids []e.Cid - err := encoding.Decode(data, &sortedEncCids) - if err != nil { - return err - } - sortedCids := unwrap(sortedEncCids) - tmp, err := NewTipSetKeyFromUnique(sortedCids...) - if err != nil { - return err - } - *s = tmp - return nil -} - -// TipSetKeyIterator is a iterator over a sorted collection of CIDs. -type TipSetKeyIterator struct { - s []cid.Cid - i int -} - -// Complete returns true if the iterator has reached the end of the set. -func (si *TipSetKeyIterator) Complete() bool { - return si.i >= len(si.s) -} - -// Next advances the iterator to the next item and returns true if there is such an item. 
-func (si *TipSetKeyIterator) Next() bool { - switch { - case si.i < len(si.s): - si.i++ - return si.i < len(si.s) - case si.i == len(si.s): - return false - default: - panic("unreached") - } -} - -// Value returns the current item for the iterator -func (si TipSetKeyIterator) Value() cid.Cid { - switch { - case si.i < len(si.s): - return si.s[si.i] - case si.i == len(si.s): - return cid.Undef - default: - panic("unreached") - } -} - -func cidLess(c1, c2 cid.Cid) bool { - return c1.KeyString() < c2.KeyString() -} - -// unwrap goes from a slice of encodable cids to a slice of cids -func unwrap(eCids []e.Cid) []cid.Cid { - out := make([]cid.Cid, len(eCids)) - for i := 0; i < len(eCids); i++ { - out[i] = eCids[i].Cid - } - return out -} - -func AsSet(cids []cid.Cid) map[cid.Cid]struct{} { - set := make(map[cid.Cid]struct{}) - for _, c := range cids { - set[c] = struct{}{} - } - return set -} diff --git a/internal/pkg/block/tipset_key_test.go b/internal/pkg/block/tipset_key_test.go deleted file mode 100644 index 2e59fdd702..0000000000 --- a/internal/pkg/block/tipset_key_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package block_test - -import ( - "encoding/json" - "testing" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - blk "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestTipSetKey(t *testing.T) { - tf.UnitTest(t) - - c1, _ := cid.Parse("zDPWYqFD4b5HLFuPfhkjJJkfvm4r8KLi1V9e2ahJX6Ab16Ay24pJ") - c2, _ := cid.Parse("zDPWYqFD4b5HLFuPfhkjJJkfvm4r8KLi1V9e2ahJX6Ab16Ay24pK") - c3, _ := cid.Parse("zDPWYqFD4b5HLFuPfhkjJJkfvm4r8KLi1V9e2ahJX6Ab16Ay24pL") - c4, _ := cid.Parse("zDPWYqFD4b5HLFuPfhkjJJkfvm4r8KLi1V9e2ahJX6Ab16Ay24pM") - - t.Run("empty", func(t *testing.T) { - s := 
blk.NewTipSetKey() - assert.True(t, s.Empty()) - assert.Equal(t, 0, s.Len()) - - it := s.Iter() - assert.Equal(t, it.Value(), cid.Undef) - assert.False(t, it.Next()) - }) - - t.Run("zero value is empty", func(t *testing.T) { - var s blk.TipSetKey - assert.True(t, s.Empty()) - assert.Equal(t, 0, s.Len()) - - it := s.Iter() - assert.Equal(t, it.Value(), cid.Undef) - assert.False(t, it.Next()) - - assert.True(t, s.Equals(blk.NewTipSetKey())) - - // Bytes must be equal in order to have equivalent CIDs - zeroBytes, err := encoding.Encode(s) - require.NoError(t, err) - emptyBytes, err := encoding.Encode(blk.NewTipSetKey()) - require.NoError(t, err) - assert.Equal(t, zeroBytes, emptyBytes) - }) - - t.Run("order set by caller", func(t *testing.T) { - s1 := blk.NewTipSetKey(c1, c2, c3) - s2 := blk.NewTipSetKey(c3, c2, c1) - s3 := blk.NewTipSetKey(c3, c2, c1) - - assert.False(t, s1.Equals(s2)) - assert.True(t, s2.Equals(s3)) - - // Sorted order is not a defined property, but an important implementation detail to - // verify unless the implementation is changed. 
- assert.Equal(t, []cid.Cid{c1, c2, c3}, s1.ToSlice()) - assert.Equal(t, []cid.Cid{c3, c2, c1}, s2.ToSlice()) - }) - - t.Run("fails if unexpected duplicates", func(t *testing.T) { - _, e := blk.NewTipSetKeyFromUnique(c1, c2, c3) - assert.NoError(t, e) - _, e = blk.NewTipSetKeyFromUnique(c1, c1, c2, c3) - assert.Error(t, e) - }) - - t.Run("contains", func(t *testing.T) { - empty := blk.NewTipSetKey() - s := blk.NewTipSetKey(c1, c2, c3) - - assert.False(t, empty.Has(c1)) - assert.True(t, s.Has(c1)) - assert.True(t, s.Has(c2)) - assert.True(t, s.Has(c3)) - assert.False(t, s.Has(c4)) - - assert.True(t, s.ContainsAll(empty)) - assert.True(t, s.ContainsAll(blk.NewTipSetKey(c1))) - assert.True(t, s.ContainsAll(s)) - assert.False(t, s.ContainsAll(blk.NewTipSetKey(c4))) - assert.False(t, s.ContainsAll(blk.NewTipSetKey(c1, c4))) - - assert.True(t, empty.ContainsAll(empty)) - assert.False(t, empty.ContainsAll(s)) - }) - - t.Run("iteration", func(t *testing.T) { - s := blk.NewTipSetKey(c3, c2, c1) - it := s.Iter() - assert.True(t, c3.Equals(it.Value())) - assert.True(t, it.Next()) - assert.True(t, c2.Equals(it.Value())) - assert.True(t, it.Next()) - assert.True(t, c1.Equals(it.Value())) - assert.False(t, it.Next()) - assert.Equal(t, it.Value(), cid.Undef) - assert.True(t, it.Complete()) - }) -} - -func TestTipSetKeyCborRoundtrip(t *testing.T) { - tf.UnitTest(t) - - makeCid := types.NewCidForTestGetter() - exp := blk.NewTipSetKey(makeCid(), makeCid(), makeCid()) - buf, err := encoding.Encode(exp) - assert.NoError(t, err) - - var act blk.TipSetKey - err = encoding.Decode(buf, &act) - assert.NoError(t, err) - - assert.Equal(t, 3, act.Len()) - assert.True(t, act.Equals(exp)) -} - -func TestTipSetKeyJSONRoundtrip(t *testing.T) { - tf.UnitTest(t) - - makeCid := types.NewCidForTestGetter() - exp := blk.NewTipSetKey(makeCid(), makeCid(), makeCid()) - - buf, err := json.Marshal(exp) - assert.NoError(t, err) - - var act blk.TipSetKey - err = json.Unmarshal(buf, &act) - assert.NoError(t, 
err) - - assert.Equal(t, 3, act.Len()) - assert.True(t, act.Equals(exp)) -} diff --git a/internal/pkg/block/tipset_test.go b/internal/pkg/block/tipset_test.go deleted file mode 100644 index a24df0a312..0000000000 --- a/internal/pkg/block/tipset_test.go +++ /dev/null @@ -1,222 +0,0 @@ -package block_test - -import ( - "bytes" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/stretchr/testify/assert" - - blk "github.com/filecoin-project/go-filecoin/internal/pkg/block" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -const parentWeight = uint64(1337000) - -var ( - cid1, cid2 cid.Cid - mockSignerForTest types.MockSigner - cidGetter func() cid.Cid -) - -func init() { - cidGetter = types.NewCidForTestGetter() - cid1 = cidGetter() - cid2 = cidGetter() - - mockSignerForTest, _ = types.NewMockSignersAndKeyInfo(2) -} - -func block(t *testing.T, ticket []byte, height int, parentCid cid.Cid, parentWeight, timestamp uint64, msg string) *blk.Block { - return &blk.Block{ - Ticket: blk.Ticket{VRFProof: ticket}, - Parents: blk.NewTipSetKey(parentCid), - ParentWeight: fbig.NewInt(int64(parentWeight)), - Height: 42 + abi.ChainEpoch(height), - Messages: e.NewCid(cidGetter()), - StateRoot: e.NewCid(cidGetter()), - MessageReceipts: e.NewCid(cidGetter()), - Timestamp: timestamp, - } -} - -func TestTipSet(t *testing.T) { - tf.UnitTest(t) - - b1, b2, b3 := makeTestBlocks(t) - - t.Run("undefined tipset", func(t *testing.T) { - assert.False(t, blk.UndefTipSet.Defined()) - // No other methods are defined - }) - - t.Run("ordered by ticket digest", func(t *testing.T) { - ts := RequireNewTipSet(t, b3, b2, b1) // Presented in reverse order - assert.True(t, 
ts.Defined()) - assert.Equal(t, b1, ts.At(0)) - assert.Equal(t, b2, ts.At(1)) - assert.Equal(t, b3, ts.At(2)) - assert.Equal(t, []*blk.Block{b1, b2, b3}, ts.ToSlice()) - }) - - t.Run("order breaks ties with CID", func(t *testing.T) { - b1 := block(t, []byte{1}, 1, cid1, parentWeight, 1, "1") - b2 := block(t, []byte{1}, 1, cid1, parentWeight, 2, "2") - - ts := RequireNewTipSet(t, b1, b2) - if bytes.Compare(b1.Cid().Bytes(), b2.Cid().Bytes()) < 0 { - assert.Equal(t, []*blk.Block{b1, b2}, ts.ToSlice()) - } else { - assert.Equal(t, []*blk.Block{b2, b1}, ts.ToSlice()) - } - }) - - t.Run("len", func(t *testing.T) { - t1 := RequireNewTipSet(t, b1) - assert.True(t, t1.Defined()) - assert.Equal(t, 1, t1.Len()) - - t3 := RequireNewTipSet(t, b1, b2, b3) - assert.True(t, t3.Defined()) - assert.Equal(t, 3, t3.Len()) - }) - - t.Run("key", func(t *testing.T) { - assert.Equal(t, blk.NewTipSetKey(b1.Cid()), RequireNewTipSet(t, b1).Key()) - // sorted ticket order is b1, b2, b3 - assert.Equal(t, blk.NewTipSetKey(b1.Cid(), b2.Cid(), b3.Cid()), - RequireNewTipSet(t, b2, b3, b1).Key()) - }) - - t.Run("height", func(t *testing.T) { - tsHeight, _ := RequireNewTipSet(t, b1).Height() - assert.Equal(t, b1.Height, tsHeight) - }) - - t.Run("parents", func(t *testing.T) { - tsParents, _ := RequireNewTipSet(t, b1).Parents() - assert.Equal(t, b1.Parents, tsParents) - }) - - t.Run("parent weight", func(t *testing.T) { - tsParentWeight, _ := RequireNewTipSet(t, b1).ParentWeight() - assert.Equal(t, types.Uint64ToBig(parentWeight), tsParentWeight) - }) - - t.Run("min ticket", func(t *testing.T) { - tsTicket, _ := RequireNewTipSet(t, b1).MinTicket() - assert.Equal(t, b1.Ticket, tsTicket) - - tsTicket, _ = RequireNewTipSet(t, b2).MinTicket() - assert.Equal(t, b2.Ticket, tsTicket) - - tsTicket, _ = RequireNewTipSet(t, b3, b2, b1).MinTicket() - assert.Equal(t, b1.Ticket, tsTicket) - }) - - t.Run("equality", func(t *testing.T) { - ts1a := RequireNewTipSet(t, b3, b2, b1) - ts1b := RequireNewTipSet(t, b1, 
b2, b3) - ts2 := RequireNewTipSet(t, b1, b2) - ts3 := RequireNewTipSet(t, b2) - - assert.Equal(t, ts1a, ts1a) - assert.Equal(t, ts1a, ts1b) - assert.NotEqual(t, ts1a, ts2) - assert.NotEqual(t, ts1a, ts3) - assert.NotEqual(t, ts1a, blk.UndefTipSet) - assert.NotEqual(t, ts2, blk.UndefTipSet) - assert.NotEqual(t, ts3, blk.UndefTipSet) - }) - - t.Run("slice", func(t *testing.T) { - assert.Equal(t, []*blk.Block{b1}, RequireNewTipSet(t, b1).ToSlice()) - - ts := RequireNewTipSet(t, b3, b2, b1) // Presented in reverse order - slice := ts.ToSlice() - assert.Equal(t, []*blk.Block{b1, b2, b3}, slice) - - slice[1] = b1 - slice[2] = b2 - assert.NotEqual(t, slice, ts.ToSlice()) - assert.Equal(t, []*blk.Block{b1, b2, b3}, ts.ToSlice()) // tipset is immutable - }) - - t.Run("string", func(t *testing.T) { - // String shouldn't really need testing, but some existing code uses the string as a - // datastore key and depends on the format exactly. - assert.Equal(t, "{ "+b1.Cid().String()+" }", RequireNewTipSet(t, b1).String()) - - expected := blk.NewTipSetKey(b1.Cid(), b2.Cid(), b3.Cid()).String() - assert.Equal(t, expected, RequireNewTipSet(t, b3, b2, b1).String()) - }) - - t.Run("empty new tipset fails", func(t *testing.T) { - _, err := blk.NewTipSet() - require.Error(t, err) - assert.Contains(t, err.Error(), "no blocks for tipset") - }) - - t.Run("duplicate block fails new tipset", func(t *testing.T) { - b1, b2, b3 = makeTestBlocks(t) - ts, err := blk.NewTipSet(b1, b2, b1) - assert.Error(t, err) - assert.False(t, ts.Defined()) - }) - - t.Run("mismatched height fails new tipset", func(t *testing.T) { - b1, b2, b3 = makeTestBlocks(t) - b1.Height = 3 - ts, err := blk.NewTipSet(b1, b2, b3) - assert.Error(t, err) - assert.False(t, ts.Defined()) - }) - - t.Run("mismatched parents fails new tipset", func(t *testing.T) { - b1, b2, b3 = makeTestBlocks(t) - b1.Parents = blk.NewTipSetKey(cid1, cid2) - ts, err := blk.NewTipSet(b1, b2, b3) - assert.Error(t, err) - assert.False(t, ts.Defined()) - 
}) - - t.Run("mismatched parent weight fails new tipset", func(t *testing.T) { - b1, b2, b3 = makeTestBlocks(t) - b1.ParentWeight = fbig.NewInt(3000) - ts, err := blk.NewTipSet(b1, b2, b3) - assert.Error(t, err) - assert.False(t, ts.Defined()) - }) -} - -func TestUndefKey(t *testing.T) { - ts := blk.UndefTipSet - udKey := ts.Key() - assert.True(t, udKey.Empty()) -} - -func makeTestBlocks(t *testing.T) (*blk.Block, *blk.Block, *blk.Block) { - b1 := block(t, []byte{2}, 1, cid1, parentWeight, 1, "1") - b2 := block(t, []byte{3}, 1, cid1, parentWeight, 2, "2") - b3 := block(t, []byte{1}, 1, cid1, parentWeight, 3, "3") - - // The tickets are constructed such that their digests are ordered. - require.True(t, b1.Ticket.Compare(&b2.Ticket) < 0) - require.True(t, b2.Ticket.Compare(&b3.Ticket) < 0) - return b1, b2, b3 -} - -// RequireNewTipSet instantiates and returns a new tipset of the given blocks -// and requires that the setup validation succeed. -func RequireNewTipSet(t *testing.T, blks ...*blk.Block) blk.TipSet { - ts, err := blk.NewTipSet(blks...) 
- require.NoError(t, err) - return ts -} diff --git a/internal/pkg/cborutil/msgreader.go b/internal/pkg/cborutil/msgreader.go deleted file mode 100644 index 687ff0489f..0000000000 --- a/internal/pkg/cborutil/msgreader.go +++ /dev/null @@ -1,32 +0,0 @@ -package cborutil - -import ( - "bufio" - "fmt" - "io" - - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" -) - -// MaxMessageSize is the maximum message size to read -const MaxMessageSize = 256 << 10 - -// ErrMessageTooLarge is returned when reading too big of a message -var ErrMessageTooLarge = fmt.Errorf("attempted to read a message larger than the limit") - -// MsgReader is a cbor message reader -type MsgReader struct { - br *bufio.Reader -} - -// NewMsgReader returns a new MsgReader -func NewMsgReader(r io.Reader) *MsgReader { - return &MsgReader{ - br: bufio.NewReader(r), - } -} - -// ReadMsg reads a cbor message into the given object -func (mr *MsgReader) ReadMsg(i interface{}) error { - return encoding.StreamDecode(mr.br, i) -} diff --git a/internal/pkg/cborutil/store.go b/internal/pkg/cborutil/store.go deleted file mode 100644 index 1dacdb1082..0000000000 --- a/internal/pkg/cborutil/store.go +++ /dev/null @@ -1,84 +0,0 @@ -package cborutil - -import ( - "context" - "time" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" -) - -// IpldStore is a go-filecoin implementation of the go-hamt-ipld CborStore -// interface. -type IpldStore struct { - blocks Blocks -} - -// Blocks is the interface of block storage needed by the IpldStore -type Blocks interface { - GetBlock(context.Context, cid.Cid) (blocks.Block, error) - AddBlock(blocks.Block) error -} - -// Blockstore is the interface of internal block storage used to implement -// a default Blocks interface. 
-type Blockstore interface { - Get(cid.Cid) (blocks.Block, error) - Put(blocks.Block) error -} - -type bswrapper struct { - bs Blockstore -} - -func (bs *bswrapper) GetBlock(_ context.Context, c cid.Cid) (blocks.Block, error) { - return bs.bs.Get(c) -} - -func (bs *bswrapper) AddBlock(blk blocks.Block) error { - return bs.bs.Put(blk) -} - -// NewIpldStore returns an ipldstore backed by a blockstore. -func NewIpldStore(bs Blockstore) *IpldStore { - return &IpldStore{blocks: &bswrapper{bs}} -} - -// Get decodes the cbor bytes in the ipld node pointed to by cid c into out. -func (s *IpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { - ctx, cancel := context.WithTimeout(ctx, time.Second*10) - defer cancel() - - blk, err := s.blocks.GetBlock(ctx, c) - if err != nil { - return err - } - return encoding.Decode(blk.RawData(), out) -} - -// Put encodes the interface into cbor bytes and stores them as a block -func (s *IpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { - data, err := encoding.Encode(v) - if err != nil { - return cid.Undef, err - } - - c, err := constants.DefaultCidBuilder.Sum(data) - if err != nil { - return cid.Undef, err - } - - blk, err := blocks.NewBlockWithCid(data, c) - if err != nil { - return cid.Undef, err - } - - if err := s.blocks.AddBlock(blk); err != nil { - return cid.Undef, err - } - - return c, nil -} diff --git a/internal/pkg/chain/car.go b/internal/pkg/chain/car.go deleted file mode 100644 index 21fcb1e5ce..0000000000 --- a/internal/pkg/chain/car.go +++ /dev/null @@ -1,220 +0,0 @@ -package chain - -import ( - "context" - "io" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - format "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-car" - carutil "github.com/ipld/go-car/util" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -var logCar = logging.Logger("chain/car") - -type carChainReader interface { - GetTipSet(block.TipSetKey) (block.TipSet, error) -} -type carMessageReader interface { - MessageProvider -} - -type carStateReader interface { - ChainStateTree(ctx context.Context, c cid.Cid) ([]format.Node, error) -} - -// Fields need to stay lower case to match car's default refmt encoding as -// refmt can't handle arbitrary casing. -type carHeader struct { - Roots block.TipSetKey `cbor:"roots"` - Version uint64 `cbor:"version"` -} - -// Export will export a chain (all blocks and their messages) to the writer `out`. -func Export(ctx context.Context, headTS block.TipSet, cr carChainReader, mr carMessageReader, sr carStateReader, out io.Writer) error { - // ensure we don't duplicate writes to the car file. // e.g. only write EmptyMessageCID once. - filter := make(map[cid.Cid]bool) - - // fail if headTS isn't in the store. - if _, err := cr.GetTipSet(headTS.Key()); err != nil { - return err - } - - // Write the car header - ch := carHeader{ - Roots: headTS.Key(), - Version: 1, - } - chb, err := encoding.Encode(ch) - if err != nil { - return err - } - - logCar.Debugf("car file chain head: %s", headTS.Key()) - if err := carutil.LdWrite(out, chb); err != nil { - return err - } - - iter := IterAncestors(ctx, cr, headTS) - // accumulate TipSets in descending order. 
- for ; !iter.Complete(); err = iter.Next() { - if err != nil { - return err - } - tip := iter.Value() - // write blocks - for i := 0; i < tip.Len(); i++ { - hdr := tip.At(i) - logCar.Debugf("writing block: %s", hdr.Cid()) - - if !filter[hdr.Cid()] { - if err := carutil.LdWrite(out, hdr.Cid().Bytes(), hdr.ToNode().RawData()); err != nil { - return err - } - filter[hdr.Cid()] = true - } - - meta, err := mr.LoadTxMeta(ctx, hdr.Messages.Cid) - if err != nil { - return err - } - - if !filter[hdr.Messages.Cid] { - logCar.Debugf("writing txMeta: %s", hdr.Messages) - if err := exportTxMeta(ctx, out, meta); err != nil { - return err - } - filter[hdr.Messages.Cid] = true - } - - secpMsgs, blsMsgs, err := mr.LoadMessages(ctx, hdr.Messages.Cid) - if err != nil { - return err - } - - if !filter[meta.SecpRoot.Cid] { - logCar.Debugf("writing secp message collection: %s", hdr.Messages) - if err := exportAMTSignedMessages(ctx, out, secpMsgs); err != nil { - return err - } - filter[meta.SecpRoot.Cid] = true - } - - if !filter[meta.BLSRoot.Cid] { - logCar.Debugf("writing bls message collection: %s", hdr.Messages) - if err := exportAMTUnsignedMessages(ctx, out, blsMsgs); err != nil { - return err - } - filter[meta.BLSRoot.Cid] = true - } - - // TODO(#3473) we can remove MessageReceipts from the exported file once addressed. 
- rect, err := mr.LoadReceipts(ctx, hdr.MessageReceipts.Cid) - if err != nil { - return err - } - - if !filter[hdr.MessageReceipts.Cid] { - logCar.Debugf("writing message-receipt collection: %s", hdr.Messages) - if err := exportAMTReceipts(ctx, out, rect); err != nil { - return err - } - filter[hdr.MessageReceipts.Cid] = true - } - - if hdr.Height == 0 { - logCar.Debugf("writing state tree: %s", hdr.StateRoot) - stateRoots, err := sr.ChainStateTree(ctx, hdr.StateRoot.Cid) - if err != nil { - return err - } - for _, r := range stateRoots { - if err := carutil.LdWrite(out, r.Cid().Bytes(), r.RawData()); err != nil { - return err - } - } - } - } - } - return nil -} - -func exportAMTSignedMessages(ctx context.Context, out io.Writer, smsgs []*types.SignedMessage) error { - ms := carWritingMessageStore(out) - - cids, err := ms.storeSignedMessages(smsgs) - if err != nil { - return err - } - - _, err = ms.storeAMTCids(ctx, cids) - return err -} - -func exportAMTUnsignedMessages(ctx context.Context, out io.Writer, umsgs []*types.UnsignedMessage) error { - ms := carWritingMessageStore(out) - - cids, err := ms.storeUnsignedMessages(umsgs) - if err != nil { - return err - } - - _, err = ms.storeAMTCids(ctx, cids) - return err -} - -func exportAMTReceipts(ctx context.Context, out io.Writer, receipts []vm.MessageReceipt) error { - ms := carWritingMessageStore(out) - - _, err := ms.StoreReceipts(ctx, receipts) - return err -} - -func exportTxMeta(ctx context.Context, out io.Writer, meta types.TxMeta) error { - ms := carWritingMessageStore(out) - _, err := ms.StoreTxMeta(ctx, meta) - return err -} - -func carWritingMessageStore(out io.Writer) *MessageStore { - return NewMessageStore(carExportBlockstore{out: out}) -} - -type carStore interface { - Put(blocks.Block) error -} - -// Import imports a chain from `in` to `bs`. 
-func Import(ctx context.Context, cs carStore, in io.Reader) (block.TipSetKey, error) { - header, err := car.LoadCar(cs, in) - if err != nil { - return block.UndefTipSet.Key(), err - } - headKey := block.NewTipSetKey(header.Roots...) - return headKey, nil -} - -// carExportBlockstore allows a structure that would normally put blocks in a block store to output to a car file instead. -type carExportBlockstore struct { - out io.Writer -} - -func (cs carExportBlockstore) DeleteBlock(c cid.Cid) error { panic("not implement") } -func (cs carExportBlockstore) Has(c cid.Cid) (bool, error) { panic("not implement") } -func (cs carExportBlockstore) Get(c cid.Cid) (blocks.Block, error) { panic("not implement") } -func (cs carExportBlockstore) GetSize(c cid.Cid) (int, error) { panic("not implement") } -func (cs carExportBlockstore) Put(b blocks.Block) error { - return carutil.LdWrite(cs.out, b.Cid().Bytes(), b.RawData()) -} -func (cs carExportBlockstore) PutMany(b []blocks.Block) error { panic("not implement") } -func (cs carExportBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - panic("not implement") -} -func (cs carExportBlockstore) HashOnRead(enabled bool) { panic("not implement") } diff --git a/internal/pkg/chain/car_test.go b/internal/pkg/chain/car_test.go deleted file mode 100644 index a22c8070b4..0000000000 --- a/internal/pkg/chain/car_test.go +++ /dev/null @@ -1,326 +0,0 @@ -package chain_test - -import ( - "bufio" - "bytes" - "context" - - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-amt-ipld/v2" - - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - format "github.com/ipfs/go-ipld-format" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - typegen "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/chain" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -func TestChainImportExportGenesis(t *testing.T) { - tf.UnitTest(t) - - ctx, gene, cb, carW, carR, bstore := setupDeps(t) - - // export the car file to a carW - mustExportToBuffer(ctx, t, gene, cb, &mockStateReader{}, carW) - - // import the car file from the carR - importedKey := mustImportFromBuffer(ctx, t, bstore, carR) - assert.Equal(t, gene.Key(), importedKey) - - // walk the blockstore and assert it had all blocks imported - validateBlockstoreImport(ctx, t, gene.Key(), gene.Key(), bstore) -} - -func TestChainImportExportSingleTip(t *testing.T) { - tf.UnitTest(t) - ctx, gene, cb, carW, carR, bstore := setupDeps(t) - // extend the head by one - headTS := cb.AppendOn(gene, 1) - - // export the car file to carW - mustExportToBuffer(ctx, t, headTS, cb, &mockStateReader{}, carW) - - // import the car file from carR - importedKey := mustImportFromBuffer(ctx, t, bstore, carR) - assert.Equal(t, headTS.Key(), importedKey) - - // walk the blockstore and assert it had all blocks imported - validateBlockstoreImport(ctx, t, headTS.Key(), gene.Key(), bstore) -} - -func TestChainImportExportWideTip(t *testing.T) { - tf.UnitTest(t) - ctx, gene, cb, carW, carR, bstore := setupDeps(t) - // extend the head by one, two wide - headTS := cb.AppendOn(gene, 2) - // export the car file to a carW - mustExportToBuffer(ctx, t, headTS, cb, &mockStateReader{}, carW) - // import the car file from carR - importedKey := mustImportFromBuffer(ctx, t, bstore, carR) - assert.Equal(t, headTS.Key(), importedKey) - // walk the blockstore and assert it had all blocks imported - 
validateBlockstoreImport(ctx, t, headTS.Key(), gene.Key(), bstore) -} - -func TestChainImportExportMultiTip(t *testing.T) { - tf.UnitTest(t) - ctx, gene, cb, carW, carR, bstore := setupDeps(t) - // extend the head by one - headTS := cb.AppendOn(gene, 1) - headTS = cb.AppendOn(headTS, 1) - - // export the car file to a buffer - mustExportToBuffer(ctx, t, headTS, cb, &mockStateReader{}, carW) - - // import the car file from the buffer - importedKey := mustImportFromBuffer(ctx, t, bstore, carR) - assert.Equal(t, headTS.Key(), importedKey) - - // walk the blockstore and assert it had all blocks imported - validateBlockstoreImport(ctx, t, headTS.Key(), gene.Key(), bstore) -} - -func TestChainImportExportMultiWideTip(t *testing.T) { - tf.UnitTest(t) - ctx, gene, cb, carW, carR, bstore := setupDeps(t) - // extend the head by one - headTS := cb.AppendOn(gene, 1) - // extend by one, two wide. - headTS = cb.AppendOn(headTS, 2) - - // export the car file to a buffer - mustExportToBuffer(ctx, t, headTS, cb, &mockStateReader{}, carW) - - // import the car file from the buffer - importedKey := mustImportFromBuffer(ctx, t, bstore, carR) - assert.Equal(t, headTS.Key(), importedKey) - - // walk the blockstore and assert it had all blocks imported - validateBlockstoreImport(ctx, t, headTS.Key(), gene.Key(), bstore) -} - -func TestChainImportExportMultiWideBaseTip(t *testing.T) { - tf.UnitTest(t) - ctx, gene, cb, carW, carR, bstore := setupDeps(t) - // extend the head by one, two wide - headTS := cb.AppendOn(gene, 2) - // extend by one - headTS = cb.AppendOn(headTS, 1) - - // export the car file to a buffer - mustExportToBuffer(ctx, t, headTS, cb, &mockStateReader{}, carW) - - // import the car file from the buffer - importedKey := mustImportFromBuffer(ctx, t, bstore, carR) - assert.Equal(t, headTS.Key(), importedKey) - - // walk the blockstore and assert it had all blocks imported - validateBlockstoreImport(ctx, t, headTS.Key(), gene.Key(), bstore) -} - -func 
TestChainImportExportMultiWideTips(t *testing.T) { - tf.UnitTest(t) - ctx, gene, cb, carW, carR, bstore := setupDeps(t) - // extend the head by one, two wide - headTS := cb.AppendOn(gene, 2) - // extend by one, two wide - headTS = cb.AppendOn(headTS, 2) - - // export the car file to a buffer - mustExportToBuffer(ctx, t, headTS, cb, &mockStateReader{}, carW) - - // import the car file from the buffer - importedKey := mustImportFromBuffer(ctx, t, bstore, carR) - assert.Equal(t, headTS.Key(), importedKey) - - // walk the blockstore and assert it had all blocks imported - validateBlockstoreImport(ctx, t, headTS.Key(), gene.Key(), bstore) -} - -func TestChainImportExportMessages(t *testing.T) { - tf.UnitTest(t) - - ctx, gene, cb, carW, carR, bstore := setupDeps(t) - - keys := types.MustGenerateKeyInfo(1, 42) - mm := vm.NewMessageMaker(t, keys) - alice := mm.Addresses()[0] - - ts1 := cb.AppendManyOn(1, gene) - msgs := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 1), - mm.NewSignedMessage(alice, 2), - mm.NewSignedMessage(alice, 3), - mm.NewSignedMessage(alice, 4), - mm.NewSignedMessage(alice, 5), - } - ts2 := cb.BuildOneOn(ts1, func(b *chain.BlockBuilder) { - b.AddMessages(msgs, []*types.UnsignedMessage{}) - }) - - // export the car file to a buffer - mustExportToBuffer(ctx, t, ts2, cb, &mockStateReader{}, carW) - - // import the car file from the buffer - importedKey := mustImportFromBuffer(ctx, t, bstore, carR) - assert.Equal(t, ts2.Key(), importedKey) - - // walk the blockstore and assert it had all blocks imported - validateBlockstoreImport(ctx, t, ts2.Key(), gene.Key(), bstore) -} - -func TestChainImportExportMultiTipSetWithMessages(t *testing.T) { - tf.UnitTest(t) - - ctx, gene, cb, carW, carR, bstore := setupDeps(t) - - keys := types.MustGenerateKeyInfo(1, 42) - mm := vm.NewMessageMaker(t, keys) - alice := mm.Addresses()[0] - - ts1 := cb.AppendManyOn(1, gene) - msgs := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 1), - mm.NewSignedMessage(alice, 2), 
- mm.NewSignedMessage(alice, 3), - mm.NewSignedMessage(alice, 4), - mm.NewSignedMessage(alice, 5), - } - ts2 := cb.BuildOneOn(ts1, func(b *chain.BlockBuilder) { - b.AddMessages( - msgs, - []*types.UnsignedMessage{}, - ) - }) - - ts3 := cb.AppendOn(ts2, 3) - - // export the car file to a buffer - mustExportToBuffer(ctx, t, ts3, cb, &mockStateReader{}, carW) - - // import the car file from the buffer - importedKey := mustImportFromBuffer(ctx, t, bstore, carR) - assert.Equal(t, ts3.Key(), importedKey) - - // walk the blockstore and assert it had all blocks imported - validateBlockstoreImport(ctx, t, ts3.Key(), gene.Key(), bstore) -} - -func mustExportToBuffer(ctx context.Context, t *testing.T, head block.TipSet, cb *chain.Builder, msr *mockStateReader, carW *bufio.Writer) { - err := chain.Export(ctx, head, cb, cb, msr, carW) - assert.NoError(t, err) - require.NoError(t, carW.Flush()) -} - -func mustImportFromBuffer(ctx context.Context, t *testing.T, bstore blockstore.Blockstore, carR *bufio.Reader) block.TipSetKey { - importedKey, err := chain.Import(ctx, bstore, carR) - assert.NoError(t, err) - return importedKey -} - -func setupDeps(t *testing.T) (context.Context, block.TipSet, *chain.Builder, *bufio.Writer, *bufio.Reader, blockstore.Blockstore) { - // context for operations - ctx := context.Background() - - // chain builder and its genesis - cb := chain.NewBuilder(t, address.Undef) - gene := cb.NewGenesis() - // buffers to read and write the car file from - var buf bytes.Buffer - carW := bufio.NewWriter(&buf) - carR := bufio.NewReader(&buf) - - // a store to import the car file to and validate from. 
- mds := ds.NewMapDatastore() - bstore := blockstore.NewBlockstore(mds) - return ctx, gene, cb, carW, carR, bstore - -} - -func validateBlockstoreImport(ctx context.Context, t *testing.T, start, stop block.TipSetKey, bstore blockstore.Blockstore) { - as := cbor.NewCborStore(bstore) - - // walk the blockstore and assert it had all blocks imported - cur := start - for { - var parents block.TipSetKey - for _, c := range cur.ToSlice() { - bsBlk, err := bstore.Get(c) - assert.NoError(t, err) - blk, err := block.DecodeBlock(bsBlk.RawData()) - assert.NoError(t, err) - - txMetaBlk, err := bstore.Get(blk.Messages.Cid) - require.NoError(t, err) - var meta types.TxMeta - require.NoError(t, encoding.Decode(txMetaBlk.RawData(), &meta)) - - secpAMT, err := amt.LoadAMT(ctx, as, meta.SecpRoot.Cid) - require.NoError(t, err) - - var smsg types.SignedMessage - requireAMTDecoding(ctx, t, bstore, secpAMT, &smsg) - - blsAMT, err := amt.LoadAMT(ctx, as, meta.BLSRoot.Cid) - require.NoError(t, err) - - var umsg types.UnsignedMessage - requireAMTDecoding(ctx, t, bstore, blsAMT, &umsg) - - rectAMT, err := amt.LoadAMT(ctx, as, blk.MessageReceipts.Cid) - require.NoError(t, err) - - var rect vm.MessageReceipt - requireAMTDecoding(ctx, t, bstore, rectAMT, &rect) - - if parents.Len() == 0 { - parents = blk.Parents - } else { - assert.True(t, blk.Parents.Equals(parents), "malformed tipsets in imported chain") - } - } - if cur.Equals(stop) { - break - } - if cur.Equals(parents) { - t.Fatal("validate blockstore import is looping") - } - cur = parents - } -} - -func requireAMTDecoding(ctx context.Context, t *testing.T, bstore blockstore.Blockstore, root *amt.Root, dest interface{}) { - err := root.ForEach(ctx, func(_ uint64, d *typegen.Deferred) error { - var c e.Cid - if err := encoding.Decode(d.Raw, &c); err != nil { - return err - } - - b, err := bstore.Get(c.Cid) - if err != nil { - return err - } - return encoding.Decode(b.RawData(), dest) - }) - require.NoError(t, err) - -} - -type 
mockStateReader struct{} - -func (mr *mockStateReader) ChainStateTree(ctx context.Context, c cid.Cid) ([]format.Node, error) { - return nil, nil -} diff --git a/internal/pkg/chain/init.go b/internal/pkg/chain/init.go deleted file mode 100644 index 51cec7006b..0000000000 --- a/internal/pkg/chain/init.go +++ /dev/null @@ -1,53 +0,0 @@ -package chain - -import ( - "context" - "encoding/json" - - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/genesis" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" -) - -// Init initializes a DefaultSyncer in the given repo. -func Init(ctx context.Context, r repo.Repo, bs bstore.Blockstore, cst cbor.IpldStore, gen genesis.InitFunc) (*Store, error) { - // TODO the following should be wrapped in the chain.Store or a sub - // interface. - // Generate the genesis tipset. - genesis, err := gen(cst, bs) - if err != nil { - return nil, err - } - genTipSet, err := block.NewTipSet(genesis) - if err != nil { - return nil, errors.Wrap(err, "failed to generate genesis block") - } - chainStore := NewStore(r.ChainDatastore(), cst, NewStatusReporter(), genesis.Cid()) - - // Persist the genesis tipset to the repo. - genTsas := &TipSetMetadata{ - TipSet: genTipSet, - TipSetStateRoot: genesis.StateRoot.Cid, - TipSetReceipts: genesis.MessageReceipts.Cid, - } - if err = chainStore.PutTipSetMetadata(ctx, genTsas); err != nil { - return nil, errors.Wrap(err, "failed to put genesis block in chain store") - } - if err = chainStore.SetHead(ctx, genTipSet); err != nil { - return nil, errors.Wrap(err, "failed to persist genesis block in chain store") - } - // Persist the genesis cid to the repo. 
- val, err := json.Marshal(genesis.Cid()) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal genesis cid") - } - if err = r.Datastore().Put(GenesisKey, val); err != nil { - return nil, errors.Wrap(err, "failed to persist genesis cid") - } - - return chainStore, nil -} diff --git a/internal/pkg/chain/message_store.go b/internal/pkg/chain/message_store.go deleted file mode 100644 index 1531f60684..0000000000 --- a/internal/pkg/chain/message_store.go +++ /dev/null @@ -1,305 +0,0 @@ -package chain - -import ( - "context" - - "github.com/filecoin-project/go-amt-ipld/v2" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/pkg/errors" - cbg "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -// MessageProvider is an interface exposing the load methods of the -// MessageStore. -type MessageProvider interface { - LoadMessages(context.Context, cid.Cid) ([]*types.SignedMessage, []*types.UnsignedMessage, error) - LoadReceipts(context.Context, cid.Cid) ([]vm.MessageReceipt, error) - LoadTxMeta(context.Context, cid.Cid) (types.TxMeta, error) -} - -// MessageWriter is an interface exposing the write methods of the -// MessageStore. -type MessageWriter interface { - StoreMessages(ctx context.Context, secpMessages []*types.SignedMessage, blsMessages []*types.UnsignedMessage) (cid.Cid, error) - StoreReceipts(context.Context, []vm.MessageReceipt) (cid.Cid, error) - StoreTxMeta(context.Context, types.TxMeta) (cid.Cid, error) -} - -// MessageStore stores and loads collections of signed messages and receipts. 
-type MessageStore struct { - bs blockstore.Blockstore -} - -// NewMessageStore creates and returns a new store -func NewMessageStore(bs blockstore.Blockstore) *MessageStore { - return &MessageStore{bs: bs} -} - -// LoadMessages loads the signed messages in the collection with cid c from ipld -// storage. -func (ms *MessageStore) LoadMessages(ctx context.Context, metaCid cid.Cid) ([]*types.SignedMessage, []*types.UnsignedMessage, error) { - // load txmeta - meta, err := ms.LoadTxMeta(ctx, metaCid) - if err != nil { - return nil, nil, err - } - - secpCids, err := ms.loadAMTCids(ctx, meta.SecpRoot.Cid) - if err != nil { - return nil, nil, err - } - - // load secp messages from cids - secpMsgs := make([]*types.SignedMessage, len(secpCids)) - for i, c := range secpCids { - messageBlock, err := ms.bs.Get(c) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to get secp message %s", c) - } - - message := &types.SignedMessage{} - if err := encoding.Decode(messageBlock.RawData(), message); err != nil { - return nil, nil, errors.Wrapf(err, "could not decode secp message %s", c) - } - secpMsgs[i] = message - } - - blsCids, err := ms.loadAMTCids(ctx, meta.BLSRoot.Cid) - if err != nil { - return nil, nil, err - } - - // load bls messages from cids - blsMsgs := make([]*types.UnsignedMessage, len(blsCids)) - for i, c := range blsCids { - messageBlock, err := ms.bs.Get(c) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to get bls message %s", c) - } - - message := &types.UnsignedMessage{} - if err := encoding.Decode(messageBlock.RawData(), message); err != nil { - return nil, nil, errors.Wrapf(err, "could not decode bls message %s", c) - } - blsMsgs[i] = message - } - - return secpMsgs, blsMsgs, nil -} - -// StoreMessages puts the input signed messages to a collection and then writes -// this collection to ipld storage. The cid of the collection is returned. 
-func (ms *MessageStore) StoreMessages(ctx context.Context, secpMessages []*types.SignedMessage, blsMessages []*types.UnsignedMessage) (cid.Cid, error) { - var ret types.TxMeta - var err error - - // store secp messages - secpCids, err := ms.storeSignedMessages(secpMessages) - if err != nil { - return cid.Undef, errors.Wrap(err, "could not store secp messages") - } - - secpRaw, err := ms.storeAMTCids(ctx, secpCids) - if err != nil { - return cid.Undef, errors.Wrap(err, "could not store secp cids as AMT") - } - ret.SecpRoot = e.NewCid(secpRaw) - - // store bls messages - blsCids, err := ms.storeUnsignedMessages(blsMessages) - if err != nil { - return cid.Undef, errors.Wrap(err, "could not store secp cids as AMT") - } - blsRaw, err := ms.storeAMTCids(ctx, blsCids) - if err != nil { - return cid.Undef, errors.Wrap(err, "could not store bls cids as AMT") - } - ret.BLSRoot = e.NewCid(blsRaw) - - return ms.StoreTxMeta(ctx, ret) -} - -// LoadReceipts loads the signed messages in the collection with cid c from ipld -// storage and returns the slice implied by the collection -func (ms *MessageStore) LoadReceipts(ctx context.Context, c cid.Cid) ([]vm.MessageReceipt, error) { - rawReceipts, err := ms.loadAMTRaw(ctx, c) - if err != nil { - return nil, err - } - - // load receipts from cids - receipts := make([]vm.MessageReceipt, len(rawReceipts)) - for i, raw := range rawReceipts { - receipt := vm.MessageReceipt{} - if err := encoding.Decode(raw, &receipt); err != nil { - return nil, errors.Wrapf(err, "could not decode receipt %s", c) - } - receipts[i] = receipt - } - - return receipts, nil -} - -// StoreReceipts puts the input signed messages to a collection and then writes -// this collection to ipld storage. The cid of the collection is returned. 
-func (ms *MessageStore) StoreReceipts(ctx context.Context, receipts []vm.MessageReceipt) (cid.Cid, error) { - // store secp messages - rawReceipts, err := ms.storeMessageReceipts(receipts) - if err != nil { - return cid.Undef, errors.Wrap(err, "could not store secp messages") - } - - return ms.storeAMTRaw(ctx, rawReceipts) -} - -func (ms *MessageStore) loadAMTCids(ctx context.Context, c cid.Cid) ([]cid.Cid, error) { - as := cborutil.NewIpldStore(ms.bs) - a, err := amt.LoadAMT(ctx, as, c) - if err != nil { - return []cid.Cid{}, err - } - - cids := make([]cid.Cid, a.Count) - for i := uint64(0); i < a.Count; i++ { - var c cid.Cid - if err := a.Get(ctx, i, &c); err != nil { - return nil, errors.Wrapf(err, "could not retrieve %d cid from AMT", i) - } - - cids[i] = c - } - - return cids, nil -} - -func (ms *MessageStore) loadAMTRaw(ctx context.Context, c cid.Cid) ([][]byte, error) { - as := cborutil.NewIpldStore(ms.bs) - a, err := amt.LoadAMT(ctx, as, c) - if err != nil { - return nil, err - } - - raws := make([][]byte, a.Count) - for i := uint64(0); i < a.Count; i++ { - var raw cbg.Deferred - if err := a.Get(ctx, i, &raw); err != nil { - return nil, errors.Wrapf(err, "could not retrieve %d bytes from AMT", i) - } - - raws[i] = raw.Raw - } - return raws, nil -} - -// LoadTxMeta loads the secproot, blsroot data from the message store -func (ms *MessageStore) LoadTxMeta(ctx context.Context, c cid.Cid) (types.TxMeta, error) { - metaBlock, err := ms.bs.Get(c) - if err != nil { - return types.TxMeta{}, errors.Wrapf(err, "failed to get tx meta %s", c) - } - - var meta types.TxMeta - if err := encoding.Decode(metaBlock.RawData(), &meta); err != nil { - return types.TxMeta{}, errors.Wrapf(err, "could not decode tx meta %s", c) - } - return meta, nil -} - -func (ms *MessageStore) storeUnsignedMessages(messages []*types.UnsignedMessage) ([]cid.Cid, error) { - cids := make([]cid.Cid, len(messages)) - var err error - for i, msg := range messages { - cids[i], _, err = 
ms.storeBlock(msg) - if err != nil { - return nil, err - } - } - return cids, nil -} - -func (ms *MessageStore) storeSignedMessages(messages []*types.SignedMessage) ([]cid.Cid, error) { - cids := make([]cid.Cid, len(messages)) - var err error - for i, msg := range messages { - cids[i], _, err = ms.storeBlock(msg) - if err != nil { - return nil, err - } - } - return cids, nil -} - -// StoreTxMeta writes the secproot, blsroot block to the message store -func (ms *MessageStore) StoreTxMeta(ctx context.Context, meta types.TxMeta) (cid.Cid, error) { - c, _, err := ms.storeBlock(meta) - return c, err -} - -func (ms *MessageStore) storeMessageReceipts(receipts []vm.MessageReceipt) ([][]byte, error) { - rawReceipts := make([][]byte, len(receipts)) - for i, rcpt := range receipts { - _, rcptBlock, err := ms.storeBlock(rcpt) - if err != nil { - return nil, err - } - rawReceipts[i] = rcptBlock.RawData() - } - return rawReceipts, nil -} - -func (ms *MessageStore) storeBlock(data interface{}) (cid.Cid, blocks.Block, error) { - sblk, err := makeBlock(data) - if err != nil { - return cid.Undef, nil, err - } - - if err := ms.bs.Put(sblk); err != nil { - return cid.Undef, nil, err - } - - return sblk.Cid(), sblk, nil -} - -func makeBlock(obj interface{}) (blocks.Block, error) { - data, err := encoding.Encode(obj) - if err != nil { - return nil, err - } - - c, err := constants.DefaultCidBuilder.Sum(data) - if err != nil { - return nil, err - } - - return blocks.NewBlockWithCid(data, c) -} - -func (ms *MessageStore) storeAMTRaw(ctx context.Context, bs [][]byte) (cid.Cid, error) { - as := cborutil.NewIpldStore(ms.bs) - - rawMarshallers := make([]cbg.CBORMarshaler, len(bs)) - for i, raw := range bs { - rawMarshallers[i] = &cbg.Deferred{Raw: raw} - } - return amt.FromArray(ctx, as, rawMarshallers) -} - -func (ms *MessageStore) storeAMTCids(ctx context.Context, cids []cid.Cid) (cid.Cid, error) { - as := cborutil.NewIpldStore(ms.bs) - - cidMarshallers := make([]cbg.CBORMarshaler, 
len(cids)) - for i, c := range cids { - cidMarshaller := cbg.CborCid(c) - cidMarshallers[i] = &cidMarshaller - } - return amt.FromArray(ctx, as, cidMarshallers) -} diff --git a/internal/pkg/chain/message_store_test.go b/internal/pkg/chain/message_store_test.go deleted file mode 100644 index 89ff326db2..0000000000 --- a/internal/pkg/chain/message_store_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package chain_test - -import ( - "context" - "testing" - - "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -func TestMessageStoreMessagesHappy(t *testing.T) { - ctx := context.Background() - keys := types.MustGenerateKeyInfo(2, 42) - mm := vm.NewMessageMaker(t, keys) - - alice := mm.Addresses()[0] - bob := mm.Addresses()[1] - - msgs := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 0), - mm.NewSignedMessage(alice, 1), - mm.NewSignedMessage(bob, 0), - mm.NewSignedMessage(alice, 2), - mm.NewSignedMessage(alice, 3), - mm.NewSignedMessage(bob, 1), - mm.NewSignedMessage(alice, 4), - mm.NewSignedMessage(bob, 2), - } - - bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - ms := chain.NewMessageStore(bs) - msgsCid, err := ms.StoreMessages(ctx, msgs, []*types.UnsignedMessage{}) - assert.NoError(t, err) - - rtMsgs, _, err := ms.LoadMessages(ctx, msgsCid) - assert.NoError(t, err) - - assert.Equal(t, msgs, rtMsgs) -} - -func TestMessageStoreReceiptsHappy(t *testing.T) { - ctx := context.Background() - mr := vm.NewReceiptMaker() - - receipts := []vm.MessageReceipt{ - mr.NewReceipt(), - mr.NewReceipt(), - mr.NewReceipt(), - } - - bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - ms := chain.NewMessageStore(bs) - receiptCids, err := ms.StoreReceipts(ctx, receipts) - assert.NoError(t, err) - - rtReceipts, 
err := ms.LoadReceipts(ctx, receiptCids) - assert.NoError(t, err) - assert.Equal(t, receipts, rtReceipts) -} diff --git a/internal/pkg/chain/reorg.go b/internal/pkg/chain/reorg.go deleted file mode 100644 index 6fa133cacb..0000000000 --- a/internal/pkg/chain/reorg.go +++ /dev/null @@ -1,43 +0,0 @@ -package chain - -import ( - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/pkg/errors" -) - -// IsReorg determines if choosing the end of the newChain as the new head -// would cause a "reorg" given the current head is at curHead. -// A reorg occurs when the old head is not a member of the new chain AND the -// old head is not a subset of the new head. -func IsReorg(old, new, commonAncestor block.TipSet) bool { - oldSortedSet := old.Key() - newSortedSet := new.Key() - - return !(&newSortedSet).ContainsAll(oldSortedSet) && !commonAncestor.Equals(old) -} - -// ReorgDiff returns the dropped and added block heights resulting from the -// reorg given the old and new heads and their common ancestor. 
-func ReorgDiff(old, new, commonAncestor block.TipSet) (abi.ChainEpoch, abi.ChainEpoch, error) { - hOld, err := old.Height() - if err != nil { - return 0, 0, err - } - - hNew, err := new.Height() - if err != nil { - return 0, 0, err - } - - hCommon, err := commonAncestor.Height() - if err != nil { - return 0, 0, err - } - - if hCommon > hOld || hCommon > hNew { - return 0, 0, errors.New("invalid common ancestor") - } - - return hOld - hCommon, hNew - hCommon, nil -} diff --git a/internal/pkg/chain/reorg_test.go b/internal/pkg/chain/reorg_test.go deleted file mode 100644 index ea4ab2a783..0000000000 --- a/internal/pkg/chain/reorg_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package chain_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestIsReorgFork(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - - // main chain has 3 blocks past CA, fork has 1 - old, new, common := getForkOldNewCommon(ctx, t, builder, 2, 3, 1) - assert.True(t, chain.IsReorg(old, new, common)) -} -func TestIsReorgPrefix(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - // Old head is a direct ancestor of new head - old, new, common := getForkOldNewCommon(ctx, t, builder, 2, 3, 0) - assert.False(t, chain.IsReorg(old, new, common)) -} - -func TestIsReorgSubset(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - old, new, common := getSubsetOldNewCommon(ctx, t, builder, 2) - assert.False(t, chain.IsReorg(old, new, common)) -} - -func TestReorgDiffFork(t *testing.T) { - 
tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - // main chain has 11 blocks past CA, fork has 10 - old, new, common := getForkOldNewCommon(ctx, t, builder, 10, 11, 10) - - dropped, added, err := chain.ReorgDiff(old, new, common) - assert.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(10), dropped) - assert.Equal(t, abi.ChainEpoch(11), added) -} - -func TestReorgDiffSubset(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - old, new, common := getSubsetOldNewCommon(ctx, t, builder, 10) - - dropped, added, err := chain.ReorgDiff(old, new, common) - assert.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(1), dropped) - assert.Equal(t, abi.ChainEpoch(1), added) -} - -// getForkOldNewCommon is a testing helper function that creates chain with the builder. -// The blockchain forks and the common ancestor block is 'a' (> 0) blocks after the genesis block. -// The main chain has an additional 'b' blocks, the fork has an additional 'c' blocks. -// This function returns the forked head, the main head and the common ancestor. -func getForkOldNewCommon(ctx context.Context, t *testing.T, builder *chain.Builder, a, b, c int) (block.TipSet, block.TipSet, block.TipSet) { - // Add "a" tipsets to the head of the chainStore. - commonHead := builder.AppendManyOn(a, block.UndefTipSet) - oldHead := commonHead - - if c > 0 { - oldHead = builder.AppendManyOn(c, commonHead) - } - newHead := builder.AppendManyOn(b, commonHead) - return oldHead, newHead, commonHead -} - -// getSubsetOldNewCommon is a testing helper function that creates and stores -// a blockchain in the chainStore. The blockchain has 'a' blocks after genesis -// and then a fork. The forked head has a single block and the main chain -// consists of this single block and another block together forming a tipset -// that is a superset of the forked head. 
-func getSubsetOldNewCommon(ctx context.Context, t *testing.T, builder *chain.Builder, a int) (block.TipSet, block.TipSet, block.TipSet) { - commonHead := builder.AppendManyBlocksOnBlocks(a) - block1 := builder.AppendBlockOnBlocks(commonHead) - block2 := builder.AppendBlockOnBlocks(commonHead) - - oldHead := block.RequireNewTipSet(t, block1) - superset := block.RequireNewTipSet(t, block1, block2) - return oldHead, superset, block.RequireNewTipSet(t, commonHead) -} diff --git a/internal/pkg/chain/sampler.go b/internal/pkg/chain/sampler.go deleted file mode 100644 index 4e657ec241..0000000000 --- a/internal/pkg/chain/sampler.go +++ /dev/null @@ -1,79 +0,0 @@ -package chain - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" -) - -// Creates a new sampler for the chain identified by `head`. -func NewRandomnessSamplerAtHead(reader TipSetProvider, genesisTicket block.Ticket, head block.TipSetKey) *RandomnessSamplerAtHead { - return &RandomnessSamplerAtHead{ - sampler: NewSampler(reader, genesisTicket), - head: head, - } -} - -// A sampler draws randomness seeds from the chain. -// -// This implementation doesn't do any caching: it traverses the chain each time. A cache that could be directly -// indexed by epoch could speed up repeated samples from the same chain. -type Sampler struct { - reader TipSetProvider - genesisTicket block.Ticket -} - -func NewSampler(reader TipSetProvider, genesisTicket block.Ticket) *Sampler { - return &Sampler{reader, genesisTicket} -} - -// Draws a ticket from the chain identified by `head` and the highest tipset with height <= `epoch`. -// If `head` is empty (as when processing the pre-genesis state or the genesis block), the seed derived from -// a fixed genesis ticket. 
-// Note that this may produce the same value for different, neighbouring epochs when the epoch references a round -// in which no blocks were produced (an empty tipset or "null block"). A caller desiring a unique see for each epoch -// should blend in some distinguishing value (such as the epoch itself) into a hash of this ticket. -func (s *Sampler) SampleTicket(ctx context.Context, head block.TipSetKey, epoch abi.ChainEpoch) (block.Ticket, error) { - var ticket block.Ticket - if !head.Empty() { - start, err := s.reader.GetTipSet(head) - if err != nil { - return block.Ticket{}, err - } - // Note: it is not an error to have epoch > start.Height(); in the case of a run of null blocks the - // sought-after height may be after the base (last non-empty) tipset. - // It's also not an error for the requested epoch to be negative. - - tip, err := FindTipsetAtEpoch(ctx, start, epoch, s.reader) - if err != nil { - return block.Ticket{}, err - } - ticket, err = tip.MinTicket() - if err != nil { - return block.Ticket{}, err - } - } else { - // Sampling for the genesis state or genesis tipset. - ticket = s.genesisTicket - } - - return ticket, nil -} - -///// A chain sampler with a specific head tipset key. 
///// - -type RandomnessSamplerAtHead struct { - sampler *Sampler - head block.TipSetKey -} - -func (s *RandomnessSamplerAtHead) Sample(ctx context.Context, epoch abi.ChainEpoch) (crypto.RandomSeed, error) { - ticket, err := s.sampler.SampleTicket(ctx, s.head, epoch) - if err != nil { - return nil, err - } - return crypto.MakeRandomSeed(ticket.VRFProof) -} diff --git a/internal/pkg/chain/sampler_test.go b/internal/pkg/chain/sampler_test.go deleted file mode 100644 index 6953647384..0000000000 --- a/internal/pkg/chain/sampler_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package chain_test - -import ( - "context" - "strconv" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestSamplingChainRandomness(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - genesisTicket := block.Ticket{VRFProof: []byte{1, 2, 3, 4}} - - makeSample := func(sampleEpoch int) block.Ticket { - vrfProof := genesisTicket.VRFProof - if sampleEpoch >= 0 { - vrfProof = []byte(strconv.Itoa(sampleEpoch)) - } - return block.Ticket{ - VRFProof: vrfProof, - } - } - - t.Run("happy path", func(t *testing.T) { - builder, ch := makeChain(t, 21) - head := ch[0].Key() - sampler := chain.NewSampler(builder, genesisTicket) - - r, err := sampler.SampleTicket(ctx, head, abi.ChainEpoch(20)) - assert.NoError(t, err) - assert.Equal(t, makeSample(20), r) - - r, err = sampler.SampleTicket(ctx, head, abi.ChainEpoch(3)) - assert.NoError(t, err) - assert.Equal(t, makeSample(3), r) - - r, err = sampler.SampleTicket(ctx, head, abi.ChainEpoch(0)) - assert.NoError(t, err) - assert.Equal(t, makeSample(0), r) - }) - - t.Run("skips missing tipsets", func(t *testing.T) { - builder, ch := makeChain(t, 
21) - head := ch[0].Key() - sampler := chain.NewSampler(builder, genesisTicket) - - // Sample height after the head falls back to the head. - headParent := ch[1].Key() - r, err := sampler.SampleTicket(ctx, headParent, abi.ChainEpoch(20)) - assert.NoError(t, err) - assert.Equal(t, makeSample(19), r) - - // Another way of the same thing, sample > head. - r, err = sampler.SampleTicket(ctx, head, abi.ChainEpoch(21)) - assert.NoError(t, err) - assert.Equal(t, makeSample(20), r) - - // Add new head so as to produce null blocks between 20 and 25 - // i.e.: 25 20 19 18 ... 0 - headAfterNulls := builder.BuildOneOn(ch[0], func(b *chain.BlockBuilder) { - b.IncHeight(4) - b.SetTicket([]byte(strconv.Itoa(25))) - }) - - // Sampling in the nulls falls back to the last non-null - r, err = sampler.SampleTicket(ctx, headAfterNulls.Key(), abi.ChainEpoch(24)) - assert.NoError(t, err) - assert.Equal(t, makeSample(20), r) - }) - - t.Run("genesis", func(t *testing.T) { - builder, ch := makeChain(t, 6) - head := ch[0].Key() - gen := (ch[len(ch)-1]).Key() - sampler := chain.NewSampler(builder, genesisTicket) - - // Sample genesis from longer chain. - r, err := sampler.SampleTicket(ctx, head, abi.ChainEpoch(0)) - assert.NoError(t, err) - assert.Equal(t, makeSample(0), r) - - // Sample before genesis from longer chain. - r, err = sampler.SampleTicket(ctx, head, abi.ChainEpoch(-1)) - assert.NoError(t, err) - assert.Equal(t, makeSample(0), r) - - // Sample genesis from genesis-only chain. - r, err = sampler.SampleTicket(ctx, gen, abi.ChainEpoch(0)) - assert.NoError(t, err) - assert.Equal(t, makeSample(0), r) - - // Sample before genesis from genesis-only chain. - r, err = sampler.SampleTicket(ctx, gen, abi.ChainEpoch(-1)) - assert.NoError(t, err) - assert.Equal(t, makeSample(0), r) - - // Sample empty chain. 
- r, err = sampler.SampleTicket(ctx, block.NewTipSetKey(), abi.ChainEpoch(0)) - assert.NoError(t, err) - assert.Equal(t, makeSample(-1), r) - r, err = sampler.SampleTicket(ctx, block.NewTipSetKey(), abi.ChainEpoch(-1)) - assert.NoError(t, err) - assert.Equal(t, makeSample(-1), r) - }) -} - -// Builds a chain of single-block tips, returned in descending height order. -// Each block's ticket is its stringified height (as bytes). -func makeChain(t *testing.T, length int) (*chain.Builder, []block.TipSet) { - b := chain.NewBuilder(t, address.Undef) - height := 0 - head := b.BuildManyOn(length, block.UndefTipSet, func(b *chain.BlockBuilder) { - b.SetTicket([]byte(strconv.Itoa(height))) - height++ - }) - return b, b.RequireTipSets(head.Key(), length) -} diff --git a/internal/pkg/chain/status.go b/internal/pkg/chain/status.go deleted file mode 100644 index 4a18a76881..0000000000 --- a/internal/pkg/chain/status.go +++ /dev/null @@ -1,169 +0,0 @@ -package chain - -import ( - "fmt" - "sync" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/specs-actors/actors/abi" - logging "github.com/ipfs/go-log/v2" -) - -var logChainStatus = logging.Logger("chain/status") - -// Reporter defines an interface to updating and reporting the status of the blockchain. -type Reporter interface { - UpdateStatus(...StatusUpdates) - Status() Status -} - -// StatusReporter implements the Reporter interface. -type StatusReporter struct { - statusMu sync.Mutex - status *Status -} - -// UpdateStatus updates the status heald by StatusReporter. -func (sr *StatusReporter) UpdateStatus(update ...StatusUpdates) { - sr.statusMu.Lock() - defer sr.statusMu.Unlock() - for _, u := range update { - u(sr.status) - } - logChainStatus.Debugf("syncing status: %s", sr.status.String()) -} - -// Status returns a copy of the current status. -func (sr *StatusReporter) Status() Status { - return *sr.status -} - -// NewStatusReporter initializes a new StatusReporter. 
-func NewStatusReporter() *StatusReporter { - return &StatusReporter{ - status: newDefaultChainStatus(), - } -} - -// Status defines a structure used to represent the state of a chain store and syncer. -type Status struct { - // The heaviest TipSet that has been fully validated. - ValidatedHead block.TipSetKey - // The height of ValidatedHead. - ValidatedHeadHeight abi.ChainEpoch - - // They head of the chain currently being fetched/validated, or undef if none. - SyncingHead block.TipSetKey - // The height of SyncingHead. - SyncingHeight uint64 - // Whether SyncingTip is trusted as a head far away from the validated head. - SyncingTrusted bool - // Unix time at which syncing of chain at SyncingHead began, zero if valdation hasn't started. - SyncingStarted int64 - // Whether SyncingHead has been validated. - SyncingComplete bool - // Whether SyncingHead has been fetched. - SyncingFetchComplete bool - - // The key of the tipset currently being fetched - FetchingHead block.TipSetKey - // The height of FetchingHead - FetchingHeight uint64 -} - -// NewDefaultChainStatus returns a ChainStaus with the default empty values. 
-func newDefaultChainStatus() *Status { - return &Status{ - ValidatedHead: block.UndefTipSet.Key(), - ValidatedHeadHeight: 0, - SyncingHead: block.UndefTipSet.Key(), - SyncingHeight: 0, - SyncingTrusted: false, - SyncingStarted: 0, - SyncingComplete: true, - SyncingFetchComplete: true, - FetchingHead: block.UndefTipSet.Key(), - FetchingHeight: 0, - } -} - -// String returns the Status as a string -func (s Status) String() string { - return fmt.Sprintf("validatedHead=%s, validatedHeight=%d, syncingStarted=%d, syncingHead=%s, syncingHeight=%d, syncingTrusted=%t, syncingComplete=%t syncingFetchComplete=%t fetchingHead=%s, fetchingHeight=%d", - s.ValidatedHead, s.ValidatedHeadHeight, s.SyncingStarted, - s.SyncingHead, s.SyncingHeight, s.SyncingTrusted, s.SyncingComplete, s.SyncingFetchComplete, - s.FetchingHead, s.FetchingHeight) -} - -// StatusUpdates defines a type for ipdating syncer status. -type StatusUpdates func(*Status) - -// -// Validation Updates -// -func validateHead(u block.TipSetKey) StatusUpdates { - return func(s *Status) { - s.ValidatedHead = u - } -} - -func validateHeight(u abi.ChainEpoch) StatusUpdates { - return func(s *Status) { - s.ValidatedHeadHeight = u - } -} - -// -// Syncing Updates -// - -func syncHead(u block.TipSetKey) StatusUpdates { - return func(s *Status) { - s.SyncingHead = u - } -} - -func syncHeight(u uint64) StatusUpdates { - return func(s *Status) { - s.SyncingHeight = u - } -} - -func syncTrusted(u bool) StatusUpdates { - return func(s *Status) { - s.SyncingTrusted = u - } -} - -func syncingStarted(u int64) StatusUpdates { - return func(s *Status) { - s.SyncingStarted = u - } -} - -func syncComplete(u bool) StatusUpdates { - return func(s *Status) { - s.SyncingComplete = u - } -} - -func syncFetchComplete(u bool) StatusUpdates { - return func(s *Status) { - s.SyncingFetchComplete = u - } -} - -// -// Fetching Updates -// - -func fetchHead(u block.TipSetKey) StatusUpdates { - return func(s *Status) { - s.FetchingHead = u - } -} 
-func fetchHeight(u uint64) StatusUpdates { - return func(s *Status) { - s.FetchingHeight = u - } -} diff --git a/internal/pkg/chain/status_test.go b/internal/pkg/chain/status_test.go deleted file mode 100644 index 45aaca126a..0000000000 --- a/internal/pkg/chain/status_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package chain - -import ( - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/stretchr/testify/assert" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestStatus(t *testing.T) { - tf.UnitTest(t) - - sr := NewStatusReporter() - assert.Equal(t, *newDefaultChainStatus(), sr.Status()) - assert.Equal(t, newDefaultChainStatus().String(), sr.Status().String()) - - // single update - cidFn := types.NewCidForTestGetter() - t0 := block.NewTipSetKey(cidFn()) - sr.UpdateStatus(validateHead(t0)) - assert.Equal(t, t0, sr.Status().ValidatedHead) - - // multi update - t1 := block.NewTipSetKey(cidFn()) - t2 := block.NewTipSetKey(cidFn()) - t3 := block.NewTipSetKey(cidFn()) - expStatus := Status{ - ValidatedHead: t1, - ValidatedHeadHeight: 1, - SyncingHead: t2, - SyncingHeight: 456, - SyncingTrusted: true, - SyncingStarted: 123, - SyncingComplete: false, - SyncingFetchComplete: true, - FetchingHead: t3, - FetchingHeight: 789, - } - sr.UpdateStatus(validateHead(t1), validateHeight(1), syncingStarted(123), syncHead(t2), - syncHeight(456), syncTrusted(true), syncComplete(false), syncFetchComplete(true), - fetchHead(t3), fetchHeight(789)) - assert.Equal(t, expStatus, sr.Status()) -} diff --git a/internal/pkg/chain/store.go b/internal/pkg/chain/store.go deleted file mode 100644 index 8941a4fd77..0000000000 --- a/internal/pkg/chain/store.go +++ /dev/null @@ -1,428 +0,0 @@ -package chain - -import ( - "context" - "runtime/debug" - "sync" - - "github.com/cskr/pubsub" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - "github.com/pkg/errors" - "go.opencensus.io/trace" - - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics/tracing" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -// NewHeadTopic is the topic used to publish new heads. -const NewHeadTopic = "new-head" - -// GenesisKey is the key at which the genesis Cid is written in the datastore. -var GenesisKey = datastore.NewKey("/consensus/genesisCid") - -var logStore = logging.Logger("chain.store") - -// HeadKey is the key at which the head tipset cid's are written in the datastore. -var HeadKey = datastore.NewKey("/chain/heaviestTipSet") - -type ipldSource struct { - // cst is a store allowing access - // (un)marshalling and interop with go-ipld-hamt. - cborStore cbor.IpldStore -} - -type tsState struct { - StateRoot e.Cid - Reciepts e.Cid -} - -func newSource(cst cbor.IpldStore) *ipldSource { - return &ipldSource{ - cborStore: cst, - } -} - -// GetBlock retrieves a filecoin block by cid from the IPLD store. -func (source *ipldSource) GetBlock(ctx context.Context, c cid.Cid) (*block.Block, error) { - var block block.Block - - err := source.cborStore.Get(ctx, c, &block) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block %s", c.String()) - } - return &block, nil -} - -// Store is a generic implementation of the Store interface. -// It works(tm) for now. -type Store struct { - // ipldSource is a wrapper around ipld storage. It is used - // for reading filecoin block and state objects kept by the node. 
- stateAndBlockSource *ipldSource - - // ds is the datastore for the chain's private metadata which consists - // of the tipset key to state root cid mapping, and the heaviest tipset - // key. - ds repo.Datastore - - // genesis is the CID of the genesis block. - genesis cid.Cid - // head is the tipset at the head of the best known chain. - head block.TipSet - // Protects head and genesisCid. - mu sync.RWMutex - - // headEvents is a pubsub channel that publishes an event every time the head changes. - // We operate under the assumption that tipsets published to this channel - // will always be queued and delivered to subscribers in the order discovered. - // Successive published tipsets may be supersets of previously published tipsets. - // TODO: rename to notifications. Also, reconsider ordering assumption depending - // on decisions made around the FC node notification system. - // TODO: replace this with a synchronous event bus - // https://github.com/filecoin-project/go-filecoin/issues/2309 - headEvents *pubsub.PubSub - - // Tracks tipsets by height/parentset for use by expected consensus. - tipIndex *TipIndex - - // Reporter is used by the store to update the current status of the chain. - reporter Reporter -} - -// NewStore constructs a new default store. -func NewStore(ds repo.Datastore, cst cbor.IpldStore, sr Reporter, genesisCid cid.Cid) *Store { - return &Store{ - stateAndBlockSource: newSource(cst), - ds: ds, - headEvents: pubsub.New(12), - tipIndex: NewTipIndex(), - genesis: genesisCid, - reporter: sr, - } -} - -// Load rebuilds the Store's caches by traversing backwards from the -// most recent best head as stored in its datastore. Because Load uses a -// content addressed datastore it guarantees that parent blocks are correctly -// resolved from the datastore. Furthermore Load ensures that all tipsets -// references correctly have the same parent height, weight and parent set. 
-// However, Load DOES NOT validate state transitions, it assumes that the -// tipset were only Put to the Store after checking for valid transitions. -// -// Furthermore Load trusts that the Store's backing datastore correctly -// preserves the cids of the heaviest tipset under the "HeadKey" datastore key. -// If the HeadKey cids are tampered with and invalid blocks added to the datastore -// then Load could be tricked into loading an invalid chain. Load will error if the -// head does not link back to the expected genesis block, or the Store's -// datastore does not store a link in the chain. In case of error the caller -// should not consider the chain useable and propagate the error. -func (store *Store) Load(ctx context.Context) (err error) { - ctx, span := trace.StartSpan(ctx, "Store.Load") - defer tracing.AddErrorEndSpan(ctx, span, &err) - - // Clear the tipset index. - store.tipIndex = NewTipIndex() - - headTsKey, err := store.loadHead() - if err != nil { - return err - } - - headTs, err := LoadTipSetBlocks(ctx, store.stateAndBlockSource, headTsKey) - if err != nil { - return errors.Wrap(err, "error loading head tipset") - } - startHeight := headTs.At(0).Height - logStore.Infof("start loading chain at tipset: %s, height: %d", headTsKey.String(), startHeight) - // Ensure we only produce 10 log messages regardless of the chain height. - logStatusEvery := startHeight / 10 - - var genesii block.TipSet - // Provide tipsets directly from the block store, not from the tipset index which is - // being rebuilt by this traversal. 
- tipsetProvider := TipSetProviderFromBlocks(ctx, store.stateAndBlockSource) - for iterator := IterAncestors(ctx, tipsetProvider, headTs); !iterator.Complete(); err = iterator.Next() { - if err != nil { - return err - } - - height, err := iterator.Value().Height() - if err != nil { - return err - } - if logStatusEvery != 0 && (height%logStatusEvery) == 0 { - logStore.Infof("load tipset: %s, height: %v", iterator.Value().String(), height) - } - stateRoot, receipts, err := store.loadStateRootAndReceipts(iterator.Value()) - if err != nil { - return err - } - err = store.PutTipSetMetadata(ctx, &TipSetMetadata{ - TipSet: iterator.Value(), - TipSetStateRoot: stateRoot, - TipSetReceipts: receipts, - }) - if err != nil { - return err - } - - genesii = iterator.Value() - } - // Check genesis here. - if genesii.Len() != 1 { - return errors.Errorf("load terminated with tipset of %d blocks, expected genesis with exactly 1", genesii.Len()) - } - - loadCid := genesii.At(0).Cid() - if !loadCid.Equals(store.genesis) { - return errors.Errorf("expected genesis cid: %s, loaded genesis cid: %s", store.genesis, loadCid) - } - - logStore.Infof("finished loading %d tipsets from %s", startHeight, headTs.String()) - // Set actual head. - return store.SetHead(ctx, headTs) -} - -// loadHead loads the latest known head from disk. 
-func (store *Store) loadHead() (block.TipSetKey, error) { - var emptyCidSet block.TipSetKey - bb, err := store.ds.Get(HeadKey) - if err != nil { - return emptyCidSet, errors.Wrap(err, "failed to read HeadKey") - } - - var cids block.TipSetKey - err = encoding.Decode(bb, &cids) - if err != nil { - return emptyCidSet, errors.Wrap(err, "failed to cast headCids") - } - - return cids, nil -} - -func (store *Store) loadStateRootAndReceipts(ts block.TipSet) (cid.Cid, cid.Cid, error) { - h, err := ts.Height() - if err != nil { - return cid.Undef, cid.Undef, err - } - key := datastore.NewKey(makeKey(ts.String(), h)) - bb, err := store.ds.Get(key) - if err != nil { - return cid.Undef, cid.Undef, errors.Wrapf(err, "failed to read tipset key %s", ts.String()) - } - - var metadata tsState - err = encoding.Decode(bb, &metadata) - if err != nil { - return cid.Undef, cid.Undef, errors.Wrapf(err, "failed to decode tip set metadata %s", ts.String()) - } - - return metadata.StateRoot.Cid, metadata.Reciepts.Cid, nil -} - -// PutTipSetMetadata persists the blocks of a tipset and the tipset index. -func (store *Store) PutTipSetMetadata(ctx context.Context, tsm *TipSetMetadata) error { - // Update tipindex. - err := store.tipIndex.Put(tsm) - if err != nil { - return err - } - // Persist the state mapping. - if err = store.writeTipSetMetadata(tsm); err != nil { - return err - } - - return nil -} - -// GetTipSet returns the tipset identified by `key`. -func (store *Store) GetTipSet(key block.TipSetKey) (block.TipSet, error) { - return store.tipIndex.GetTipSet(key) -} - -// GetTipSetState returns the aggregate state of the tipset identified by `key`. 
-func (store *Store) GetTipSetState(ctx context.Context, key block.TipSetKey) (state.Tree, error) { - stateCid, err := store.tipIndex.GetTipSetStateRoot(key) - if err != nil { - return nil, err - } - return state.LoadState(ctx, store.stateAndBlockSource.cborStore, stateCid) -} - -// GetGenesisState returns the state tree at genesis to retrieve initialization parameters. -func (store *Store) GetGenesisState(ctx context.Context) (state.Tree, error) { - // retrieve genesis block - genesis, err := store.stateAndBlockSource.GetBlock(ctx, store.GenesisCid()) - if err != nil { - return nil, err - } - - // create state tree - return state.LoadState(ctx, store.stateAndBlockSource.cborStore, genesis.StateRoot.Cid) -} - -// GetGenesisBlock returns the genesis block held by the chain store. -func (store *Store) GetGenesisBlock(ctx context.Context) (*block.Block, error) { - return store.stateAndBlockSource.GetBlock(ctx, store.GenesisCid()) -} - -// GetTipSetStateRoot returns the aggregate state root CID of the tipset identified by `key`. -func (store *Store) GetTipSetStateRoot(key block.TipSetKey) (cid.Cid, error) { - return store.tipIndex.GetTipSetStateRoot(key) -} - -// GetTipSetReceiptsRoot returns the root CID of the message receipts for the tipset identified by `key`. -func (store *Store) GetTipSetReceiptsRoot(key block.TipSetKey) (cid.Cid, error) { - return store.tipIndex.GetTipSetReceiptsRoot(key) -} - -// HasTipSetAndState returns true iff the default store's tipindex is indexing -// the tipset identified by `key`. -func (store *Store) HasTipSetAndState(ctx context.Context, key block.TipSetKey) bool { - return store.tipIndex.Has(key) -} - -// GetTipSetAndStatesByParentsAndHeight returns the the tipsets and states tracked by -// the default store's tipIndex that have parents identified by `parentKey`. 
-func (store *Store) GetTipSetAndStatesByParentsAndHeight(parentKey block.TipSetKey, h abi.ChainEpoch) ([]*TipSetMetadata, error) { - return store.tipIndex.GetByParentsAndHeight(parentKey, h) -} - -// HasTipSetAndStatesWithParentsAndHeight returns true if the default store's tipindex -// contains any tipset identified by `parentKey`. -func (store *Store) HasTipSetAndStatesWithParentsAndHeight(parentKey block.TipSetKey, h abi.ChainEpoch) bool { - return store.tipIndex.HasByParentsAndHeight(parentKey, h) -} - -// HeadEvents returns a pubsub interface the pushes events each time the -// default store's head is reset. -func (store *Store) HeadEvents() *pubsub.PubSub { - return store.headEvents -} - -// SetHead sets the passed in tipset as the new head of this chain. -func (store *Store) SetHead(ctx context.Context, ts block.TipSet) error { - logStore.Debugf("SetHead %s", ts.String()) - - // Add logging to debug sporadic test failure. - if !ts.Defined() { - logStore.Errorf("publishing empty tipset") - logStore.Error(debug.Stack()) - } - - noop, err := store.setHeadPersistent(ctx, ts) - if err != nil { - return err - } - if noop { - // exit without sending head events if head was already set to ts - return nil - } - - h, err := ts.Height() - if err != nil { - return err - } - store.reporter.UpdateStatus(validateHead(ts.Key()), validateHeight(h)) - // Publish an event that we have a new head. - store.HeadEvents().Pub(ts, NewHeadTopic) - - return nil -} - -// ReadOnlyStateStore provides a read-only IPLD store for access to chain state. -func (store *Store) ReadOnlyStateStore() cborutil.ReadOnlyIpldStore { - return cborutil.ReadOnlyIpldStore{IpldStore: store.stateAndBlockSource.cborStore} -} - -func (store *Store) setHeadPersistent(ctx context.Context, ts block.TipSet) (bool, error) { - // setHeaadPersistent sets the head in memory and on disk if the head is not - // already set to ts. 
If it is already set to ts it skips this and returns true - store.mu.Lock() - defer store.mu.Unlock() - - // Ensure consistency by storing this new head on disk. - if errInner := store.writeHead(ctx, ts.Key()); errInner != nil { - return false, errors.Wrap(errInner, "failed to write new Head to datastore") - } - if ts.Equals(store.head) { - return true, nil - } - - store.head = ts - - return false, nil -} - -// writeHead writes the given cid set as head to disk. -func (store *Store) writeHead(ctx context.Context, cids block.TipSetKey) error { - logStore.Debugf("WriteHead %s", cids.String()) - val, err := encoding.Encode(cids) - if err != nil { - return err - } - - return store.ds.Put(HeadKey, val) -} - -// writeTipSetMetadata writes the tipset key and the state root id to the -// datastore. -func (store *Store) writeTipSetMetadata(tsm *TipSetMetadata) error { - if tsm.TipSetStateRoot == cid.Undef { - return errors.New("attempting to write state root cid.Undef") - } - - if tsm.TipSetReceipts == cid.Undef { - return errors.New("attempting to write receipts cid.Undef") - } - - metadata := tsState{ - StateRoot: e.NewCid(tsm.TipSetStateRoot), - Reciepts: e.NewCid(tsm.TipSetReceipts), - } - val, err := encoding.Encode(metadata) - if err != nil { - return err - } - - // datastore keeps key:stateRoot (k,v) pairs. - h, err := tsm.TipSet.Height() - if err != nil { - return err - } - key := datastore.NewKey(makeKey(tsm.TipSet.String(), h)) - return store.ds.Put(key, val) -} - -// GetHead returns the current head tipset cids. -func (store *Store) GetHead() block.TipSetKey { - store.mu.RLock() - defer store.mu.RUnlock() - - if !store.head.Defined() { - return block.TipSetKey{} - } - - return store.head.Key() -} - -// GenesisCid returns the genesis cid of the chain tracked by the default store. -func (store *Store) GenesisCid() cid.Cid { - store.mu.Lock() - defer store.mu.Unlock() - return store.genesis -} - -// Stop stops all activities and cleans up. 
-func (store *Store) Stop() { - store.headEvents.Shutdown() -} diff --git a/internal/pkg/chain/store_test.go b/internal/pkg/chain/store_test.go deleted file mode 100644 index 114d2b5122..0000000000 --- a/internal/pkg/chain/store_test.go +++ /dev/null @@ -1,436 +0,0 @@ -package chain_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -// Default Chain diagram below. Note that blocks in the same tipset are in parentheses. -// -// genesis -> (link1blk1, link1blk2) -> (link2blk1, link2blk2, link2blk3) -> link3blk1 -> (null block) -> (null block) -> (link4blk1, link4blk2) - -// newChainStore creates a new chain store for tests. -func newChainStore(r repo.Repo, genCid cid.Cid) *chain.Store { - return chain.NewStore(r.Datastore(), cbor.NewMemCborStore(), chain.NewStatusReporter(), genCid) -} - -// requirePutTestChain puts the count tipsets preceding head in the source to -// the input chain store. 
-func requirePutTestChain(ctx context.Context, t *testing.T, chainStore *chain.Store, head block.TipSetKey, source *chain.Builder, count int) { - tss := source.RequireTipSets(head, count) - for _, ts := range tss { - tsas := &chain.TipSetMetadata{ - TipSet: ts, - TipSetStateRoot: ts.At(0).StateRoot.Cid, - TipSetReceipts: types.EmptyReceiptsCID, - } - require.NoError(t, chainStore.PutTipSetMetadata(ctx, tsas)) - } -} - -func requireGetTsasByParentAndHeight(t *testing.T, chain *chain.Store, pKey block.TipSetKey, h abi.ChainEpoch) []*chain.TipSetMetadata { - tsasSlice, err := chain.GetTipSetAndStatesByParentsAndHeight(pKey, h) - require.NoError(t, err) - return tsasSlice -} - -type HeadAndTipsetGetter interface { - GetHead() block.TipSetKey - GetTipSet(block.TipSetKey) (block.TipSet, error) -} - -func requirePutBlocksToCborStore(t *testing.T, cst cbor.IpldStore, blocks ...*block.Block) { - for _, block := range blocks { - _, err := cst.Put(context.Background(), block) - require.NoError(t, err) - } -} - -/* Putting and getting tipsets and states. */ - -// Adding tipsets to the store doesn't error. -func TestPutTipSet(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - r := repo.NewInMemoryRepo() - cs := newChainStore(r, genTS.At(0).Cid()) - - genTsas := &chain.TipSetMetadata{ - TipSet: genTS, - TipSetStateRoot: genTS.At(0).StateRoot.Cid, - TipSetReceipts: types.EmptyReceiptsCID, - } - err := cs.PutTipSetMetadata(ctx, genTsas) - assert.NoError(t, err) -} - -// Tipsets can be retrieved by key (all block cids). 
-func TestGetByKey(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - r := repo.NewInMemoryRepo() - cs := newChainStore(r, genTS.At(0).Cid()) - - // Construct test chain data - link1 := builder.AppendOn(genTS, 2) - link2 := builder.AppendOn(link1, 3) - link3 := builder.AppendOn(link2, 1) - link4 := builder.BuildOn(link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) - - // Put the test chain to the store - requirePutTestChain(ctx, t, cs, link4.Key(), builder, 5) - - // Check that we can get all tipsets by key - gotGTS := requireGetTipSet(ctx, t, cs, genTS.Key()) - gotGTSSR := requireGetTipSetStateRoot(ctx, t, cs, genTS.Key()) - - got1TS := requireGetTipSet(ctx, t, cs, link1.Key()) - got1TSSR := requireGetTipSetStateRoot(ctx, t, cs, link1.Key()) - - got2TS := requireGetTipSet(ctx, t, cs, link2.Key()) - got2TSSR := requireGetTipSetStateRoot(ctx, t, cs, link2.Key()) - - got3TS := requireGetTipSet(ctx, t, cs, link3.Key()) - got3TSSR := requireGetTipSetStateRoot(ctx, t, cs, link3.Key()) - - got4TS := requireGetTipSet(ctx, t, cs, link4.Key()) - got4TSSR := requireGetTipSetStateRoot(ctx, t, cs, link4.Key()) - assert.Equal(t, genTS, gotGTS) - assert.Equal(t, link1, got1TS) - assert.Equal(t, link2, got2TS) - assert.Equal(t, link3, got3TS) - assert.Equal(t, link4, got4TS) - - assert.Equal(t, genTS.At(0).StateRoot.Cid, gotGTSSR) - assert.Equal(t, link1.At(0).StateRoot.Cid, got1TSSR) - assert.Equal(t, link2.At(0).StateRoot.Cid, got2TSSR) - assert.Equal(t, link3.At(0).StateRoot.Cid, got3TSSR) - assert.Equal(t, link4.At(0).StateRoot.Cid, got4TSSR) -} - -// Tipset state is loaded correctly -func TestGetTipSetState(t *testing.T) { - ctx := context.Background() - ds := repo.NewInMemoryRepo().ChainDatastore() - bs := bstore.NewBlockstore(ds) - cst := cborutil.NewIpldStore(bs) - - // setup testing state - fakeCode := types.CidFromString(t, "somecid") - balance := 
abi.NewTokenAmount(1000000) - testActor := actor.NewActor(fakeCode, balance, cid.Undef) - addr := vmaddr.NewForTestGetter()() - st1 := state.NewState(cst) - require.NoError(t, st1.SetActor(ctx, addr, testActor)) - root, err := st1.Commit(ctx) - require.NoError(t, err) - - // link testing state to test block - builder := chain.NewBuilder(t, address.Undef) - gen := builder.NewGenesis() - testTs := builder.BuildOneOn(gen, func(b *chain.BlockBuilder) { - b.SetStateRoot(root) - }) - - // setup chain store - store := chain.NewStore(ds, cst, chain.NewStatusReporter(), gen.At(0).Cid()) - - // add tipset and state to chain store - require.NoError(t, store.PutTipSetMetadata(ctx, &chain.TipSetMetadata{ - TipSet: testTs, - TipSetStateRoot: root, - TipSetReceipts: types.EmptyReceiptsCID, - })) - - // verify output of GetTipSetState - st2, err := store.GetTipSetState(ctx, testTs.Key()) - assert.NoError(t, err) - for actRes := range st2.GetAllActors(ctx) { - assert.NoError(t, actRes.Error) - assert.Equal(t, addr, actRes.Key) - assert.Equal(t, fakeCode, actRes.Actor.Code.Cid) - assert.Equal(t, testActor.Head, actRes.Actor.Head) - assert.Equal(t, uint64(0), actRes.Actor.CallSeqNum) - assert.Equal(t, balance, actRes.Actor.Balance) - } -} - -// Tipsets can be retrieved by parent key (all block cids of parents). 
-func TestGetByParent(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - r := repo.NewInMemoryRepo() - cs := newChainStore(r, genTS.At(0).Cid()) - - // Construct test chain data - link1 := builder.AppendOn(genTS, 2) - link2 := builder.AppendOn(link1, 3) - link3 := builder.AppendOn(link2, 1) - link4 := builder.BuildOn(link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) - - // Put the test chain to the store - requirePutTestChain(ctx, t, cs, link4.Key(), builder, 5) - - gotG := requireGetTsasByParentAndHeight(t, cs, block.TipSetKey{}, 0) - got1 := requireGetTsasByParentAndHeight(t, cs, genTS.Key(), 1) - got2 := requireGetTsasByParentAndHeight(t, cs, link1.Key(), 2) - got3 := requireGetTsasByParentAndHeight(t, cs, link2.Key(), 3) - got4 := requireGetTsasByParentAndHeight(t, cs, link3.Key(), 6) // two null blocks in between 3 and 4! - - assert.Equal(t, genTS, gotG[0].TipSet) - assert.Equal(t, link1, got1[0].TipSet) - assert.Equal(t, link2, got2[0].TipSet) - assert.Equal(t, link3, got3[0].TipSet) - assert.Equal(t, link4, got4[0].TipSet) - - assert.Equal(t, genTS.At(0).StateRoot.Cid, gotG[0].TipSetStateRoot) - assert.Equal(t, link1.At(0).StateRoot.Cid, got1[0].TipSetStateRoot) - assert.Equal(t, link2.At(0).StateRoot.Cid, got2[0].TipSetStateRoot) - assert.Equal(t, link3.At(0).StateRoot.Cid, got3[0].TipSetStateRoot) - assert.Equal(t, link4.At(0).StateRoot.Cid, got4[0].TipSetStateRoot) -} - -func TestGetMultipleByParent(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - r := repo.NewInMemoryRepo() - cs := newChainStore(r, genTS.At(0).Cid()) - - // Construct test chain data - link1 := builder.AppendOn(genTS, 2) - link2 := builder.AppendOn(link1, 3) - link3 := builder.AppendOn(link2, 1) - link4 := builder.BuildOn(link3, 2, func(bb *chain.BlockBuilder, i int) { 
bb.IncHeight(2) }) - - // Put the test chain to the store - requirePutTestChain(ctx, t, cs, link4.Key(), builder, 5) - - // Add extra children to the genesis tipset - otherLink1 := builder.AppendOn(genTS, 1) - otherRoot1 := types.CidFromString(t, "otherState") - newChildTsas := &chain.TipSetMetadata{ - TipSet: otherLink1, - TipSetStateRoot: otherRoot1, - TipSetReceipts: types.EmptyReceiptsCID, - } - require.NoError(t, cs.PutTipSetMetadata(ctx, newChildTsas)) - gotNew1 := requireGetTsasByParentAndHeight(t, cs, genTS.Key(), 1) - require.Equal(t, 2, len(gotNew1)) - for _, tsas := range gotNew1 { - if tsas.TipSet.Len() == 1 { - assert.Equal(t, otherRoot1, tsas.TipSetStateRoot) - } else { - assert.Equal(t, link1.At(0).StateRoot.Cid, tsas.TipSetStateRoot) - } - } -} - -/* Head and its State is set and notified properly. */ - -// The constructor call sets the genesis cid for the chain store. -func TestSetGenesis(t *testing.T) { - tf.UnitTest(t) - - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - r := repo.NewInMemoryRepo() - cs := newChainStore(r, genTS.At(0).Cid()) - - require.Equal(t, genTS.At(0).Cid(), cs.GenesisCid()) -} - -func assertSetHead(t *testing.T, chainStore *chain.Store, ts block.TipSet) { - ctx := context.Background() - err := chainStore.SetHead(ctx, ts) - assert.NoError(t, err) -} - -// Set and Get Head. 
-func TestHead(t *testing.T) { - tf.UnitTest(t) - - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - r := repo.NewInMemoryRepo() - sr := chain.NewStatusReporter() - cs := chain.NewStore(r.Datastore(), cbor.NewMemCborStore(), sr, genTS.At(0).Cid()) - - // Construct test chain data - link1 := builder.AppendOn(genTS, 2) - link2 := builder.AppendOn(link1, 3) - link3 := builder.AppendOn(link2, 1) - link4 := builder.BuildOn(link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) - - // Head starts as an empty cid set - assert.Equal(t, block.TipSetKey{}, cs.GetHead()) - - // Set Head - assertSetHead(t, cs, genTS) - assert.Equal(t, genTS.Key(), cs.GetHead()) - assert.Equal(t, genTS.Key(), sr.Status().ValidatedHead) - - // Move head forward - assertSetHead(t, cs, link4) - assert.Equal(t, link4.Key(), cs.GetHead()) - assert.Equal(t, link4.Key(), sr.Status().ValidatedHead) - - // Move head back - assertSetHead(t, cs, link1) - assert.Equal(t, link1.Key(), cs.GetHead()) - assert.Equal(t, link1.Key(), sr.Status().ValidatedHead) -} - -func assertEmptyCh(t *testing.T, ch <-chan interface{}) { - select { - case <-ch: - assert.True(t, false) - default: - } -} - -// Head events are propagated on HeadEvents. 
-func TestHeadEvents(t *testing.T) { - tf.UnitTest(t) - - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - r := repo.NewInMemoryRepo() - chainStore := newChainStore(r, genTS.At(0).Cid()) - - // Construct test chain data - link1 := builder.AppendOn(genTS, 2) - link2 := builder.AppendOn(link1, 3) - link3 := builder.AppendOn(link2, 1) - link4 := builder.BuildOn(link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) - ps := chainStore.HeadEvents() - chA := ps.Sub(chain.NewHeadTopic) - chB := ps.Sub(chain.NewHeadTopic) - - assertSetHead(t, chainStore, genTS) - assertSetHead(t, chainStore, link1) - assertSetHead(t, chainStore, link2) - assertSetHead(t, chainStore, link3) - assertSetHead(t, chainStore, link4) - assertSetHead(t, chainStore, link3) - assertSetHead(t, chainStore, link2) - assertSetHead(t, chainStore, link1) - assertSetHead(t, chainStore, genTS) - heads := []block.TipSet{genTS, link1, link2, link3, link4, link3, link2, link1, genTS} - - // Heads arrive in the expected order - for i := 0; i < 9; i++ { - headA := <-chA - headB := <-chB - assert.Equal(t, headA, headB) - assert.Equal(t, headA, heads[i]) - } - - // No extra notifications - assertEmptyCh(t, chA) - assertEmptyCh(t, chB) -} - -/* Loading */ -// Load does not error and gives the chain store access to all blocks and -// tipset indexes along the heaviest chain. 
-func TestLoadAndReboot(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - genTS := builder.NewGenesis() - rPriv := repo.NewInMemoryRepo() - ds := rPriv.Datastore() - cst := cborutil.NewIpldStore(bstore.NewBlockstore(ds)) - - // Construct test chain data - link1 := builder.AppendOn(genTS, 2) - link2 := builder.AppendOn(link1, 3) - link3 := builder.AppendOn(link2, 1) - link4 := builder.BuildOn(link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) - - // Add blocks to blockstore - requirePutBlocksToCborStore(t, cst, genTS.ToSlice()...) - requirePutBlocksToCborStore(t, cst, link1.ToSlice()...) - requirePutBlocksToCborStore(t, cst, link2.ToSlice()...) - requirePutBlocksToCborStore(t, cst, link3.ToSlice()...) - requirePutBlocksToCborStore(t, cst, link4.ToSlice()...) - - chainStore := chain.NewStore(ds, cst, chain.NewStatusReporter(), genTS.At(0).Cid()) - requirePutTestChain(ctx, t, chainStore, link4.Key(), builder, 5) - assertSetHead(t, chainStore, genTS) // set the genesis block - - assertSetHead(t, chainStore, link4) - chainStore.Stop() - - // rebuild chain with same datastore and cborstore - sr := chain.NewStatusReporter() - rebootChain := chain.NewStore(ds, cst, sr, genTS.At(0).Cid()) - err := rebootChain.Load(ctx) - assert.NoError(t, err) - assert.Equal(t, link4.Key(), sr.Status().ValidatedHead) - - // Check that chain store has index - // Get a tipset and state by key - got2 := requireGetTipSet(ctx, t, rebootChain, link2.Key()) - assert.Equal(t, link2, got2) - - // Get another by parent key - got4 := requireGetTsasByParentAndHeight(t, rebootChain, link3.Key(), 6) - assert.Equal(t, 1, len(got4)) - assert.Equal(t, link4, got4[0].TipSet) - - // Check the head - assert.Equal(t, link4.Key(), rebootChain.GetHead()) -} - -type tipSetGetter interface { - GetTipSet(block.TipSetKey) (block.TipSet, error) -} - -func requireGetTipSet(ctx context.Context, t *testing.T, chainStore tipSetGetter, key 
block.TipSetKey) block.TipSet { - ts, err := chainStore.GetTipSet(key) - require.NoError(t, err) - return ts -} - -type tipSetStateRootGetter interface { - GetTipSetStateRoot(tsKey block.TipSetKey) (cid.Cid, error) -} - -func requireGetTipSetStateRoot(ctx context.Context, t *testing.T, chainStore tipSetStateRootGetter, key block.TipSetKey) cid.Cid { - stateCid, err := chainStore.GetTipSetStateRoot(key) - require.NoError(t, err) - return stateCid -} diff --git a/internal/pkg/chain/testing.go b/internal/pkg/chain/testing.go deleted file mode 100644 index e9c0082f3e..0000000000 --- a/internal/pkg/chain/testing.go +++ /dev/null @@ -1,624 +0,0 @@ -package chain - -import ( - "context" - "encoding/binary" - "fmt" - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - syncds "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" 
-) - -// Builder builds fake chains and acts as a provider and fetcher for the chain thus generated. -// All blocks are unique (even if they share parents) and form valid chains of parents and heights, -// but do not carry valid tickets. Each block contributes a weight of 1. -// State root CIDs are computed by an abstract StateBuilder. The default FakeStateBuilder produces -// state CIDs that are distinct but not CIDs of any real state tree. A more sophisticated -// builder could actually apply the messages to a state tree (not yet implemented). -// The builder is deterministic: two builders receiving the same sequence of calls will produce -// exactly the same chain. -type Builder struct { - t *testing.T - minerAddress address.Address - stateBuilder StateBuilder - stamper TimeStamper - bs blockstore.Blockstore - cstore cbor.IpldStore - messages *MessageStore - seq uint64 // For unique tickets - - // Cache of the state root CID computed for each tipset key. - tipStateCids map[string]cid.Cid -} - -var _ BlockProvider = (*Builder)(nil) -var _ TipSetProvider = (*Builder)(nil) -var _ MessageProvider = (*Builder)(nil) - -// NewBuilder builds a new chain faker with default fake state building. -func NewBuilder(t *testing.T, miner address.Address) *Builder { - return NewBuilderWithDeps(t, miner, &FakeStateBuilder{}, &ZeroTimestamper{}) -} - -// NewBuilderWithDeps builds a new chain faker. -// Blocks will have `miner` set as the miner address, or a default if empty. 
-func NewBuilderWithDeps(t *testing.T, miner address.Address, sb StateBuilder, stamper TimeStamper) *Builder { - if miner.Empty() { - var err error - miner, err = address.NewSecp256k1Address([]byte("miner")) - require.NoError(t, err) - } - - bs := blockstore.NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore())) - b := &Builder{ - t: t, - minerAddress: miner, - stateBuilder: sb, - stamper: stamper, - bs: bs, - cstore: cborutil.NewIpldStore(bs), - messages: NewMessageStore(bs), - tipStateCids: make(map[string]cid.Cid), - } - - ctx := context.TODO() - _, err := b.messages.StoreMessages(ctx, []*types.SignedMessage{}, []*types.UnsignedMessage{}) - require.NoError(t, err) - _, err = b.messages.StoreReceipts(ctx, []vm.MessageReceipt{}) - require.NoError(t, err) - - nullState := types.CidFromString(t, "null") - b.tipStateCids[block.NewTipSetKey().String()] = nullState - return b -} - -// NewGenesis creates and returns a tipset of one block with no parents. -func (f *Builder) NewGenesis() block.TipSet { - return block.RequireNewTipSet(f.t, f.AppendBlockOn(block.UndefTipSet)) -} - -// AppendBlockOnBlocks creates and returns a new block child of `parents`, with no messages. -func (f *Builder) AppendBlockOnBlocks(parents ...*block.Block) *block.Block { - tip := block.UndefTipSet - if len(parents) > 0 { - tip = block.RequireNewTipSet(f.t, parents...) - } - return f.AppendBlockOn(tip) -} - -// AppendBlockOn creates and returns a new block child of `parent`, with no messages. -func (f *Builder) AppendBlockOn(parent block.TipSet) *block.Block { - return f.Build(parent, 1, nil).At(0) -} - -// AppendOn creates and returns a new `width`-block tipset child of `parents`, with no messages. -func (f *Builder) AppendOn(parent block.TipSet, width int) block.TipSet { - return f.Build(parent, width, nil) -} - -// AppendManyBlocksOnBlocks appends `height` blocks to the chain. 
-func (f *Builder) AppendManyBlocksOnBlocks(height int, parents ...*block.Block) *block.Block { - tip := block.UndefTipSet - if len(parents) > 0 { - tip = block.RequireNewTipSet(f.t, parents...) - } - return f.BuildManyOn(height, tip, nil).At(0) -} - -// AppendManyBlocksOn appends `height` blocks to the chain. -func (f *Builder) AppendManyBlocksOn(height int, parent block.TipSet) *block.Block { - return f.BuildManyOn(height, parent, nil).At(0) -} - -// AppendManyOn appends `height` tipsets to the chain. -func (f *Builder) AppendManyOn(height int, parent block.TipSet) block.TipSet { - return f.BuildManyOn(height, parent, nil) -} - -// BuildOnBlock creates and returns a new block child of singleton tipset `parent`. See Build. -func (f *Builder) BuildOnBlock(parent *block.Block, build func(b *BlockBuilder)) *block.Block { - tip := block.UndefTipSet - if parent != nil { - tip = block.RequireNewTipSet(f.t, parent) - } - return f.BuildOneOn(tip, build).At(0) -} - -// BuildOneOn creates and returns a new single-block tipset child of `parent`. -func (f *Builder) BuildOneOn(parent block.TipSet, build func(b *BlockBuilder)) block.TipSet { - return f.Build(parent, 1, singleBuilder(build)) -} - -// BuildOn creates and returns a new `width` block tipset child of `parent`. -func (f *Builder) BuildOn(parent block.TipSet, width int, build func(b *BlockBuilder, i int)) block.TipSet { - return f.Build(parent, width, build) -} - -// BuildManyOn builds a chain by invoking Build `height` times. -func (f *Builder) BuildManyOn(height int, parent block.TipSet, build func(b *BlockBuilder)) block.TipSet { - require.True(f.t, height > 0, "") - for i := 0; i < height; i++ { - parent = f.Build(parent, 1, singleBuilder(build)) - } - return parent -} - -// Build creates and returns a new tipset child of `parent`. -// The tipset carries `width` > 0 blocks with the same height and parents, but different tickets. 
-// Note: the blocks will all have the same miner, which is unrealistic and forbidden by consensus; -// generalise this to random miner addresses when that is rejected by the syncer. -// The `build` function is invoked to modify the block before it is stored. -func (f *Builder) Build(parent block.TipSet, width int, build func(b *BlockBuilder, i int)) block.TipSet { - require.True(f.t, width > 0) - var blocks []*block.Block - - height := abi.ChainEpoch(0) - grandparentKey := block.NewTipSetKey() - if parent.Defined() { - var err error - height = parent.At(0).Height + 1 - grandparentKey, err = parent.Parents() - require.NoError(f.t, err) - } - - parentWeight, err := f.stateBuilder.Weigh(parent, f.StateForKey(grandparentKey)) - require.NoError(f.t, err) - - emptyBLSSig := crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: (*bls.Aggregate([]bls.Signature{}))[:], - } - for i := 0; i < width; i++ { - ticket := block.Ticket{} - ticket.VRFProof = make([]byte, binary.Size(f.seq)) - binary.BigEndian.PutUint64(ticket.VRFProof, f.seq) - f.seq++ - - b := &block.Block{ - Ticket: ticket, - Miner: f.minerAddress, - BeaconEntries: []*drand.Entry{}, - PoStProofs: []block.PoStProof{}, - ParentWeight: parentWeight, - Parents: parent.Key(), - Height: height, - Messages: e.NewCid(types.EmptyTxMetaCID), - MessageReceipts: e.NewCid(types.EmptyReceiptsCID), - BLSAggregateSig: &emptyBLSSig, - // Omitted fields below - //StateRoot: stateRoot, - //EPoStInfo: ePoStInfo, - //ForkSignaling: forkSig, - Timestamp: f.stamper.Stamp(height), - BlockSig: &crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: []byte{}}, - } - - if build != nil { - build(&BlockBuilder{b, f.t, f.messages}, i) - } - - // Compute state root for this block. 
- ctx := context.Background() - prevState := f.StateForKey(parent.Key()) - smsgs, umsgs, err := f.messages.LoadMessages(ctx, b.Messages.Cid) - require.NoError(f.t, err) - stateRootRaw, _, err := f.stateBuilder.ComputeState(prevState, [][]*types.UnsignedMessage{umsgs}, [][]*types.SignedMessage{smsgs}) - require.NoError(f.t, err) - b.StateRoot = e.NewCid(stateRootRaw) - - // add block to cstore - _, err = f.cstore.Put(ctx, b) - require.NoError(f.t, err) - blocks = append(blocks, b) - } - tip := block.RequireNewTipSet(f.t, blocks...) - // Compute and remember state for the tipset. - f.tipStateCids[tip.Key().String()] = f.ComputeState(tip) - - return tip -} - -// StateForKey loads (or computes) the state root for a tipset key. -func (f *Builder) StateForKey(key block.TipSetKey) cid.Cid { - state, found := f.tipStateCids[key.String()] - if found { - return state - } - // No state yet computed for this tip (perhaps because the blocks in it have not previously - // been considered together as a tipset). - tip, err := f.GetTipSet(key) - require.NoError(f.t, err) - return f.ComputeState(tip) -} - -// GetBlockstoreValue gets data straight out of the underlying blockstore by cid -func (f *Builder) GetBlockstoreValue(ctx context.Context, c cid.Cid) (blocks.Block, error) { - return f.bs.Get(c) -} - -// ComputeState computes the state for a tipset from its parent state. -func (f *Builder) ComputeState(tip block.TipSet) cid.Cid { - parentKey, err := tip.Parents() - require.NoError(f.t, err) - // Load the state of the parent tipset and compute the required state (recursively). - prev := f.StateForKey(parentKey) - state, _, err := f.stateBuilder.ComputeState(prev, [][]*types.UnsignedMessage{}, f.tipMessages(tip)) - require.NoError(f.t, err) - return state -} - -// tipMessages returns the messages of a tipset. Each block's messages are -// grouped into a slice and a slice of these slices is returned. 
-func (f *Builder) tipMessages(tip block.TipSet) [][]*types.SignedMessage { - ctx := context.Background() - var msgs [][]*types.SignedMessage - for i := 0; i < tip.Len(); i++ { - smsgs, _, err := f.messages.LoadMessages(ctx, tip.At(i).Messages.Cid) - require.NoError(f.t, err) - msgs = append(msgs, smsgs) - } - return msgs -} - -// Wraps a simple build function in one that also accepts an index, propagating a nil function. -func singleBuilder(build func(b *BlockBuilder)) func(b *BlockBuilder, i int) { - if build == nil { - return nil - } - return func(b *BlockBuilder, i int) { build(b) } -} - -///// Block builder ///// - -// BlockBuilder mutates blocks as they are generated. -type BlockBuilder struct { - block *block.Block - t *testing.T - messages *MessageStore -} - -// SetTicket sets the block's ticket. -func (bb *BlockBuilder) SetTicket(raw []byte) { - bb.block.Ticket = block.Ticket{VRFProof: crypto.VRFPi(raw)} -} - -// SetTimestamp sets the block's timestamp. -func (bb *BlockBuilder) SetTimestamp(timestamp uint64) { - bb.block.Timestamp = timestamp -} - -// IncHeight increments the block's height, implying a number of null blocks before this one -// is mined. -func (bb *BlockBuilder) IncHeight(nullBlocks abi.ChainEpoch) { - bb.block.Height += nullBlocks -} - -// AddMessages adds a message & receipt collection to the block. -func (bb *BlockBuilder) AddMessages(secpmsgs []*types.SignedMessage, blsMsgs []*types.UnsignedMessage) { - ctx := context.Background() - - meta, err := bb.messages.StoreMessages(ctx, secpmsgs, blsMsgs) - require.NoError(bb.t, err) - - bb.block.Messages = e.NewCid(meta) -} - -// SetStateRoot sets the block's state root. -func (bb *BlockBuilder) SetStateRoot(root cid.Cid) { - bb.block.StateRoot = e.NewCid(root) -} - -///// State builder ///// - -// StateBuilder abstracts the computation of state root CIDs from the chain builder. 
-type StateBuilder interface { - ComputeState(prev cid.Cid, blsMessages [][]*types.UnsignedMessage, secpMessages [][]*types.SignedMessage) (cid.Cid, []vm.MessageReceipt, error) - Weigh(tip block.TipSet, state cid.Cid) (fbig.Int, error) -} - -// FakeStateBuilder computes a fake state CID by hashing the CIDs of a block's parents and messages. -type FakeStateBuilder struct { -} - -// ComputeState computes a fake state from a previous state root CID and the messages contained -// in list-of-lists of messages in blocks. Note that if there are no messages, the resulting state -// is the same as the input state. -// This differs from the true state transition function in that messages that are duplicated -// between blocks in the tipset are not ignored. -func (FakeStateBuilder) ComputeState(prev cid.Cid, blsMessages [][]*types.UnsignedMessage, secpMessages [][]*types.SignedMessage) (cid.Cid, []vm.MessageReceipt, error) { - receipts := []vm.MessageReceipt{} - - // Accumulate the cids of the previous state and of all messages in the tipset. - inputs := []cid.Cid{prev} - for _, blockMessages := range blsMessages { - for _, msg := range blockMessages { - mCId, err := msg.Cid() - if err != nil { - return cid.Undef, []vm.MessageReceipt{}, err - } - inputs = append(inputs, mCId) - receipts = append(receipts, vm.MessageReceipt{ - ExitCode: 0, - ReturnValue: mCId.Bytes(), - GasUsed: gas.NewGas(3), - }) - } - } - for _, blockMessages := range secpMessages { - for _, msg := range blockMessages { - mCId, err := msg.Cid() - if err != nil { - return cid.Undef, []vm.MessageReceipt{}, err - } - inputs = append(inputs, mCId) - receipts = append(receipts, vm.MessageReceipt{ - ExitCode: 0, - ReturnValue: mCId.Bytes(), - GasUsed: gas.NewGas(3), - }) - } - } - - if len(inputs) == 1 { - // If there are no messages, the state doesn't change! 
- return prev, receipts, nil - } - - root, err := makeCid(inputs) - if err != nil { - return cid.Undef, []vm.MessageReceipt{}, err - } - return root, receipts, nil -} - -// Weigh computes a tipset's weight as its parent weight plus one for each block in the tipset. -func (FakeStateBuilder) Weigh(tip block.TipSet, state cid.Cid) (fbig.Int, error) { - parentWeight := fbig.Zero() - if tip.Defined() { - var err error - parentWeight, err = tip.ParentWeight() - if err != nil { - return fbig.Zero(), err - } - } - - return fbig.Add(parentWeight, fbig.NewInt(int64(tip.Len()))), nil -} - -///// Timestamper ///// - -// TimeStamper is an object that timestamps blocks -type TimeStamper interface { - Stamp(abi.ChainEpoch) uint64 -} - -// ZeroTimestamper writes a default of 0 to the timestamp -type ZeroTimestamper struct{} - -// Stamp returns a stamp for the current block -func (zt *ZeroTimestamper) Stamp(height abi.ChainEpoch) uint64 { - return uint64(0) -} - -// ClockTimestamper writes timestamps based on a blocktime and genesis time -type ClockTimestamper struct { - c clock.ChainEpochClock -} - -// NewClockTimestamper makes a new stamper for creating production valid timestamps -func NewClockTimestamper(chainClock clock.ChainEpochClock) *ClockTimestamper { - return &ClockTimestamper{ - c: chainClock, - } -} - -// Stamp assigns a valid timestamp given genesis time and block time to -// a block of the provided height. -func (ct *ClockTimestamper) Stamp(height abi.ChainEpoch) uint64 { - startTime := ct.c.StartTimeOfEpoch(height) - - return uint64(startTime.Unix()) -} - -///// State evaluator ///// - -// FakeStateEvaluator is a syncStateEvaluator that delegates to the FakeStateBuilder. -type FakeStateEvaluator struct { - FakeStateBuilder -} - -// RunStateTransition delegates to StateBuilder.ComputeState. 
-func (e *FakeStateEvaluator) RunStateTransition(ctx context.Context, tip block.TipSet, blsMessages [][]*types.UnsignedMessage, secpMessages [][]*types.SignedMessage, parentWeight fbig.Int, stateID cid.Cid, receiptCid cid.Cid) (cid.Cid, []vm.MessageReceipt, error) { - return e.ComputeState(stateID, blsMessages, secpMessages) -} - -// ValidateHeaderSemantic is a stub that always returns no error -func (e *FakeStateEvaluator) ValidateHeaderSemantic(_ context.Context, _ *block.Block, _ block.TipSet) error { - return nil -} - -// ValidateHeaderSemantic is a stub that always returns no error -func (e *FakeStateEvaluator) ValidateMessagesSemantic(_ context.Context, _ *block.Block, _ block.TipSetKey) error { - return nil -} - -///// Chain selector ///// - -// FakeChainSelector is a syncChainSelector that delegates to the FakeStateBuilder -type FakeChainSelector struct { - FakeStateBuilder -} - -// IsHeavier compares chains weighed with StateBuilder.Weigh. -func (e *FakeChainSelector) IsHeavier(ctx context.Context, a, b block.TipSet, aStateID, bStateID cid.Cid) (bool, error) { - aw, err := e.Weigh(a, aStateID) - if err != nil { - return false, err - } - bw, err := e.Weigh(b, bStateID) - if err != nil { - return false, err - } - return aw.GreaterThan(bw), nil -} - -// Weight delegates to the statebuilder -func (e *FakeChainSelector) Weight(ctx context.Context, ts block.TipSet, stID cid.Cid) (fbig.Int, error) { - return e.Weigh(ts, stID) -} - -///// Interface and accessor implementations ///// - -// GetBlock returns the block identified by `c`. -func (f *Builder) GetBlock(ctx context.Context, c cid.Cid) (*block.Block, error) { - var block block.Block - if err := f.cstore.Get(ctx, c, &block); err != nil { - return nil, err - } - return &block, nil -} - -// GetBlocks returns the blocks identified by `cids`. 
-func (f *Builder) GetBlocks(ctx context.Context, cids []cid.Cid) ([]*block.Block, error) { - ret := make([]*block.Block, len(cids)) - for i, c := range cids { - var block block.Block - if err := f.cstore.Get(ctx, c, &block); err != nil { - return nil, err - } - ret[i] = &block - } - return ret, nil -} - -// GetTipSet returns the tipset identified by `key`. -func (f *Builder) GetTipSet(key block.TipSetKey) (block.TipSet, error) { - ctx := context.Background() - var blocks []*block.Block - for it := key.Iter(); !it.Complete(); it.Next() { - var blk block.Block - if err := f.cstore.Get(ctx, it.Value(), &blk); err != nil { - return block.UndefTipSet, fmt.Errorf("no block %s", it.Value()) - } - blocks = append(blocks, &blk) - } - return block.NewTipSet(blocks...) -} - -// FetchTipSets fetchs the tipset at `tsKey` from the fetchers blockStore backed by the Builder. -func (f *Builder) FetchTipSets(ctx context.Context, key block.TipSetKey, from peer.ID, done func(t block.TipSet) (bool, error)) ([]block.TipSet, error) { - var tips []block.TipSet - for { - tip, err := f.GetTipSet(key) - if err != nil { - return nil, err - } - tips = append(tips, tip) - ok, err := done(tip) - if err != nil { - return nil, err - } - if ok { - break - } - key, err = tip.Parents() - if err != nil { - return nil, err - } - } - return tips, nil -} - -// FetchTipSetHeaders fetchs the tipset at `tsKey` from the fetchers blockStore backed by the Builder. -func (f *Builder) FetchTipSetHeaders(ctx context.Context, key block.TipSetKey, from peer.ID, done func(t block.TipSet) (bool, error)) ([]block.TipSet, error) { - return f.FetchTipSets(ctx, key, from, done) -} - -// GetTipSetStateRoot returns the state root that was computed for a tipset. 
-func (f *Builder) GetTipSetStateRoot(key block.TipSetKey) (cid.Cid, error) { - found, ok := f.tipStateCids[key.String()] - if !ok { - return cid.Undef, errors.Errorf("no state for %s", key) - } - return found, nil -} - -// RequireTipSet returns a tipset by key, which must exist. -func (f *Builder) RequireTipSet(key block.TipSetKey) block.TipSet { - tip, err := f.GetTipSet(key) - require.NoError(f.t, err) - return tip -} - -// RequireTipSets returns a chain of tipsets from key, which must exist and be long enough. -func (f *Builder) RequireTipSets(head block.TipSetKey, count int) []block.TipSet { - var tips []block.TipSet - var err error - for i := 0; i < count; i++ { - tip := f.RequireTipSet(head) - tips = append(tips, tip) - head, err = tip.Parents() - require.NoError(f.t, err) - } - return tips -} - -// LoadMessages returns the message collections tracked by the builder. -func (f *Builder) LoadMessages(ctx context.Context, metaCid cid.Cid) ([]*types.SignedMessage, []*types.UnsignedMessage, error) { - return f.messages.LoadMessages(ctx, metaCid) -} - -// LoadReceipts returns the message collections tracked by the builder. -func (f *Builder) LoadReceipts(ctx context.Context, c cid.Cid) ([]vm.MessageReceipt, error) { - return f.messages.LoadReceipts(ctx, c) -} - -// LoadTxMeta returns the tx meta wrapper tracked by the builder. -func (f *Builder) LoadTxMeta(ctx context.Context, metaCid cid.Cid) (types.TxMeta, error) { - return f.messages.LoadTxMeta(ctx, metaCid) -} - -// StoreReceipts stores message receipts and returns a commitment. 
-func (f *Builder) StoreReceipts(ctx context.Context, receipts []vm.MessageReceipt) (cid.Cid, error) { - return f.messages.StoreReceipts(ctx, receipts) -} - -// StoreTxMeta stores a tx meta -func (f *Builder) StoreTxMeta(ctx context.Context, meta types.TxMeta) (cid.Cid, error) { - return f.messages.StoreTxMeta(ctx, meta) -} - -///// Internals ///// - -func makeCid(i interface{}) (cid.Cid, error) { - bytes, err := encoding.Encode(i) - if err != nil { - return cid.Undef, err - } - return constants.DefaultCidBuilder.Sum(bytes) -} diff --git a/internal/pkg/chain/tip_index.go b/internal/pkg/chain/tip_index.go deleted file mode 100644 index 6ee88ca745..0000000000 --- a/internal/pkg/chain/tip_index.go +++ /dev/null @@ -1,162 +0,0 @@ -package chain - -import ( - "fmt" - "sync" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/pkg/errors" -) - -var ( - // ErrNotFound is returned when the key for a "Get" lookup is not in the index. - ErrNotFound = errors.New("Key not found in tipindex") -) - -// TipSetMetadata is the type stored at the leaves of the TipIndex. It contains -// a tipset pointing to blocks, the root cid of the chain's state after -// applying the messages in this tipset to it's parent state, and the cid of the receipts -// for these messages. -type TipSetMetadata struct { - // TipSetStateRoot is the root of aggregate state after applying tipset - TipSetStateRoot cid.Cid - - // TipSet is the set of blocks that forms the tip set - TipSet block.TipSet - - // TipSetReceipts receipts from all message contained within this tipset - TipSetReceipts cid.Cid -} - -type tsmByTipSetID map[string]*TipSetMetadata - -// TipIndex tracks tipsets and their states by tipset block ids and parent -// block ids. All methods are threadsafe as shared data is guarded by a -// mutex. 
-type TipIndex struct { - mu sync.Mutex - // tsasByParents allows lookup of all TipSetAndStates with the same parent IDs. - tsasByParentsAndHeight map[string]tsmByTipSetID - // tsasByID allows lookup of recorded TipSetAndStates by TipSet ID. - tsasByID tsmByTipSetID -} - -// NewTipIndex is the TipIndex constructor. -func NewTipIndex() *TipIndex { - return &TipIndex{ - tsasByParentsAndHeight: make(map[string]tsmByTipSetID), - tsasByID: make(map[string]*TipSetMetadata), - } -} - -// Put adds an entry to both of TipIndex's internal indexes. -// After this call the input TipSetMetadata can be looked up by the ID of -// the tipset, or the tipset's parent. -func (ti *TipIndex) Put(tsas *TipSetMetadata) error { - ti.mu.Lock() - defer ti.mu.Unlock() - tsKey := tsas.TipSet.String() - // Update tsasByID - ti.tsasByID[tsKey] = tsas - - // Update tsasByParents - pSet, err := tsas.TipSet.Parents() - if err != nil { - return err - } - pKey := pSet.String() - h, err := tsas.TipSet.Height() - if err != nil { - return err - } - key := makeKey(pKey, h) - tsasByID, ok := ti.tsasByParentsAndHeight[key] - if !ok { - tsasByID = make(map[string]*TipSetMetadata) - ti.tsasByParentsAndHeight[key] = tsasByID - } - tsasByID[tsKey] = tsas - return nil -} - -// Get returns the tipset given by the input ID and its state. -func (ti *TipIndex) Get(tsKey block.TipSetKey) (*TipSetMetadata, error) { - ti.mu.Lock() - defer ti.mu.Unlock() - tsas, ok := ti.tsasByID[tsKey.String()] - if !ok { - return nil, ErrNotFound - } - return tsas, nil -} - -// GetTipSet returns the tipset from func (ti *TipIndex) Get(tsKey string) -func (ti *TipIndex) GetTipSet(tsKey block.TipSetKey) (block.TipSet, error) { - tsas, err := ti.Get(tsKey) - if err != nil { - return block.UndefTipSet, err - } - return tsas.TipSet, nil -} - -// GetTipSetStateRoot returns the tipsetStateRoot from func (ti *TipIndex) Get(tsKey string). 
-func (ti *TipIndex) GetTipSetStateRoot(tsKey block.TipSetKey) (cid.Cid, error) { - tsas, err := ti.Get(tsKey) - if err != nil { - return cid.Cid{}, err - } - return tsas.TipSetStateRoot, nil -} - -// GetTipSetReceiptsRoot returns the tipsetReceipts from func (ti *TipIndex) Get(tsKey string). -func (ti *TipIndex) GetTipSetReceiptsRoot(tsKey block.TipSetKey) (cid.Cid, error) { - tsas, err := ti.Get(tsKey) - if err != nil { - return cid.Cid{}, err - } - return tsas.TipSetReceipts, nil -} - -// Has returns true iff the tipset with the input ID is stored in -// the TipIndex. -func (ti *TipIndex) Has(tsKey block.TipSetKey) bool { - ti.mu.Lock() - defer ti.mu.Unlock() - _, ok := ti.tsasByID[tsKey.String()] - return ok -} - -// GetByParentsAndHeight returns the all tipsets and states stored in the TipIndex -// such that the parent ID of these tipsets equals the input. -func (ti *TipIndex) GetByParentsAndHeight(pKey block.TipSetKey, h abi.ChainEpoch) ([]*TipSetMetadata, error) { - key := makeKey(pKey.String(), h) - ti.mu.Lock() - defer ti.mu.Unlock() - tsasByID, ok := ti.tsasByParentsAndHeight[key] - if !ok { - return nil, ErrNotFound - } - var ret []*TipSetMetadata - for _, tsas := range tsasByID { - ret = append(ret, tsas) - } - return ret, nil -} - -// HasByParentsAndHeight returns true iff there exist tipsets, and states, -// tracked in the TipIndex such that the parent ID of these tipsets equals the -// input. 
-func (ti *TipIndex) HasByParentsAndHeight(pKey block.TipSetKey, h abi.ChainEpoch) bool { - key := makeKey(pKey.String(), h) - ti.mu.Lock() - defer ti.mu.Unlock() - _, ok := ti.tsasByParentsAndHeight[key] - return ok -} - -// makeKey returns a unique string for every parent set key and height input -func makeKey(pKey string, h abi.ChainEpoch) string { - return fmt.Sprintf("p-%s h-%d", pKey, h) -} diff --git a/internal/pkg/chain/traversal.go b/internal/pkg/chain/traversal.go deleted file mode 100644 index d170f0a5d9..0000000000 --- a/internal/pkg/chain/traversal.go +++ /dev/null @@ -1,230 +0,0 @@ -package chain - -import ( - "context" - "errors" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" -) - -// TipSetProvider provides tipsets for traversal. -type TipSetProvider interface { - GetTipSet(tsKey block.TipSetKey) (block.TipSet, error) -} - -// IterAncestors returns an iterator over tipset ancestors, yielding first the start tipset and -// then its parent tipsets until (and including) the genesis tipset. -func IterAncestors(ctx context.Context, store TipSetProvider, start block.TipSet) *TipsetIterator { - return &TipsetIterator{ctx, store, start} -} - -// TipsetIterator is an iterator over tipsets. -type TipsetIterator struct { - ctx context.Context - store TipSetProvider - value block.TipSet -} - -// Value returns the iterator's current value, if not Complete(). -func (it *TipsetIterator) Value() block.TipSet { - return it.value -} - -// Complete tests whether the iterator is exhausted. -func (it *TipsetIterator) Complete() bool { - return !it.value.Defined() -} - -// Next advances the iterator to the next value. 
-func (it *TipsetIterator) Next() error { - select { - case <-it.ctx.Done(): - return it.ctx.Err() - default: - parentKey, err := it.value.Parents() - // Parents is empty (without error) for the genesis tipset. - if err != nil || parentKey.Len() == 0 { - it.value = block.UndefTipSet - } else { - it.value, err = it.store.GetTipSet(parentKey) - } - return err - } -} - -// BlockProvider provides blocks. -type BlockProvider interface { - GetBlock(ctx context.Context, cid cid.Cid) (*block.Block, error) -} - -// LoadTipSetBlocks loads all the blocks for a tipset from the store. -func LoadTipSetBlocks(ctx context.Context, store BlockProvider, key block.TipSetKey) (block.TipSet, error) { - var blocks []*block.Block - for it := key.Iter(); !it.Complete(); it.Next() { - blk, err := store.GetBlock(ctx, it.Value()) - if err != nil { - return block.UndefTipSet, err - } - blocks = append(blocks, blk) - } - return block.NewTipSet(blocks...) -} - -type tipsetFromBlockProvider struct { - ctx context.Context // Context to use when loading blocks - blocks BlockProvider // Provides blocks -} - -// TipSetProviderFromBlocks builds a tipset provider backed by a block provider. -// Blocks will be loaded with the provided context, since GetTipSet does not accept a -// context parameter. This can and should be removed when GetTipSet does take a context. -func TipSetProviderFromBlocks(ctx context.Context, blocks BlockProvider) TipSetProvider { - return &tipsetFromBlockProvider{ctx, blocks} -} - -// GetTipSet loads the blocks for a tipset. -func (p *tipsetFromBlockProvider) GetTipSet(tsKey block.TipSetKey) (block.TipSet, error) { - return LoadTipSetBlocks(p.ctx, p.blocks, tsKey) -} - -// CollectTipsToCommonAncestor traverses chains from two tipsets (called old and new) until their common -// ancestor, collecting all tipsets that are in one chain but not the other. -// The resulting lists of tipsets are ordered by decreasing height. 
-func CollectTipsToCommonAncestor(ctx context.Context, store TipSetProvider, oldHead, newHead block.TipSet) (oldTips, newTips []block.TipSet, err error) { - oldIter := IterAncestors(ctx, store, oldHead) - newIter := IterAncestors(ctx, store, newHead) - - commonAncestor, err := FindCommonAncestor(oldIter, newIter) - if err != nil { - return - } - commonHeight, err := commonAncestor.Height() - if err != nil { - return - } - - // Refresh iterators modified by FindCommonAncestors - oldIter = IterAncestors(ctx, store, oldHead) - newIter = IterAncestors(ctx, store, newHead) - - // Add 1 to the height argument so that the common ancestor is not - // included in the outputs. - oldTips, err = CollectTipSetsOfHeightAtLeast(ctx, oldIter, commonHeight+1) - if err != nil { - return - } - newTips, err = CollectTipSetsOfHeightAtLeast(ctx, newIter, commonHeight+1) - return -} - -// ErrNoCommonAncestor is returned when two chains assumed to have a common ancestor do not. -var ErrNoCommonAncestor = errors.New("no common ancestor") - -// FindCommonAncestor returns the common ancestor of the two tipsets pointed to -// by the input iterators. If they share no common ancestor ErrNoCommonAncestor -// will be returned. -func FindCommonAncestor(leftIter, rightIter *TipsetIterator) (block.TipSet, error) { - for !rightIter.Complete() && !leftIter.Complete() { - left := leftIter.Value() - right := rightIter.Value() - - leftHeight, err := left.Height() - if err != nil { - return block.UndefTipSet, err - } - rightHeight, err := right.Height() - if err != nil { - return block.UndefTipSet, err - } - - // Found common ancestor. - if left.Equals(right) { - return left, nil - } - - // Update the pointers. Pointers move back one tipset if they - // point to a tipset at the same height or higher than the - // other pointer's tipset. 
- if rightHeight >= leftHeight { - if err := rightIter.Next(); err != nil { - return block.UndefTipSet, err - } - } - - if leftHeight >= rightHeight { - if err := leftIter.Next(); err != nil { - return block.UndefTipSet, err - } - } - } - return block.UndefTipSet, ErrNoCommonAncestor -} - -// CollectTipSetsOfHeightAtLeast collects all tipsets with a height greater -// than or equal to minHeight from the input tipset. -func CollectTipSetsOfHeightAtLeast(ctx context.Context, iterator *TipsetIterator, minHeight abi.ChainEpoch) ([]block.TipSet, error) { - var ret []block.TipSet - var err error - var h abi.ChainEpoch - for ; !iterator.Complete(); err = iterator.Next() { - if err != nil { - return nil, err - } - h, err = iterator.Value().Height() - if err != nil { - return nil, err - } - if h < minHeight { - return ret, nil - } - ret = append(ret, iterator.Value()) - } - return ret, nil -} - -// FindTipSetAtEpoch finds the highest tipset with height <= the input epoch -// by traversing backwards from start -func FindTipsetAtEpoch(ctx context.Context, start block.TipSet, epoch abi.ChainEpoch, reader TipSetProvider) (ts block.TipSet, err error) { - iterator := IterAncestors(ctx, reader, start) - var h abi.ChainEpoch - for ; !iterator.Complete(); err = iterator.Next() { - if err != nil { - return - } - ts = iterator.Value() - h, err = ts.Height() - if err != nil { - return - } - if h <= epoch { - break - } - } - // If the iterator completed, ts is the genesis tipset. 
- return -} - -// FindLatestDRAND returns the latest DRAND entry in the chain beginning at start -func FindLatestDRAND(ctx context.Context, start block.TipSet, reader TipSetProvider) (*drand.Entry, error) { - iterator := IterAncestors(ctx, reader, start) - var err error - for ; !iterator.Complete(); err = iterator.Next() { - if err != nil { - return nil, err - } - ts := iterator.Value() - // DRAND entries must be the same for all blocks on the tipset as - // an invariant of the tipset provider - - entries := ts.At(0).BeaconEntries - if len(entries) > 0 { - return entries[len(entries)-1], nil - } - // No entries, simply move on to the next ancestor - } - return nil, errors.New("no DRAND entries in chain") -} diff --git a/internal/pkg/chain/traversal_test.go b/internal/pkg/chain/traversal_test.go deleted file mode 100644 index 82a5cfa1d5..0000000000 --- a/internal/pkg/chain/traversal_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package chain_test - -import ( - "context" - "fmt" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestIterAncestors(t *testing.T) { - tf.UnitTest(t) - miner, err := address.NewSecp256k1Address([]byte(fmt.Sprintf("address"))) - require.NoError(t, err) - - t.Run("iterates", func(t *testing.T) { - ctx := context.Background() - store := chain.NewBuilder(t, miner) - - root := store.AppendBlockOnBlocks() - b11 := store.AppendBlockOnBlocks(root) - b12 := store.AppendBlockOnBlocks(root) - b21 := store.AppendBlockOnBlocks(b11, b12) - - t0 := block.RequireNewTipSet(t, root) - t1 := block.RequireNewTipSet(t, b11, b12) - t2 := block.RequireNewTipSet(t, b21) - - it := chain.IterAncestors(ctx, store, 
t2) - assert.False(t, it.Complete()) - assert.True(t, t2.Equals(it.Value())) - - assert.NoError(t, it.Next()) - assert.False(t, it.Complete()) - assert.True(t, t1.Equals(it.Value())) - - assert.NoError(t, it.Next()) - assert.False(t, it.Complete()) - assert.True(t, t0.Equals(it.Value())) - - assert.NoError(t, it.Next()) - assert.True(t, it.Complete()) - }) - - t.Run("respects context", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - store := chain.NewBuilder(t, miner) - - root := store.AppendBlockOnBlocks() - b11 := store.AppendBlockOnBlocks(root) - b12 := store.AppendBlockOnBlocks(root) - b21 := store.AppendBlockOnBlocks(b11, b12) - - block.RequireNewTipSet(t, root) - t1 := block.RequireNewTipSet(t, b11, b12) - t2 := block.RequireNewTipSet(t, b21) - - it := chain.IterAncestors(ctx, store, t2) - assert.False(t, it.Complete()) - assert.True(t, t2.Equals(it.Value())) - - assert.NoError(t, it.Next()) - assert.False(t, it.Complete()) - assert.True(t, t1.Equals(it.Value())) - - cancel() - - assert.Error(t, it.Next()) - }) -} - -// Happy path -func TestCollectTipSetsOfHeightAtLeast(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - - chainLen := 15 - head := builder.AppendManyOn(chainLen, block.UndefTipSet) - - stopHeight := abi.ChainEpoch(4) - iterator := chain.IterAncestors(ctx, builder, head) - tipsets, err := chain.CollectTipSetsOfHeightAtLeast(ctx, iterator, stopHeight) - assert.NoError(t, err) - latestHeight, err := tipsets[0].Height() - require.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(14), latestHeight) - earliestHeight, err := tipsets[len(tipsets)-1].Height() - require.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(4), earliestHeight) - assert.Equal(t, 11, len(tipsets)) -} - -// Height at least 0. 
-func TestCollectTipSetsOfHeightAtLeastZero(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - - chainLen := 25 - head := builder.AppendManyOn(chainLen, block.UndefTipSet) - - stopHeight := abi.ChainEpoch(0) - iterator := chain.IterAncestors(ctx, builder, head) - tipsets, err := chain.CollectTipSetsOfHeightAtLeast(ctx, iterator, stopHeight) - assert.NoError(t, err) - latestHeight, err := tipsets[0].Height() - require.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(24), latestHeight) - earliestHeight, err := tipsets[len(tipsets)-1].Height() - require.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(0), earliestHeight) - assert.Equal(t, chainLen, len(tipsets)) -} - -// The starting epoch is a null block. -func TestCollectTipSetsOfHeightAtLeastStartingEpochIsNull(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - head := builder.NewGenesis() - - // Add 30 tipsets to the head of the chainStore. - head = builder.AppendManyOn(30, head) - - // Now add 10 null blocks and 1 tipset. - head = builder.BuildOneOn(head, func(b *chain.BlockBuilder) { - b.IncHeight(10) - }) - - // Now add 19 more tipsets. - head = builder.AppendManyOn(19, head) - - stopHeight := abi.ChainEpoch(35) - iterator := chain.IterAncestors(ctx, builder, head) - tipsets, err := chain.CollectTipSetsOfHeightAtLeast(ctx, iterator, stopHeight) - assert.NoError(t, err) - latestHeight, err := tipsets[0].Height() - require.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(60), latestHeight) - earliestHeight, err := tipsets[len(tipsets)-1].Height() - require.NoError(t, err) - assert.Equal(t, abi.ChainEpoch(41), earliestHeight) - assert.Equal(t, 20, len(tipsets)) -} - -func TestFindCommonAncestorSameChain(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - head := builder.NewGenesis() - // Add 30 tipsets to the head of the chainStore. 
- head = builder.AppendManyOn(30, head) - headIterOne := chain.IterAncestors(ctx, builder, head) - headIterTwo := chain.IterAncestors(ctx, builder, head) - commonAncestor, err := chain.FindCommonAncestor(headIterOne, headIterTwo) - assert.NoError(t, err) - assert.Equal(t, head, commonAncestor) -} - -func TestFindCommonAncestorFork(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - head := builder.NewGenesis() - - // Add 3 tipsets to the head of the chainStore. - commonHeadTip := builder.AppendManyOn(3, head) - - // Grow the fork chain - lenFork := 10 - forkHead := builder.AppendManyOn(lenFork, commonHeadTip) - - // Grow the main chain - lenMainChain := 14 - mainHead := builder.AppendManyOn(lenMainChain, commonHeadTip) - - forkItr := chain.IterAncestors(ctx, builder, forkHead) - mainItr := chain.IterAncestors(ctx, builder, mainHead) - commonAncestor, err := chain.FindCommonAncestor(mainItr, forkItr) - assert.NoError(t, err) - assert.Equal(t, commonHeadTip, commonAncestor) -} - -func TestFindCommonAncestorNoFork(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - head := builder.NewGenesis() - - // Add 30 tipsets to the head of the chainStore. - head = builder.AppendManyOn(30, head) - headIterOne := chain.IterAncestors(ctx, builder, head) - - // Now add 19 more tipsets. - expectedAncestor := head - head = builder.AppendManyOn(19, head) - headIterTwo := chain.IterAncestors(ctx, builder, head) - - commonAncestor, err := chain.FindCommonAncestor(headIterOne, headIterTwo) - assert.NoError(t, err) - assert.True(t, expectedAncestor.Equals(commonAncestor)) -} - -// This test exercises an edge case fork that our previous common ancestor -// utility handled incorrectly. 
-func TestFindCommonAncestorNullBlockFork(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder := chain.NewBuilder(t, address.Undef) - head := builder.NewGenesis() - - // Add 10 tipsets to the head of the chainStore. - commonHead := builder.AppendManyOn(10, head) - - // From the common ancestor, add a block following a null block. - headAfterNull := builder.BuildOneOn(commonHead, func(b *chain.BlockBuilder) { - b.IncHeight(1) - }) - afterNullItr := chain.IterAncestors(ctx, builder, headAfterNull) - - // Add a block (with no null) on another fork. - headNoNull := builder.AppendOn(commonHead, 1) - noNullItr := chain.IterAncestors(ctx, builder, headNoNull) - - commonAncestor, err := chain.FindCommonAncestor(afterNullItr, noNullItr) - assert.NoError(t, err) - assert.Equal(t, commonHead, commonAncestor) -} diff --git a/internal/pkg/chain/util.go b/internal/pkg/chain/util.go deleted file mode 100644 index fad9317222..0000000000 --- a/internal/pkg/chain/util.go +++ /dev/null @@ -1,14 +0,0 @@ -package chain - -import ( - "github.com/filecoin-project/go-filecoin/internal/pkg/block" -) - -// Reverse reverses the order of the slice `chain`. -func Reverse(chain []block.TipSet) { - // https://github.com/golang/go/wiki/SliceTricks#reversing - for i := len(chain)/2 - 1; i >= 0; i-- { - opp := len(chain) - 1 - i - chain[i], chain[opp] = chain[opp], chain[i] - } -} diff --git a/internal/pkg/chainsampler/height_threshold_listener.go b/internal/pkg/chainsampler/height_threshold_listener.go deleted file mode 100644 index 8839dfdb7b..0000000000 --- a/internal/pkg/chainsampler/height_threshold_listener.go +++ /dev/null @@ -1,105 +0,0 @@ -package chainsampler - -import ( - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" -) - -// HeightThresholdListener listens for new heaviest chains and notifies when a height threshold is crossed. 
-type HeightThresholdListener struct { - target abi.ChainEpoch - targetHit bool - - HitCh chan block.TipSetKey - ErrCh chan error - InvalidCh chan struct{} - DoneCh chan struct{} -} - -// NewHeightThresholdListener creates a new listener -func NewHeightThresholdListener(target abi.ChainEpoch, hitCh chan block.TipSetKey, errCh chan error, invalidCh, doneCh chan struct{}) *HeightThresholdListener { - return &HeightThresholdListener{ - target: target, - targetHit: false, - HitCh: hitCh, - ErrCh: errCh, - InvalidCh: invalidCh, - DoneCh: doneCh, - } -} - -// Handle a chainStore update by sending appropriate status messages back to the channels. -// newChain is all the tipsets that are new since the last head update. -// Normally, this will be a single tipset, but in the case of a re-org it will contain -// all the common ancestors of the new tipset to the greatest common ancestor. -// The tipsets must be ordered from newest (highest block height) to oldest. -// Returns false if this handler is no longer valid. 
-func (l *HeightThresholdListener) Handle(chain []block.TipSet) (bool, error) { - if len(chain) < 1 { - return true, nil - } - - h, err := chain[0].Height() - if err != nil { - return true, err - } - - // check if we've hit finality and should stop listening - if h >= l.target+miner.ChainFinalityish { - l.DoneCh <- struct{}{} - return false, nil - } - - lcaHeight, err := chain[len(chain)-1].Height() - if err != nil { - return true, err - } - - // if we have already seen a target tipset - if l.targetHit { - // if we've completely reverted - if h < l.target { - l.InvalidCh <- struct{}{} - l.targetHit = false - // if we've re-orged to a point before the target - } else if lcaHeight < l.target { - l.InvalidCh <- struct{}{} - err := l.sendHit(chain) - if err != nil { - return true, err - } - } - return true, nil - } - - // otherwise send randomness if we've hit the height - if h >= l.target { - l.targetHit = true - err := l.sendHit(chain) - if err != nil { - return true, err - } - } - return true, nil -} - -func (l *HeightThresholdListener) sendHit(chain []block.TipSet) error { - // assume chainStore not empty and first tipset height greater than target - firstTargetTipset := chain[0] - for _, ts := range chain { - h, err := ts.Height() - if err != nil { - return err - } - - if h < l.target { - break - } - firstTargetTipset = ts - } - - l.HitCh <- firstTargetTipset.Key() - return nil -} diff --git a/internal/pkg/chainsampler/height_threshold_listener_test.go b/internal/pkg/chainsampler/height_threshold_listener_test.go deleted file mode 100644 index 9e5ad1cc70..0000000000 --- a/internal/pkg/chainsampler/height_threshold_listener_test.go +++ /dev/null @@ -1,259 +0,0 @@ -package chainsampler - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - 
- "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestNewHeightThresholdListener(t *testing.T) { - tf.UnitTest(t) - builder := chain.NewBuilder(t, address.Undef) - genesis := builder.NewGenesis() - - startHead := builder.BuildManyOn(6, genesis, nil) - - t.Run("does nothing until chain crosses threshold", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - - hc, ec, ic, dc := setupChannels() - listener := NewHeightThresholdListener(11, hc, ec, ic, dc) - - // 6 + 3 = 9 which is less than 11 - nextTS := builder.BuildManyOn(3, startHead, nil) - newChain, err := tipsetToSlice(nextTS, 4, builder) - require.NoError(t, err) - - go func() { - _, err := listener.Handle(newChain) - require.NoError(t, err) - cancel() - }() - - expectCancelBeforeOutput(ctx, hc, ec, ic, dc) - }) - - t.Run("add tipset at target height sends key", func(t *testing.T) { - hc, ec, ic, dc := setupChannels() - listener := NewHeightThresholdListener(7, hc, ec, ic, dc) - - nextTS := builder.Build(startHead, 1, nil) - go func() { - _, err := listener.Handle([]block.TipSet{nextTS}) - require.NoError(t, err) - }() - - key := waitForKey(t, hc, ec, ic, dc) - assert.Equal(t, nextTS.Key(), key) - }) - - t.Run("invalidates when new fork head is lower than target", func(t *testing.T) { - hc, ec, ic, dc := setupChannels() - listener := NewHeightThresholdListener(8, hc, ec, ic, dc) - - nextTS := builder.BuildManyOn(4, startHead, nil) - newChain, err := tipsetToSlice(nextTS, 4, builder) - require.NoError(t, err) - - go func() { - _, err := listener.Handle(newChain) - require.NoError(t, err) - }() - - key := waitForKey(t, hc, ec, ic, dc) - assert.Equal(t, newChain[2].Key(), key) - - shorterFork := builder.BuildManyOn(1, startHead, nil) - go func() { - _, err := listener.Handle([]block.TipSet{shorterFork}) - require.NoError(t, 
err) - }() - - waitForInvalidation(t, hc, ec, ic, dc) - }) - - t.Run("invalidates and then sends new seed when new fork head is higher than target with a lower lca", func(t *testing.T) { - hc, ec, ic, dc := setupChannels() - listener := NewHeightThresholdListener(8, hc, ec, ic, dc) - - nextTS := builder.BuildManyOn(4, startHead, nil) - newChain, err := tipsetToSlice(nextTS, 4, builder) - require.NoError(t, err) - - go func() { - _, err := listener.Handle(newChain) - require.NoError(t, err) - }() - - key := waitForKey(t, hc, ec, ic, dc) - assert.Equal(t, newChain[2].Key(), key) - - fork := builder.BuildManyOn(3, startHead, nil) - forkSlice, err := tipsetToSlice(fork, 3, builder) - require.NoError(t, err) - - go func() { - _, err := listener.Handle(forkSlice) - require.NoError(t, err) - }() - - // first invalidate - waitForInvalidation(t, hc, ec, ic, dc) - - // then send new key - key = waitForKey(t, hc, ec, ic, dc) - assert.Equal(t, forkSlice[1].Key(), key) - }) - - t.Run("does nothing if new chain is entirely above threshold", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - - hc, ec, ic, dc := setupChannels() - listener := NewHeightThresholdListener(8, hc, ec, ic, dc) - - // cross the threshold (8) with 4 tipsets - nextTS := builder.BuildManyOn(4, startHead, nil) - newChain, err := tipsetToSlice(nextTS, 4, builder) - require.NoError(t, err) - - go func() { - _, err := listener.Handle(newChain) - require.NoError(t, err) - }() - - key := waitForKey(t, hc, ec, ic, dc) - assert.Equal(t, newChain[2].Key(), key) - - // add 3 more tipsets on existing highest head that do not cross threshold - nextTS = builder.BuildManyOn(3, nextTS, nil) - newChain, err = tipsetToSlice(nextTS, 4, builder) - require.NoError(t, err) - - go func() { - _, err := listener.Handle(newChain) - require.NoError(t, err) - cancel() - }() - - expectCancelBeforeOutput(ctx, hc, ec, ic, dc) - }) - - t.Run("sends on done channel when finality is crossed", func(t *testing.T) { - 
hc, ec, ic, dc := setupChannels() - listener := NewHeightThresholdListener(8, hc, ec, ic, dc) - - // cross the threshold (8) with 4 tipsets - nextTS := builder.BuildManyOn(4, startHead, nil) - newChain, err := tipsetToSlice(nextTS, 4, builder) - require.NoError(t, err) - - go func() { - _, err := listener.Handle(newChain) - require.NoError(t, err) - }() - - key := waitForKey(t, hc, ec, ic, dc) - assert.Equal(t, newChain[2].Key(), key) - - // add tipsets till finality - go func() { - for i := abi.ChainEpoch(0); i < miner.ChainFinalityish; i++ { - nextTS = builder.BuildOn(nextTS, 1, nil) - valid, err := listener.Handle([]block.TipSet{nextTS}) - require.NoError(t, err) - - h, err := nextTS.Height() - require.NoError(t, err) - if h >= 8+miner.ChainFinalityish { - assert.False(t, valid) - } else { - assert.True(t, valid) - } - } - }() - - select { - case <-hc: - panic("unexpected sample key") - case err := <-ec: - panic(err) - case <-ic: - panic("unexpected height invalidation") - case <-dc: - return // got value on done channel - } - }) -} - -func setupChannels() (chan block.TipSetKey, chan error, chan struct{}, chan struct{}) { - return make(chan block.TipSetKey), make(chan error), make(chan struct{}), make(chan struct{}) -} - -func waitForKey(t *testing.T, hc chan block.TipSetKey, ec chan error, ic, dc chan struct{}) block.TipSetKey { - select { - case key := <-hc: - return key - case err := <-ec: - panic(err) - case <-ic: - panic("unexpected height invalidation") - case <-dc: - panic("listener completed before sending key") - } -} - -func expectCancelBeforeOutput(ctx context.Context, hc chan block.TipSetKey, ec chan error, ic, dc chan struct{}) { - select { - case <-hc: - panic("unexpected target tip set") - case err := <-ec: - panic(err) - case <-ic: - panic("unexpected height invalidation") - case <-dc: - panic("listener completed before sending seed") - case <-ctx.Done(): - return - } -} - -func waitForInvalidation(t *testing.T, hc chan block.TipSetKey, ec chan 
error, ic, dc chan struct{}) { - select { - case <-hc: - panic("got key when we expected invalidation") - case err := <-ec: - panic(err) - case <-ic: - return - case <-dc: - panic("listener completed before sending key") - } -} - -func tipsetToSlice(ts block.TipSet, ancestors int, builder *chain.Builder) ([]block.TipSet, error) { - s := make([]block.TipSet, ancestors) - for i := 0; i < ancestors; i++ { - s[i] = ts - - tskey, err := ts.Parents() - if err != nil { - return nil, err - } - - ts, err = builder.GetTipSet(tskey) - if err != nil { - return nil, err - } - } - return s, nil -} diff --git a/internal/pkg/chainsampler/height_threshold_scheduler.go b/internal/pkg/chainsampler/height_threshold_scheduler.go deleted file mode 100644 index 4e78477c3b..0000000000 --- a/internal/pkg/chainsampler/height_threshold_scheduler.go +++ /dev/null @@ -1,90 +0,0 @@ -package chainsampler - -import ( - "context" - "sync" - - "github.com/filecoin-project/specs-actors/actors/abi" - logging "github.com/ipfs/go-log/v2" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" -) - -var log = logging.Logger("chainsampler") // nolint: deadcode - -// HeightThresholdScheduler listens for changes to chain height and notifies when the threshold is hit or invalidated -type HeightThresholdScheduler struct { - mtx sync.Mutex - heightListeners []*HeightThresholdListener - chainStore *chain.Store - prevHead block.TipSet -} - -// NewHeightThresholdScheduler creates a new scheduler -func NewHeightThresholdScheduler(chainStore *chain.Store) *HeightThresholdScheduler { - return &HeightThresholdScheduler{ - chainStore: chainStore, - } -} - -// AddListener adds a new listener for the target height -func (hts *HeightThresholdScheduler) AddListener(target abi.ChainEpoch) *HeightThresholdListener { - hc := make(chan block.TipSetKey) - ec := make(chan error) - ic := make(chan struct{}) - dc := make(chan 
struct{}) - newListener := NewHeightThresholdListener(target, hc, ec, ic, dc) - - hts.mtx.Lock() - defer hts.mtx.Unlock() - hts.heightListeners = append(hts.heightListeners, newListener) - return newListener -} - -// CancelListener stops a listener from listening and sends a message over its done channel -func (hts *HeightThresholdScheduler) CancelListener(cancelledListener *HeightThresholdListener) { - hts.mtx.Lock() - defer hts.mtx.Unlock() - var remainingListeners []*HeightThresholdListener - for _, l := range hts.heightListeners { - if l != cancelledListener { - remainingListeners = append(remainingListeners, l) - } - } - hts.heightListeners = remainingListeners - cancelledListener.DoneCh <- struct{}{} -} - -// HandleNewTipSet must be called when the chain head changes. -func (hts *HeightThresholdScheduler) HandleNewTipSet(ctx context.Context, newHead block.TipSet) error { - var err error - var newTips []block.TipSet - - hts.mtx.Lock() - defer hts.mtx.Unlock() - if hts.prevHead.Defined() { - _, newTips, err = chain.CollectTipsToCommonAncestor(ctx, hts.chainStore, hts.prevHead, newHead) - if err != nil { - return errors.Wrapf(err, "failed to collect tips between %s and %s", hts.prevHead, newHead) - } - } else { - newTips = []block.TipSet{newHead} - } - hts.prevHead = newHead - - var newListeners []*HeightThresholdListener - for _, listener := range hts.heightListeners { - valid, err := listener.Handle(newTips) - if err != nil { - log.Error("Error checking storage miner chainStore listener", err) - } - - if valid { - newListeners = append(newListeners, listener) - } - } - hts.heightListeners = newListeners - return nil -} diff --git a/internal/pkg/chainsync/chainsync.go b/internal/pkg/chainsync/chainsync.go deleted file mode 100644 index bf45e6cfa9..0000000000 --- a/internal/pkg/chainsync/chainsync.go +++ /dev/null @@ -1,64 +0,0 @@ -package chainsync - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/internal/dispatcher" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/internal/syncer" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/status" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/slashing" -) - -// BlockProposer allows callers to propose new blocks for inclusion in the chain. -type BlockProposer interface { - SendHello(ci *block.ChainInfo) error - SendOwnBlock(ci *block.ChainInfo) error - SendGossipBlock(ci *block.ChainInfo) error - WaiterForTarget(wk block.TipSetKey) func() error -} - -// Manager sync the chain. -type Manager struct { - syncer *syncer.Syncer - dispatcher *dispatcher.Dispatcher - transitionCh chan bool -} - -// NewManager creates a new chain sync manager. -func NewManager(fv syncer.FullBlockValidator, hv syncer.BlockValidator, cs syncer.ChainSelector, s syncer.ChainReaderWriter, m *chain.MessageStore, f syncer.Fetcher, c clock.Clock, detector *slashing.ConsensusFaultDetector) (Manager, error) { - syncer, err := syncer.NewSyncer(fv, hv, cs, s, m, f, status.NewReporter(), c, detector) - if err != nil { - return Manager{}, err - } - gapTransitioner := dispatcher.NewGapTransitioner(s, syncer) - dispatcher := dispatcher.NewDispatcher(syncer, gapTransitioner) - return Manager{ - syncer: syncer, - dispatcher: dispatcher, - transitionCh: gapTransitioner.TransitionChannel(), - }, nil -} - -// Start starts the chain sync manager. -func (m *Manager) Start(ctx context.Context) error { - m.dispatcher.Start(ctx) - return m.syncer.InitStaged() -} - -// BlockProposer returns the block proposer. -func (m *Manager) BlockProposer() BlockProposer { - return m.dispatcher -} - -// TransitionChannel returns a channel emitting transition flags. 
-func (m *Manager) TransitionChannel() chan bool { - return m.transitionCh -} - -// Status returns the block proposer. -func (m *Manager) Status() status.Status { - return m.syncer.Status() -} diff --git a/internal/pkg/chainsync/fetcher/graphsync_fetcher.go b/internal/pkg/chainsync/fetcher/graphsync_fetcher.go deleted file mode 100644 index 1474c6ae23..0000000000 --- a/internal/pkg/chainsync/fetcher/graphsync_fetcher.go +++ /dev/null @@ -1,664 +0,0 @@ -package fetcher - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/internal/syncer" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-graphsync" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - ipldselector "github.com/ipld/go-ipld-prime/traversal/selector" - selectorbuilder "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/pkg/errors" - typegen "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -var logGraphsyncFetcher = logging.Logger("chainsync.fetcher.graphsync") - -const ( - // Timeout for a single graphsync request getting "stuck" - // -- if no more responses are received for a period greater than this, - // we will assume the request has hung-up and cancel it - progressTimeout = 10 * time.Second - - // AMT selector recursion. 
An AMT has arity of 8 so this gives allows - // us to retrieve trees with 8^10 (1,073,741,824) elements. - amtRecurstionDepth = uint32(10) - - // field index of AMT node in AMT head - amtHeadNodeFieldIndex = 2 - - // field index of links array AMT node - amtNodeLinksFieldIndex = 1 - - // field index of values array AMT node - amtNodeValuesFieldIndex = 2 -) - -// ChainsyncProtocolExtension is the extension name to indicate graphsync requests are to sync the chain -const ChainsyncProtocolExtension = graphsync.ExtensionName("chainsync") - -// interface conformance check -var _ syncer.Fetcher = (*GraphSyncFetcher)(nil) - -// GraphExchange is an interface wrapper to Graphsync so it can be stubbed in -// unit testing -type GraphExchange interface { - Request(ctx context.Context, p peer.ID, root ipld.Link, selector ipld.Node, extensions ...graphsync.ExtensionData) (<-chan graphsync.ResponseProgress, <-chan error) -} - -type graphsyncFallbackPeerTracker interface { - List() []*block.ChainInfo - Self() peer.ID -} - -// GraphSyncFetcher is used to fetch data over the network. 
It is implemented -// using a Graphsync exchange to fetch tipsets recursively -type GraphSyncFetcher struct { - exchange GraphExchange - validator consensus.SyntaxValidator - store bstore.Blockstore - ssb selectorbuilder.SelectorSpecBuilder - peerTracker graphsyncFallbackPeerTracker - systemClock clock.Clock -} - -// NewGraphSyncFetcher returns a GraphsyncFetcher wired up to the input Graphsync exchange and -// attached local blockservice for reloading blocks in memory once they are returned -func NewGraphSyncFetcher(ctx context.Context, exchange GraphExchange, blockstore bstore.Blockstore, - v consensus.SyntaxValidator, systemClock clock.Clock, pt graphsyncFallbackPeerTracker) *GraphSyncFetcher { - gsf := &GraphSyncFetcher{ - store: blockstore, - validator: v, - exchange: exchange, - ssb: selectorbuilder.NewSelectorSpecBuilder(basicnode.Style.Any), - peerTracker: pt, - systemClock: systemClock, - } - return gsf -} - -// Graphsync can fetch a fixed number of tipsets from a remote peer recursively -// with a single request. We don't know until we get all of the response whether -// our final tipset was included in the response -// -// When fetching tipsets we try to balance performance for two competing cases: -// - an initial chain sync that is likely to fetch lots and lots of tipsets -// - a future update sync that is likely to fetch only a few -// -// To do this, the Graphsync fetcher starts fetching a single tipset at a time, -// then gradually ramps up to fetch lots of tipsets at once, up to a fixed limit -// -// The constants below determine the maximum number of tipsets fetched at once -// (maxRecursionDepth) and how fast the ramp up is (recursionMultipler) -const maxRecursionDepth = 64 -const recursionMultiplier = 4 - -// FetchTipSets gets Tipsets starting from the given tipset key and continuing until -// the done function returns true or errors -// -// For now FetchTipSets operates in two parts: -// 1. 
It fetches relevant blocks through Graphsync, which writes them to the block store -// 2. It reads them from the block store and validates their syntax as blocks -// and constructs a tipset -// This does have a potentially unwanted side effect of writing blocks to the block store -// that later don't validate (bitswap actually does this as well) -// -// TODO: In the future, the blocks will be validated directly through graphsync as -// go-filecoin migrates to the same IPLD library used by go-graphsync (go-ipld-prime) -// -// See: https://github.com/filecoin-project/go-filecoin/issues/3175 -func (gsf *GraphSyncFetcher) FetchTipSets(ctx context.Context, tsKey block.TipSetKey, originatingPeer peer.ID, done func(block.TipSet) (bool, error)) ([]block.TipSet, error) { - return gsf.fetchTipSetsCommon(ctx, tsKey, originatingPeer, done, gsf.loadAndVerifyFullBlock, gsf.fullBlockSel, gsf.recFullBlockSel) -} - -// FetchTipSetHeaders behaves as FetchTipSets but it only fetches and -// syntactically validates a chain of headers, not full blocks. -func (gsf *GraphSyncFetcher) FetchTipSetHeaders(ctx context.Context, tsKey block.TipSetKey, originatingPeer peer.ID, done func(block.TipSet) (bool, error)) ([]block.TipSet, error) { - return gsf.fetchTipSetsCommon(ctx, tsKey, originatingPeer, done, gsf.loadAndVerifyHeader, gsf.headerSel, gsf.recHeaderSel) -} - -func (gsf *GraphSyncFetcher) fetchTipSetsCommon(ctx context.Context, tsKey block.TipSetKey, originatingPeer peer.ID, done func(block.TipSet) (bool, error), loadAndVerify func(context.Context, block.TipSetKey) (block.TipSet, []cid.Cid, error), selGen func() ipld.Node, recSelGen func(int) ipld.Node) ([]block.TipSet, error) { - // We can run into issues if we fetch from an originatingPeer that we - // are not already connected to so we usually ignore this value. - // However if the originator is our own peer ID (i.e. 
this node mined - // the block) then we need to fetch from ourselves to retrieve it - fetchFromSelf := originatingPeer == gsf.peerTracker.Self() - rpf, err := newRequestPeerFinder(gsf.peerTracker, fetchFromSelf) - if err != nil { - return nil, err - } - - // fetch initial tipset - startingTipset, err := gsf.fetchFirstTipset(ctx, tsKey, loadAndVerify, selGen, rpf) - if err != nil { - return nil, err - } - - // fetch remaining tipsets recursively - return gsf.fetchRemainingTipsets(ctx, startingTipset, done, loadAndVerify, recSelGen, rpf) -} - -func (gsf *GraphSyncFetcher) fetchFirstTipset(ctx context.Context, tsKey block.TipSetKey, loadAndVerify func(context.Context, block.TipSetKey) (block.TipSet, []cid.Cid, error), selGen func() ipld.Node, rpf *requestPeerFinder) (block.TipSet, error) { - blocksToFetch := tsKey.ToSlice() - for { - peer := rpf.CurrentPeer() - logGraphsyncFetcher.Infof("fetching initial tipset %s from peer %s", tsKey, peer) - err := gsf.fetchBlocks(ctx, selGen, blocksToFetch, peer) - if err != nil { - // A likely case is the peer doesn't have the tipset. When graphsync provides - // this status we should quiet this log. - logGraphsyncFetcher.Infof("request failed: %s", err) - } - - var verifiedTip block.TipSet - verifiedTip, blocksToFetch, err = loadAndVerify(ctx, tsKey) - if err != nil { - return block.UndefTipSet, err - } - if len(blocksToFetch) == 0 { - return verifiedTip, nil - } - - logGraphsyncFetcher.Infof("incomplete fetch for initial tipset %s, trying new peer", tsKey) - // Some of the blocks may have been fetched, but avoid tricksy optimization here and just - // request the whole bunch again. Graphsync internally will avoid redundant network requests. 
- err = rpf.FindNextPeer() - if err != nil { - return block.UndefTipSet, errors.Wrapf(err, "fetching tipset: %s", tsKey) - } - } -} - -func (gsf *GraphSyncFetcher) fetchRemainingTipsets(ctx context.Context, startingTipset block.TipSet, done func(block.TipSet) (bool, error), loadAndVerify func(context.Context, block.TipSetKey) (block.TipSet, []cid.Cid, error), recSelGen func(int) ipld.Node, rpf *requestPeerFinder) ([]block.TipSet, error) { - out := []block.TipSet{startingTipset} - isDone, err := done(startingTipset) - if err != nil { - return nil, err - } - - // fetch remaining tipsets recursively - recursionDepth := 1 - anchor := startingTipset // The tipset above the one we actually want to fetch. - for !isDone { - // Because a graphsync query always starts from a single CID, - // we fetch tipsets anchored from any block in the last (i.e. highest) tipset and - // recursively fetching sets of parents. - childBlock := anchor.At(0) - peer := rpf.CurrentPeer() - logGraphsyncFetcher.Infof("fetching chain from height %d, block %s, peer %s, %d levels", childBlock.Height, childBlock.Cid(), peer, recursionDepth) - err := gsf.fetchBlocksRecursively(ctx, recSelGen, childBlock.Cid(), peer, recursionDepth) - if err != nil { - // something went wrong in a graphsync request, but we want to keep trying other peers, so - // just log error - logGraphsyncFetcher.Infof("request failed, trying another peer: %s", err) - } - var incomplete []cid.Cid - for i := 0; !isDone && i < recursionDepth; i++ { - tsKey, err := anchor.Parents() - if err != nil { - return nil, err - } - - var verifiedTip block.TipSet - verifiedTip, incomplete, err = loadAndVerify(ctx, tsKey) - if err != nil { - return nil, err - } - if len(incomplete) == 0 { - out = append(out, verifiedTip) - isDone, err = done(verifiedTip) - if err != nil { - return nil, err - } - anchor = verifiedTip - } else { - logGraphsyncFetcher.Infof("incomplete fetch for tipset %s, trying new peer", tsKey) - err := rpf.FindNextPeer() - if err 
!= nil { - return nil, errors.Wrapf(err, "fetching tipset: %s", tsKey) - } - break // Stop verifying, make another fetch - } - } - if len(incomplete) == 0 && recursionDepth < maxRecursionDepth { - recursionDepth *= recursionMultiplier - } - } - return out, nil -} - -// fullBlockSel is a function that generates a selector for a block and its messages. -func (gsf *GraphSyncFetcher) fullBlockSel() ipld.Node { - selector := gsf.ssb.ExploreIndex(block.IndexMessagesField, - gsf.ssb.ExploreRange(0, 2, gsf.fetchThroughAMTSelector(amtRecurstionDepth)), - ).Node() - - return selector -} - -// headerSel is a function that generates a selector for a block header. -func (gsf *GraphSyncFetcher) headerSel() ipld.Node { - return gsf.ssb.Matcher().Node() -} - -// fetchBlocks requests a single set of cids as individual blocks, fetching -// non-recursively -func (gsf *GraphSyncFetcher) fetchBlocks(ctx context.Context, selGen func() ipld.Node, cids []cid.Cid, targetPeer peer.ID) error { - selector := selGen() - var wg sync.WaitGroup - // Any of the multiple parallel requests might fail. Wait for all of them to complete, then - // return any error (in this case, the first one to be received). 
- var setAnyError sync.Once - var anyError error - for _, c := range cids { - requestCtx, requestCancel := context.WithCancel(ctx) - defer requestCancel() - requestChan, errChan := gsf.exchange.Request(requestCtx, targetPeer, cidlink.Link{Cid: c}, selector, graphsync.ExtensionData{Name: ChainsyncProtocolExtension}) - wg.Add(1) - go func(requestChan <-chan graphsync.ResponseProgress, errChan <-chan error, cancelFunc func()) { - defer wg.Done() - err := gsf.consumeResponse(requestChan, errChan, cancelFunc) - if err != nil { - setAnyError.Do(func() { - anyError = err - }) - } - }(requestChan, errChan, requestCancel) - } - wg.Wait() - return anyError -} - -func (gsf *GraphSyncFetcher) fetchThroughAMTSelector(recursionDepth uint32) selectorbuilder.SelectorSpec { - return gsf.ssb.ExploreIndex(amtHeadNodeFieldIndex, - gsf.ssb.ExploreRecursive(ipldselector.RecursionLimitDepth(int(recursionDepth)), - gsf.ssb.ExploreUnion( - gsf.ssb.ExploreIndex(amtNodeLinksFieldIndex, gsf.ssb.ExploreAll(gsf.ssb.ExploreRecursiveEdge())), - gsf.ssb.ExploreIndex(amtNodeValuesFieldIndex, gsf.ssb.ExploreAll(gsf.ssb.Matcher()))))) -} - -func (gsf *GraphSyncFetcher) consumeResponse(requestChan <-chan graphsync.ResponseProgress, errChan <-chan error, cancelFunc func()) error { - timer := gsf.systemClock.NewTimer(progressTimeout) - var anyError error - for errChan != nil || requestChan != nil { - select { - case err, ok := <-errChan: - if !ok { - errChan = nil - } - anyError = err - timer.Reset(progressTimeout) - case _, ok := <-requestChan: - if !ok { - requestChan = nil - } - timer.Reset(progressTimeout) - case <-timer.Chan(): - cancelFunc() - } - } - return anyError -} - -// recFullBlockSel generates a selector for a chain of full blocks including -// messages. 
-func (gsf *GraphSyncFetcher) recFullBlockSel(recursionDepth int) ipld.Node { - // recursive selector to fetch n sets of parent blocks - // starting from block matching base cid: - // - fetch all parent blocks, with messages - // - with exactly the first parent block, repeat again for its parents - // - continue up to recursion depth - selector := gsf.ssb.ExploreRecursive(ipldselector.RecursionLimitDepth(recursionDepth), gsf.ssb.ExploreIndex(block.IndexParentsField, - gsf.ssb.ExploreUnion( - gsf.ssb.ExploreAll( - gsf.ssb.ExploreIndex(block.IndexMessagesField, - gsf.ssb.ExploreRange(0, 2, gsf.fetchThroughAMTSelector(amtRecurstionDepth)), - )), - gsf.ssb.ExploreIndex(0, gsf.ssb.ExploreRecursiveEdge()), - ))).Node() - return selector -} - -// recHeaderSel generates a selector for a chain of only block headers. -func (gsf *GraphSyncFetcher) recHeaderSel(recursionDepth int) ipld.Node { - selector := gsf.ssb.ExploreRecursive(ipldselector.RecursionLimitDepth(recursionDepth), gsf.ssb.ExploreIndex(block.IndexParentsField, - gsf.ssb.ExploreUnion( - gsf.ssb.ExploreAll( - gsf.ssb.Matcher(), - ), - gsf.ssb.ExploreIndex(0, gsf.ssb.ExploreRecursiveEdge()), - ))).Node() - return selector -} - -// fetchBlocksRecursively gets the blocks from recursionDepth ancestor tipsets -// starting from baseCid. -func (gsf *GraphSyncFetcher) fetchBlocksRecursively(ctx context.Context, recSelGen func(int) ipld.Node, baseCid cid.Cid, targetPeer peer.ID, recursionDepth int) error { - requestCtx, requestCancel := context.WithCancel(ctx) - defer requestCancel() - selector := recSelGen(recursionDepth) - - requestChan, errChan := gsf.exchange.Request(requestCtx, targetPeer, cidlink.Link{Cid: baseCid}, selector, graphsync.ExtensionData{Name: ChainsyncProtocolExtension}) - return gsf.consumeResponse(requestChan, errChan, requestCancel) -} - -// loadAndVerifyHeaders loads the IPLD blocks for the headers in a tipset. -// It returns the tipset if complete. 
Otherwise it returns UndefTipset and the -// CIDs of all missing headers. -func (gsf *GraphSyncFetcher) loadAndVerifyHeader(ctx context.Context, key block.TipSetKey) (block.TipSet, []cid.Cid, error) { - // Load the block headers that exist. - incomplete := make(map[cid.Cid]struct{}) - tip, err := gsf.loadTipHeaders(ctx, key, incomplete) - if err != nil { - return block.UndefTipSet, nil, err - } - if len(incomplete) == 0 { - return tip, nil, nil - } - incompleteArr := make([]cid.Cid, 0, len(incomplete)) - for cid := range incomplete { - incompleteArr = append(incompleteArr, cid) - } - return block.UndefTipSet, incompleteArr, nil -} - -// Loads the IPLD blocks for all blocks in a tipset, and checks for the presence of the -// message list structures in the store. -// Returns the tipset if complete. Otherwise it returns UndefTipSet and the CIDs of -// all blocks missing either their header or messages. -func (gsf *GraphSyncFetcher) loadAndVerifyFullBlock(ctx context.Context, key block.TipSetKey) (block.TipSet, []cid.Cid, error) { - // Load the block headers that exist. 
- incomplete := make(map[cid.Cid]struct{}) - tip, err := gsf.loadTipHeaders(ctx, key, incomplete) - if err != nil { - return block.UndefTipSet, nil, err - } - - err = gsf.loadAndVerifySubComponents(ctx, tip, incomplete, - func(meta types.TxMeta) cid.Cid { - return meta.SecpRoot.Cid - }, - func(rawBlock blocks.Block) error { - messages := []*types.SignedMessage{} - - err := gsf.loadAndProcessAMTData(ctx, rawBlock.Cid(), func(msgBlock blocks.Block) error { - var message types.SignedMessage - if err := encoding.Decode(msgBlock.RawData(), &message); err != nil { - return errors.Wrapf(err, "could not decode secp message (cid %s)", msgBlock.Cid()) - } - if err := gsf.validator.ValidateSignedMessageSyntax(ctx, &message); err != nil { - return errors.Wrapf(err, "invalid syntax for secp message (cid %s)", msgBlock.Cid()) - } - messages = append(messages, &message) - return nil - }) - if err != nil { - return err - } - - return nil - }) - if err != nil { - return block.UndefTipSet, nil, err - } - - err = gsf.loadAndVerifySubComponents(ctx, tip, incomplete, - func(meta types.TxMeta) cid.Cid { - return meta.BLSRoot.Cid - }, - func(rawBlock blocks.Block) error { - messages := []*types.UnsignedMessage{} - - err := gsf.loadAndProcessAMTData(ctx, rawBlock.Cid(), func(msgBlock blocks.Block) error { - var message types.UnsignedMessage - if err := encoding.Decode(msgBlock.RawData(), &message); err != nil { - return errors.Wrapf(err, "could not decode bls message (cid %s)", msgBlock.Cid()) - } - if err := gsf.validator.ValidateUnsignedMessageSyntax(ctx, &message); err != nil { - return errors.Wrapf(err, "invalid syntax for bls message (cid %s)", msgBlock.Cid()) - } - - messages = append(messages, &message) - return nil - }) - if err != nil { - return err - } - - return nil - }) - - // TODO #3312 we should validate these messages in the same way we validate blocks - if err != nil { - return block.UndefTipSet, nil, err - } - - if len(incomplete) > 0 { - incompleteArr := make([]cid.Cid, 
0, len(incomplete)) - for cid := range incomplete { - incompleteArr = append(incompleteArr, cid) - } - return block.UndefTipSet, incompleteArr, nil - } - - return tip, nil, nil -} - -// loadAndProcessAMTData processes data loaded from an AMT that is stored in the fetcher's datastore. -func (gsf *GraphSyncFetcher) loadAndProcessAMTData(ctx context.Context, c cid.Cid, processFn func(b blocks.Block) error) error { - as := cbor.NewCborStore(gsf.store) - - a, err := amt.LoadAMT(ctx, as, c) - if err != nil { - if err == bstore.ErrNotFound { - return err - } - return errors.Wrapf(err, "fetched data (cid %s) could not be decoded as an AMT", c.String()) - } - - return a.ForEach(ctx, func(index uint64, deferred *typegen.Deferred) error { - var c cid.Cid - if err := cbor.DecodeInto(deferred.Raw, &c); err != nil { - return errors.Wrapf(err, "cid from amt could not be decoded as a Cid (index %d)", index) - } - - ok, err := gsf.store.Has(c) - if err != nil { - return errors.Wrapf(err, "could not retrieve secp message from blockstore (cid %s)", c) - } - - if !ok { - return bstore.ErrNotFound - } - - rawMsg, err := gsf.store.Get(c) - if err != nil { - return errors.Wrapf(err, "could not retrieve secp message from blockstore (cid %s)", c) - } - - if err := processFn(rawMsg); err != nil { - return errors.Wrapf(err, "could not decode secp message (cid %s)", c) - } - - return nil - }) -} - -func (gsf *GraphSyncFetcher) loadTxMeta(c cid.Cid) (types.TxMeta, error) { - rawMetaBlock, err := gsf.store.Get(c) - if err != nil { - return types.TxMeta{}, err - } - var ret types.TxMeta - err = encoding.Decode(rawMetaBlock.RawData(), &ret) - if err != nil { - return types.TxMeta{}, err - } - return ret, nil -} - -// Loads and validates the block headers for a tipset. Returns the tipset if complete, -// else the cids of blocks which are not yet stored. 
-func (gsf *GraphSyncFetcher) loadTipHeaders(ctx context.Context, key block.TipSetKey, incomplete map[cid.Cid]struct{}) (block.TipSet, error) { - rawBlocks := make([]blocks.Block, 0, key.Len()) - for it := key.Iter(); !it.Complete(); it.Next() { - hasBlock, err := gsf.store.Has(it.Value()) - if err != nil { - return block.UndefTipSet, err - } - if !hasBlock { - incomplete[it.Value()] = struct{}{} - continue - } - rawBlock, err := gsf.store.Get(it.Value()) - if err != nil { - return block.UndefTipSet, err - } - rawBlocks = append(rawBlocks, rawBlock) - } - - // Validate the headers. - validatedBlocks, err := sanitizeBlocks(ctx, rawBlocks, gsf.validator) - if err != nil || len(validatedBlocks) == 0 { - return block.UndefTipSet, err - } - tip, err := block.NewTipSet(validatedBlocks...) - return tip, err -} - -type getMetaComponentFn func(types.TxMeta) cid.Cid -type verifyComponentFn func(blocks.Block) error - -// Loads and validates the block messages for a tipset. Returns the tipset if complete, -// else the cids of blocks which are not yet stored. -func (gsf *GraphSyncFetcher) loadAndVerifySubComponents(ctx context.Context, - tip block.TipSet, - incomplete map[cid.Cid]struct{}, - getMetaComponent getMetaComponentFn, - verifyComponent verifyComponentFn) error { - subComponents := make([]blocks.Block, 0, tip.Len()) - - // Check that nested structures are also stored, recording any that are missing as incomplete. 
- for i := 0; i < tip.Len(); i++ { - blk := tip.At(i) - - meta, err := gsf.loadTxMeta(blk.Messages.Cid) - if err == bstore.ErrNotFound { - // exit early as we can't load anything else without txmeta - incomplete[blk.Cid()] = struct{}{} - continue - } - if err != nil { - return err - } - - link := getMetaComponent(meta) - ok, err := gsf.store.Has(link) - if err != nil { - return err - } - if !ok { - incomplete[blk.Cid()] = struct{}{} - continue - } - rawBlock, err := gsf.store.Get(link) - if err != nil { - return err - } - subComponents = append(subComponents, rawBlock) - } - - for _, rawBlock := range subComponents { - err := verifyComponent(rawBlock) - if err != nil { - // If this is a not found error, this simply means we failed to fetch some information. - // Mark this block as incomplete, but don't fail. - if err == bstore.ErrNotFound { - incomplete[rawBlock.Cid()] = struct{}{} - return nil - } - return err - } - } - - return nil -} - -type requestPeerFinder struct { - peerTracker graphsyncFallbackPeerTracker - currentPeer peer.ID - triedPeers map[peer.ID]struct{} -} - -func newRequestPeerFinder(peerTracker graphsyncFallbackPeerTracker, fetchFromSelf bool) (*requestPeerFinder, error) { - pri := &requestPeerFinder{ - peerTracker: peerTracker, - triedPeers: make(map[peer.ID]struct{}), - } - - // If the new cid triggering this request came from ourselves then - // the first peer to request from should be ourselves. 
- if fetchFromSelf { - pri.triedPeers[peerTracker.Self()] = struct{}{} - pri.currentPeer = peerTracker.Self() - return pri, nil - } - - // Get a peer ID from the peer tracker - err := pri.FindNextPeer() - if err != nil { - return nil, err - } - return pri, nil -} - -func (pri *requestPeerFinder) CurrentPeer() peer.ID { - return pri.currentPeer -} - -func (pri *requestPeerFinder) FindNextPeer() error { - chains := pri.peerTracker.List() - for _, chain := range chains { - if _, tried := pri.triedPeers[chain.Sender]; !tried { - pri.triedPeers[chain.Sender] = struct{}{} - pri.currentPeer = chain.Sender - return nil - } - } - return fmt.Errorf("Unable to find any untried peers") -} - -func sanitizeBlocks(ctx context.Context, unsanitized []blocks.Block, validator consensus.BlockSyntaxValidator) ([]*block.Block, error) { - var blocks []*block.Block - for _, u := range unsanitized { - block, err := block.DecodeBlock(u.RawData()) - if err != nil { - return nil, errors.Wrapf(err, "fetched data (cid %s) was not a block", u.Cid().String()) - } - - if err := validator.ValidateSyntax(ctx, block); err != nil { - return nil, errors.Wrapf(err, "invalid block %s", block.Cid()) - } - - blocks = append(blocks, block) - } - return blocks, nil -} diff --git a/internal/pkg/chainsync/fetcher/graphsync_fetcher_test.go b/internal/pkg/chainsync/fetcher/graphsync_fetcher_test.go deleted file mode 100644 index 992a0644d6..0000000000 --- a/internal/pkg/chainsync/fetcher/graphsync_fetcher_test.go +++ /dev/null @@ -1,972 +0,0 @@ -package fetcher_test - -import ( - "bytes" - "context" - "fmt" - "io" - "reflect" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - "github.com/ipfs/go-graphsync" - graphsyncimpl "github.com/ipfs/go-graphsync/impl" - gsnet "github.com/ipfs/go-graphsync/network" - gsstoreutil "github.com/ipfs/go-graphsync/storeutil" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor 
"github.com/ipfs/go-ipld-cbor" - format "github.com/ipfs/go-ipld-format" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector" - selectorbuilder "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/libp2p/go-libp2p-core/peer" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/fetcher" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/discovery" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -const visitsPerBlock = 18 - -type notDecodable struct { - _ struct{} `cbor:",toarray"` - Num int `json:"num"` - Message string `json:"message"` -} - -func init() { - encoding.RegisterIpldCborType(notDecodable{}) -} - -func TestGraphsyncFetcher(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - bs := bstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())) - fc, chainClock := clock.NewFakeChain(1234567890, 5*time.Second, time.Second, time.Now().Unix()) - bv := consensus.NewDefaultBlockValidator(chainClock, nil, nil) - msgV := &consensus.FakeMessageValidator{} - syntax 
:= consensus.WrappedSyntaxValidator{ - BlockSyntaxValidator: bv, - MessageSyntaxValidator: msgV, - } - - pid0 := th.RequireIntPeerID(t, 0) - builder := chain.NewBuilderWithDeps(t, address.Undef, &chain.FakeStateBuilder{}, chain.NewClockTimestamper(chainClock)) - keys := types.MustGenerateKeyInfo(2, 42) - mm := vm.NewMessageMaker(t, keys) - notDecodableBytes, err := encoding.Encode(notDecodable{Num: 5, Message: "applesauce"}) - require.NoError(t, err) - notDecodableBlock, err := cbor.Decode(notDecodableBytes, constants.DefaultHashFunction, -1) - require.NoError(t, err) - - alice := mm.Addresses()[0] - bob := mm.Addresses()[1] - - ssb := selectorbuilder.NewSelectorSpecBuilder(basicnode.Style.Any) - - amtSelector := ssb.ExploreIndex(2, - ssb.ExploreRecursive(selector.RecursionLimitDepth(10), - ssb.ExploreUnion( - ssb.ExploreIndex(1, ssb.ExploreAll(ssb.ExploreRecursiveEdge())), - ssb.ExploreIndex(2, ssb.ExploreAll(ssb.Matcher()))))) - - layer1Selector, err := ssb.ExploreIndex(block.IndexMessagesField, - ssb.ExploreRange(0, 2, amtSelector), - ).Selector() - - require.NoError(t, err) - - recursiveSelector := func(levels int) selector.Selector { - s, err := ssb.ExploreRecursive(selector.RecursionLimitDepth(levels), ssb.ExploreIndex(block.IndexParentsField, - ssb.ExploreUnion( - ssb.ExploreAll( - ssb.ExploreIndex(block.IndexMessagesField, - ssb.ExploreRange(0, 2, amtSelector), - )), - ssb.ExploreIndex(0, ssb.ExploreRecursiveEdge()), - ))).Selector() - require.NoError(t, err) - return s - } - msgStore := chain.NewMessageStore(bs) - - pid1 := th.RequireIntPeerID(t, 1) - pid2 := th.RequireIntPeerID(t, 2) - - doneAt := func(tsKey block.TipSetKey) func(block.TipSet) (bool, error) { - return func(ts block.TipSet) (bool, error) { - if ts.Key().Equals(tsKey) { - return true, nil - } - return false, nil - } - } - withMessageBuilder := func(b *chain.BlockBuilder) { - b.AddMessages( - []*types.SignedMessage{mm.NewSignedMessage(alice, 1)}, - 
[]*types.UnsignedMessage{&mm.NewSignedMessage(bob, 1).Message}, - ) - } - withMessageEachBuilder := func(b *chain.BlockBuilder, i int) { - withMessageBuilder(b) - } - - verifyMessagesFetched := func(t *testing.T, ts block.TipSet) { - for i := 0; i < ts.Len(); i++ { - blk := ts.At(i) - - // use fetcher blockstore to retrieve messages - secpMsgs, blsMsgs, err := msgStore.LoadMessages(ctx, blk.Messages.Cid) - require.NoError(t, err) - - // get expected messages from builders block store - expectedSecpMessages, expectedBLSMsgs, err := builder.LoadMessages(ctx, blk.Messages.Cid) - require.NoError(t, err) - - require.True(t, reflect.DeepEqual(secpMsgs, expectedSecpMessages)) - require.True(t, reflect.DeepEqual(blsMsgs, expectedBLSMsgs)) - } - } - - loader := successLoader(ctx, builder) - t.Run("happy path returns correct tipsets", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildOn(gen, 3, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - mgs := newMockableGraphsync(ctx, bs, fc, t) - mgs.stubResponseWithLoader(pid0, layer1Selector, loader, final.Key().ToSlice()...) 
- mgs.stubResponseWithLoader(pid0, recursiveSelector(1), loader, final.At(0).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0)) - done := doneAt(gen.Key()) - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - require.NoError(t, err, "the request completes successfully") - mgs.verifyReceivedRequestCount(4) - require.Equal(t, 2, len(ts), "the right number of tipsets is returned") - require.True(t, final.Key().Equals(ts[0].Key()), "the initial tipset is correct") - require.True(t, gen.Key().Equals(ts[1].Key()), "the remaining tipsets are correct") - }) - - t.Run("initial request fails on a block but fallback peer succeeds", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildOn(gen, 3, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - chain1 := block.NewChainInfo(pid1, pid1, final.Key(), height) - chain2 := block.NewChainInfo(pid2, pid2, final.Key(), height) - pt := newFakePeerTracker(chain0, chain1, chain2) - - mgs := newMockableGraphsync(ctx, bs, fc, t) - pid0Loader := errorOnCidsLoader(loader, final.At(1).Cid(), final.At(2).Cid()) - pid1Loader := errorOnCidsLoader(loader, final.At(2).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, pid0Loader, final.Key().ToSlice()...) 
- mgs.expectRequestToRespondWithLoader(pid1, layer1Selector, pid1Loader, final.At(1).Cid(), final.At(2).Cid()) - mgs.expectRequestToRespondWithLoader(pid2, layer1Selector, loader, final.At(2).Cid()) - mgs.expectRequestToRespondWithLoader(pid2, recursiveSelector(1), loader, final.At(0).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, pt) - - done := doneAt(gen.Key()) - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - require.NoError(t, err, "the request completes successfully") - mgs.verifyReceivedRequestCount(7) - mgs.verifyExpectations() - require.Equal(t, 2, len(ts), "the right number of tipsets is returned") - require.True(t, final.Key().Equals(ts[0].Key()), "the initial tipset is correct") - require.True(t, gen.Key().Equals(ts[1].Key()), "the remaining tipsets are correct") - }) - - t.Run("initial request fails and no other peers succeed", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildOn(gen, 3, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - chain1 := block.NewChainInfo(pid1, pid1, final.Key(), height) - chain2 := block.NewChainInfo(pid2, pid2, final.Key(), height) - pt := newFakePeerTracker(chain0, chain1, chain2) - mgs := newMockableGraphsync(ctx, bs, fc, t) - errorLoader := errorOnCidsLoader(loader, final.At(1).Cid(), final.At(2).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, errorLoader, final.Key().ToSlice()...) 
- mgs.expectRequestToRespondWithLoader(pid1, layer1Selector, errorLoader, final.At(1).Cid(), final.At(2).Cid()) - mgs.expectRequestToRespondWithLoader(pid2, layer1Selector, errorLoader, final.At(1).Cid(), final.At(2).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, pt) - - done := doneAt(gen.Key()) - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - mgs.verifyReceivedRequestCount(7) - mgs.verifyExpectations() - require.EqualError(t, err, fmt.Sprintf("fetching tipset: %s: Unable to find any untried peers", final.Key().String())) - require.Nil(t, ts) - }) - - t.Run("requests fails because blocks are present but are missing messages", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildOn(gen, 3, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - mgs := newMockableGraphsync(ctx, bs, fc, t) - final2Meta, err := builder.LoadTxMeta(ctx, final.At(2).Messages.Cid) - require.NoError(t, err) - errorOnMessagesLoader := errorOnCidsLoader(loader, final2Meta.SecpRoot.Cid) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, errorOnMessagesLoader, final.Key().ToSlice()...) 
- - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0)) - - done := doneAt(gen.Key()) - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - mgs.verifyReceivedRequestCount(3) - mgs.verifyExpectations() - require.EqualError(t, err, fmt.Sprintf("fetching tipset: %s: Unable to find any untried peers", final.Key().String())) - require.Nil(t, ts) - }) - - t.Run("partial response fail during recursive fetch recovers at fail point", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildManyOn(5, gen, withMessageBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - chain1 := block.NewChainInfo(pid1, pid1, final.Key(), height) - chain2 := block.NewChainInfo(pid2, pid2, final.Key(), height) - pt := newFakePeerTracker(chain0, chain1, chain2) - - blocks := make([]*block.Block, 4) // in fetch order - prev := final.At(0) - for i := 0; i < 4; i++ { - parent := prev.Parents.Iter().Value() - prev, err = builder.GetBlock(ctx, parent) - require.NoError(t, err) - blocks[i] = prev - } - - mgs := newMockableGraphsync(ctx, bs, fc, t) - pid0Loader := errorOnCidsLoader(loader, blocks[3].Cid()) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, pid0Loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(1), pid0Loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(4), pid0Loader, blocks[0].Cid()) - mgs.expectRequestToRespondWithLoader(pid1, recursiveSelector(4), loader, blocks[2].Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, pt) - - done := func(ts block.TipSet) (bool, error) { - if ts.Key().Equals(gen.Key()) { - return true, nil - } - return false, nil - } - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - require.NoError(t, err, "the request completes successfully") - mgs.verifyReceivedRequestCount(4) - 
mgs.verifyExpectations() - require.Equal(t, 6, len(ts), "the right number of tipsets is returned") - expectedTs := final - for _, resultTs := range ts { - require.True(t, expectedTs.Key().Equals(resultTs.Key()), "the initial tipset is correct") - key, err := expectedTs.Parents() - require.NoError(t, err) - if !key.Empty() { - expectedTs, err = builder.GetTipSet(key) - require.NoError(t, err) - } - } - }) - - t.Run("missing single block in multi block tip during recursive fetch", func(t *testing.T) { - gen := builder.NewGenesis() - multi := builder.BuildOn(gen, 3, withMessageEachBuilder) - penultimate := builder.BuildManyOn(3, multi, withMessageBuilder) - final := builder.BuildOn(penultimate, 1, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - mgs := newMockableGraphsync(ctx, bs, fc, t) - errorInMultiBlockLoader := errorOnCidsLoader(loader, multi.At(1).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, errorInMultiBlockLoader, final.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(1), errorInMultiBlockLoader, final.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(4), errorInMultiBlockLoader, penultimate.At(0).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0)) - done := doneAt(gen.Key()) - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - mgs.verifyReceivedRequestCount(3) - mgs.verifyExpectations() - require.EqualError(t, err, fmt.Sprintf("fetching tipset: %s: Unable to find any untried peers", multi.Key().String())) - require.Nil(t, ts) - }) - - t.Run("missing single block in multi block tip during recursive fetch, recover through fallback", func(t *testing.T) { - gen := builder.NewGenesis() - multi := builder.BuildOn(gen, 3, withMessageEachBuilder) - withMultiParent := builder.BuildOn(multi, 1, withMessageEachBuilder) - 
penultimate := builder.BuildManyOn(2, withMultiParent, withMessageBuilder) - final := builder.BuildOn(penultimate, 1, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - chain1 := block.NewChainInfo(pid1, pid1, final.Key(), height) - chain2 := block.NewChainInfo(pid2, pid2, final.Key(), height) - - mgs := newMockableGraphsync(ctx, bs, fc, t) - errorInMultiBlockLoader := errorOnCidsLoader(loader, multi.At(1).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, errorInMultiBlockLoader, final.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(1), errorInMultiBlockLoader, final.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(4), errorInMultiBlockLoader, penultimate.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid1, recursiveSelector(4), loader, withMultiParent.At(0).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0, chain1, chain2)) - done := doneAt(gen.Key()) - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - require.NoError(t, err, "the request completes successfully") - mgs.verifyReceivedRequestCount(4) - mgs.verifyExpectations() - require.Equal(t, 6, len(ts), "the right number of tipsets is returned") - expectedTs := final - for _, resultTs := range ts { - require.True(t, expectedTs.Key().Equals(resultTs.Key()), "the initial tipset is correct") - key, err := expectedTs.Parents() - require.NoError(t, err) - if !key.Empty() { - expectedTs, err = builder.GetTipSet(key) - require.NoError(t, err) - } - } - }) - - t.Run("stopping at edge heights in recursive fetch", func(t *testing.T) { - gen := builder.NewGenesis() - recursive16stop := builder.BuildManyOn(1, gen, withMessageBuilder) - recursive16middle := builder.BuildManyOn(15, recursive16stop, withMessageBuilder) - recursive4stop := builder.BuildManyOn(1, recursive16middle, 
withMessageBuilder) - recursive4middle := builder.BuildManyOn(3, recursive4stop, withMessageBuilder) - recursive1stop := builder.BuildManyOn(1, recursive4middle, withMessageBuilder) - final := builder.BuildOn(recursive1stop, 1, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - nextKey := final.Key() - for i := 1; i <= 22; i++ { - tipset, err := builder.GetTipSet(nextKey) - require.NoError(t, err) - mgs := newMockableGraphsync(ctx, bs, fc, t) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, loader, final.At(0).Cid()) - receivedRequestCount := 1 - if i > 1 { - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(1), loader, final.At(0).Cid()) - receivedRequestCount++ - } - if i > 2 { - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(4), loader, recursive1stop.At(0).Cid()) - receivedRequestCount++ - } - if i > 6 { - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(16), loader, recursive4stop.At(0).Cid()) - receivedRequestCount++ - } - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0)) - done := doneAt(tipset.Key()) - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - require.NoError(t, err, "the request completes successfully") - mgs.verifyReceivedRequestCount(receivedRequestCount) - mgs.verifyExpectations() - - require.Equal(t, i, len(ts), "the right number of tipsets is returned") - lastTs := ts[len(ts)-1] - verifyMessagesFetched(t, lastTs) - - nextKey, err = tipset.Parents() - require.NoError(t, err) - } - }) - - t.Run("value returned with non block format", func(t *testing.T) { - mgs := newMockableGraphsync(ctx, bs, fc, t) - - key := block.NewTipSetKey(notDecodableBlock.Cid()) - chain0 := block.NewChainInfo(pid0, pid0, key, 0) - notDecodableLoader := simpleLoader([]format.Node{notDecodableBlock}) - mgs.stubResponseWithLoader(pid0, layer1Selector, notDecodableLoader, 
notDecodableBlock.Cid()) - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0)) - - done := doneAt(key) - ts, err := fetcher.FetchTipSets(ctx, key, pid0, done) - require.Error(t, err) - assert.Contains(t, err.Error(), fmt.Sprintf("fetched data (cid %s) was not a block: cbor: cannot unmarshal", notDecodableBlock.Cid().String())) - require.Nil(t, ts) - }) - - t.Run("block returned with invalid syntax", func(t *testing.T) { - mgs := newMockableGraphsync(ctx, bs, fc, t) - blk := simpleBlock() - blk.Height = 1 - blk.Timestamp = uint64(chainClock.StartTimeOfEpoch(blk.Height).Unix()) - key := block.NewTipSetKey(blk.Cid()) - chain0 := block.NewChainInfo(pid0, pid0, key, blk.Height) - invalidSyntaxLoader := simpleLoader([]format.Node{blk.ToNode()}) - mgs.stubResponseWithLoader(pid0, layer1Selector, invalidSyntaxLoader, blk.Cid()) - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0)) - done := doneAt(key) - ts, err := fetcher.FetchTipSets(ctx, key, pid0, done) - require.EqualError(t, err, fmt.Sprintf("invalid block %s: block %s has nil miner address", blk.Cid().String(), blk.Cid().String())) - require.Nil(t, ts) - }) - - t.Run("blocks present but messages don't decode", func(t *testing.T) { - mgs := newMockableGraphsync(ctx, bs, fc, t) - blk := requireSimpleValidBlock(t, 3, address.Undef) - metaCid, err := msgStore.StoreTxMeta(ctx, types.TxMeta{SecpRoot: e.NewCid(notDecodableBlock.Cid()), BLSRoot: e.NewCid(types.EmptyMessagesCID)}) - require.NoError(t, err) - blk.Messages = e.NewCid(metaCid) - key := block.NewTipSetKey(blk.Cid()) - chain0 := block.NewChainInfo(pid0, pid0, key, blk.Height) - nd, err := (&types.SignedMessage{}).ToNode() - require.NoError(t, err) - notDecodableLoader := simpleLoader([]format.Node{blk.ToNode(), notDecodableBlock, nd}) - mgs.stubResponseWithLoader(pid0, layer1Selector, notDecodableLoader, blk.Cid()) - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, 
newFakePeerTracker(chain0)) - - done := doneAt(key) - ts, err := fetcher.FetchTipSets(ctx, key, pid0, done) - require.EqualError(t, err, fmt.Sprintf("fetched data (cid %s) could not be decoded as an AMT: cbor input had wrong number of fields", notDecodableBlock.Cid().String())) - require.Nil(t, ts) - }) - - t.Run("messages don't validate", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildOn(gen, 1, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - mgs := newMockableGraphsync(ctx, bs, fc, t) - mgs.stubResponseWithLoader(pid0, layer1Selector, loader, final.Key().ToSlice()...) - - errorMv := mockSyntaxValidator{ - validateMessagesError: fmt.Errorf("Everything Failed"), - } - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, errorMv, fc, newFakePeerTracker(chain0)) - done := doneAt(gen.Key()) - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - require.Nil(t, ts) - require.Error(t, err, "invalid messages for for message collection (cid %s)", final.At(0).Messages.String()) - }) - - t.Run("hangup occurs during first layer fetch but recovers through fallback", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildOn(gen, 3, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - chain1 := block.NewChainInfo(pid1, pid1, final.Key(), height) - chain2 := block.NewChainInfo(pid2, pid2, final.Key(), height) - pt := newFakePeerTracker(chain0, chain1, chain2) - - mgs := newMockableGraphsync(ctx, bs, fc, t) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithHangupAfter(pid0, layer1Selector, loader, 0, final.At(1).Cid(), final.At(2).Cid()) - mgs.expectRequestToRespondWithLoader(pid1, layer1Selector, loader, final.At(1).Cid()) - 
mgs.expectRequestToRespondWithHangupAfter(pid1, layer1Selector, loader, 0, final.At(2).Cid()) - mgs.expectRequestToRespondWithLoader(pid2, layer1Selector, loader, final.At(2).Cid()) - mgs.expectRequestToRespondWithLoader(pid2, recursiveSelector(1), loader, final.At(0).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, pt) - done := doneAt(gen.Key()) - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - require.NoError(t, err, "the request completes successfully") - mgs.verifyReceivedRequestCount(7) - mgs.verifyExpectations() - require.Equal(t, 2, len(ts), "the right number of tipsets is returned") - require.True(t, final.Key().Equals(ts[0].Key()), "the initial tipset is correct") - require.True(t, gen.Key().Equals(ts[1].Key()), "the remaining tipsets are correct") - }) - - t.Run("initial request hangs up and no other peers succeed", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildOn(gen, 3, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - chain1 := block.NewChainInfo(pid1, pid1, final.Key(), height) - chain2 := block.NewChainInfo(pid2, pid2, final.Key(), height) - pt := newFakePeerTracker(chain0, chain1, chain2) - mgs := newMockableGraphsync(ctx, bs, fc, t) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithHangupAfter(pid0, layer1Selector, loader, 0, final.At(1).Cid(), final.At(2).Cid()) - mgs.expectRequestToRespondWithHangupAfter(pid1, layer1Selector, loader, 0, final.At(1).Cid(), final.At(2).Cid()) - mgs.expectRequestToRespondWithHangupAfter(pid2, layer1Selector, loader, 0, final.At(1).Cid(), final.At(2).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, pt) - done := doneAt(gen.Key()) - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - - mgs.verifyReceivedRequestCount(7) - 
mgs.verifyExpectations() - require.EqualError(t, err, fmt.Sprintf("fetching tipset: %s: Unable to find any untried peers", final.Key().String())) - require.Nil(t, ts) - }) - - t.Run("partial response hangs up during recursive fetch recovers at hang up point", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildManyOn(5, gen, withMessageBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - chain1 := block.NewChainInfo(pid1, pid1, final.Key(), height) - chain2 := block.NewChainInfo(pid2, pid2, final.Key(), height) - pt := newFakePeerTracker(chain0, chain1, chain2) - - blocks := make([]*block.Block, 4) // in fetch order - prev := final.At(0) - for i := 0; i < 4; i++ { - parent := prev.Parents.Iter().Value() - prev, err = builder.GetBlock(ctx, parent) - require.NoError(t, err) - blocks[i] = prev - } - - mgs := newMockableGraphsync(ctx, bs, fc, t) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(1), loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithHangupAfter(pid0, recursiveSelector(4), loader, 2*visitsPerBlock, blocks[0].Cid()) - mgs.expectRequestToRespondWithLoader(pid1, recursiveSelector(4), loader, blocks[2].Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, pt) - - done := func(ts block.TipSet) (bool, error) { - if ts.Key().Equals(gen.Key()) { - return true, nil - } - return false, nil - } - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - - require.NoError(t, err, "the request completes successfully") - mgs.verifyReceivedRequestCount(4) - mgs.verifyExpectations() - require.Equal(t, 6, len(ts), "the right number of tipsets is returned") - expectedTs := final - for _, resultTs := range ts { - require.True(t, expectedTs.Key().Equals(resultTs.Key()), "the initial tipset is correct") - key, err := expectedTs.Parents() 
- require.NoError(t, err) - if !key.Empty() { - expectedTs, err = builder.GetTipSet(key) - require.NoError(t, err) - } - } - }) - - t.Run("hangs up on single block in multi block tip during recursive fetch", func(t *testing.T) { - gen := builder.NewGenesis() - multi := builder.BuildOn(gen, 3, withMessageEachBuilder) - penultimate := builder.BuildManyOn(3, multi, withMessageBuilder) - final := builder.BuildOn(penultimate, 1, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - mgs := newMockableGraphsync(ctx, bs, fc, t) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(1), loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithHangupAfter(pid0, recursiveSelector(4), loader, 2*visitsPerBlock, penultimate.At(0).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0)) - done := doneAt(gen.Key()) - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - - mgs.verifyReceivedRequestCount(3) - mgs.verifyExpectations() - require.EqualError(t, err, fmt.Sprintf("fetching tipset: %s: Unable to find any untried peers", multi.Key().String())) - require.Nil(t, ts) - }) - - t.Run("hangs up on single block in multi block tip during recursive fetch, recover through fallback", func(t *testing.T) { - gen := builder.NewGenesis() - multi := builder.BuildOn(gen, 3, withMessageEachBuilder) - withMultiParent := builder.BuildOn(multi, 1, withMessageEachBuilder) - penultimate := builder.BuildManyOn(2, withMultiParent, withMessageBuilder) - final := builder.BuildOn(penultimate, 1, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - chain1 := block.NewChainInfo(pid1, pid1, final.Key(), height) - chain2 := block.NewChainInfo(pid2, pid2, 
final.Key(), height) - - mgs := newMockableGraphsync(ctx, bs, fc, t) - mgs.expectRequestToRespondWithLoader(pid0, layer1Selector, loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid0, recursiveSelector(1), loader, final.At(0).Cid()) - mgs.expectRequestToRespondWithHangupAfter(pid0, recursiveSelector(4), loader, 2*visitsPerBlock, penultimate.At(0).Cid()) - mgs.expectRequestToRespondWithLoader(pid1, recursiveSelector(4), loader, withMultiParent.At(0).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0, chain1, chain2)) - done := doneAt(gen.Key()) - - ts, err := fetcher.FetchTipSets(ctx, final.Key(), pid0, done) - - require.NoError(t, err, "the request completes successfully") - mgs.verifyReceivedRequestCount(4) - mgs.verifyExpectations() - require.Equal(t, 6, len(ts), "the right number of tipsets is returned") - expectedTs := final - for _, resultTs := range ts { - require.True(t, expectedTs.Key().Equals(resultTs.Key()), "the initial tipset is correct") - key, err := expectedTs.Parents() - require.NoError(t, err) - if !key.Empty() { - expectedTs, err = builder.GetTipSet(key) - require.NoError(t, err) - } - } - }) -} - -func TestHeadersOnlyGraphsyncFetch(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - bs := bstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())) - fc := clock.NewFake(time.Now()) - genTime := uint64(1234567890) - chainClock := clock.NewChainClockFromClock(genTime, 5*time.Second, time.Second, fc) - bv := consensus.NewDefaultBlockValidator(chainClock, nil, nil) - msgV := &consensus.FakeMessageValidator{} - syntax := consensus.WrappedSyntaxValidator{ - BlockSyntaxValidator: bv, - MessageSyntaxValidator: msgV, - } - pid0 := th.RequireIntPeerID(t, 0) - builder := chain.NewBuilderWithDeps(t, address.Undef, &chain.FakeStateBuilder{}, chain.NewClockTimestamper(chainClock)) - keys := types.MustGenerateKeyInfo(1, 42) - mm := vm.NewMessageMaker(t, keys) - 
notDecodableBlock, err := cbor.WrapObject(notDecodable{Num: 5, Message: "applebutter"}, constants.DefaultHashFunction, -1) - require.NoError(t, err) - - alice := mm.Addresses()[0] - - ssb := selectorbuilder.NewSelectorSpecBuilder(basicnode.Style.Any) - layer1Selector, err := ssb.Matcher().Selector() - require.NoError(t, err) - - recursiveSelector := func(levels int) selector.Selector { - s, err := ssb.ExploreRecursive(selector.RecursionLimitDepth(levels), ssb.ExploreIndex(block.IndexParentsField, - ssb.ExploreUnion( - ssb.ExploreAll( - ssb.Matcher(), - ), - ssb.ExploreIndex(0, ssb.ExploreRecursiveEdge()), - ))).Selector() - require.NoError(t, err) - return s - } - - doneAt := func(tsKey block.TipSetKey) func(block.TipSet) (bool, error) { - return func(ts block.TipSet) (bool, error) { - if ts.Key().Equals(tsKey) { - return true, nil - } - return false, nil - } - } - withMessageBuilder := func(b *chain.BlockBuilder) { - b.AddMessages( - []*types.SignedMessage{mm.NewSignedMessage(alice, 1)}, - []*types.UnsignedMessage{}, - ) - } - withMessageEachBuilder := func(b *chain.BlockBuilder, i int) { - withMessageBuilder(b) - } - - verifyNoMessages := func(t *testing.T, ts block.TipSet) { - for i := 0; i < ts.Len(); i++ { - blk := ts.At(i) - stored, err := bs.Has(blk.Messages.Cid) - require.NoError(t, err) - require.False(t, stored) - } - } - - t.Run("happy path returns correct tipsets", func(t *testing.T) { - gen := builder.NewGenesis() - final := builder.BuildOn(gen, 3, withMessageEachBuilder) - height, err := final.Height() - require.NoError(t, err) - chain0 := block.NewChainInfo(pid0, pid0, final.Key(), height) - mgs := newMockableGraphsync(ctx, bs, fc, t) - loader := successHeadersLoader(ctx, builder) - mgs.stubResponseWithLoader(pid0, layer1Selector, loader, final.Key().ToSlice()...) 
- mgs.stubResponseWithLoader(pid0, recursiveSelector(1), loader, final.At(0).Cid()) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0)) - done := doneAt(gen.Key()) - - ts, err := fetcher.FetchTipSetHeaders(ctx, final.Key(), pid0, done) - require.NoError(t, err, "the request completes successfully") - mgs.verifyReceivedRequestCount(4) - require.Equal(t, 2, len(ts), "the right number of tipsets is returned") - require.True(t, final.Key().Equals(ts[0].Key()), "the initial tipset is correct") - require.True(t, gen.Key().Equals(ts[1].Key()), "the remaining tipsets are correct") - verifyNoMessages(t, ts[0]) - verifyNoMessages(t, ts[1]) - }) - - t.Run("fetch succeeds when messages don't decode", func(t *testing.T) { - mgs := newMockableGraphsync(ctx, bs, fc, t) - blk := requireSimpleValidBlock(t, 3, address.Undef) - metaCid, err := builder.StoreTxMeta(ctx, types.TxMeta{SecpRoot: e.NewCid(notDecodableBlock.Cid()), BLSRoot: e.NewCid(types.EmptyMessagesCID)}) - require.NoError(t, err) - blk.Messages = e.NewCid(metaCid) - key := block.NewTipSetKey(blk.Cid()) - chain0 := block.NewChainInfo(pid0, pid0, key, blk.Height) - nd, err := (&types.SignedMessage{}).ToNode() - require.NoError(t, err) - notDecodableLoader := simpleLoader([]format.Node{blk.ToNode(), notDecodableBlock, nd}) - mgs.stubResponseWithLoader(pid0, layer1Selector, notDecodableLoader, blk.Cid()) - fetcher := fetcher.NewGraphSyncFetcher(ctx, mgs, bs, syntax, fc, newFakePeerTracker(chain0)) - - done := doneAt(key) - ts, err := fetcher.FetchTipSetHeaders(ctx, key, pid0, done) - assert.NoError(t, err) - require.Equal(t, 1, len(ts)) - assert.NoError(t, err) - assert.Equal(t, key, ts[0].Key()) - }) -} - -func TestRealWorldGraphsyncFetchOnlyHeaders(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - // setup a chain - fc, chainClock := clock.NewFakeChain(1234567890, 5*time.Second, time.Second, time.Now().Unix()) - builder := chain.NewBuilderWithDeps(t, 
address.Undef, &chain.FakeStateBuilder{}, chain.NewClockTimestamper(chainClock)) - keys := types.MustGenerateKeyInfo(2, 42) - mm := vm.NewMessageMaker(t, keys) - alice := mm.Addresses()[0] - bob := mm.Addresses()[1] - gen := builder.NewGenesis() - - // count > 64 force multiple layers in amts - messageCount := uint64(100) - - secpMessages := make([]*types.SignedMessage, messageCount) - blsMessages := make([]*types.UnsignedMessage, messageCount) - for i := uint64(0); i < messageCount; i++ { - secpMessages[i] = mm.NewSignedMessage(alice, i) - blsMessages[i] = &mm.NewSignedMessage(bob, i).Message - } - - tipCount := 32 - final := builder.BuildManyOn(tipCount, gen, func(b *chain.BlockBuilder) { - b.AddMessages(secpMessages, blsMessages) - }) - - // setup network - mn := mocknet.New(ctx) - - host1, err := mn.GenPeer() - if err != nil { - t.Fatal("error generating host") - } - host2, err := mn.GenPeer() - if err != nil { - t.Fatal("error generating host") - } - err = mn.LinkAll() - if err != nil { - t.Fatal("error linking hosts") - } - - gsnet1 := gsnet.NewFromLibp2pHost(host1) - - // setup receiving peer to just record message coming in - gsnet2 := gsnet.NewFromLibp2pHost(host2) - - // setup a graphsync fetcher and a graphsync responder - - bs := bstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())) - - bv := consensus.NewDefaultBlockValidator(chainClock, nil, nil) - msgV := &consensus.FakeMessageValidator{} - syntax := consensus.WrappedSyntaxValidator{BlockSyntaxValidator: bv, - MessageSyntaxValidator: msgV, - } - pt := discovery.NewPeerTracker(peer.ID("")) - pt.Track(block.NewChainInfo(host2.ID(), host2.ID(), block.TipSetKey{}, 0)) - - localLoader := gsstoreutil.LoaderForBlockstore(bs) - localStorer := gsstoreutil.StorerForBlockstore(bs) - - localGraphsync := graphsyncimpl.New(ctx, gsnet1, localLoader, localStorer) - - fetcher := fetcher.NewGraphSyncFetcher(ctx, localGraphsync, bs, syntax, fc, pt) - - remoteLoader := func(lnk ipld.Link, lnkCtx 
ipld.LinkContext) (io.Reader, error) { - cid := lnk.(cidlink.Link).Cid - b, err := builder.GetBlockstoreValue(ctx, cid) - if err != nil { - return nil, err - } - return bytes.NewBuffer(b.RawData()), nil - } - graphsyncimpl.New(ctx, gsnet2, remoteLoader, nil) - - tipsets, err := fetcher.FetchTipSetHeaders(ctx, final.Key(), host2.ID(), func(ts block.TipSet) (bool, error) { - if ts.Key().Equals(gen.Key()) { - return true, nil - } - return false, nil - }) - require.NoError(t, err) - - require.Equal(t, tipCount+1, len(tipsets)) - - // Check the headers are in the store. - // Check that the messages and receipts are NOT in the store. - expectedTips := builder.RequireTipSets(final.Key(), tipCount+1) - for _, ts := range expectedTips { - stored, err := bs.Has(ts.At(0).Cid()) - require.NoError(t, err) - assert.True(t, stored) - - stored, err = bs.Has(ts.At(0).Messages.Cid) - require.NoError(t, err) - assert.False(t, stored) - - stored, err = bs.Has(ts.At(0).MessageReceipts.Cid) - require.NoError(t, err) - assert.False(t, stored) - } -} - -func TestRealWorldGraphsyncFetchAcrossNetwork(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - // setup a chain - builder := chain.NewBuilder(t, address.Undef) - keys := types.MustGenerateKeyInfo(1, 42) - mm := vm.NewMessageMaker(t, keys) - alice := mm.Addresses()[0] - gen := builder.NewGenesis() - i := uint64(0) - tipCount := 32 - final := builder.BuildManyOn(tipCount, gen, func(b *chain.BlockBuilder) { - b.AddMessages( - []*types.SignedMessage{mm.NewSignedMessage(alice, i)}, - []*types.UnsignedMessage{}, - ) - }) - - // setup network - mn := mocknet.New(ctx) - - host1, err := mn.GenPeer() - if err != nil { - t.Fatal("error generating host") - } - host2, err := mn.GenPeer() - if err != nil { - t.Fatal("error generating host") - } - err = mn.LinkAll() - if err != nil { - t.Fatal("error linking hosts") - } - - gsnet1 := gsnet.NewFromLibp2pHost(host1) - - // setup receiving peer to just record message coming in - 
gsnet2 := gsnet.NewFromLibp2pHost(host2) - - // setup a graphsync fetcher and a graphsync responder - - bs := bstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())) - bv := th.NewFakeBlockValidator() - msgV := &consensus.FakeMessageValidator{} - syntax := consensus.WrappedSyntaxValidator{ - BlockSyntaxValidator: bv, - MessageSyntaxValidator: msgV, - } - fc := clock.NewFake(time.Now()) - pt := discovery.NewPeerTracker(peer.ID("")) - pt.Track(block.NewChainInfo(host2.ID(), host2.ID(), block.TipSetKey{}, 0)) - - localLoader := gsstoreutil.LoaderForBlockstore(bs) - localStorer := gsstoreutil.StorerForBlockstore(bs) - - localGraphsync := graphsyncimpl.New(ctx, gsnet1, localLoader, localStorer) - gsFetcher := fetcher.NewGraphSyncFetcher(ctx, localGraphsync, bs, syntax, fc, pt) - - remoteLoader := func(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) { - cid := lnk.(cidlink.Link).Cid - node, err := tryBlockstoreValue(ctx, builder, cid) - if err != nil { - return nil, err - } - return bytes.NewBuffer(node.RawData()), nil - } - otherGraphsync := graphsyncimpl.New(ctx, gsnet2, remoteLoader, nil, graphsyncimpl.RejectAllRequestsByDefault()) - otherGraphsync.RegisterIncomingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) { - _, has := requestData.Extension(fetcher.ChainsyncProtocolExtension) - if has { - hookActions.ValidateRequest() - } - }) - tipsets, err := gsFetcher.FetchTipSets(ctx, final.Key(), host2.ID(), func(ts block.TipSet) (bool, error) { - if ts.Key().Equals(gen.Key()) { - return true, nil - } - return false, nil - }) - require.NoError(t, err) - - require.Equal(t, tipCount+1, len(tipsets)) - - // Check the headers and messages structures are in the store. 
- expectedTips := builder.RequireTipSets(final.Key(), tipCount+1) - for _, ts := range expectedTips { - stored, err := bs.Has(ts.At(0).Cid()) - require.NoError(t, err) - assert.True(t, stored) - - rawMeta, err := bs.Get(ts.At(0).Messages.Cid) - require.NoError(t, err) - var meta types.TxMeta - err = encoding.Decode(rawMeta.RawData(), &meta) - require.NoError(t, err) - - stored, err = bs.Has(meta.SecpRoot.Cid) - require.NoError(t, err) - assert.True(t, stored) - } -} diff --git a/internal/pkg/chainsync/fetcher/graphsync_helpers_test.go b/internal/pkg/chainsync/fetcher/graphsync_helpers_test.go deleted file mode 100644 index 866c8e91ec..0000000000 --- a/internal/pkg/chainsync/fetcher/graphsync_helpers_test.go +++ /dev/null @@ -1,480 +0,0 @@ -package fetcher_test - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "reflect" - "testing" - "time" - - "github.com/filecoin-project/go-address" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-graphsync" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - format "github.com/ipfs/go-ipld-format" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -// fakeRequest captures the parameters necessary to uniquely -// identify a graphsync request -type fakeRequest struct { - p peer.ID - root ipld.Link - selector selector.Selector -} - -// fakeResponse represents the necessary data to simulate a graphsync query -// a graphsync query has: -// - two return values: -// - a channel of ResponseProgress -// - a channel of errors -// - one side effect: -// - blocks written to a block store -// when graphsync is called for a matching request, -// -- the responses array is converted to a channel -// -- the error array is converted to a channel -// -- a blks array is written to mock graphsync block store -type fakeResponse struct { - responses []graphsync.ResponseProgress - errs []error - blks []format.Node - hangupAfter int -} - -// String serializes the errs and blks field to a printable string for debug -func (fr fakeResponse) String() string { - errStr := "" - for _, err := range fr.errs { - if err != nil { - errStr += err.Error() - } else { - errStr += fmt.Sprintf("") - } - } - blkStr := "" - for _, blk := range fr.blks { - if blk == nil { - blkStr += fmt.Sprintf("") - } else { - blkStr += fmt.Sprintf("cid: %s, raw data: %x\n", blk.Cid(), blk.RawData()) - } - } - return fmt.Sprintf("ipld nodes: %s\nerrs: %s\n\n", blkStr, errStr) -} - -const noHangup = -1 - -// request response just records a request and the respond to send when its -// made for a stub -type requestResponse struct { - request fakeRequest - response fakeResponse -} - -// hungRequest represents a request that has hung, pending a timeout -// causing a cancellation, which will in turn close the channels -type hungRequest struct { - ctx context.Context - responseChan chan graphsync.ResponseProgress - errChan chan error -} - -// mockableGraphsync conforms to the graphsync exchange interface needed by -// the graphsync fetcher but will only send stubbed responses -type mockableGraphsync struct { - 
clock clock.Fake - hungRequests []*hungRequest - incomingHungRequest chan *hungRequest - requestsToProcess chan struct{} - ctx context.Context - stubs []requestResponse - expectedRequests []fakeRequest - receivedRequests []fakeRequest - store bstore.Blockstore - t *testing.T -} - -func (mgs *mockableGraphsync) stubString() string { - stubStr := "" - for _, reqResp := range mgs.stubs { - stubStr += reqResp.response.String() - } - return stubStr -} - -func newMockableGraphsync(ctx context.Context, store bstore.Blockstore, clock clock.Fake, t *testing.T) *mockableGraphsync { - mgs := &mockableGraphsync{ - ctx: ctx, - incomingHungRequest: make(chan *hungRequest), - requestsToProcess: make(chan struct{}, 1), - store: store, - clock: clock, - t: t, - } - go mgs.processHungRequests() - return mgs -} - -// processHungRequests handles requests that hangup, by advancing the clock until -// the fetcher cancels those requests, which then causes the channels to close -func (mgs *mockableGraphsync) processHungRequests() { - for { - select { - case hungRequest := <-mgs.incomingHungRequest: - mgs.hungRequests = append(mgs.hungRequests, hungRequest) - select { - case mgs.requestsToProcess <- struct{}{}: - default: - } - case <-mgs.requestsToProcess: - var newHungRequests []*hungRequest - for _, hungRequest := range mgs.hungRequests { - select { - case <-hungRequest.ctx.Done(): - close(hungRequest.errChan) - close(hungRequest.responseChan) - default: - newHungRequests = append(newHungRequests, hungRequest) - } - } - mgs.hungRequests = newHungRequests - if len(mgs.hungRequests) > 0 { - mgs.clock.Advance(15 * time.Second) - select { - case mgs.requestsToProcess <- struct{}{}: - default: - } - } - case <-mgs.ctx.Done(): - return - } - } -} - -// expect request will record a given set of requests as "expected", which can -// then be verified against received requests in verify expectations -func (mgs *mockableGraphsync) expectRequest(pid peer.ID, s selector.Selector, cids ...cid.Cid) { 
- for _, c := range cids { - mgs.expectedRequests = append(mgs.expectedRequests, fakeRequest{pid, cidlink.Link{Cid: c}, s}) - } -} - -// verifyReceivedRequestCount will fail a test if the expected number of requests were not received -func (mgs *mockableGraphsync) verifyReceivedRequestCount(n int) { - require.Equal(mgs.t, n, len(mgs.receivedRequests), "correct number of graphsync requests were made") -} - -// verifyExpectations will fail a test if all expected requests were not received -func (mgs *mockableGraphsync) verifyExpectations() { - for _, expectedRequest := range mgs.expectedRequests { - matchedRequest := false - for _, receivedRequest := range mgs.receivedRequests { - if reflect.DeepEqual(expectedRequest, receivedRequest) { - matchedRequest = true - break - } - } - require.True(mgs.t, matchedRequest, "expected request was made for peer %s, cid %s", expectedRequest.p.String(), expectedRequest.root.String()) - } -} - -// stubResponseWithLoader stubs a response when the mocked graphsync -// instance is called with the given peer, selector, one of the cids -// by executing the specified root and selector using the given cid loader -func (mgs *mockableGraphsync) stubResponseWithLoader(pid peer.ID, s selector.Selector, loader mockGraphsyncLoader, cids ...cid.Cid) { - for _, c := range cids { - mgs.stubSingleResponseWithLoader(pid, s, loader, noHangup, c) - } -} - -// stubResponseWithHangupAfter stubs a response when the mocked graphsync -// instance is called with the given peer, selector, one of the cids -// by executing the specified root and selector using the given cid loader -// however the response will hangup at stop sending on the channel after N -// responses -func (mgs *mockableGraphsync) stubResponseWithHangupAfter(pid peer.ID, s selector.Selector, loader mockGraphsyncLoader, hangup int, cids ...cid.Cid) { - for _, c := range cids { - mgs.stubSingleResponseWithLoader(pid, s, loader, hangup, c) - } -} - -var ( - errHangup = errors.New("Hangup") -) - 
-// stubResponseWithLoader stubs a response when the mocked graphsync -// instance is called with the given peer, selector, and cid -// by executing the specified root and selector using the given cid loader -func (mgs *mockableGraphsync) stubSingleResponseWithLoader(pid peer.ID, s selector.Selector, loader mockGraphsyncLoader, hangup int, c cid.Cid) { - var blks []format.Node - var responses []graphsync.ResponseProgress - - linkLoader := func(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) { - cid := lnk.(cidlink.Link).Cid - node, err := loader(cid) - if err != nil { - return nil, err - } - blks = append(blks, node) - return bytes.NewBuffer(node.RawData()), nil - } - root := cidlink.Link{Cid: c} - nb := basicnode.Style.Any.NewBuilder() - err := root.Load(mgs.ctx, ipld.LinkContext{}, nb, linkLoader) - if err != nil { - mgs.stubs = append(mgs.stubs, requestResponse{ - fakeRequest{pid, root, s}, - fakeResponse{errs: []error{err}, hangupAfter: hangup}, - }) - return - } - node := nb.Build() - visited := 0 - visitor := func(tp traversal.Progress, n ipld.Node, tr traversal.VisitReason) error { - if hangup != noHangup && visited >= hangup { - return errHangup - } - visited++ - responses = append(responses, graphsync.ResponseProgress{Node: n, Path: tp.Path, LastBlock: tp.LastBlock}) - return nil - } - err = traversal.Progress{ - Cfg: &traversal.Config{ - Ctx: mgs.ctx, - LinkLoader: linkLoader, - LinkTargetNodeStyleChooser: func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodeStyle, error) { - return basicnode.Style.Any, nil - }, - }, - }.WalkAdv(node, s, visitor) - if err == errHangup { - err = nil - } - mgs.stubs = append(mgs.stubs, requestResponse{ - fakeRequest{pid, root, s}, - fakeResponse{responses, []error{err}, blks, hangup}, - }) -} - -// expectRequestToRespondWithLoader is just a combination of an expectation and a stub -- -// it expects the request to come in and responds with the given loader -func (mgs *mockableGraphsync) 
expectRequestToRespondWithLoader(pid peer.ID, s selector.Selector, loader mockGraphsyncLoader, cids ...cid.Cid) { - mgs.expectRequest(pid, s, cids...) - mgs.stubResponseWithLoader(pid, s, loader, cids...) -} - -// expectRequestToRespondWithHangupAfter is just a combination of an expectation and a stub -- -// it expects the request to come in and responds with the given loader, but hangup after -// the given number of responses -func (mgs *mockableGraphsync) expectRequestToRespondWithHangupAfter(pid peer.ID, s selector.Selector, loader mockGraphsyncLoader, hangup int, cids ...cid.Cid) { - mgs.expectRequest(pid, s, cids...) - mgs.stubResponseWithHangupAfter(pid, s, loader, hangup, cids...) -} - -func (mgs *mockableGraphsync) processResponse(ctx context.Context, mr fakeResponse) (<-chan graphsync.ResponseProgress, <-chan error) { - for _, block := range mr.blks { - requireBlockStorePut(mgs.t, mgs.store, block) - } - - errChan := make(chan error, len(mr.errs)) - for _, err := range mr.errs { - errChan <- err - } - responseChan := make(chan graphsync.ResponseProgress, len(mr.responses)) - for _, response := range mr.responses { - responseChan <- response - } - - if mr.hangupAfter == noHangup { - close(errChan) - close(responseChan) - } else { - mgs.incomingHungRequest <- &hungRequest{ctx, responseChan, errChan} - } - - return responseChan, errChan -} - -func (mgs *mockableGraphsync) Request(ctx context.Context, p peer.ID, root ipld.Link, selectorSpec ipld.Node, extensions ...graphsync.ExtensionData) (<-chan graphsync.ResponseProgress, <-chan error) { - parsed, err := selector.ParseSelector(selectorSpec) - if err != nil { - return mgs.processResponse(ctx, fakeResponse{nil, []error{fmt.Errorf("invalid selector")}, nil, noHangup}) - } - request := fakeRequest{p, root, parsed} - mgs.receivedRequests = append(mgs.receivedRequests, request) - for _, stub := range mgs.stubs { - if reflect.DeepEqual(stub.request, request) { - return mgs.processResponse(ctx, stub.response) - } - 
} - return mgs.processResponse(ctx, fakeResponse{nil, []error{fmt.Errorf("unexpected request")}, nil, noHangup}) -} - -type fakePeerTracker struct { - peers []*block.ChainInfo -} - -func newFakePeerTracker(cis ...*block.ChainInfo) *fakePeerTracker { - return &fakePeerTracker{ - peers: cis, - } -} - -func (fpt *fakePeerTracker) List() []*block.ChainInfo { - return fpt.peers -} - -func (fpt *fakePeerTracker) Self() peer.ID { - return peer.ID("") -} - -func requireBlockStorePut(t *testing.T, bs bstore.Blockstore, data format.Node) { - err := bs.Put(data) - require.NoError(t, err) -} - -func simpleBlock() *block.Block { - return &block.Block{ - ParentWeight: fbig.Zero(), - Parents: block.NewTipSetKey(), - Height: 0, - StateRoot: e.NewCid(types.EmptyMessagesCID), - Messages: e.NewCid(types.EmptyTxMetaCID), - MessageReceipts: e.NewCid(types.EmptyReceiptsCID), - BlockSig: &crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: []byte{}}, - BLSAggregateSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte{}}, - } -} - -func requireSimpleValidBlock(t *testing.T, nonce uint64, miner address.Address) *block.Block { - b := simpleBlock() - ticket := block.Ticket{} - ticket.VRFProof = make([]byte, binary.Size(nonce)) - binary.BigEndian.PutUint64(ticket.VRFProof, nonce) - b.Ticket = ticket - bytes, err := cbor.DumpObject("null") - require.NoError(t, err) - rawRoot, err := cid.Prefix{ - Version: 1, - Codec: cid.DagCBOR, - MhType: constants.DefaultHashFunction, - MhLength: -1, - }.Sum(bytes) - require.NoError(t, err) - b.StateRoot = e.NewCid(rawRoot) - b.Miner = miner - return b -} - -type mockSyntaxValidator struct { - validateMessagesError error - validateReceiptsError error -} - -func (mv mockSyntaxValidator) ValidateSyntax(ctx context.Context, blk *block.Block) error { - return nil -} - -func (mv mockSyntaxValidator) ValidateSignedMessageSyntax(ctx context.Context, message *types.SignedMessage) error { - return mv.validateMessagesError -} - -func (mv mockSyntaxValidator) 
ValidateUnsignedMessageSyntax(ctx context.Context, message *types.UnsignedMessage) error { - return nil -} - -func (mv mockSyntaxValidator) ValidateReceiptsSyntax(ctx context.Context, receipts []vm.MessageReceipt) error { - return mv.validateReceiptsError -} - -// blockAndMessageProvider is any interface that can load blocks, messages, AND -// message receipts (such as a chain builder) -type blockAndMessageProvider interface { - GetBlockstoreValue(ctx context.Context, c cid.Cid) (blocks.Block, error) -} - -func tryBlockstoreValue(ctx context.Context, f blockAndMessageProvider, c cid.Cid) (format.Node, error) { - b, err := f.GetBlockstoreValue(ctx, c) - if err != nil { - return nil, err - } - - return cbor.DecodeBlock(b) -} - -func tryBlockNode(ctx context.Context, f chain.BlockProvider, c cid.Cid) (format.Node, error) { - if block, err := f.GetBlock(ctx, c); err == nil { - return block.ToNode(), nil - } - return nil, fmt.Errorf("cid could not be resolved through builder") -} - -// mockGraphsyncLoader is a function that loads cids into ipld.Nodes (or errors), -// used to construct a mock query result against a CID and a selector -type mockGraphsyncLoader func(cid.Cid) (format.Node, error) - -// successLoader will load any cids returned by the given block and message provider -// or error otherwise -func successLoader(ctx context.Context, provider blockAndMessageProvider) mockGraphsyncLoader { - return func(cidToLoad cid.Cid) (format.Node, error) { - return tryBlockstoreValue(ctx, provider, cidToLoad) - } -} - -// successHeadersLoader will load any cids returned by the given block -// provider or error otherwise. 
-func successHeadersLoader(ctx context.Context, provider chain.BlockProvider) mockGraphsyncLoader { - return func(cidToLoad cid.Cid) (format.Node, error) { - return tryBlockNode(ctx, provider, cidToLoad) - } -} - -// errorOnCidsLoader will override a base loader to error for the specified cids -// or otherwise return the results from the base loader -func errorOnCidsLoader(baseLoader mockGraphsyncLoader, errorOnCids ...cid.Cid) mockGraphsyncLoader { - return func(cidToLoad cid.Cid) (format.Node, error) { - for _, testCid := range errorOnCids { - if cidToLoad.Equals(testCid) { - return nil, fmt.Errorf("Everything failed") - } - } - return baseLoader(cidToLoad) - } -} - -// simple loader loads cids from a simple array of nodes -func simpleLoader(store []format.Node) mockGraphsyncLoader { - cidsToNodes := make(map[cid.Cid]format.Node, len(store)) - for _, node := range store { - cidsToNodes[node.Cid()] = node - } - return func(cidToLoad cid.Cid) (format.Node, error) { - node, has := cidsToNodes[cidToLoad] - if !has { - return nil, fmt.Errorf("Everything failed") - } - return node, nil - } -} diff --git a/internal/pkg/chainsync/internal/dispatcher/dispatcher.go b/internal/pkg/chainsync/internal/dispatcher/dispatcher.go deleted file mode 100644 index 88914a72ae..0000000000 --- a/internal/pkg/chainsync/internal/dispatcher/dispatcher.go +++ /dev/null @@ -1,433 +0,0 @@ -package dispatcher - -import ( - "container/heap" - "context" - "runtime/debug" - - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/util/moresync" -) - -var log = logging.Logger("chainsync.dispatcher") - -// DefaultInQueueSize is the size of the channel used for receiving targets from producers. 
-const DefaultInQueueSize = 5 - -// DefaultWorkQueueSize is the size of the work queue -const DefaultWorkQueueSize = 20 - -// MaxEpochGap is the maximum number of epochs chainsync can fall behind -// before catching up -const MaxEpochGap = 10 - -// dispatchSyncer is the interface of the logic syncing incoming chains -type dispatchSyncer interface { - HandleNewTipSet(context.Context, *block.ChainInfo, bool) error -} - -type transitionSyncer interface { - SetStagedHead(context.Context) error -} - -// chainHeadState is the interface for determining the head of the chain -type chainHeadState interface { - GetHead() block.TipSetKey - GetTipSet(block.TipSetKey) (block.TipSet, error) -} - -// NewDispatcher creates a new syncing dispatcher with default queue sizes. -func NewDispatcher(catchupSyncer dispatchSyncer, trans Transitioner) *Dispatcher { - return NewDispatcherWithSizes(catchupSyncer, trans, DefaultWorkQueueSize, DefaultInQueueSize) -} - -// NewDispatcherWithSizes creates a new syncing dispatcher. -func NewDispatcherWithSizes(syncer dispatchSyncer, trans Transitioner, workQueueSize, inQueueSize int) *Dispatcher { - return &Dispatcher{ - workQueue: NewTargetQueue(), - workQueueSize: workQueueSize, - syncer: syncer, - transitioner: trans, - incoming: make(chan Target, inQueueSize), - control: make(chan interface{}, 1), - registeredCb: func(t Target, err error) {}, - } -} - -// cbMessage registers a user callback to be fired following every successful -// sync. -type cbMessage struct { - cb func(Target, error) -} - -// Dispatcher receives, sorts and dispatches targets to the catchupSyncer to control -// chain syncing. -// -// New targets arrive over the incoming channel. The dispatcher then puts them -// into the workQueue which sorts them by their claimed chain height. The -// dispatcher pops the highest priority target from the queue and then attempts -// to sync the target using its internal catchupSyncer. -// -// The dispatcher has a simple control channel. 
It reads this for external -// controls. Currently there is only one kind of control message. It registers -// a callback that the dispatcher will call after every non-erroring sync. -type Dispatcher struct { - // workQueue is a priority queue of target chain heads that should be - // synced - workQueue *TargetQueue - workQueueSize int - // incoming is the queue of incoming sync targets to the dispatcher. - incoming chan Target - // syncer is used for dispatching sync targets for chain heads to sync - // local chain state to these targets. - syncer dispatchSyncer - - // catchup is true when the syncer is in catchup mode - catchup bool - // transitioner wraps logic for transitioning between catchup and follow states. - transitioner Transitioner - - // registeredCb is a callback registered over the control channel. It - // is called after every successful sync. - registeredCb func(Target, error) - // control is a queue of control messages not yet processed. - control chan interface{} - - // syncTargetCount counts the number of successful syncs. - syncTargetCount uint64 -} - -// SendHello handles chain information from bootstrap peers. -func (d *Dispatcher) SendHello(ci *block.ChainInfo) error { - return d.enqueue(ci) -} - -// SendOwnBlock handles chain info from a node's own mining system -func (d *Dispatcher) SendOwnBlock(ci *block.ChainInfo) error { - return d.enqueue(ci) -} - -// SendGossipBlock handles chain info from new blocks sent on pubsub -func (d *Dispatcher) SendGossipBlock(ci *block.ChainInfo) error { - return d.enqueue(ci) -} - -func (d *Dispatcher) enqueue(ci *block.ChainInfo) error { - d.incoming <- Target{ChainInfo: *ci} - return nil -} - -// Start launches the business logic for the syncing subsystem. 
-func (d *Dispatcher) Start(syncingCtx context.Context) { - go func() { - defer func() { - log.Errorf("exiting") - if r := recover(); r != nil { - log.Errorf("panic: %v", r) - debug.PrintStack() - } - }() - - var last *Target - for { - // Handle shutdown - select { - case <-syncingCtx.Done(): - log.Infof("context done") - return - default: - } - - // Handle control signals - select { - case ctrl := <-d.control: - log.Debugf("processing control: %v", ctrl) - d.processCtrl(ctrl) - default: - } - - // Handle incoming targets - var ws []Target - if last != nil { - ws = append(ws, *last) - last = nil - } - select { - case first := <-d.incoming: - ws = append(ws, first) - ws = append(ws, d.drainIncoming()...) - log.Debugf("received %d incoming targets: %v", len(ws), ws) - default: - } - catchup, err := d.transitioner.MaybeTransitionToCatchup(d.catchup, ws) - if err != nil { - log.Errorf("state update error from reading chain head %s", err) - } else { - d.catchup = catchup - } - for i, syncTarget := range ws { - // Drop targets we don't have room for - if d.workQueue.Len() >= d.workQueueSize { - log.Infof("not enough space for %d targets on work queue", len(ws)-i) - break - } - // Sort new targets by putting on work queue. 
- d.workQueue.Push(syncTarget) - } - - // Check for work to do - log.Debugf("processing work queue of %d", d.workQueue.Len()) - syncTarget, popped := d.workQueue.Pop() - if popped { - log.Debugf("processing %v", syncTarget) - // Do work - err := d.syncer.HandleNewTipSet(syncingCtx, &syncTarget.ChainInfo, d.catchup) - log.Debugf("finished processing %v", syncTarget) - if err != nil { - log.Infof("failed sync of %v (catchup=%t): %s", &syncTarget.ChainInfo, d.catchup, err) - } - d.syncTargetCount++ - d.registeredCb(syncTarget, err) - follow, err := d.transitioner.MaybeTransitionToFollow(syncingCtx, d.catchup, d.workQueue.Len()) - if err != nil { - log.Errorf("state update error setting head %s", err) - } else { - d.catchup = !follow - log.Debugf("catchup state: %v", d.catchup) - } - } else { - // No work left, block until something shows up - log.Debugf("drained work queue, waiting") - select { - case extra := <-d.incoming: - log.Debugf("stopped waiting, received %v", extra) - last = &extra - } - } - } - }() -} - -func (d *Dispatcher) drainIncoming() []Target { - // drainProduced reads all values within the incoming channel buffer at time - // of calling without blocking. It reads at most incomingBufferSize. - // - // Note: this relies on a single reader of the incoming channel to - // avoid blocking. - n := len(d.incoming) - produced := make([]Target, n) - for i := 0; i < n; i++ { - produced[i] = <-d.incoming - } - return produced -} - -// RegisterCallback registers a callback on the dispatcher that -// will fire after every successful target sync. 
-func (d *Dispatcher) RegisterCallback(cb func(Target, error)) { - d.control <- cbMessage{cb: cb} -} - -// WaiterForTarget returns a function that will block until the dispatcher -// processes the given target and returns the error produced by that targer -func (d *Dispatcher) WaiterForTarget(waitKey block.TipSetKey) func() error { - processed := moresync.NewLatch(1) - var syncErr error - d.RegisterCallback(func(t Target, err error) { - if t.ChainInfo.Head.Equals(waitKey) { - syncErr = err - processed.Done() - } - }) - return func() error { - processed.Wait() - return syncErr - } -} -func (d *Dispatcher) processCtrl(ctrlMsg interface{}) { - // processCtrl takes a control message, determines its type, and performs the - // specified action. - // - // Using interfaces is overkill for now but is the way to make this - // extensible. (Delete this comment if we add more than one control) - switch typedMsg := ctrlMsg.(type) { - case cbMessage: - d.registeredCb = typedMsg.cb - default: - // We don't know this type, log and ignore - log.Info("dispatcher control can not handle type %T", typedMsg) - } -} - -// Target tracks a logical request of the syncing subsystem to run a -// syncing job against given inputs. -type Target struct { - block.ChainInfo -} - -// Transitioner determines whether the caller should move between catchup and -// follow states. -type Transitioner interface { - MaybeTransitionToCatchup(bool, []Target) (bool, error) - MaybeTransitionToFollow(context.Context, bool, int) (bool, error) - TransitionChannel() chan bool -} - -// GapTransitioner changes state based on the detection of gaps between the -// local head and syncing targets. -type GapTransitioner struct { - // headState is used to determine the head tipset height for switching - // measuring gaps. - headState chainHeadState - // headSetter sets the chain head to the internal staged value. 
- headSetter transitionSyncer - // transitionCh emits true when transitioning to catchup and false - // when transitioning to follow - transitionCh chan bool -} - -// NewGapTransitioner returns a new gap transitioner -func NewGapTransitioner(headState chainHeadState, headSetter transitionSyncer) *GapTransitioner { - return &GapTransitioner{ - headState: headState, - headSetter: headSetter, - transitionCh: make(chan bool, 0), - } -} - -// MaybeTransitionToCatchup returns true if the state is already catchup, or if -// it should transition from follow to catchup. Undefined on error. -func (gt *GapTransitioner) MaybeTransitionToCatchup(inCatchup bool, targets []Target) (bool, error) { - if inCatchup { - return true, nil - } - // current head height - head, err := gt.headState.GetTipSet(gt.headState.GetHead()) - if err != nil { - return false, err - } - headHeight, err := head.Height() - if err != nil { - return false, err - } - - // transition from follow to catchup if incoming targets have gaps - // Note: we run this check even on targets we may drop - for _, target := range targets { - if target.Height > headHeight+MaxEpochGap { - gt.transitionCh <- true - return true, nil - } - } - return false, nil -} - -// MaybeTransitionToFollow returns true if the state is already follow, or if -// it should transition from catchup to follow. Undefined on error. -func (gt *GapTransitioner) MaybeTransitionToFollow(ctx context.Context, inCatchup bool, outstandingTargets int) (bool, error) { - if !inCatchup { - return true, nil - } - - // transition from catchup to follow if the work queue is empty. 
- // this is safe -- all gap conditions cause syncing to enter catchup - // this is pessimistic -- gap conditions could be gone before we transition - if outstandingTargets == 0 { - gt.transitionCh <- false - // set staging to head on transition catchup --> follow - return true, gt.headSetter.SetStagedHead(ctx) - } - - return false, nil -} - -// TransitionChannel returns a channel emitting transition flags. -func (gt *GapTransitioner) TransitionChannel() chan bool { - return gt.transitionCh -} - -// TargetQueue orders dispatcher syncRequests by the underlying `targetQueue`'s -// prioritization policy. -// -// It also filters the `targetQueue` so that it always contains targets with -// unique chain heads. -// -// It wraps the `targetQueue` to prevent panics during -// normal operation. -type TargetQueue struct { - q targetQueue - targetSet map[string]struct{} -} - -// NewTargetQueue returns a new target queue. -func NewTargetQueue() *TargetQueue { - rq := make(targetQueue, 0) - heap.Init(&rq) - return &TargetQueue{ - q: rq, - targetSet: make(map[string]struct{}), - } -} - -// Push adds a sync target to the target queue. -func (tq *TargetQueue) Push(t Target) { - // If already in queue drop quickly - if _, inQ := tq.targetSet[t.ChainInfo.Head.String()]; inQ { - return - } - heap.Push(&tq.q, t) - tq.targetSet[t.ChainInfo.Head.String()] = struct{}{} - return -} - -// Pop removes and returns the highest priority syncing target. If there is -// nothing in the queue the second argument returns false -func (tq *TargetQueue) Pop() (Target, bool) { - if tq.Len() == 0 { - return Target{}, false - } - req := heap.Pop(&tq.q).(Target) - popKey := req.ChainInfo.Head.String() - delete(tq.targetSet, popKey) - return req, true -} - -// Len returns the number of targets in the queue. -func (tq *TargetQueue) Len() int { - return tq.q.Len() -} - -// targetQueue orders targets by a policy. -// -// The current simple policy is to order syncing requests by claimed chain -// height. 
-// -// `targetQueue` can panic so it shouldn't be used unwrapped -type targetQueue []Target - -// Heavily inspired by https://golang.org/pkg/container/heap/ -func (rq targetQueue) Len() int { return len(rq) } - -func (rq targetQueue) Less(i, j int) bool { - // We want Pop to give us the highest priority so we use greater than - return rq[i].Height > rq[j].Height -} - -func (rq targetQueue) Swap(i, j int) { - rq[i], rq[j] = rq[j], rq[i] -} - -func (rq *targetQueue) Push(x interface{}) { - syncReq := x.(Target) - *rq = append(*rq, syncReq) -} - -func (rq *targetQueue) Pop() interface{} { - old := *rq - n := len(old) - item := old[n-1] - *rq = old[0 : n-1] - return item -} diff --git a/internal/pkg/chainsync/internal/dispatcher/dispatcher_test.go b/internal/pkg/chainsync/internal/dispatcher/dispatcher_test.go deleted file mode 100644 index 2ba0b0f006..0000000000 --- a/internal/pkg/chainsync/internal/dispatcher/dispatcher_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package dispatcher_test - -import ( - "context" - "strconv" - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/internal/dispatcher" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/util/moresync" -) - -type mockSyncer struct { - headsCalled []block.TipSetKey -} - -type noopTransitioner struct{} - -func (nt *noopTransitioner) MaybeTransitionToCatchup(inCatchup bool, _ []dispatcher.Target) (bool, error) { - return inCatchup, nil -} -func (nt *noopTransitioner) MaybeTransitionToFollow(_ context.Context, inCatchup bool, _ int) (bool, error) { - return !inCatchup, nil -} -func (nt *noopTransitioner) TransitionChannel() chan bool { - return 
nil -} - -func (fs *mockSyncer) HandleNewTipSet(_ context.Context, ci *block.ChainInfo, _ bool) error { - fs.headsCalled = append(fs.headsCalled, ci.Head) - return nil -} - -func TestDispatchStartHappy(t *testing.T) { - tf.UnitTest(t) - s := &mockSyncer{ - headsCalled: make([]block.TipSetKey, 0), - } - nt := &noopTransitioner{} - testDispatch := dispatcher.NewDispatcher(s, nt) - - cis := []*block.ChainInfo{ - // We need to put these in priority order to avoid a race. - // If we send 0 before 42, it is possible the dispatcher will - // pick up 0 and start processing before it sees 42. - chainInfoFromHeight(t, 42), - chainInfoFromHeight(t, 16), - chainInfoFromHeight(t, 3), - chainInfoFromHeight(t, 2), - chainInfoFromHeight(t, 0), - } - - testDispatch.Start(context.Background()) - - // set up a blocking channel and register to unblock after 5 synced - allDone := moresync.NewLatch(5) - testDispatch.RegisterCallback(func(t dispatcher.Target, _ error) { allDone.Done() }) - - // receive requests before Start() to test deterministic order - go func() { - for _, ci := range cis { - assert.NoError(t, testDispatch.SendHello(ci)) - } - }() - allDone.Wait() - - // check that the mockSyncer synced in order - require.Equal(t, 5, len(s.headsCalled)) - for i := range cis { - assert.Equal(t, cis[i].Head, s.headsCalled[i]) - } -} - -func TestDispatcherDropsWhenFull(t *testing.T) { - tf.UnitTest(t) - s := &mockSyncer{ - headsCalled: make([]block.TipSetKey, 0), - } - nt := &noopTransitioner{} - testWorkSize := 20 - testBufferSize := 30 - testDispatch := dispatcher.NewDispatcherWithSizes(s, nt, testWorkSize, testBufferSize) - - finished := moresync.NewLatch(1) - testDispatch.RegisterCallback(func(target dispatcher.Target, _ error) { - // Fail if the work that should be dropped gets processed - assert.False(t, target.Height == 100) - assert.False(t, target.Height == 101) - assert.False(t, target.Height == 102) - if target.Height == 0 { - // 0 has lowest priority of non-dropped - 
finished.Done() - } - }) - for j := 0; j < testWorkSize; j++ { - ci := chainInfoFromHeight(t, j) - assert.NoError(t, testDispatch.SendHello(ci)) - } - // Should be dropped - assert.NoError(t, testDispatch.SendHello(chainInfoFromHeight(t, 100))) - assert.NoError(t, testDispatch.SendHello(chainInfoFromHeight(t, 101))) - assert.NoError(t, testDispatch.SendHello(chainInfoFromHeight(t, 102))) - - testDispatch.Start(context.Background()) - - finished.Wait() -} - -func TestQueueHappy(t *testing.T) { - tf.UnitTest(t) - testQ := dispatcher.NewTargetQueue() - - // Add syncRequests out of order - sR0 := dispatcher.Target{ChainInfo: *(chainInfoFromHeight(t, 0))} - sR1 := dispatcher.Target{ChainInfo: *(chainInfoFromHeight(t, 1))} - sR2 := dispatcher.Target{ChainInfo: *(chainInfoFromHeight(t, 2))} - sR47 := dispatcher.Target{ChainInfo: *(chainInfoFromHeight(t, 47))} - - testQ.Push(sR2) - testQ.Push(sR47) - testQ.Push(sR0) - testQ.Push(sR1) - - assert.Equal(t, 4, testQ.Len()) - - // Pop in order - out0 := requirePop(t, testQ) - out1 := requirePop(t, testQ) - out2 := requirePop(t, testQ) - out3 := requirePop(t, testQ) - - assert.Equal(t, abi.ChainEpoch(47), out0.ChainInfo.Height) - assert.Equal(t, abi.ChainEpoch(2), out1.ChainInfo.Height) - assert.Equal(t, abi.ChainEpoch(1), out2.ChainInfo.Height) - assert.Equal(t, abi.ChainEpoch(0), out3.ChainInfo.Height) - - assert.Equal(t, 0, testQ.Len()) -} - -func TestQueueDuplicates(t *testing.T) { - tf.UnitTest(t) - testQ := dispatcher.NewTargetQueue() - - // Add syncRequests with same height - sR0 := dispatcher.Target{ChainInfo: *(chainInfoFromHeight(t, 0))} - sR0dup := dispatcher.Target{ChainInfo: *(chainInfoFromHeight(t, 0))} - - testQ.Push(sR0) - testQ.Push(sR0dup) - - // Only one of these makes it onto the queue - assert.Equal(t, 1, testQ.Len()) - - // Pop - first := requirePop(t, testQ) - assert.Equal(t, abi.ChainEpoch(0), first.ChainInfo.Height) - - // Now if we push the duplicate it goes back on - testQ.Push(sR0dup) - 
assert.Equal(t, 1, testQ.Len()) - - second := requirePop(t, testQ) - assert.Equal(t, abi.ChainEpoch(0), second.ChainInfo.Height) -} - -func TestQueueEmptyPopErrors(t *testing.T) { - tf.UnitTest(t) - testQ := dispatcher.NewTargetQueue() - sR0 := dispatcher.Target{ChainInfo: *(chainInfoFromHeight(t, 0))} - sR47 := dispatcher.Target{ChainInfo: *(chainInfoFromHeight(t, 47))} - - // Push 2 - testQ.Push(sR47) - testQ.Push(sR0) - - // Pop 3 - assert.Equal(t, 2, testQ.Len()) - _ = requirePop(t, testQ) - assert.Equal(t, 1, testQ.Len()) - _ = requirePop(t, testQ) - assert.Equal(t, 0, testQ.Len()) - - _, popped := testQ.Pop() - assert.False(t, popped) - -} - -// requirePop is a helper requiring that pop does not error -func requirePop(t *testing.T, q *dispatcher.TargetQueue) dispatcher.Target { - req, popped := q.Pop() - require.True(t, popped) - return req -} - -// chainInfoFromHeight is a helper that constructs a unique chain info off of -// an int. The tipset key is a faked cid from the string of that integer and -// the height is that integer. 
-func chainInfoFromHeight(t *testing.T, h int) *block.ChainInfo { - hStr := strconv.Itoa(h) - c := types.CidFromString(t, hStr) - return &block.ChainInfo{ - Head: block.NewTipSetKey(c), - Height: abi.ChainEpoch(h), - } -} diff --git a/internal/pkg/chainsync/internal/syncer/syncer.go b/internal/pkg/chainsync/internal/syncer/syncer.go deleted file mode 100644 index b917206080..0000000000 --- a/internal/pkg/chainsync/internal/syncer/syncer.go +++ /dev/null @@ -1,618 +0,0 @@ -package syncer - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/pkg/errors" - "go.opencensus.io/trace" - - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/status" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics/tracing" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -// Syncer updates its chain.Store according to the methods of its -// consensus.Protocol. It uses a bad tipset cache and a limit on new -// blocks to traverse during chain collection. The Syncer can query the -// network for blocks. The Syncer maintains the following invariant on -// its store: all tipsets that pass the syncer's validity checks are added to the -// chain store along with their state root CID. -// -// Ideally the code that syncs the chain according to consensus rules should -// be independent of any particular implementation of consensus. 
Currently the -// Syncer is coupled to details of Expected Consensus. This dependence -// exists in the widen function, the fact that widen is called on only one -// tipset in the incoming chain, and assumptions regarding the existence of -// grandparent state in the store. -type Syncer struct { - // fetcher is the networked block fetching service for fetching blocks - // and messages. - fetcher Fetcher - // BadTipSetCache is used to filter out collections of invalid blocks. - badTipSets *BadTipSetCache - - // Evaluates tipset messages and stores the resulting states. - fullValidator FullBlockValidator - // Validates headers and message structure - blockValidator BlockValidator - // Selects the heaviest of two chains - chainSelector ChainSelector - // Provides and stores validated tipsets and their state roots. - chainStore ChainReaderWriter - // Provides message collections given cids - messageProvider messageStore - - clock clock.Clock - // staged is the heaviest tipset seen by the syncer so far - staged block.TipSet - - // faultDetector is used to manage information about potential consensus faults - faultDetector - - // Reporter is used by the syncer to update the current status of the chain. - reporter status.Reporter -} - -// Fetcher defines an interface that may be used to fetch data from the network. -type Fetcher interface { - // FetchTipSets will only fetch TipSets that evaluate to `false` when passed to `done`, - // this includes the provided `ts`. The TipSet that evaluates to true when - // passed to `done` will be in the returned slice. The returns slice of TipSets is in Traversal order. - FetchTipSets(context.Context, block.TipSetKey, peer.ID, func(block.TipSet) (bool, error)) ([]block.TipSet, error) - - // FetchTipSetHeaders will fetch only the headers of tipset blocks. 
- // Returned slice in reversal order - FetchTipSetHeaders(context.Context, block.TipSetKey, peer.ID, func(block.TipSet) (bool, error)) ([]block.TipSet, error) -} - -// ChainReaderWriter reads and writes the chain store. -type ChainReaderWriter interface { - GetHead() block.TipSetKey - GetTipSet(tsKey block.TipSetKey) (block.TipSet, error) - GetTipSetStateRoot(tsKey block.TipSetKey) (cid.Cid, error) - GetTipSetReceiptsRoot(tsKey block.TipSetKey) (cid.Cid, error) - HasTipSetAndState(ctx context.Context, tsKey block.TipSetKey) bool - PutTipSetMetadata(ctx context.Context, tsas *chain.TipSetMetadata) error - SetHead(ctx context.Context, ts block.TipSet) error - HasTipSetAndStatesWithParentsAndHeight(pTsKey block.TipSetKey, h abi.ChainEpoch) bool - GetTipSetAndStatesByParentsAndHeight(pTsKey block.TipSetKey, h abi.ChainEpoch) ([]*chain.TipSetMetadata, error) -} - -type messageStore interface { - LoadMessages(context.Context, cid.Cid) ([]*types.SignedMessage, []*types.UnsignedMessage, error) - LoadReceipts(context.Context, cid.Cid) ([]vm.MessageReceipt, error) - StoreReceipts(context.Context, []vm.MessageReceipt) (cid.Cid, error) -} - -// ChainSelector chooses the heaviest between chains. -type ChainSelector interface { - // IsHeavier returns true if tipset a is heavier than tipset b and false if - // tipset b is heavier than tipset a. - IsHeavier(ctx context.Context, a, b block.TipSet, aStateID, bStateID cid.Cid) (bool, error) - // Weight returns the weight of a tipset after the upgrade to version 1 - Weight(ctx context.Context, ts block.TipSet, stRoot cid.Cid) (fbig.Int, error) -} - -// BlockValidator does semanitc validation on headers -type BlockValidator interface { - // ValidateHeaderSemantic validates conditions on a block header that can be - // checked with the parent header but not parent state. 
- ValidateHeaderSemantic(ctx context.Context, header *block.Block, parents block.TipSet) error - // ValidateMessagesSemantic validates a block's messages against parent state without applying the messages - ValidateMessagesSemantic(ctx context.Context, child *block.Block, parents block.TipSetKey) error -} - -// FullBlockValidator does semantic validation on fullblocks. -type FullBlockValidator interface { - // RunStateTransition returns the state root CID resulting from applying the input ts to the - // prior `stateRoot`. It returns an error if the transition is invalid. - RunStateTransition(ctx context.Context, ts block.TipSet, blsMessages [][]*types.UnsignedMessage, secpMessages [][]*types.SignedMessage, parentWeight fbig.Int, stateID cid.Cid, receiptRoot cid.Cid) (cid.Cid, []vm.MessageReceipt, error) -} - -// faultDetector tracks data for detecting consensus faults and emits faults -// upon detection. -type faultDetector interface { - CheckBlock(b *block.Block, p block.TipSet) error -} - -var reorgCnt *metrics.Int64Counter - -func init() { - reorgCnt = metrics.NewInt64Counter("chain/reorg_count", "The number of reorgs that have occurred.") -} - -var ( - // ErrChainHasBadTipSet is returned when the syncer traverses a chain with a cached bad tipset. - ErrChainHasBadTipSet = errors.New("input chain contains a cached bad tipset") - // ErrNewChainTooLong is returned when processing a fork that split off from the main chain too many blocks ago. - ErrNewChainTooLong = errors.New("input chain forked from best chain past finality limit") - // ErrUnexpectedStoreState indicates that the syncer's chain store is violating expected invariants. 
- ErrUnexpectedStoreState = errors.New("the chain store is in an unexpected state") -) - -var syncOneTimer *metrics.Float64Timer - -func init() { - syncOneTimer = metrics.NewTimerMs("syncer/sync_one", "Duration of single tipset validation in milliseconds") -} - -var logSyncer = logging.Logger("chainsync.syncer") - -// NewSyncer constructs a Syncer ready for use. The chain reader must have a -// head tipset to initialize the staging field. -func NewSyncer(fv FullBlockValidator, hv BlockValidator, cs ChainSelector, s ChainReaderWriter, m messageStore, f Fetcher, sr status.Reporter, c clock.Clock, fd faultDetector) (*Syncer, error) { - return &Syncer{ - fetcher: f, - badTipSets: &BadTipSetCache{ - bad: make(map[string]struct{}), - }, - fullValidator: fv, - blockValidator: hv, - chainSelector: cs, - chainStore: s, - messageProvider: m, - clock: c, - faultDetector: fd, - reporter: sr, - }, nil -} - -// InitStaged reads the head from the syncer's chain store and sets the syncer's -// staged field. Used for initializing syncer. -func (syncer *Syncer) InitStaged() error { - staged, err := syncer.chainStore.GetTipSet(syncer.chainStore.GetHead()) - if err != nil { - return err - } - syncer.staged = staged - return nil -} - -// SetStagedHead sets the syncer's internal staged tipset to the chain's head. 
-func (syncer *Syncer) SetStagedHead(ctx context.Context) error { - return syncer.chainStore.SetHead(ctx, syncer.staged) -} - -// fetchAndValidateHeaders fetches headers and runs semantic block validation -// on the chain of fetched headers -func (syncer *Syncer) fetchAndValidateHeaders(ctx context.Context, ci *block.ChainInfo) ([]block.TipSet, error) { - head, err := syncer.chainStore.GetTipSet(syncer.chainStore.GetHead()) - if err != nil { - return nil, err - } - headHeight, err := head.Height() - if err != nil { - return nil, err - } - headers, err := syncer.fetcher.FetchTipSetHeaders(ctx, ci.Head, ci.Sender, func(t block.TipSet) (bool, error) { - h, err := t.Height() - if err != nil { - return true, err - } - if h+miner.ChainFinalityish < headHeight { - return true, ErrNewChainTooLong - } - - parents, err := t.Parents() - if err != nil { - return true, err - } - return syncer.chainStore.HasTipSetAndState(ctx, parents), nil - }) - if err != nil { - return nil, err - } - // Fetcher returns chain in Traversal order, reverse it to height order - chain.Reverse(headers) - - parent, _, err := syncer.ancestorsFromStore(headers[0]) - if err != nil { - return nil, err - } - for i, ts := range headers { - for i := 0; i < ts.Len(); i++ { - err = syncer.blockValidator.ValidateHeaderSemantic(ctx, ts.At(i), parent) - if err != nil { - return nil, err - } - } - parent = headers[i] - } - return headers, nil -} - -// syncOne syncs a single tipset with the chain store. syncOne calculates the -// parent state of the tipset and calls into consensus to run a state transition -// in order to validate the tipset. In the case the input tipset is valid, -// syncOne calls into consensus to check its weight, and then updates the head -// of the store if this tipset is the heaviest. -// -// Precondition: the caller of syncOne must hold the syncer's lock (syncer.mu) to -// ensure head is not modified by another goroutine during run. 
-func (syncer *Syncer) syncOne(ctx context.Context, grandParent, parent, next block.TipSet) error { - priorHeadKey := syncer.chainStore.GetHead() - - // if tipset is already priorHeadKey, we've been here before. do nothing. - if priorHeadKey.Equals(next.Key()) { - return nil - } - - stopwatch := syncOneTimer.Start(ctx) - defer stopwatch.Stop(ctx) - - // Lookup parent state and receipt root. It is guaranteed by the syncer that it is in the chainStore. - stateRoot, err := syncer.chainStore.GetTipSetStateRoot(parent.Key()) - if err != nil { - return err - } - - // Gather tipset messages - var nextSecpMessages [][]*types.SignedMessage - var nextBlsMessages [][]*types.UnsignedMessage - for i := 0; i < next.Len(); i++ { - blk := next.At(i) - secpMsgs, blsMsgs, err := syncer.messageProvider.LoadMessages(ctx, blk.Messages.Cid) - if err != nil { - return errors.Wrapf(err, "syncing tip %s failed loading message list %s for block %s", next.Key(), blk.Messages, blk.Cid()) - } - - nextBlsMessages = append(nextBlsMessages, blsMsgs) - nextSecpMessages = append(nextSecpMessages, secpMsgs) - } - - // Gather validated parent weight - parentWeight, err := syncer.calculateParentWeight(ctx, parent, grandParent) - if err != nil { - return err - } - - parentReceiptRoot, err := syncer.chainStore.GetTipSetReceiptsRoot(parent.Key()) - if err != nil { - return err - } - - // Run a state transition to validate the tipset and compute - // a new state to add to the store. 
- root, receipts, err := syncer.fullValidator.RunStateTransition(ctx, next, nextBlsMessages, nextSecpMessages, parentWeight, stateRoot, parentReceiptRoot) - if err != nil { - return err - } - - // Now that the tipset is validated preconditions are satisfied to check - // consensus faults - for i := 0; i < next.Len(); i++ { - err := syncer.faultDetector.CheckBlock(next.At(i), parent) - if err != nil { - return err - } - } - - receiptCid, err := syncer.messageProvider.StoreReceipts(ctx, receipts) - if err != nil { - return errors.Wrapf(err, "could not store message rerceipts for tip set %s", next.String()) - } - - err = syncer.chainStore.PutTipSetMetadata(ctx, &chain.TipSetMetadata{ - TipSet: next, - TipSetStateRoot: root, - TipSetReceipts: receiptCid, - }) - if err != nil { - return err - } - logSyncer.Debugf("Successfully updated store with %s", next.String()) - return nil -} - -// TODO #3537 this should be stored the first time it is computed and retrieved -// from disk just like aggregate state roots. 
-func (syncer *Syncer) calculateParentWeight(ctx context.Context, parent, grandParent block.TipSet) (fbig.Int, error) { - var baseStRoot cid.Cid - var err error - if grandParent.Equals(block.UndefTipSet) { - // use genesis state as parent of genesis block - baseStRoot, err = syncer.chainStore.GetTipSetStateRoot(parent.Key()) - } else { - baseStRoot, err = syncer.chainStore.GetTipSetStateRoot(grandParent.Key()) - } - if err != nil { - return fbig.Zero(), err - } - return syncer.chainSelector.Weight(ctx, parent, baseStRoot) -} - -// ancestorsFromStore returns the parent and grandparent tipsets of `ts` -func (syncer *Syncer) ancestorsFromStore(ts block.TipSet) (block.TipSet, block.TipSet, error) { - parentCids, err := ts.Parents() - if err != nil { - return block.UndefTipSet, block.UndefTipSet, err - } - parent, err := syncer.chainStore.GetTipSet(parentCids) - if err != nil { - return block.UndefTipSet, block.UndefTipSet, err - } - grandParentCids, err := parent.Parents() - if err != nil { - return block.UndefTipSet, block.UndefTipSet, err - } - if grandParentCids.Empty() { - // parent == genesis ==> grandParent undef - return parent, block.UndefTipSet, nil - } - grandParent, err := syncer.chainStore.GetTipSet(grandParentCids) - if err != nil { - return block.UndefTipSet, block.UndefTipSet, err - } - return parent, grandParent, nil -} - -func (syncer *Syncer) logReorg(ctx context.Context, curHead, newHead block.TipSet) { - curHeadIter := chain.IterAncestors(ctx, syncer.chainStore, curHead) - newHeadIter := chain.IterAncestors(ctx, syncer.chainStore, newHead) - commonAncestor, err := chain.FindCommonAncestor(curHeadIter, newHeadIter) - if err != nil { - // Should never get here because reorgs should always have a - // common ancestor.. 
- logSyncer.Warnf("unexpected error when running FindCommonAncestor for reorg log: %s", err.Error()) - return - } - - reorg := chain.IsReorg(curHead, newHead, commonAncestor) - if reorg { - reorgCnt.Inc(ctx, 1) - dropped, added, err := chain.ReorgDiff(curHead, newHead, commonAncestor) - if err == nil { - logSyncer.With( - "currentHead", curHead, - "newHead", newHead, - ).Infof("reorg dropping %d height and adding %d", dropped, added) - } else { - logSyncer.With( - "currentHead", curHead, - "newHead", newHead, - ).Infof("reorg") - logSyncer.Errorw("unexpected error from ReorgDiff during log", "error", err) - } - } -} - -// widen computes a tipset implied by the input tipset and the store that -// could potentially be the heaviest tipset. In the context of EC, widen -// returns the union of the input tipset and the biggest tipset with the same -// parents from the store. -// TODO: this leaks EC abstractions into the syncer, we should think about this. -func (syncer *Syncer) widen(ctx context.Context, ts block.TipSet) (block.TipSet, error) { - // Lookup tipsets with the same parents from the store. - parentSet, err := ts.Parents() - if err != nil { - return block.UndefTipSet, err - } - height, err := ts.Height() - if err != nil { - return block.UndefTipSet, err - } - if !syncer.chainStore.HasTipSetAndStatesWithParentsAndHeight(parentSet, height) { - return block.UndefTipSet, nil - } - candidates, err := syncer.chainStore.GetTipSetAndStatesByParentsAndHeight(parentSet, height) - if err != nil { - return block.UndefTipSet, err - } - if len(candidates) == 0 { - return block.UndefTipSet, nil - } - - // Only take the tipset with the most blocks (this is EC specific logic) - max := candidates[0].TipSet - for _, candidate := range candidates[0:] { - if candidate.TipSet.Len() > max.Len() { - max = candidate.TipSet - } - } - - // Form a new tipset from the union of ts and the largest in the store, de-duped. 
- var blockSlice []*block.Block - blockCids := make(map[cid.Cid]struct{}) - for i := 0; i < ts.Len(); i++ { - blk := ts.At(i) - blockCids[blk.Cid()] = struct{}{} - blockSlice = append(blockSlice, blk) - } - for i := 0; i < max.Len(); i++ { - blk := max.At(i) - if _, found := blockCids[blk.Cid()]; !found { - blockSlice = append(blockSlice, blk) - blockCids[blk.Cid()] = struct{}{} - } - } - wts, err := block.NewTipSet(blockSlice...) - if err != nil { - return block.UndefTipSet, err - } - - // check that the tipset is distinct from the input and tipsets from the store. - if wts.String() == ts.String() || wts.String() == max.String() { - return block.UndefTipSet, nil - } - - return wts, nil -} - -// HandleNewTipSet validates and syncs the chain rooted at the provided tipset -// to a chain store. Iff catchup is false then the syncer will set the head. -func (syncer *Syncer) HandleNewTipSet(ctx context.Context, ci *block.ChainInfo, catchup bool) error { - err := syncer.handleNewTipSet(ctx, ci) - if err != nil { - return err - } - if catchup { - return nil - } - return syncer.SetStagedHead(ctx) -} - -func (syncer *Syncer) handleNewTipSet(ctx context.Context, ci *block.ChainInfo) (err error) { - // handleNewTipSet extends the Syncer's chain store with the given tipset if - // the chain is a valid extension. It stages new heaviest tipsets for later - // setting the chain head - logSyncer.Debugf("Begin fetch and sync of chain with head %v", ci.Head) - ctx, span := trace.StartSpan(ctx, "Syncer.HandleNewTipSet") - span.AddAttributes(trace.StringAttribute("tipset", ci.Head.String())) - defer tracing.AddErrorEndSpan(ctx, span, &err) - - // If the store already has this tipset then the syncer is finished. 
- if syncer.chainStore.HasTipSetAndState(ctx, ci.Head) { - return nil - } - - syncer.reporter.UpdateStatus(status.SyncingStarted(syncer.clock.Now().Unix()), status.SyncHead(ci.Head), status.SyncHeight(ci.Height), status.SyncComplete(false)) - defer syncer.reporter.UpdateStatus(status.SyncComplete(true)) - syncer.reporter.UpdateStatus(status.SyncFetchComplete(false)) - - tipsets, err := syncer.fetchAndValidateHeaders(ctx, ci) - if err != nil { - return errors.Wrapf(err, "failure fetching or validating headers") - } - - // Once headers check out, fetch messages - _, err = syncer.fetcher.FetchTipSets(ctx, ci.Head, ci.Sender, func(t block.TipSet) (bool, error) { - parentsKey, err := t.Parents() - if err != nil { - return true, err - } - height, err := t.Height() - if err != nil { - return false, err - } - - // validate block message structure - for i := 0; i < t.Len(); i++ { - err := syncer.blockValidator.ValidateMessagesSemantic(ctx, t.At(i), parentsKey) - if err != nil { - return false, err - } - } - - // update status with latest fetched head and height - syncer.reporter.UpdateStatus(status.FetchHead(t.Key()), status.FetchHeight(height)) - return syncer.chainStore.HasTipSetAndState(ctx, parentsKey), nil - }) - if err != nil { - return errors.Wrapf(err, "failure fetching full blocks") - } - - syncer.reporter.UpdateStatus(status.SyncFetchComplete(true)) - - parent, grandParent, err := syncer.ancestorsFromStore(tipsets[0]) - if err != nil { - return err - } - - // Try adding the tipsets of the chain to the store, checking for new - // heaviest tipsets. - for i, ts := range tipsets { - // TODO: this "i==0" leaks EC specifics into syncer abstraction - // for the sake of efficiency, consider plugging up this leak. 
- var wts block.TipSet - if i == 0 { - wts, err = syncer.widen(ctx, ts) - if err != nil { - return err - } - if wts.Defined() { - logSyncer.Debug("attempt to sync after widen") - err = syncer.syncOne(ctx, grandParent, parent, wts) - if err != nil { - return err - } - err = syncer.stageIfHeaviest(ctx, wts) - if err != nil { - return err - } - } - } - // If the tipsets has length greater than 1, then we need to sync each tipset - // in the chain in order to process the chain fully, including the non-widened - // first tipset. - // If the chan has length == 1, we can avoid processing the non-widened tipset - // as a performance optimization, because this tipset cannot be heavier - // than the widened first tipset. - if !wts.Defined() || len(tipsets) > 1 { - err = syncer.syncOne(ctx, grandParent, parent, ts) - if err != nil { - // While `syncOne` can indeed fail for reasons other than consensus, - // adding to the badTipSets at this point is the simplest, since we - // have access to the chain. If syncOne fails for non-consensus reasons, - // there is no assumption that the running node's data is valid at all, - // so we don't really lose anything with this simplification. - syncer.badTipSets.AddChain(tipsets[i:]) - return errors.Wrapf(err, "failed to sync tipset %s, number %d of %d in chain", ts.Key(), i, len(tipsets)) - } - } - - if i%500 == 0 { - logSyncer.Infof("processing block %d of %v for chain with head at %v", i, len(tipsets), ci.Head.String()) - } - grandParent = parent - parent = ts - } - return syncer.stageIfHeaviest(ctx, parent) -} - -func (syncer *Syncer) stageIfHeaviest(ctx context.Context, candidate block.TipSet) error { - // stageIfHeaviest sets the provided candidates to the staging head of the chain if they - // are heavier. Precondtion: candidates are validated and added to the store. 
- parentKey, err := candidate.Parents() - if err != nil { - return err - } - candidateParentStateID, err := syncer.chainStore.GetTipSetStateRoot(parentKey) - if err != nil { - return err - } - - stagedParentKey, err := syncer.staged.Parents() - if err != nil { - return err - } - var stagedBaseStateID cid.Cid - if stagedParentKey.Empty() { // if staged is genesis base state is genesis state - stagedBaseStateID = syncer.staged.At(0).StateRoot.Cid - } else { - stagedBaseStateID, err = syncer.chainStore.GetTipSetStateRoot(stagedParentKey) - if err != nil { - return err - } - } - - heavier, err := syncer.chainSelector.IsHeavier(ctx, candidate, syncer.staged, candidateParentStateID, stagedBaseStateID) - if err != nil { - return err - } - - // If it is the heaviest update the chainStore. - if heavier { - // Gather the entire new chain for reorg comparison and logging. - syncer.logReorg(ctx, syncer.staged, candidate) - syncer.staged = candidate - } - - return nil -} - -// Status returns the current syncer status. 
-func (syncer *Syncer) Status() status.Status { - return syncer.reporter.Status() -} diff --git a/internal/pkg/chainsync/internal/syncer/syncer_integration_test.go b/internal/pkg/chainsync/internal/syncer/syncer_integration_test.go deleted file mode 100644 index fe8ac69e51..0000000000 --- a/internal/pkg/chainsync/internal/syncer/syncer_integration_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package syncer_test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/go-address" - bstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/internal/syncer" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/status" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// Syncer is capable of recovering from a fork reorg after the store is loaded. -// This is a regression test to guard against the syncer assuming that the store having all -// blocks from a tipset means the syncer has computed its state. -// Such a case happens when the store has just loaded, but this tipset is not on its heaviest chain). -// See https://github.com/filecoin-project/go-filecoin/issues/1148#issuecomment-432008060 -func TestLoadFork(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - // Set up in the standard way, but retain references to the repo and cbor stores. 
- builder := chain.NewBuilder(t, address.Undef) - genesis := builder.NewGenesis() - genStateRoot, err := builder.GetTipSetStateRoot(genesis.Key()) - require.NoError(t, err) - - repo := repo.NewInMemoryRepo() - bs := bstore.NewBlockstore(repo.Datastore()) - cborStore := cborutil.NewIpldStore(bs) - store := chain.NewStore(repo.ChainDatastore(), cborStore, chain.NewStatusReporter(), genesis.At(0).Cid()) - require.NoError(t, store.PutTipSetMetadata(ctx, &chain.TipSetMetadata{TipSetStateRoot: genStateRoot, TipSet: genesis, TipSetReceipts: types.EmptyReceiptsCID})) - require.NoError(t, store.SetHead(ctx, genesis)) - - // Note: the chain builder is passed as the fetcher, from which blocks may be requested, but - // *not* as the store, to which the syncer must ensure to put blocks. - eval := &chain.FakeStateEvaluator{} - sel := &chain.FakeChainSelector{} - s, err := syncer.NewSyncer(eval, eval, sel, store, builder, builder, status.NewReporter(), clock.NewFake(time.Unix(1234567890, 0)), &noopFaultDetector{}) - require.NoError(t, err) - require.NoError(t, s.InitStaged()) - - base := builder.AppendManyOn(3, genesis) - left := builder.AppendManyOn(4, base) - right := builder.AppendManyOn(3, base) - - // Sync the two branches, which stores all blocks in the underlying stores. - assert.NoError(t, s.HandleNewTipSet(ctx, block.NewChainInfo("", "", left.Key(), heightFromTip(t, left)), false)) - assert.NoError(t, s.HandleNewTipSet(ctx, block.NewChainInfo("", "", right.Key(), heightFromTip(t, right)), false)) - verifyHead(t, store, left) - - // The syncer/store assume that the fetcher populates the underlying block store such that - // tipsets can be reconstructed. The chain builder used for testing doesn't do that, so do - // it manually here. 
- for _, tip := range []block.TipSet{left, right} { - for itr := chain.IterAncestors(ctx, builder, tip); !itr.Complete(); require.NoError(t, itr.Next()) { - for _, block := range itr.Value().ToSlice() { - _, err := cborStore.Put(ctx, block) - require.NoError(t, err) - } - } - } - - // Load a new chain store on the underlying data. It will only compute state for the - // left (heavy) branch. It has a fetcher that can't provide blocks. - newStore := chain.NewStore(repo.ChainDatastore(), cborStore, chain.NewStatusReporter(), genesis.At(0).Cid()) - require.NoError(t, newStore.Load(ctx)) - fakeFetcher := th.NewTestFetcher() - offlineSyncer, err := syncer.NewSyncer(eval, eval, sel, newStore, builder, fakeFetcher, status.NewReporter(), clock.NewFake(time.Unix(1234567890, 0)), &noopFaultDetector{}) - require.NoError(t, err) - require.NoError(t, offlineSyncer.InitStaged()) - - assert.True(t, newStore.HasTipSetAndState(ctx, left.Key())) - assert.False(t, newStore.HasTipSetAndState(ctx, right.Key())) - - // The newRight head extends right. The store already has the individual blocks up to the point - // `right`, but has not computed their state (because it's not the heavy branch). - // Obtuse code organisation means that the syncer will - // attempt to fetch `newRight` *and `right`* blocks from the network in the process of computing - // the state sequence for them all. Yes, this is a bit silly - the `right` blocks are already local. - // The test is guarding against a prior incorrect behaviour where the syncer would not attempt to - // fetch the `right` blocks (because it already has them) but *also* would not compute their state. - // We detect this by making the final `newRight` blocks fetchable, but not the `right` blocks, and - // expect the syncer to fail due to that failed fetch. - // This test would fail to work if the syncer could inspect the store directly to avoid requesting - // blocks already local, but also correctly recomputed the state. 
- - // Note that since the blocks are in the store, and a real fetcher will consult the store before - // trying the network, this won't actually cause a network request. But it's really hard to follow. - newRight := builder.AppendManyOn(1, right) - fakeFetcher.AddSourceBlocks(newRight.ToSlice()...) - - // Test that the syncer can't sync a block chained from on the right (originally shorter) chain - // without getting old blocks from network. i.e. the store index has been trimmed - // of non-heaviest chain blocks. - - err = offlineSyncer.HandleNewTipSet(ctx, block.NewChainInfo("", "", newRight.Key(), heightFromTip(t, newRight)), false) - assert.Error(t, err) - - // The left chain is ok without any fetching though. - assert.NoError(t, offlineSyncer.HandleNewTipSet(ctx, block.NewChainInfo("", "", left.Key(), heightFromTip(t, left)), false)) -} - -type noopFaultDetector struct{} - -func (fd *noopFaultDetector) CheckBlock(_ *block.Block, _ block.TipSet) error { - return nil -} diff --git a/internal/pkg/chainsync/internal/syncer/syncer_test.go b/internal/pkg/chainsync/internal/syncer/syncer_test.go deleted file mode 100644 index e66e9aef9a..0000000000 --- a/internal/pkg/chainsync/internal/syncer/syncer_test.go +++ /dev/null @@ -1,621 +0,0 @@ -package syncer_test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/ipfs/go-cid" - bstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/internal/syncer" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/status" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -func heightFromTip(t *testing.T, tip block.TipSet) abi.ChainEpoch { - h, err := tip.Height() - if err != nil { - t.Fatal(err) - } - return h -} - -func TestOneBlock(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - t1 := builder.AppendOn(genesis, 1) - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t1.Key(), heightFromTip(t, t1)), false)) - - verifyTip(t, store, t1, t1.At(0).StateRoot.Cid) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, t1) -} - -func TestMultiBlockTip(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - tip := builder.AppendOn(genesis, 2) - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", tip.Key(), heightFromTip(t, tip)), false)) - - verifyTip(t, store, tip, builder.StateForKey(tip.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, tip) -} - -func TestTipSetIncremental(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - t1 := builder.AppendOn(genesis, 1) - - t2 := builder.AppendOn(genesis, 1) - - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t1.Key(), heightFromTip(t, t1)), false)) - - 
verifyTip(t, store, t1, builder.StateForKey(t1.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, t1) - - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t2.Key(), heightFromTip(t, t2)), false)) - _, err := store.GetTipSet(t2.Key()) - require.Error(t, err) - - merged := block.RequireNewTipSet(t, t1.At(0), t2.At(0)) - verifyTip(t, store, merged, builder.StateForKey(merged.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, merged) -} - -func TestChainIncremental(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - t1 := builder.AppendOn(genesis, 2) - - t2 := builder.AppendOn(t1, 3) - - t3 := builder.AppendOn(t2, 1) - - t4 := builder.AppendOn(t3, 2) - - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t1.Key(), heightFromTip(t, t1)), false)) - verifyTip(t, store, t1, builder.StateForKey(t1.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, t1) - - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t2.Key(), heightFromTip(t, t2)), false)) - verifyTip(t, store, t2, builder.StateForKey(t2.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, t2) - - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t3.Key(), heightFromTip(t, t3)), false)) - verifyTip(t, store, t3, builder.StateForKey(t3.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, t3) - - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t4.Key(), heightFromTip(t, t4)), false)) - verifyTip(t, store, t4, builder.StateForKey(t4.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, t4) -} - -func TestChainJump(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, 
syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - t1 := builder.AppendOn(genesis, 2) - t2 := builder.AppendOn(t1, 3) - t3 := builder.AppendOn(t2, 1) - t4 := builder.AppendOn(t3, 2) - - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t4.Key(), heightFromTip(t, t4)), false)) - verifyTip(t, store, t1, builder.StateForKey(t1.Key())) - verifyTip(t, store, t2, builder.StateForKey(t2.Key())) - verifyTip(t, store, t3, builder.StateForKey(t3.Key())) - verifyTip(t, store, t4, builder.StateForKey(t4.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, t4) -} - -func TestIgnoreLightFork(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - forkbase := builder.AppendOn(genesis, 1) - forkHead := builder.AppendOn(forkbase, 1) - - t1 := builder.AppendOn(forkbase, 1) - t2 := builder.AppendOn(t1, 1) - t3 := builder.AppendOn(t2, 1) - t4 := builder.AppendOn(t3, 1) - - // Sync heaviest branch first. - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t4.Key(), heightFromTip(t, t4)), false)) - verifyTip(t, store, t4, builder.StateForKey(t4.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, t4) - - // Lighter fork is processed but not change head. 
- assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", forkHead.Key(), heightFromTip(t, forkHead)), false)) - _, err := store.GetTipSet(forkHead.Key()) - require.Error(t, err) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, t4) -} - -func TestAcceptHeavierFork(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - forkbase := builder.AppendOn(genesis, 1) - - main1 := builder.AppendOn(forkbase, 1) - main2 := builder.AppendOn(main1, 1) - main3 := builder.AppendOn(main2, 1) - main4 := builder.AppendOn(main3, 1) - - // Fork is heavier with more blocks, despite shorter (with default fake weighing function - // from FakeStateEvaluator). - fork1 := builder.AppendOn(forkbase, 3) - fork2 := builder.AppendOn(fork1, 1) - fork3 := builder.AppendOn(fork2, 1) - - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", main4.Key(), heightFromTip(t, main4)), false)) - verifyTip(t, store, main4, builder.StateForKey(main4.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, main4) - - // Heavier fork updates head3 - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", fork3.Key(), heightFromTip(t, fork3)), false)) - verifyTip(t, store, fork1, builder.StateForKey(fork1.Key())) - verifyTip(t, store, fork2, builder.StateForKey(fork2.Key())) - verifyTip(t, store, fork3, builder.StateForKey(fork3.Key())) - require.NoError(t, syncer.SetStagedHead(ctx)) - verifyHead(t, store, fork3) -} - -func TestRejectFinalityFork(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, s := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - head := builder.AppendManyOn(int(miner.ChainFinalityish+2), genesis) - assert.NoError(t, s.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", head.Key(), heightFromTip(t, head)), 
false)) - - // Differentiate fork for a new chain. Fork has FinalityEpochs + 1 - // blocks on top of genesis so forkFinalityBase is more than FinalityEpochs - // behind head - forkFinalityBase := builder.BuildOneOn(genesis, func(bb *chain.BlockBuilder) { - bb.SetTicket([]byte{0xbe}) - }) - forkFinalityHead := builder.AppendManyOn(int(miner.ChainFinalityish), forkFinalityBase) - assert.Error(t, s.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", forkFinalityHead.Key(), heightFromTip(t, forkFinalityHead)), false)) -} - -func TestNoUncessesaryFetch(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, s := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - head := builder.AppendManyOn(4, genesis) - assert.NoError(t, s.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", head.Key(), heightFromTip(t, head)), false)) - - // A new syncer unable to fetch blocks from the network can handle a tipset that's already - // in the store and linked to genesis. - emptyFetcher := chain.NewBuilder(t, address.Undef) - newSyncer, err := syncer.NewSyncer(&chain.FakeStateEvaluator{}, &chain.FakeStateEvaluator{}, &chain.FakeChainSelector{}, store, builder, emptyFetcher, status.NewReporter(), clock.NewFake(time.Unix(1234567890, 0)), &noopFaultDetector{}) - require.NoError(t, err) - require.NoError(t, newSyncer.InitStaged()) - assert.NoError(t, newSyncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", head.Key(), heightFromTip(t, head)), false)) -} - -// Syncer must track state of subsets of parent tipsets tracked in the store -// when they are the ancestor in a chain. This is in order to maintain the -// invariant that the aggregate state of the parents of the base of a collected chain -// is kept in the store. This invariant allows chains built on subsets of -// tracked tipsets to be handled correctly. -// This test tests that the syncer stores the state of such a base tipset of a collected chain, -// i.e. 
a subset of an existing tipset in the store. -// -// Ex: {A1, A2} -> {B1, B2, B3} in store to start -// {B1, B2} -> {C1, C2} chain 1 input to syncer -// C1 -> D1 chain 2 input to syncer -// -// The last operation will fail if the state of subset {B1, B2} is not -// kept in the store because syncing C1 requires retrieving parent state. -func TestSubsetParent(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, s := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - // Set up chain with {A1, A2} -> {B1, B2, B3} - tipA1A2 := builder.AppendOn(genesis, 2) - tipB1B2B3 := builder.AppendOn(tipA1A2, 3) - require.NoError(t, s.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", tipB1B2B3.Key(), heightFromTip(t, tipB1B2B3)), false)) - - // Sync one tipset with a parent equal to a subset of an existing - // tipset in the store: {B1, B2} -> {C1, C2} - tipB1B2 := block.RequireNewTipSet(t, tipB1B2B3.At(0), tipB1B2B3.At(1)) - tipC1C2 := builder.AppendOn(tipB1B2, 2) - - assert.NoError(t, s.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", tipC1C2.Key(), heightFromTip(t, tipC1C2)), false)) - - // Sync another tipset with a parent equal to a subset of the tipset - // just synced: C1 -> D1 - tipC1 := block.RequireNewTipSet(t, tipC1C2.At(0)) - tipD1OnC1 := builder.AppendOn(tipC1, 1) - assert.NoError(t, s.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", tipD1OnC1.Key(), heightFromTip(t, tipD1OnC1)), false)) - - // A full parent also works fine: {C1, C2} -> D1 - tipD1OnC1C2 := builder.AppendOn(tipC1C2, 1) - assert.NoError(t, s.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", tipD1OnC1C2.Key(), heightFromTip(t, tipD1OnC1C2)), false)) -} - -// Check that the syncer correctly adds widened chain ancestors to the store. 
-func TestWidenChainAncestor(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - link1 := builder.AppendOn(genesis, 2) - link2 := builder.AppendOn(link1, 3) - link3 := builder.AppendOn(link2, 1) - link4 := builder.AppendOn(link3, 2) - - // Build another block with parents link1, but not included in link2. - link2Alt := builder.AppendOn(link1, 1) - // Build a tipset containing one block from link2, plus this new sibling. - link2UnionSubset := block.RequireNewTipSet(t, link2.At(0), link2Alt.At(0)) - - // Sync the subset of link2 first - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", link2UnionSubset.Key(), heightFromTip(t, link2UnionSubset)), false)) - verifyTip(t, store, link2UnionSubset, builder.StateForKey(link2UnionSubset.Key())) - verifyHead(t, store, link2UnionSubset) - - // Sync chain with head at link4 - require.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", link4.Key(), heightFromTip(t, link4)), false)) - verifyTip(t, store, link4, builder.StateForKey(link4.Key())) - verifyHead(t, store, link4) - - // Check that the widened tipset (link2UnionSubset U link2) is tracked - link2Union := block.RequireNewTipSet(t, link2.At(0), link2.At(1), link2.At(2), link2Alt.At(0)) - verifyTip(t, store, link2Union, builder.StateForKey(link2Union.Key())) -} - -// Syncer finds a heaviest tipset by combining blocks from the ancestors of a -// chain and blocks already in the store. -// -// A guide to this test -- the point is that sometimes when merging chains the syncer -// will find a new heaviest tipset that is not the head of either chain. The syncer -// should correctly set this tipset as the head. 
-// -// From above we have the test-chain: -// genesis -> (link1blk1, link1blk2) -> (link2blk1, link2blk2, link2blk3) -> link3blk1 -> (link4blk1, link4blk2) -// -// Now we introduce a disjoint fork on top of link1 -// genesis -> (link1blk1, link1blk2) -> (forklink2blk1, forklink2blk2, forklink2blk3, forklink3blk4) -> forklink3blk1 -// -// When all blocks contribute equally to weight: -// So, the weight of the head of the test chain = -// W(link1) + 3 + 1 + 2 = W(link1) + 6 = 8 -// and the weight of the head of the fork chain = -// W(link1) + 4 + 1 = W(link1) + 5 = 7 -// and the weight of the union of link2 of both branches (a valid tipset) is -// W(link1) + 7 = 9 -// -// Therefore the syncer should set the head of the store to the union of the links.. -func TestHeaviestIsWidenedAncestor(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - link1 := builder.AppendOn(genesis, 2) - link2 := builder.AppendOn(link1, 3) - link3 := builder.AppendOn(link2, 1) - link4 := builder.AppendOn(link3, 2) - - forkLink2 := builder.AppendOn(link1, 4) - forkLink3 := builder.AppendOn(forkLink2, 1) - - // Sync main chain - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", link4.Key(), heightFromTip(t, link4)), false)) - - // Sync fork chain - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", forkLink3.Key(), heightFromTip(t, forkLink3)), false)) - - // Assert that widened chain is the new head - wideBlocks := link2.ToSlice() - wideBlocks = append(wideBlocks, forkLink2.ToSlice()...) - wideTs := block.RequireNewTipSet(t, wideBlocks...) 
- - verifyTip(t, store, wideTs, builder.ComputeState(wideTs)) - verifyHead(t, store, wideTs) -} - -func TestBlocksNotATipSetRejected(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - b1 := builder.AppendBlockOn(genesis) - b2 := builder.AppendBlockOnBlocks(b1) - - badKey := block.NewTipSetKey(b1.Cid(), b2.Cid()) - err := syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", badKey, b1.Height), false) - assert.Error(t, err) - - _, err = store.GetTipSet(badKey) - assert.Error(t, err) // Not present -} - -func TestBlockNotLinkedRejected(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - // Set up a parallel builder from which the syncer cannot fetch. - // The two builders are expected to produce exactly the same blocks from the same sequence - // of calls. - shadowBuilder := chain.NewBuilder(t, address.Undef) - gen2 := block.RequireNewTipSet(t, shadowBuilder.AppendBlockOnBlocks()) - require.True(t, genesis.Equals(gen2)) - - // The syncer fails to fetch this block so cannot sync it. 
- b1 := shadowBuilder.AppendOn(genesis, 1) - assert.Error(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", b1.Key(), heightFromTip(t, b1)), false)) - - // Make the same block available from the syncer's builder - builder.AppendBlockOn(genesis) - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", b1.Key(), heightFromTip(t, b1)), false)) -} - -type poisonValidator struct { - headerFailureTS uint64 - fullFailureTS uint64 -} - -func newPoisonValidator(t *testing.T, headerFailure, fullFailure uint64) *poisonValidator { - return &poisonValidator{headerFailureTS: headerFailure, fullFailureTS: fullFailure} -} - -func (pv *poisonValidator) RunStateTransition(_ context.Context, ts block.TipSet, _ [][]*types.UnsignedMessage, _ [][]*types.SignedMessage, - _ fbig.Int, _ cid.Cid, _ cid.Cid) (cid.Cid, []vm.MessageReceipt, error) { - stamp := ts.At(0).Timestamp - if pv.fullFailureTS == stamp { - return cid.Undef, nil, errors.New("run state transition fails on poison timestamp") - } - return cid.Undef, nil, nil -} - -func (pv *poisonValidator) ValidateHeaderSemantic(_ context.Context, header *block.Block, _ block.TipSet) error { - if pv.headerFailureTS == header.Timestamp { - return errors.New("val semantic fails on poison timestamp") - } - return nil -} - -// ValidateHeaderSemantic is a stub that always returns no error -func (pv *poisonValidator) ValidateMessagesSemantic(_ context.Context, _ *block.Block, _ block.TipSetKey) error { - return nil -} - -func TestSemanticallyBadTipSetFails(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - eval := newPoisonValidator(t, 98, 99) - builder, store, syncer := setupWithValidator(ctx, t, eval, eval) - genesis := builder.RequireTipSet(store.GetHead()) - - // Build a chain with messages that will fail semantic header validation - kis := types.MustGenerateKeyInfo(1, 42) - mm := vm.NewMessageMaker(t, kis) - alice := mm.Addresses()[0] - m1 := mm.NewSignedMessage(alice, 0) - m2 := 
mm.NewSignedMessage(alice, 1) - m3 := mm.NewSignedMessage(alice, 3) - - link1 := builder.BuildOneOn(genesis, func(bb *chain.BlockBuilder) { - bb.AddMessages( - []*types.SignedMessage{m1, m2, m3}, - []*types.UnsignedMessage{}, - ) - bb.SetTimestamp(98) // poison header val - }) - - // Set up a fresh builder without any of this data - err := syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", link1.Key(), heightFromTip(t, link1)), false) - require.Error(t, err) - assert.Contains(t, err.Error(), "val semantic fails") -} - -func TestSyncerStatus(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - // verify default status - s0 := syncer.Status() - assert.Equal(t, int64(0), s0.SyncingStarted) - assert.Equal(t, block.UndefTipSet.Key(), s0.SyncingHead) - assert.Equal(t, abi.ChainEpoch(0), s0.SyncingHeight) - assert.Equal(t, false, s0.SyncingTrusted) - assert.Equal(t, true, s0.SyncingComplete) - assert.Equal(t, true, s0.SyncingFetchComplete) - assert.Equal(t, block.UndefTipSet.Key(), s0.FetchingHead) - assert.Equal(t, abi.ChainEpoch(0), s0.FetchingHeight) - - // initial sync and status check - t1 := builder.AppendOn(genesis, 1) - require.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t1.Key(), heightFromTip(t, t1)), false)) - s1 := syncer.Status() - assert.Equal(t, t1.Key(), s1.FetchingHead) - assert.Equal(t, abi.ChainEpoch(1), s1.FetchingHeight) - - assert.Equal(t, true, s1.SyncingFetchComplete) - assert.Equal(t, true, s1.SyncingComplete) - - // advance the chain head, ensure status changes - t2 := builder.AppendOn(t1, 1) - require.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t2.Key(), heightFromTip(t, t2)), false)) - s2 := syncer.Status() - assert.Equal(t, false, s2.SyncingTrusted) - - assert.Equal(t, t2.Key(), s2.FetchingHead) - assert.Equal(t, abi.ChainEpoch(2), s2.FetchingHeight) - - 
assert.Equal(t, true, s2.SyncingFetchComplete) - assert.Equal(t, true, s2.SyncingComplete) - - // do not advance the chain head, ensure valid head remains constant but fetching head changes - t1 = builder.AppendOn(genesis, 1) - require.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t1.Key(), heightFromTip(t, t1)), false)) - s2 = syncer.Status() - assert.Equal(t, false, s2.SyncingTrusted) - - assert.Equal(t, t1.Key(), s2.FetchingHead) - assert.Equal(t, abi.ChainEpoch(1), s2.FetchingHeight) - - assert.Equal(t, true, s2.SyncingFetchComplete) - assert.Equal(t, true, s2.SyncingComplete) -} - -func TestStoresMessageReceipts(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - builder, store, syncer := setup(ctx, t) - genesis := builder.RequireTipSet(store.GetHead()) - - keys := types.MustGenerateKeyInfo(1, 42) - mm := vm.NewMessageMaker(t, keys) - alice := mm.Addresses()[0] - t1 := builder.Build(genesis, 4, func(b *chain.BlockBuilder, i int) { - b.AddMessages([]*types.SignedMessage{}, []*types.UnsignedMessage{mm.NewUnsignedMessage(alice, uint64(i))}) - }) - assert.NoError(t, syncer.HandleNewTipSet(ctx, block.NewChainInfo(peer.ID(""), "", t1.Key(), heightFromTip(t, t1)), false)) - - receiptsCid, err := store.GetTipSetReceiptsRoot(t1.Key()) - require.NoError(t, err) - - receipts, err := builder.LoadReceipts(ctx, receiptsCid) - require.NoError(t, err) - - assert.Len(t, receipts, 4) -} - -///// Set-up ///// - -// Initializes a chain builder, store and syncer. -// The chain builder has a single genesis block, which is set as the head of the store. 
-func setup(ctx context.Context, t *testing.T) (*chain.Builder, *chain.Store, *syncer.Syncer) { - eval := &chain.FakeStateEvaluator{} - return setupWithValidator(ctx, t, eval, eval) -} - -func setupWithValidator(ctx context.Context, t *testing.T, fullVal syncer.FullBlockValidator, headerVal syncer.BlockValidator) (*chain.Builder, *chain.Store, *syncer.Syncer) { - builder := chain.NewBuilder(t, address.Undef) - genesis := builder.NewGenesis() - genStateRoot, err := builder.GetTipSetStateRoot(genesis.Key()) - require.NoError(t, err) - ds := repo.NewInMemoryRepo().ChainDatastore() - bs := bstore.NewBlockstore(ds) - cst := cborutil.NewIpldStore(bs) - - store := chain.NewStore(ds, cst, chain.NewStatusReporter(), genesis.At(0).Cid()) - // Initialize chainStore store genesis state and tipset as head. - require.NoError(t, store.PutTipSetMetadata(ctx, &chain.TipSetMetadata{TipSetStateRoot: genStateRoot, TipSet: genesis, TipSetReceipts: types.EmptyReceiptsCID})) - require.NoError(t, store.SetHead(ctx, genesis)) - - // Note: the chain builder is passed as the fetcher, from which blocks may be requested, but - // *not* as the store, to which the syncer must ensure to put blocks. - sel := &chain.FakeChainSelector{} - syncer, err := syncer.NewSyncer(fullVal, headerVal, sel, store, builder, builder, status.NewReporter(), clock.NewFake(time.Unix(1234567890, 0)), &noopFaultDetector{}) - require.NoError(t, err) - require.NoError(t, syncer.InitStaged()) - - return builder, store, syncer -} - -///// Verification helpers ///// - -// Sub-interface of the store used for verification. -type syncStoreReader interface { - GetHead() block.TipSetKey - GetTipSet(block.TipSetKey) (block.TipSet, error) - GetTipSetStateRoot(tsKey block.TipSetKey) (cid.Cid, error) - GetTipSetAndStatesByParentsAndHeight(block.TipSetKey, abi.ChainEpoch) ([]*chain.TipSetMetadata, error) -} - -// Verifies that a tipset and associated state root are stored in the chain store. 
-func verifyTip(t *testing.T, store syncStoreReader, tip block.TipSet, stateRoot cid.Cid) { - foundTip, err := store.GetTipSet(tip.Key()) - require.NoError(t, err) - assert.Equal(t, tip, foundTip) - - foundState, err := store.GetTipSetStateRoot(tip.Key()) - require.NoError(t, err) - assert.Equal(t, stateRoot, foundState) - - parent, err := tip.Parents() - assert.NoError(t, err) - h, err := tip.Height() - assert.NoError(t, err) - childTsasSlice, err := store.GetTipSetAndStatesByParentsAndHeight(parent, h) - assert.NoError(t, err) - assert.True(t, containsTipSet(childTsasSlice, tip)) -} - -// Verifies that the store's head is as expected. -func verifyHead(t *testing.T, store syncStoreReader, head block.TipSet) { - headTipSet, err := store.GetTipSet(store.GetHead()) - require.NoError(t, err) - assert.Equal(t, head, headTipSet) -} - -func containsTipSet(tsasSlice []*chain.TipSetMetadata, ts block.TipSet) bool { - for _, tsas := range tsasSlice { - if tsas.TipSet.String() == ts.String() { //bingo - return true - } - } - return false -} diff --git a/internal/pkg/chainsync/status/status.go b/internal/pkg/chainsync/status/status.go deleted file mode 100644 index 0fac13fa9b..0000000000 --- a/internal/pkg/chainsync/status/status.go +++ /dev/null @@ -1,155 +0,0 @@ -package status - -import ( - "fmt" - "sync" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/specs-actors/actors/abi" - logging "github.com/ipfs/go-log/v2" -) - -// Reporter defines an interface to updating and reporting the status of the blockchain. -type Reporter interface { - UpdateStatus(...UpdateFn) - Status() Status -} - -// Status defines a structure used to represent the state of a chain store and syncer. -type Status struct { - // They head of the chain currently being fetched/validated, or undef if none. - SyncingHead block.TipSetKey - // The height of SyncingHead. 
- SyncingHeight abi.ChainEpoch - // Whether SyncingTip is trusted as a head far away from the validated head. - SyncingTrusted bool - // Unix time at which syncing of chain at SyncingHead began, zero if valdation hasn't started. - SyncingStarted int64 - // Whether SyncingHead has been validated. - SyncingComplete bool - // Whether SyncingHead has been fetched. - SyncingFetchComplete bool - - // The key of the tipset currently being fetched - FetchingHead block.TipSetKey - // The height of FetchingHead - FetchingHeight abi.ChainEpoch -} - -type reporter struct { - statusMu sync.Mutex - status *Status -} - -// UpdateFn defines a type for ipdating syncer status. -type UpdateFn func(*Status) - -var logChainStatus = logging.Logger("status") - -// NewReporter initializes a new status reporter. -func NewReporter() Reporter { - return &reporter{ - status: NewDefaultChainStatus(), - } -} - -// NewDefaultChainStatus returns a ChainStaus with the default empty values. -func NewDefaultChainStatus() *Status { - return &Status{ - SyncingHead: block.UndefTipSet.Key(), - SyncingHeight: 0, - SyncingTrusted: false, - SyncingStarted: 0, - SyncingComplete: true, - SyncingFetchComplete: true, - FetchingHead: block.UndefTipSet.Key(), - FetchingHeight: 0, - } -} - -// String returns the Status as a string -func (s Status) String() string { - return fmt.Sprintf("syncingStarted=%d, syncingHead=%s, syncingHeight=%d, syncingTrusted=%t, syncingComplete=%t syncingFetchComplete=%t fetchingHead=%s, fetchingHeight=%d", - s.SyncingStarted, - s.SyncingHead, s.SyncingHeight, s.SyncingTrusted, s.SyncingComplete, s.SyncingFetchComplete, - s.FetchingHead, s.FetchingHeight) -} - -// UpdateStatus updates the status heald by StatusReporter. 
-func (sr *reporter) UpdateStatus(update ...UpdateFn) { - sr.statusMu.Lock() - defer sr.statusMu.Unlock() - for _, u := range update { - u(sr.status) - } - logChainStatus.Debugf("syncing status: %s", sr.status.String()) -} - -// Status returns a copy of the current status. -func (sr *reporter) Status() Status { - return *sr.status -} - -// -// Syncing Updates -// - -// SyncHead updates the head. -func SyncHead(u block.TipSetKey) UpdateFn { - return func(s *Status) { - s.SyncingHead = u - } -} - -// SyncHeight updates the head. -func SyncHeight(u abi.ChainEpoch) UpdateFn { - return func(s *Status) { - s.SyncingHeight = u - } -} - -// SyncTrusted updates the trusted. -func SyncTrusted(u bool) UpdateFn { - return func(s *Status) { - s.SyncingTrusted = u - } -} - -// SyncingStarted marks the syncing as started. -func SyncingStarted(u int64) UpdateFn { - return func(s *Status) { - s.SyncingStarted = u - } -} - -// SyncComplete marks the fetch as complete. -func SyncComplete(u bool) UpdateFn { - return func(s *Status) { - s.SyncingComplete = u - } -} - -// SyncFetchComplete determines if the fetch is complete. -func SyncFetchComplete(u bool) UpdateFn { - return func(s *Status) { - s.SyncingFetchComplete = u - } -} - -// -// Fetching Updates -// - -// FetchHead gets the the head. -func FetchHead(u block.TipSetKey) UpdateFn { - return func(s *Status) { - s.FetchingHead = u - } -} - -// FetchHeight gets the height. 
-func FetchHeight(u abi.ChainEpoch) UpdateFn { - return func(s *Status) { - s.FetchingHeight = u - } -} diff --git a/internal/pkg/chainsync/status/status_test.go b/internal/pkg/chainsync/status/status_test.go deleted file mode 100644 index 151c1042ad..0000000000 --- a/internal/pkg/chainsync/status/status_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package status_test - -import ( - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/status" - "github.com/stretchr/testify/assert" - - //"github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestStatus(t *testing.T) { - tf.UnitTest(t) - - sr := status.NewReporter() - assert.Equal(t, *status.NewDefaultChainStatus(), sr.Status()) - assert.Equal(t, status.NewDefaultChainStatus().String(), sr.Status().String()) - - // single update - cidFn := types.NewCidForTestGetter() - - // multi update - t2 := block.NewTipSetKey(cidFn()) - t3 := block.NewTipSetKey(cidFn()) - expStatus := status.Status{ - SyncingHead: t2, - SyncingHeight: 456, - SyncingTrusted: true, - SyncingStarted: 123, - SyncingComplete: false, - SyncingFetchComplete: true, - FetchingHead: t3, - FetchingHeight: 789, - } - sr.UpdateStatus(status.SyncingStarted(123), status.SyncHead(t2), - status.SyncHeight(456), status.SyncTrusted(true), status.SyncComplete(false), status.SyncFetchComplete(true), - status.FetchHead(t3), status.FetchHeight(789)) - assert.Equal(t, expStatus, sr.Status()) -} diff --git a/internal/pkg/clock/chainclock_test.go b/internal/pkg/clock/chainclock_test.go deleted file mode 100644 index b8549f1fa4..0000000000 --- a/internal/pkg/clock/chainclock_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package clock_test - -import ( - "testing" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - 
"github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestChainEpochClock(t *testing.T) { - tf.UnitTest(t) - - now := int64(123456789) - bt := clock.DefaultEpochDuration - pd := clock.DefaultPropagationDelay - cec := clock.NewChainClock(uint64(now), bt, pd) - - epoch0Start := time.Unix(now, 0) - epoch1Start := epoch0Start.Add(bt) - - assert.Equal(t, abi.ChainEpoch(0), cec.EpochAtTime(epoch0Start)) - assert.Equal(t, abi.ChainEpoch(1), cec.EpochAtTime(epoch1Start)) - - epoch2Start := epoch1Start.Add(bt) - epoch2Middle := epoch2Start.Add(bt / time.Duration(5)) - assert.Equal(t, abi.ChainEpoch(2), cec.EpochAtTime(epoch2Start)) - assert.Equal(t, abi.ChainEpoch(2), cec.EpochAtTime(epoch2Middle)) - - epoch200Start := epoch0Start.Add(time.Duration(200) * bt) - assert.Equal(t, abi.ChainEpoch(200), cec.EpochAtTime(epoch200Start)) -} diff --git a/internal/pkg/clock/testing.go b/internal/pkg/clock/testing.go deleted file mode 100644 index fa6ada6fa8..0000000000 --- a/internal/pkg/clock/testing.go +++ /dev/null @@ -1,294 +0,0 @@ -package clock - -import ( - "sync" - "time" -) - -// Creates a new fake clock and chain clock wrapping it. -func NewFakeChain(genesis uint64, epochDuration time.Duration, propDelay time.Duration, now int64) (Fake, ChainEpochClock) { - fake := NewFake(time.Unix(now, 0)) - return fake, NewChainClockFromClock(genesis, epochDuration, propDelay, fake) -} - -// Fake provides an interface for a clock which can be manually advanced. 
-// Adapted from: https://github.com/jonboulle/clockwork -type Fake interface { - Clock - // Advance advances the Fake to a new point in time, ensuring any existing - // sleepers are notified appropriately before returning - Advance(d time.Duration) - // BlockUntil will block until the Fake has the given number of - // sleepers (callers of Sleep or After) - BlockUntil(n int) -} - -// Returns a Fake initialised at the given time.Time. -func NewFake(n time.Time) Fake { - return &fakeClock{ - time: n, - } -} - -type fakeClock struct { - timers []*fakeTimer - blockers []*blocker - time time.Time - - l sync.RWMutex -} - -// fakeTimer represents a waiting fakeTimer from NewTimer, Sleep, After, etc. -type fakeTimer struct { - callback func(interface{}, time.Time) - arg interface{} - - c chan time.Time - lk sync.RWMutex - done bool - until time.Time - - clock *fakeClock // needed for Reset() -} - -// blocker represents a caller of BlockUntil -type blocker struct { - count int - ch chan struct{} -} - -func (s *fakeTimer) awaken(now time.Time) { - s.lk.Lock() - if s.done { - s.lk.Unlock() - return - } - s.done = true - s.lk.Unlock() - s.callback(s.arg, now) -} - -func (s *fakeTimer) Chan() <-chan time.Time { return s.c } - -func (s *fakeTimer) Reset(d time.Duration) bool { - wasActive := s.Stop() - until := s.clock.Now().Add(d) - s.lk.Lock() - s.until = until - s.done = false - s.lk.Unlock() - s.clock.addTimer(s) - return wasActive -} - -func (s *fakeTimer) Stop() bool { - now := s.clock.Now() - s.lk.Lock() - if s.done { - s.lk.Unlock() - return false - } - s.done = true - // Expire the timer and notify blockers - s.until = now - s.lk.Unlock() - s.clock.Advance(0) - return true -} - -func (s *fakeTimer) whenToTrigger() time.Time { - s.lk.RLock() - defer s.lk.RUnlock() - return s.until -} - -func (fc *fakeClock) addTimer(s *fakeTimer) { - fc.l.Lock() - defer fc.l.Unlock() - - now := fc.time - if now.Sub(s.whenToTrigger()) >= 0 { - // special case - trigger immediately - 
s.awaken(now) - } else { - // otherwise, add to the set of sleepers - fc.timers = append(fc.timers, s) - // and notify any blockers - fc.blockers = notifyBlockers(fc.blockers, len(fc.timers)) - } -} - -// After mimics time.After; it waits for the given duration to elapse on the -// fakeClock, then sends the current time on the returned channel. -func (fc *fakeClock) After(d time.Duration) <-chan time.Time { - return fc.NewTimer(d).Chan() -} - -// notifyBlockers notifies all the blockers waiting until the -// given number of sleepers are waiting on the fakeClock. It -// returns an updated slice of blockers (i.e. those still waiting) -func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { - for _, b := range blockers { - if b.count == count { - close(b.ch) - } else { - newBlockers = append(newBlockers, b) - } - } - return -} - -// Sleep blocks until the given duration has passed on the fakeClock -func (fc *fakeClock) Sleep(d time.Duration) { - <-fc.After(d) -} - -// Time returns the current time of the fakeClock -func (fc *fakeClock) Now() time.Time { - fc.l.RLock() - t := fc.time - fc.l.RUnlock() - return t -} - -// Since returns the duration that has passed since the given time on the fakeClock -func (fc *fakeClock) Since(t time.Time) time.Duration { - return fc.Now().Sub(t) -} - -func (fc *fakeClock) NewTicker(d time.Duration) Ticker { - ft := &fakeTicker{ - c: make(chan time.Time, 1), - stop: make(chan bool, 1), - clock: fc, - period: d, - } - go ft.tick() - return ft -} - -// NewTimer creates a new Timer that will send the current time on its channel -// after the given duration elapses on the fake clock. 
-func (fc *fakeClock) NewTimer(d time.Duration) Timer { - done := make(chan time.Time, 1) - sendTime := func(c interface{}, now time.Time) { - select { - case c.(chan time.Time) <- now: - default: - } - } - - s := &fakeTimer{ - clock: fc, - until: fc.Now().Add(d), - callback: sendTime, - arg: done, - c: done, - } - fc.addTimer(s) - return s -} - -// AfterFunc waits for the duration to elapse on the fake clock and then calls f -// in its own goroutine. -// It returns a Timer that can be used to cancel the call using its Stop method. -func (fc *fakeClock) AfterFunc(d time.Duration, f func()) Timer { - goFunc := func(fn interface{}, _ time.Time) { - go fn.(func())() - } - - s := &fakeTimer{ - clock: fc, - until: fc.Now().Add(d), - callback: goFunc, - arg: f, - // zero-valued c, the same as it is in the `time` pkg - } - fc.addTimer(s) - return s -} - -// Advance advances fakeClock to a new point in time, ensuring channels from any -// previous invocations of After are notified appropriately before returning -func (fc *fakeClock) Advance(d time.Duration) { - fc.l.Lock() - defer fc.l.Unlock() - - end := fc.time.Add(d) - var newSleepers []*fakeTimer - for _, s := range fc.timers { - if end.Sub(s.whenToTrigger()) >= 0 { - s.awaken(end) - } else { - newSleepers = append(newSleepers, s) - } - } - fc.timers = newSleepers - fc.blockers = notifyBlockers(fc.blockers, len(fc.timers)) - fc.time = end -} - -// BlockUntil will block until the fakeClock has the given number of sleepers -// (callers of Sleep or After) -func (fc *fakeClock) BlockUntil(n int) { - fc.l.Lock() - // Fast path: current number of sleepers is what we're looking for - if len(fc.timers) == n { - fc.l.Unlock() - return - } - // Otherwise, set up a new blocker - b := &blocker{ - count: n, - ch: make(chan struct{}), - } - fc.blockers = append(fc.blockers, b) - fc.l.Unlock() - <-b.ch -} - -type fakeTicker struct { - c chan time.Time - stop chan bool - clock Fake - period time.Duration -} - -func (ft *fakeTicker) 
Chan() <-chan time.Time { - return ft.c -} - -func (ft *fakeTicker) Stop() { - ft.stop <- true -} - -// tick sends the tick time to the ticker channel after every period. -// Tick events are discarded if the underlying ticker channel does -// not have enough capacity. -func (ft *fakeTicker) tick() { - tick := ft.clock.Now() - for { - tick = tick.Add(ft.period) - remaining := tick.Sub(ft.clock.Now()) - if remaining <= 0 { - // The tick should have already happened. This can happen when - // Advance() is called on the fake clock with a duration larger - // than this ticker's period. - select { - case ft.c <- tick: - default: - } - continue - } - - select { - case <-ft.stop: - return - case <-ft.clock.After(remaining): - select { - case ft.c <- tick: - default: - } - } - } -} diff --git a/internal/pkg/clock/testing_test.go b/internal/pkg/clock/testing_test.go deleted file mode 100644 index 08ad4ed9d8..0000000000 --- a/internal/pkg/clock/testing_test.go +++ /dev/null @@ -1,321 +0,0 @@ -package clock_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -var startTime = time.Unix(123456789, 0) - -func TestFakeAfter(t *testing.T) { - tf.UnitTest(t) - fc := clock.NewFake(startTime) - - zero := fc.After(0) - select { - case <-zero: - default: - t.Errorf("zero did not return!") - } - one := fc.After(1) - two := fc.After(2) - six := fc.After(6) - ten := fc.After(10) - fc.Advance(1) - select { - case <-one: - default: - t.Errorf("one did not return!") - } - select { - case <-two: - t.Errorf("two returned prematurely!") - case <-six: - t.Errorf("six returned prematurely!") - case <-ten: - t.Errorf("ten returned prematurely!") - default: - } - fc.Advance(1) - select { - case <-two: - default: - t.Errorf("two did not return!") - } - select { - case <-six: - t.Errorf("six returned prematurely!") - case 
<-ten: - t.Errorf("ten returned prematurely!") - default: - } - fc.Advance(1) - select { - case <-six: - t.Errorf("six returned prematurely!") - case <-ten: - t.Errorf("ten returned prematurely!") - default: - } - fc.Advance(3) - select { - case <-six: - default: - t.Errorf("six did not return!") - } - select { - case <-ten: - t.Errorf("ten returned prematurely!") - default: - } - fc.Advance(100) - select { - case <-ten: - default: - t.Errorf("ten did not return!") - } -} - -func TestNewFakeAt(t *testing.T) { - tf.UnitTest(t) - t1 := time.Date(1999, time.February, 3, 4, 5, 6, 7, time.UTC) - fc := clock.NewFake(t1) - now := fc.Now() - assert.Equalf(t, now, t1, "Fake.Now() returned unexpected non-initialised value: want=%#v, got %#v", t1, now) -} - -func TestFakeSince(t *testing.T) { - tf.UnitTest(t) - fc := clock.NewFake(startTime) - now := fc.Now() - elapsedTime := time.Second - fc.Advance(elapsedTime) - assert.Truef(t, fc.Since(now) == elapsedTime, "Fake.Since() returned unexpected duration, got: %d, want: %d", fc.Since(now), elapsedTime) -} - -func TestFakeTimers(t *testing.T) { - tf.UnitTest(t) - fc := clock.NewFake(startTime) - - zero := fc.NewTimer(0) - - assert.False(t, zero.Stop(), "zero timer could be stopped") - select { - case <-zero.Chan(): - default: - t.Errorf("zero timer didn't emit time") - } - - one := fc.NewTimer(1) - - select { - case <-one.Chan(): - t.Errorf("non-zero timer did emit time") - default: - } - - assert.True(t, one.Stop(), "non-zero timer couldn't be stopped") - - fc.Advance(5) - - select { - case <-one.Chan(): - t.Errorf("stopped timer did emit time") - default: - } - - assert.False(t, one.Reset(1), "resetting stopped timer didn't return false") - assert.True(t, one.Reset(1), "resetting active timer didn't return true") - - fc.Advance(1) - - assert.False(t, one.Stop(), "triggered timer could be stopped") - - select { - case <-one.Chan(): - default: - t.Errorf("triggered timer didn't emit time") - } - - fc.Advance(1) - - select { - 
case <-one.Chan(): - t.Errorf("triggered timer emitted time more than once") - default: - } - - one.Reset(0) - - assert.False(t, one.Stop(), "reset to zero timer could be stopped") - select { - case <-one.Chan(): - default: - t.Errorf("reset to zero timer didn't emit time") - } -} - -type syncFunc func(didAdvance func(), shouldAdvance func(string), shouldBlock func(string)) - -func inSync(t *testing.T, func1 syncFunc, func2 syncFunc) { - stepChan1 := make(chan struct{}, 16) - stepChan2 := make(chan struct{}, 16) - go func() { - func1(func() { stepChan1 <- struct{}{} }, func(point string) { - select { - case <-stepChan2: - case <-time.After(time.Second): - t.Errorf("Did not advance, should have %s", point) - } - }, - func(point string) { - select { - case <-stepChan2: - t.Errorf("Was able to advance, should not have %s", point) - case <-time.After(10 * time.Millisecond): - } - }, - ) - }() - func2(func() { stepChan2 <- struct{}{} }, func(point string) { - select { - case <-stepChan1: - case <-time.After(time.Second): - t.Errorf("Did not advance, should have %s", point) - } - }, - func(point string) { - select { - case <-stepChan1: - t.Errorf("Was able to advance, should not have %s", point) - case <-time.After(10 * time.Millisecond): - } - }) -} - -func TestBlockingOnTimers(t *testing.T) { - tf.UnitTest(t) - fc := clock.NewFake(startTime) - - inSync(t, func(didAdvance func(), shouldAdvance func(string), _ func(string)) { - fc.BlockUntil(0) - didAdvance() - fc.BlockUntil(1) - didAdvance() - shouldAdvance("timers stopped") - fc.BlockUntil(0) - didAdvance() - fc.BlockUntil(1) - didAdvance() - fc.BlockUntil(2) - didAdvance() - fc.BlockUntil(3) - didAdvance() - shouldAdvance("timers stopped") - fc.BlockUntil(2) - didAdvance() - shouldAdvance("time advanced") - fc.BlockUntil(0) - didAdvance() - }, func(didAdvance func(), shouldAdvance func(string), shouldBlock func(string)) { - shouldAdvance("when only blocking for 0 timers") - shouldBlock("when waiting for 1 timer") - 
fc.NewTimer(0) - shouldBlock("when immediately expired timer added") - one := fc.NewTimer(1) - shouldAdvance("once a timer exists") - one.Stop() - didAdvance() - shouldAdvance("when only blocking for 0 timers") - shouldBlock("when all timers are stopped and waiting for a timer") - one.Reset(1) - shouldAdvance("once timer is restarted") - shouldBlock("when waiting for 2 timers with one active") - _ = fc.NewTimer(2) - shouldAdvance("when second timer added") - shouldBlock("when waiting for 3 timers with 2 active") - _ = fc.NewTimer(3) - shouldAdvance("when third timer added") - one.Stop() - didAdvance() - shouldAdvance("when blocking for 2 timers if a third is stopped") - fc.Advance(3) - didAdvance() - shouldAdvance("waiting for no timers") - }) -} - -func TestAdvancePastAfter(t *testing.T) { - tf.UnitTest(t) - - fc := clock.NewFake(startTime) - - start := fc.Now() - one := fc.After(1) - two := fc.After(2) - six := fc.After(6) - - fc.Advance(1) - assert.False(t, start.Add(1).Sub(<-one) > 0, "timestamp is too early") - - fc.Advance(5) - assert.False(t, start.Add(2).Sub(<-two) > 0, "timestamp is too early") - assert.False(t, start.Add(6).Sub(<-six) > 0, "timestamp is too early") -} - -func TestFakeTickerStop(t *testing.T) { - tf.UnitTest(t) - fc := clock.NewFake(startTime) - - ft := fc.NewTicker(1) - ft.Stop() - fc.Advance(1) - select { - case <-ft.Chan(): - t.Errorf("received unexpected tick!") - default: - } -} - -func TestFakeTickerTick(t *testing.T) { - tf.UnitTest(t) - fc := clock.NewFake(startTime) - now := fc.Now() - - // The tick at now.Add(2) should not get through since we advance time by - // two units below and the channel can hold at most one tick until it's - // consumed. - first := now.Add(1) - second := now.Add(3) - - // We wrap the Advance() calls with blockers to make sure that the ticker - // can go to sleep and produce ticks without time passing in parallel. 
- ft := fc.NewTicker(1) - fc.BlockUntil(1) - fc.Advance(2) - fc.BlockUntil(1) - - select { - case tick := <-ft.Chan(): - assert.Truef(t, tick == first, "wrong tick time, got: %v, want: %v", tick, first) - default: - t.Errorf("expected tick!") - } - - // Advance by one more unit, we should get another tick now. - fc.Advance(1) - fc.BlockUntil(1) - - select { - case tick := <-ft.Chan(): - assert.Truef(t, tick == second, "wrong tick time, got: %v, want: %v", tick, second) - default: - t.Errorf("expected tick!") - } - ft.Stop() -} diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go deleted file mode 100644 index f11aff24a4..0000000000 --- a/internal/pkg/config/config.go +++ /dev/null @@ -1,422 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "reflect" - "regexp" - "strings" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// Config is an in memory representation of the filecoin configuration file -type Config struct { - API *APIConfig `json:"api"` - Bootstrap *BootstrapConfig `json:"bootstrap"` - Datastore *DatastoreConfig `json:"datastore"` - Drand *DrandConfig `json:"drand"` - Mining *MiningConfig `json:"mining"` - Mpool *MessagePoolConfig `json:"mpool"` - NetworkParams *NetworkParamsConfig `json:"parameters"` - Observability *ObservabilityConfig `json:"observability"` - SectorBase *SectorBaseConfig `json:"sectorbase"` - Swarm *SwarmConfig `json:"swarm"` - Wallet *WalletConfig `json:"wallet"` -} - -// APIConfig holds all configuration options related to the api. 
-type APIConfig struct { - Address string `json:"address"` - AccessControlAllowOrigin []string `json:"accessControlAllowOrigin"` - AccessControlAllowCredentials bool `json:"accessControlAllowCredentials"` - AccessControlAllowMethods []string `json:"accessControlAllowMethods"` -} - -func newDefaultAPIConfig() *APIConfig { - return &APIConfig{ - Address: "/ip4/127.0.0.1/tcp/3453", - AccessControlAllowOrigin: []string{ - "http://localhost:8080", - "https://localhost:8080", - "http://127.0.0.1:8080", - "https://127.0.0.1:8080", - }, - AccessControlAllowMethods: []string{"GET", "POST", "PUT"}, - } -} - -// DatastoreConfig holds all the configuration options for the datastore. -// TODO: use the advanced datastore configuration from ipfs -type DatastoreConfig struct { - Type string `json:"type"` - Path string `json:"path"` -} - -// Validators hold the list of validation functions for each configuration -// property. Validators must take a key and json string respectively as -// arguments, and must return either an error or nil depending on whether or not -// the given key and value are valid. Validators will only be run if a property -// being set matches the name given in this map. -var Validators = map[string]func(string, string) error{ - "heartbeat.nickname": validateLettersOnly, -} - -func newDefaultDatastoreConfig() *DatastoreConfig { - return &DatastoreConfig{ - Type: "badgerds", - Path: "badger", - } -} - -// SwarmConfig holds all configuration options related to the swarm. 
-type SwarmConfig struct { - Address string `json:"address"` - PublicRelayAddress string `json:"public_relay_address,omitempty"` -} - -func newDefaultSwarmConfig() *SwarmConfig { - return &SwarmConfig{ - Address: "/ip4/0.0.0.0/tcp/6000", - } -} - -// BootstrapConfig holds all configuration options related to bootstrap nodes -type BootstrapConfig struct { - Addresses []string `json:"addresses"` - MinPeerThreshold int `json:"minPeerThreshold"` - Period string `json:"period,omitempty"` -} - -// TODO: provide bootstrap node addresses -func newDefaultBootstrapConfig() *BootstrapConfig { - return &BootstrapConfig{ - Addresses: []string{}, - MinPeerThreshold: 0, // TODO: we don't actually have an bootstrap peers yet. - Period: "1m", - } -} - -// MiningConfig holds all configuration options related to mining. -type MiningConfig struct { - MinerAddress address.Address `json:"minerAddress"` - AutoSealIntervalSeconds uint `json:"autoSealIntervalSeconds"` - StoragePrice types.AttoFIL `json:"storagePrice"` -} - -func newDefaultMiningConfig() *MiningConfig { - return &MiningConfig{ - MinerAddress: address.Undef, - AutoSealIntervalSeconds: 120, - StoragePrice: types.ZeroAttoFIL, - } -} - -// WalletConfig holds all configuration options related to the wallet. -type WalletConfig struct { - DefaultAddress address.Address `json:"defaultAddress,omitempty"` -} - -func newDefaultWalletConfig() *WalletConfig { - return &WalletConfig{ - DefaultAddress: address.Undef, - } -} - -// DrandConfig holds all configuration options related to pulling randomness from Drand servers -type DrandConfig struct { - // Addresses are are drand server addresses in the format - Addresses []string `json:"addresses"` - // Secure is whether or not the drand address are secure (e.g. 
TLS) - Secure bool `json:"secure"` - // DistKey is the distributed public key of the server group expressed as hex encoded coefficients - DistKey [][]byte `json:"distKey"` - StartTimeUnix int64 `json:"startTimeUnix"` - RoundSeconds int `json:"roundSeconds"` -} - -func newDefaultDrandConfig() *DrandConfig { - return &DrandConfig{ - Addresses: []string{ - "localhost:8080", - "localhost:8081", - "localhost:8082", - "localhost:8083", - "localhost:8084", - }, - Secure: false, - DistKey: [][]byte{}, - StartTimeUnix: 0, - RoundSeconds: 30, - } -} - -// HeartbeatConfig holds all configuration options related to node heartbeat. -type HeartbeatConfig struct { - // BeatTarget represents the address the filecoin node will send heartbeats to. - BeatTarget string `json:"beatTarget"` - // BeatPeriod represents how frequently heartbeats are sent. - // Golang duration units are accepted. - BeatPeriod string `json:"beatPeriod"` - // ReconnectPeriod represents how long the node waits before attempting to reconnect. - // Golang duration units are accepted. - ReconnectPeriod string `json:"reconnectPeriod"` - // Nickname represents the nickname of the filecoin node, - Nickname string `json:"nickname"` -} - -// ObservabilityConfig is a container for configuration related to observables. -type ObservabilityConfig struct { - Metrics *MetricsConfig `json:"metrics"` - Tracing *TraceConfig `json:"tracing"` -} - -func newDefaultObservabilityConfig() *ObservabilityConfig { - return &ObservabilityConfig{ - Metrics: newDefaultMetricsConfig(), - Tracing: newDefaultTraceConfig(), - } -} - -// MetricsConfig holds all configuration options related to node metrics. -type MetricsConfig struct { - // Enabled will enable prometheus metrics when true. - PrometheusEnabled bool `json:"prometheusEnabled"` - // ReportInterval represents how frequently filecoin will update its prometheus metrics. 
- ReportInterval string `json:"reportInterval"` - // PrometheusEndpoint represents the address filecoin will expose prometheus metrics at. - PrometheusEndpoint string `json:"prometheusEndpoint"` -} - -func newDefaultMetricsConfig() *MetricsConfig { - return &MetricsConfig{ - PrometheusEnabled: false, - ReportInterval: "5s", - PrometheusEndpoint: "/ip4/0.0.0.0/tcp/9400", - } -} - -// TraceConfig holds all configuration options related to enabling and exporting -// filecoin node traces. -type TraceConfig struct { - // JaegerTracingEnabled will enable exporting traces to jaeger when true. - JaegerTracingEnabled bool `json:"jaegerTracingEnabled"` - // ProbabilitySampler will sample fraction of traces, 1.0 will sample all traces. - ProbabilitySampler float64 `json:"probabilitySampler"` - // JaegerEndpoint is the URL traces are collected on. - JaegerEndpoint string `json:"jaegerEndpoint"` -} - -func newDefaultTraceConfig() *TraceConfig { - return &TraceConfig{ - JaegerEndpoint: "http://localhost:14268/api/traces", - JaegerTracingEnabled: false, - ProbabilitySampler: 1.0, - } -} - -// MessagePoolConfig holds all configuration options related to nodes message pool (mpool). 
-type MessagePoolConfig struct { - // MaxPoolSize is the maximum number of pending messages will will allow in the message pool at any time - MaxPoolSize uint `json:"maxPoolSize"` - // MaxNonceGap is the maximum nonce of a message past the last received on chain - MaxNonceGap uint64 `json:"maxNonceGap"` -} - -func newDefaultMessagePoolConfig() *MessagePoolConfig { - return &MessagePoolConfig{ - MaxPoolSize: 1000000, - MaxNonceGap: 100, - } -} - -type NetworkParamsConfig struct { - ConsensusMinerMinPower uint64 // uint64 goes up to 18 EiB - ReplaceProofTypes []int64 -} - -func newDefaultNetworkParamsConfig() *NetworkParamsConfig { - return &NetworkParamsConfig{ - ConsensusMinerMinPower: 0, // 0 means don't override the value - ReplaceProofTypes: []int64{ - int64(abi.RegisteredProof_StackedDRG2KiBSeal), - int64(abi.RegisteredProof_StackedDRG512MiBSeal), - int64(abi.RegisteredProof_StackedDRG32GiBSeal), - int64(abi.RegisteredProof_StackedDRG64GiBSeal), - }, - } -} - -// SectorBaseConfig holds all configuration options related to the node's -// sector storage. -type SectorBaseConfig struct { - // RootDir is the absolute path to the root directory holding sector data. - // If empty the default of /sectors is implied. - RootDirPath string `json:"rootdir"` - - // PreSealedSectorsDir is the absolute path to the directory holding any - // pre-sealed sector files and corresponding metadata JSON. - // If empty, it is assumed that no pre-sealed sectors exist. 
- PreSealedSectorsDirPath string `json:"preSealedSectorsDir"` -} - -func newDefaultSectorbaseConfig() *SectorBaseConfig { - return &SectorBaseConfig{ - RootDirPath: "", - PreSealedSectorsDirPath: "", - } -} - -// NewDefaultConfig returns a config object with all the fields filled out to -// their default values -func NewDefaultConfig() *Config { - return &Config{ - API: newDefaultAPIConfig(), - Bootstrap: newDefaultBootstrapConfig(), - Datastore: newDefaultDatastoreConfig(), - Drand: newDefaultDrandConfig(), - Mining: newDefaultMiningConfig(), - Mpool: newDefaultMessagePoolConfig(), - NetworkParams: newDefaultNetworkParamsConfig(), - Observability: newDefaultObservabilityConfig(), - SectorBase: newDefaultSectorbaseConfig(), - Swarm: newDefaultSwarmConfig(), - Wallet: newDefaultWalletConfig(), - } -} - -// WriteFile writes the config to the given filepath. -func (cfg *Config) WriteFile(file string) error { - f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - return err - } - defer f.Close() // nolint: errcheck - - configString, err := json.MarshalIndent(*cfg, "", "\t") - if err != nil { - return err - } - - _, err = fmt.Fprint(f, string(configString)) - return err -} - -// ReadFile reads a config file from disk. -func ReadFile(file string) (*Config, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - - cfg := NewDefaultConfig() - rawConfig, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - if len(rawConfig) == 0 { - return cfg, nil - } - - err = json.Unmarshal(rawConfig, &cfg) - if err != nil { - return nil, err - } - - return cfg, nil -} - -// Set sets the config sub-struct referenced by `key`, e.g. 'api.address' -// or 'datastore' to the json key value pair encoded in jsonVal. 
-func (cfg *Config) Set(dottedKey string, jsonString string) error { - if !json.Valid([]byte(jsonString)) { - jsonBytes, _ := json.Marshal(jsonString) - jsonString = string(jsonBytes) - } - - if err := validate(dottedKey, jsonString); err != nil { - return err - } - - keys := strings.Split(dottedKey, ".") - for i := len(keys) - 1; i >= 0; i-- { - jsonString = fmt.Sprintf(`{ "%s": %s }`, keys[i], jsonString) - } - - decoder := json.NewDecoder(strings.NewReader(jsonString)) - decoder.DisallowUnknownFields() - - return decoder.Decode(&cfg) -} - -// Get gets the config sub-struct referenced by `key`, e.g. 'api.address' -func (cfg *Config) Get(key string) (interface{}, error) { - v := reflect.Indirect(reflect.ValueOf(cfg)) - keyTags := strings.Split(key, ".") -OUTER: - for j, keyTag := range keyTags { - if v.Type().Kind() == reflect.Struct { - for i := 0; i < v.NumField(); i++ { - jsonTag := strings.Split( - v.Type().Field(i).Tag.Get("json"), - ",")[0] - if jsonTag == keyTag { - v = v.Field(i) - if j == len(keyTags)-1 { - return v.Interface(), nil - } - v = reflect.Indirect(v) // only attempt one dereference - continue OUTER - } - } - } - - return nil, fmt.Errorf("key: %s invalid for config", key) - } - // Cannot get here as len(strings.Split(s, sep)) >= 1 with non-empty sep - return nil, fmt.Errorf("empty key is invalid") -} - -// validate runs validations on a given key and json string. validate uses the -// validators map defined at the top of this file to determine which validations -// to use for each key. 
-func validate(dottedKey string, jsonString string) error { - var obj interface{} - if err := json.Unmarshal([]byte(jsonString), &obj); err != nil { - return err - } - // recursively validate sub-keys by partially unmarshalling - if reflect.ValueOf(obj).Kind() == reflect.Map { - var obj map[string]json.RawMessage - if err := json.Unmarshal([]byte(jsonString), &obj); err != nil { - return err - } - for key := range obj { - if err := validate(dottedKey+"."+key, string(obj[key])); err != nil { - return err - } - } - return nil - } - - if validationFunc, present := Validators[dottedKey]; present { - return validationFunc(dottedKey, jsonString) - } - - return nil -} - -// validateLettersOnly validates that a given value contains only letters. If it -// does not, an error is returned using the given key for the message. -func validateLettersOnly(key string, value string) error { - if match, _ := regexp.MatchString("^\"[a-zA-Z]+\"$", value); !match { - return errors.Errorf(`"%s" must only contain letters`, key) - } - return nil -} diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go deleted file mode 100644 index dc41b158ac..0000000000 --- a/internal/pkg/config/config_test.go +++ /dev/null @@ -1,296 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestDefaults(t *testing.T) { - tf.UnitTest(t) - - cfg := NewDefaultConfig() - - bs := []string{} - assert.Equal(t, "/ip4/127.0.0.1/tcp/3453", cfg.API.Address) - assert.Equal(t, "/ip4/0.0.0.0/tcp/6000", cfg.Swarm.Address) - assert.Equal(t, bs, cfg.Bootstrap.Addresses) -} - -func TestWriteFile(t *testing.T) { - tf.UnitTest(t) - - dir, err := ioutil.TempDir("", "config") - assert.NoError(t, err) - defer func() { - 
require.NoError(t, os.RemoveAll(dir)) - }() - - cfg := NewDefaultConfig() - - cfgJSON, err := json.MarshalIndent(*cfg, "", "\t") - require.NoError(t, err) - expected := string(cfgJSON) - - SanityCheck(t, expected) - - assert.NoError(t, cfg.WriteFile(filepath.Join(dir, "config.json"))) - content, err := ioutil.ReadFile(filepath.Join(dir, "config.json")) - assert.NoError(t, err) - - assert.Equal(t, expected, string(content)) - assert.NoError(t, os.Remove(filepath.Join(dir, "config.json"))) -} - -func TestConfigRoundtrip(t *testing.T) { - tf.UnitTest(t) - - dir, err := ioutil.TempDir("", "config") - assert.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() - - cfg := NewDefaultConfig() - - cfgpath := filepath.Join(dir, "config.json") - assert.NoError(t, cfg.WriteFile(cfgpath)) - - cfgout, err := ReadFile(cfgpath) - assert.NoError(t, err) - - assert.Equal(t, cfg, cfgout) -} - -func TestConfigReadFileDefaults(t *testing.T) { - tf.UnitTest(t) - - t.Run("all sections exist", func(t *testing.T) { - cfgpath, cleaner, err := createConfigFile(` - { - "api": { - "address": "/ip4/127.0.0.1/tcp/9999", - "keyThatDoesntExit": false - }, - "swarm": { - "keyThatDoesntExit": "hello" - } - }`) - assert.NoError(t, err) - defer func() { - require.NoError(t, cleaner()) - }() - cfg, err := ReadFile(cfgpath) - assert.NoError(t, err) - - assert.Equal(t, cfg.API.Address, "/ip4/127.0.0.1/tcp/9999") - assert.Equal(t, cfg.Swarm.Address, "/ip4/0.0.0.0/tcp/6000") - }) - - t.Run("missing one section", func(t *testing.T) { - cfgpath, cleaner, err := createConfigFile(` - { - "api": { - "address": "/ip4/127.0.0.1/tcp/9999", - "keyThatDoesntExit'": false - } - }`) - assert.NoError(t, err) - defer func() { - require.NoError(t, cleaner()) - }() - cfg, err := ReadFile(cfgpath) - assert.NoError(t, err) - - assert.Equal(t, cfg.API.Address, "/ip4/127.0.0.1/tcp/9999") - assert.Equal(t, cfg.Swarm.Address, "/ip4/0.0.0.0/tcp/6000") - }) - - t.Run("empty file", func(t *testing.T) { - 
cfgpath, cleaner, err := createConfigFile("") - assert.NoError(t, err) - defer func() { - require.NoError(t, cleaner()) - }() - cfg, err := ReadFile(cfgpath) - assert.NoError(t, err) - - assert.Equal(t, cfg.API.Address, "/ip4/127.0.0.1/tcp/3453") - assert.Equal(t, cfg.Swarm.Address, "/ip4/0.0.0.0/tcp/6000") - }) -} - -func TestConfigGet(t *testing.T) { - tf.UnitTest(t) - - t.Run("valid gets", func(t *testing.T) { - cfg := NewDefaultConfig() - - out, err := cfg.Get("api.address") - assert.NoError(t, err) - assert.Equal(t, cfg.API.Address, out) - - out, err = cfg.Get("api.accessControlAllowOrigin") - assert.NoError(t, err) - assert.Equal(t, cfg.API.AccessControlAllowOrigin, out) - - out, err = cfg.Get("api") - assert.NoError(t, err) - assert.Equal(t, cfg.API, out) - - out, err = cfg.Get("bootstrap.addresses") - assert.NoError(t, err) - assert.Equal(t, cfg.Bootstrap.Addresses, out) - - out, err = cfg.Get("bootstrap") - assert.NoError(t, err) - assert.Equal(t, cfg.Bootstrap, out) - - out, err = cfg.Get("datastore.path") - assert.NoError(t, err) - assert.Equal(t, cfg.Datastore.Path, out) - - // TODO we can test this as soon as we have bootstrap addresses. - // out, err = cfg.Get("bootstrap.addresses.0") - // assert.NoError(err) - // assert.Equal(cfg.Bootstrap.Addresses[0], out) - }) - - t.Run("invalid gets", func(t *testing.T) { - cfg := NewDefaultConfig() - - _, err := cfg.Get("datastore.") - assert.Error(t, err) - - _, err = cfg.Get(".datastore") - assert.Error(t, err) - - _, err = cfg.Get("invalidfield") - assert.Error(t, err) - - _, err = cfg.Get("bootstrap.addresses.toomuch") - assert.Error(t, err) - - _, err = cfg.Get("api-address") - assert.Error(t, err) - - // TODO: temporary as we don't have any ATM. 
- _, err = cfg.Get("bootstrap.addresses.0") - assert.Error(t, err) - }) -} - -func TestConfigSet(t *testing.T) { - tf.UnitTest(t) - - t.Run("set leaf values", func(t *testing.T) { - cfg := NewDefaultConfig() - - // set string - err := cfg.Set("api.address", `"/ip4/127.9.9.9/tcp/0"`) - assert.NoError(t, err) - assert.Equal(t, cfg.API.Address, "/ip4/127.9.9.9/tcp/0") - - // set slice - err = cfg.Set("api.accessControlAllowOrigin", `["http://localroast:7854"]`) - assert.NoError(t, err) - assert.Equal(t, cfg.API.AccessControlAllowOrigin, []string{"http://localroast:7854"}) - }) - - t.Run("set table value", func(t *testing.T) { - cfg := NewDefaultConfig() - - jsonBlob := `{"type": "badgerbadgerbadgerds", "path": "mushroom-mushroom"}` - err := cfg.Set("datastore", jsonBlob) - assert.NoError(t, err) - assert.Equal(t, cfg.Datastore.Type, "badgerbadgerbadgerds") - assert.Equal(t, cfg.Datastore.Path, "mushroom-mushroom") - - cfg1path, cleaner, err := createConfigFile(fmt.Sprintf(`{"datastore": %s}`, jsonBlob)) - assert.NoError(t, err) - defer func() { - require.NoError(t, cleaner()) - }() - - cfg1, err := ReadFile(cfg1path) - assert.NoError(t, err) - assert.Equal(t, cfg1.Datastore, cfg.Datastore) - - // inline tables - jsonBlob = `{"type": "badgerbadgerbadgerds", "path": "mushroom-mushroom"}` - err = cfg.Set("datastore", jsonBlob) - assert.NoError(t, err) - - assert.Equal(t, cfg1.Datastore, cfg.Datastore) - }) - - t.Run("invalid set", func(t *testing.T) { - cfg := NewDefaultConfig() - - // bad key - err := cfg.Set("datastore.nope", `"too bad, fake key"`) - assert.Error(t, err) - - // not json - err = cfg.Set("bootstrap.addresses", `nota.json?key`) - assert.Error(t, err) - - // newlines in inline tables are invalid - tomlB := `{type = "badgerbadgerbadgerds", -path = "mushroom-mushroom"}` - err = cfg.Set("datastore", tomlB) - assert.Error(t, err) - - // setting values of wrong type - err = cfg.Set("datastore.type", `["not a", "string"]`) - assert.Error(t, err) - - err = 
cfg.Set("bootstrap.addresses", `"not a list"`) - assert.Error(t, err) - - err = cfg.Set("api", `"strings aren't structs"`) - assert.Error(t, err) - - // Corrupt address won't pass checksum - //err = cfg.Set("mining.defaultAddress", "fcqv3gmsd9gd7dqfe60d28euf4tx9v7929corrupt") - //assert.Contains(err.Error(), "invalid") - - err = cfg.Set("wallet.defaultAddress", "corruptandtooshort") - assert.Contains(t, err.Error(), address.ErrUnknownNetwork.Error()) - }) - - t.Run("setting leaves does not interfere with neighboring leaves", func(t *testing.T) { - cfg := NewDefaultConfig() - - err := cfg.Set("bootstrap.period", `"3m"`) - assert.NoError(t, err) - err = cfg.Set("bootstrap.minPeerThreshold", `5`) - assert.NoError(t, err) - assert.Equal(t, cfg.Bootstrap.Period, "3m") - }) -} - -func createConfigFile(content string) (string, func() error, error) { - dir, err := ioutil.TempDir("", "config") - if err != nil { - return "", nil, err - } - cfgpath := filepath.Join(dir, "config.json") - - if err := ioutil.WriteFile(cfgpath, []byte(content), 0644); err != nil { - return "", nil, err - } - - return cfgpath, func() error { - return os.RemoveAll(dir) - }, nil -} diff --git a/internal/pkg/config/testing.go b/internal/pkg/config/testing.go deleted file mode 100644 index 8d31379034..0000000000 --- a/internal/pkg/config/testing.go +++ /dev/null @@ -1,19 +0,0 @@ -package config - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -// Makes some basic checks of a serialized config to ascertain that it looks kind of right. -// This is instead of brittle hardcoded exact config expectations. 
-func SanityCheck(t *testing.T, cfgJSON string) { - assert.True(t, strings.Contains(cfgJSON, "accessControlAllowOrigin")) - assert.True(t, strings.Contains(cfgJSON, "http://localhost:8080")) - assert.True(t, strings.Contains(cfgJSON, "bootstrap")) - assert.True(t, strings.Contains(cfgJSON, "bootstrap")) - assert.True(t, strings.Contains(cfgJSON, "\"minPeerThreshold\": 0")) - assert.True(t, strings.Contains(cfgJSON, "minerAddress")) -} diff --git a/internal/pkg/consensus/block_validation.go b/internal/pkg/consensus/block_validation.go deleted file mode 100644 index f0a478cca6..0000000000 --- a/internal/pkg/consensus/block_validation.go +++ /dev/null @@ -1,235 +0,0 @@ -package consensus - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/specs-actors/actors/builtin" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/ipfs/go-cid" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -type messageStore interface { - LoadMessages(context.Context, cid.Cid) ([]*types.SignedMessage, []*types.UnsignedMessage, error) - LoadReceipts(context.Context, cid.Cid) ([]vm.MessageReceipt, error) -} - -type chainState interface { - GetActorAt(ctx context.Context, tipKey block.TipSetKey, addr address.Address) (*actor.Actor, error) -} - -// BlockValidator defines an interface used to validate a blocks syntax and -// semantics. 
-type BlockValidator interface { - BlockSemanticValidator - BlockSyntaxValidator -} - -// SyntaxValidator defines and interface used to validate block's syntax and the -// syntax of constituent messages -type SyntaxValidator interface { - BlockSyntaxValidator - MessageSyntaxValidator -} - -// BlockSemanticValidator defines an interface used to validate a blocks -// semantics. -type BlockSemanticValidator interface { - ValidateHeaderSemantic(ctx context.Context, child *block.Block, parents block.TipSet) error - ValidateMessagesSemantic(ctx context.Context, child *block.Block, parents block.TipSetKey) error -} - -// BlockSyntaxValidator defines an interface used to validate a blocks -// syntax. -type BlockSyntaxValidator interface { - ValidateSyntax(ctx context.Context, blk *block.Block) error -} - -// MessageSyntaxValidator defines an interface used to validate a message's -// syntax. -type MessageSyntaxValidator interface { - ValidateSignedMessageSyntax(ctx context.Context, smsg *types.SignedMessage) error - ValidateUnsignedMessageSyntax(ctx context.Context, msg *types.UnsignedMessage) error -} - -// DefaultBlockValidator implements the BlockValidator interface. -type DefaultBlockValidator struct { - clock.ChainEpochClock - ms messageStore - cs chainState -} - -// WrappedSyntaxValidator implements syntax validator interface -type WrappedSyntaxValidator struct { - BlockSyntaxValidator - MessageSyntaxValidator -} - -// NewDefaultBlockValidator returns a new DefaultBlockValidator. It uses `blkTime` -// to validate blocks and uses the DefaultBlockValidationClock. -func NewDefaultBlockValidator(c clock.ChainEpochClock, m messageStore, cs chainState) *DefaultBlockValidator { - return &DefaultBlockValidator{ - ChainEpochClock: c, - ms: m, - cs: cs, - } -} - -// NotFutureBlock errors if the block belongs to a future epoch according to -// the chain clock. 
-func (dv *DefaultBlockValidator) NotFutureBlock(b *block.Block) error { - currentEpoch := dv.EpochAtTime(dv.Now()) - if b.Height > currentEpoch { - return fmt.Errorf("block %s with timestamp %d generate in future epoch %d", b.Cid().String(), b.Timestamp, b.Height) - } - return nil -} - -// TimeMatchesEpoch errors if the epoch and time don't match according to the -// chain clock. -func (dv *DefaultBlockValidator) TimeMatchesEpoch(b *block.Block) error { - earliestExpected, latestExpected := dv.EpochRangeAtTimestamp(b.Timestamp) - blockEpoch := b.Height - if (blockEpoch < earliestExpected) || (blockEpoch > latestExpected) { - return fmt.Errorf( - "block %s with timestamp %d generated in wrong epoch %d, expected epoch in range [%d, %d]", - b.Cid().String(), - b.Timestamp, - b.Height, - earliestExpected, - latestExpected, - ) - } - return nil -} - -// ValidateHeaderSemantic checks validation conditions on a header that can be -// checked given only the parent header. -func (dv *DefaultBlockValidator) ValidateHeaderSemantic(ctx context.Context, child *block.Block, parents block.TipSet) error { - ph, err := parents.Height() - if err != nil { - return err - } - - if child.Height <= ph { - return fmt.Errorf("block %s has invalid height %d", child.Cid().String(), child.Height) - } - - return nil -} - -// ValidateFullSemantic checks validation conditions on a block's messages that don't require message execution. 
-func (dv *DefaultBlockValidator) ValidateMessagesSemantic(ctx context.Context, child *block.Block, parents block.TipSetKey) error { - // validate call sequence numbers - secpMsgs, blsMsgs, err := dv.ms.LoadMessages(ctx, child.Messages.Cid) - if err != nil { - return errors.Wrapf(err, "block validation failed loading message list %s for block %s", child.Messages, child.Cid()) - } - - expectedCallSeqNum := map[address.Address]uint64{} - for _, msg := range blsMsgs { - msgCid, err := msg.Cid() - if err != nil { - return err - } - - from, err := dv.getAndValidateFromActor(ctx, msg, parents) - if err != nil { - return errors.Wrapf(err, "from actor %s for message %s of block %s invalid", msg.From, msgCid, child.Cid()) - } - - err = dv.validateMessage(msg, expectedCallSeqNum, from) - if err != nil { - return errors.Wrapf(err, "message %s of block %s invalid", msgCid, child.Cid()) - } - } - - for _, msg := range secpMsgs { - msgCid, err := msg.Cid() - if err != nil { - return err - } - - from, err := dv.getAndValidateFromActor(ctx, &msg.Message, parents) - if err != nil { - return errors.Wrapf(err, "from actor %s for message %s of block %s invalid", msg.Message.From, msgCid, child.Cid()) - } - - err = dv.validateMessage(&msg.Message, expectedCallSeqNum, from) - if err != nil { - return errors.Wrapf(err, "message %s of block %s invalid", msgCid, child.Cid()) - } - } - - return nil -} - -func (dv *DefaultBlockValidator) getAndValidateFromActor(ctx context.Context, msg *types.UnsignedMessage, parents block.TipSetKey) (*actor.Actor, error) { - actor, err := dv.cs.GetActorAt(ctx, parents, msg.From) - if err != nil { - return nil, err - } - - // ensure actor is an account actor - if !actor.Code.Equals(builtin.AccountActorCodeID) { - return nil, errors.New("sent from non-account actor") - } - - return actor, nil -} - -func (dv *DefaultBlockValidator) validateMessage(msg *types.UnsignedMessage, expectedCallSeqNum map[address.Address]uint64, fromActor *actor.Actor) error { - 
callSeq, ok := expectedCallSeqNum[msg.From] - if !ok { - callSeq = fromActor.CallSeqNum - } - - // ensure message is in the correct order - if callSeq != msg.CallSeqNum { - return fmt.Errorf("callseqnum (%d) out of order (expected %d) from %s", msg.CallSeqNum, callSeq, msg.From) - } - - expectedCallSeqNum[msg.From] = callSeq + 1 - return nil -} - -// ValidateSyntax validates a single block is correctly formed. -// TODO this is an incomplete implementation #3277 -func (dv *DefaultBlockValidator) ValidateSyntax(ctx context.Context, blk *block.Block) error { - // TODO special handling for genesis block #3121 - if blk.Height == 0 { - return nil - } - err := dv.NotFutureBlock(blk) - if err != nil { - return err - } - err = dv.TimeMatchesEpoch(blk) - if err != nil { - return err - } - if !blk.StateRoot.Defined() { - return fmt.Errorf("block %s has nil StateRoot", blk.Cid()) - } - if blk.Miner.Empty() { - return fmt.Errorf("block %s has nil miner address", blk.Cid()) - } - if len(blk.Ticket.VRFProof) == 0 { - return fmt.Errorf("block %s has nil ticket", blk.Cid()) - } - if blk.BlockSig == nil { - return fmt.Errorf("block %s has nil signature", blk.Cid()) - } - - //TODO: validate all the messages syntax - - return nil -} diff --git a/internal/pkg/consensus/block_validation_test.go b/internal/pkg/consensus/block_validation_test.go deleted file mode 100644 index c82957e94d..0000000000 --- a/internal/pkg/consensus/block_validation_test.go +++ /dev/null @@ -1,328 +0,0 @@ -package consensus_test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" -) - -func TestBlockValidHeaderSemantic(t *testing.T) { - tf.UnitTest(t) - - blockTime := clock.DefaultEpochDuration - ts := time.Unix(1234567890, 0) - genTime := ts - mclock := clock.NewChainClockFromClock(uint64(genTime.Unix()), blockTime, clock.DefaultPropagationDelay, clock.NewFake(ts)) - ctx := context.Background() - - validator := consensus.NewDefaultBlockValidator(mclock, nil, nil) - - t.Run("reject block with same height as parents", func(t *testing.T) { - // passes with valid height - c := &block.Block{Height: 2, Timestamp: uint64(ts.Add(blockTime).Unix())} - p := &block.Block{Height: 1, Timestamp: uint64(ts.Unix())} - parents := consensus.RequireNewTipSet(require.New(t), p) - require.NoError(t, validator.ValidateHeaderSemantic(ctx, c, parents)) - - // invalidate parent by matching child height - p = &block.Block{Height: 2, Timestamp: uint64(ts.Unix())} - parents = consensus.RequireNewTipSet(require.New(t), p) - - err := validator.ValidateHeaderSemantic(ctx, c, parents) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid height") - }) -} - -func TestBlockValidMessageSemantic(t *testing.T) { - tf.UnitTest(t) - - blockTime := clock.DefaultEpochDuration - ts := time.Unix(1234567890, 0) - genTime := ts - mclock := clock.NewChainClockFromClock(uint64(genTime.Unix()), blockTime, clock.DefaultPropagationDelay, clock.NewFake(ts)) - ctx := context.Background() - - c := &block.Block{Height: 2, Timestamp: 
uint64(ts.Add(blockTime).Unix())} - p := &block.Block{Height: 1, Timestamp: uint64(ts.Unix())} - parents := consensus.RequireNewTipSet(require.New(t), p) - - msg0 := &types.UnsignedMessage{From: address.TestAddress, CallSeqNum: 1} - msg1 := &types.UnsignedMessage{From: address.TestAddress, CallSeqNum: 2} - msg2 := &types.UnsignedMessage{From: address.TestAddress, CallSeqNum: 3} - msg3 := &types.UnsignedMessage{From: address.TestAddress, CallSeqNum: 4} - - t.Run("rejects block with message from missing actor", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - blsMessages: []*types.UnsignedMessage{msg1}, - }, &fakeChainState{ - err: blockstore.ErrNotFound, - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.Error(t, err) - require.Contains(t, err.Error(), "not found") - }) - - t.Run("rejects block with message from non-account actor", func(t *testing.T) { - actor := newActor(t, 0, 2) - - // set invalid code - actor.Code = e.NewCid(builtin.RewardActorCodeID) - - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - blsMessages: []*types.UnsignedMessage{msg1}, - }, &fakeChainState{ - actor: actor, - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.Error(t, err) - require.Contains(t, err.Error(), "non-account actor") - }) - - t.Run("accepts block with bls messages in monotonic sequence", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - blsMessages: []*types.UnsignedMessage{msg1, msg2, msg3}, - }, &fakeChainState{ - actor: newActor(t, 0, 2), - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.NoError(t, err) - }) - - t.Run("accepts block with secp messages in monotonic sequence", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - secpMessages: []*types.SignedMessage{{Message: *msg1}, {Message: *msg2}, {Message: 
*msg3}}, - }, &fakeChainState{ - actor: newActor(t, 0, 2), - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.NoError(t, err) - }) - - t.Run("rejects block with messages out of order", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - blsMessages: []*types.UnsignedMessage{msg1, msg3, msg2}, - }, &fakeChainState{ - actor: newActor(t, 0, 2), - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.Error(t, err) - require.Contains(t, err.Error(), "out of order") - }) - - t.Run("rejects block with gaps", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - blsMessages: []*types.UnsignedMessage{msg1, msg3}, - }, &fakeChainState{ - actor: newActor(t, 0, 2), - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.Error(t, err) - require.Contains(t, err.Error(), "out of order") - }) - - t.Run("rejects block with bls message with nonce too low", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - blsMessages: []*types.UnsignedMessage{msg0}, - }, &fakeChainState{ - actor: newActor(t, 0, 2), - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.Error(t, err) - require.Contains(t, err.Error(), "out of order") - }) - - t.Run("rejects block with secp message with nonce too low", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - secpMessages: []*types.SignedMessage{{Message: *msg0}}, - }, &fakeChainState{ - actor: newActor(t, 0, 2), - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.Error(t, err) - require.Contains(t, err.Error(), "out of order") - }) - - t.Run("rejects block with message too high", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - blsMessages: []*types.UnsignedMessage{msg2}, - }, 
&fakeChainState{ - actor: newActor(t, 0, 2), - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.Error(t, err) - require.Contains(t, err.Error(), "out of order") - }) - - t.Run("rejects secp message < bls messages", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - secpMessages: []*types.SignedMessage{{Message: *msg1}}, - blsMessages: []*types.UnsignedMessage{msg2}, - }, &fakeChainState{ - actor: newActor(t, 0, 2), - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.Error(t, err) - require.Contains(t, err.Error(), "out of order") - }) - - t.Run("accepts bls message < secp messages", func(t *testing.T) { - validator := consensus.NewDefaultBlockValidator(mclock, &fakeMsgSource{ - blsMessages: []*types.UnsignedMessage{msg1}, - secpMessages: []*types.SignedMessage{{Message: *msg2}}, - }, &fakeChainState{ - actor: newActor(t, 0, 2), - }) - - err := validator.ValidateMessagesSemantic(ctx, c, parents.Key()) - require.NoError(t, err) - }) -} - -func TestMismatchedTime(t *testing.T) { - tf.UnitTest(t) - - blockTime := clock.DefaultEpochDuration - genTime := time.Unix(1234567890, 1234567890%int64(time.Second)) - fc := clock.NewFake(genTime) - mclock := clock.NewChainClockFromClock(uint64(genTime.Unix()), blockTime, clock.DefaultPropagationDelay, fc) - validator := consensus.NewDefaultBlockValidator(mclock, nil, nil) - - fc.Advance(blockTime) - - // Passes with correct timestamp - c := &block.Block{Height: 1, Timestamp: uint64(fc.Now().Unix())} - require.NoError(t, validator.TimeMatchesEpoch(c)) - - // fails with invalid timestamp - c = &block.Block{Height: 1, Timestamp: uint64(genTime.Unix())} - err := validator.TimeMatchesEpoch(c) - assert.Error(t, err) - assert.Contains(t, err.Error(), "wrong epoch") -} - -func TestFutureEpoch(t *testing.T) { - tf.UnitTest(t) - - blockTime := clock.DefaultEpochDuration - genTime := time.Unix(1234567890, 
1234567890%int64(time.Second)) - fc := clock.NewFake(genTime) - mclock := clock.NewChainClockFromClock(uint64(genTime.Unix()), blockTime, clock.DefaultPropagationDelay, fc) - validator := consensus.NewDefaultBlockValidator(mclock, nil, nil) - - // Fails in future epoch - c := &block.Block{Height: 1, Timestamp: uint64(genTime.Add(blockTime).Unix())} - err := validator.NotFutureBlock(c) - assert.Error(t, err) - assert.Contains(t, err.Error(), "future epoch") -} - -func TestBlockValidSyntax(t *testing.T) { - tf.UnitTest(t) - - blockTime := clock.DefaultEpochDuration - ts := time.Unix(1234567890, 0) - mclock := clock.NewFake(ts) - chainClock := clock.NewChainClockFromClock(uint64(ts.Unix()), blockTime, clock.DefaultPropagationDelay, mclock) - ctx := context.Background() - mclock.Advance(blockTime) - - validator := consensus.NewDefaultBlockValidator(chainClock, nil, nil) - - validTs := uint64(mclock.Now().Unix()) - validSt := e.NewCid(types.NewCidForTestGetter()()) - validAd := vmaddr.NewForTestGetter()() - validTi := block.Ticket{VRFProof: []byte{1}} - // create a valid block - blk := &block.Block{ - Timestamp: validTs, - StateRoot: validSt, - Miner: validAd, - Ticket: validTi, - Height: 1, - - BlockSig: &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: []byte{0x3}, - }, - } - require.NoError(t, validator.ValidateSyntax(ctx, blk)) - - // below we will invalidate each part of the block, assert that it fails - // validation, then revalidate the block - - // invalidate timestamp - blk.Timestamp = uint64(ts.Add(time.Duration(3) * blockTime).Unix()) - require.Error(t, validator.ValidateSyntax(ctx, blk)) - blk.Timestamp = validTs - require.NoError(t, validator.ValidateSyntax(ctx, blk)) - - // invalidate stateroot - blk.StateRoot = e.NewCid(cid.Undef) - require.Error(t, validator.ValidateSyntax(ctx, blk)) - blk.StateRoot = validSt - require.NoError(t, validator.ValidateSyntax(ctx, blk)) - - // invalidate miner address - blk.Miner = address.Undef - require.Error(t, 
validator.ValidateSyntax(ctx, blk)) - blk.Miner = validAd - require.NoError(t, validator.ValidateSyntax(ctx, blk)) - - // invalidate ticket - blk.Ticket = block.Ticket{} - require.Error(t, validator.ValidateSyntax(ctx, blk)) - blk.Ticket = validTi - require.NoError(t, validator.ValidateSyntax(ctx, blk)) - -} - -type fakeMsgSource struct { - blsMessages []*types.UnsignedMessage - secpMessages []*types.SignedMessage -} - -func (fms *fakeMsgSource) LoadMessages(context.Context, cid.Cid) ([]*types.SignedMessage, []*types.UnsignedMessage, error) { - return fms.secpMessages, fms.blsMessages, nil -} - -func (fms *fakeMsgSource) LoadReceipts(context.Context, cid.Cid) ([]vm.MessageReceipt, error) { - return nil, nil -} - -type fakeChainState struct { - actor *actor.Actor - err error -} - -func (fcs *fakeChainState) GetActorAt(ctx context.Context, tipKey block.TipSetKey, addr address.Address) (*actor.Actor, error) { - return fcs.actor, fcs.err -} diff --git a/internal/pkg/consensus/chain_selector.go b/internal/pkg/consensus/chain_selector.go deleted file mode 100644 index 8a7f5d8a19..0000000000 --- a/internal/pkg/consensus/chain_selector.go +++ /dev/null @@ -1,123 +0,0 @@ -package consensus - -// This is to implement Expected Consensus protocol -// See: https://github.com/filecoin-project/specs/blob/master/expected-consensus.md - -import ( - "bytes" - "context" - "errors" - "strings" - - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -var ( - wRatioNum = fbig.NewInt(1) - wRatioDen = fbig.NewInt(2) - wPrecision = fbig.NewInt(256) -) - -// ChainSelector weighs and compares chains. -type ChainSelector struct { - cstore cbor.IpldStore - state StateViewer - genesisCid cid.Cid -} - -// NewChainSelector is the constructor for chain selection module. 
-func NewChainSelector(cs cbor.IpldStore, state StateViewer, gCid cid.Cid) *ChainSelector { - return &ChainSelector{ - cstore: cs, - state: state, - genesisCid: gCid, - } -} - -func log2b(x fbig.Int) fbig.Int { - bits := x.BitLen() - return fbig.NewInt(int64(bits - 1)) -} - -// Weight returns the EC weight of this TipSet as a filecoin big int. -func (c *ChainSelector) Weight(ctx context.Context, ts block.TipSet, pStateID cid.Cid) (fbig.Int, error) { - // Retrieve parent weight. - parentWeight, err := ts.ParentWeight() - if err != nil { - return fbig.Zero(), err - } - if !pStateID.Defined() { - return fbig.Zero(), errors.New("undefined state passed to chain selector new weight") - } - powerTableView := NewPowerTableView(c.state.PowerStateView(pStateID), c.state.FaultStateView(pStateID)) - networkPower, err := powerTableView.NetworkTotalPower(ctx) - if err != nil { - return fbig.Zero(), err - } - powerMeasure := log2b(networkPower) - - wPowerFactor := fbig.Mul(wPrecision, powerMeasure) - wBlocksFactorNum := fbig.Mul(wRatioNum, fbig.Mul(powerMeasure, fbig.NewInt(int64(ts.Len())))) - wBlocksFactorDen := fbig.Mul(wRatioDen, fbig.NewInt(int64(expectedLeadersPerEpoch))) - wBlocksFactor := fbig.Div(fbig.Mul(wBlocksFactorNum, wPrecision), wBlocksFactorDen) - deltaWeight := fbig.Add(wPowerFactor, wBlocksFactor) - - return fbig.Add(parentWeight, deltaWeight), nil -} - -// IsHeavier returns true if tipset a is heavier than tipset b, and false -// vice versa. In the rare case where two tipsets have the same weight ties -// are broken by taking the tipset with the smallest ticket. In the event that -// tickets are the same, IsHeavier will break ties by comparing the -// concatenation of block cids in the tipset. -// TODO BLOCK CID CONCAT TIE BREAKER IS NOT IN THE SPEC AND SHOULD BE -// EVALUATED BEFORE GETTING TO PRODUCTION. 
-func (c *ChainSelector) IsHeavier(ctx context.Context, a, b block.TipSet, aStateID, bStateID cid.Cid) (bool, error) { - aW, err := c.Weight(ctx, a, aStateID) - if err != nil { - return false, err - } - bW, err := c.Weight(ctx, b, bStateID) - if err != nil { - return false, err - } - // Without ties pass along the comparison. - if !aW.Equals(bW) { - return aW.GreaterThan(bW), nil - } - - // To break ties compare the min tickets. - aTicket, err := a.MinTicket() - if err != nil { - return false, err - } - bTicket, err := b.MinTicket() - if err != nil { - return false, err - } - - cmp := bytes.Compare(bTicket.VRFProof, aTicket.VRFProof) - if cmp != 0 { - // a is heavier if b's ticket is greater than a's ticket. - return cmp == 1, nil - } - - // Tie break on cid ids. - // TODO: I think this is drastically impacted by number of blocks in tipset - // i.e. bigger tipset is always heavier. Not sure if this is ok, need to revist. - cmp = strings.Compare(a.String(), b.String()) - if cmp == 0 { - // Caller is mistakenly calling on two identical tipsets. 
- return false, ErrUnorderedTipSets - } - return cmp == 1, nil -} - -func (c *ChainSelector) loadStateTree(ctx context.Context, id cid.Cid) (*state.State, error) { - return state.LoadState(ctx, c.cstore, id) -} diff --git a/internal/pkg/consensus/election.go b/internal/pkg/consensus/election.go deleted file mode 100644 index c15b9e776c..0000000000 --- a/internal/pkg/consensus/election.go +++ /dev/null @@ -1,261 +0,0 @@ -package consensus - -import ( - "context" - "fmt" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/minio/blake2b-simd" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/postgenerator" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// Interface to PoSt verification. -type EPoStVerifier interface { - // VerifyWinningPoSt verifies an election PoSt. 
- VerifyWinningPoSt(ctx context.Context, post abi.WinningPoStVerifyInfo) (bool, error) - GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) -} - -type SectorsStateView interface { - MinerSectorConfiguration(ctx context.Context, maddr address.Address) (*state.MinerSectorConfiguration, error) - MinerSectorStates(ctx context.Context, maddr address.Address) (*state.MinerSectorStates, error) - MinerGetSector(ctx context.Context, maddr address.Address, sectorNum abi.SectorNumber) (*miner.SectorOnChainInfo, bool, error) -} - -// ElectionMachine generates and validates PoSt partial tickets and PoSt proofs. -type ElectionMachine struct{} - -func NewElectionMachine(_ ChainRandomness) *ElectionMachine { - return &ElectionMachine{} -} - -func (em ElectionMachine) GenerateElectionProof(ctx context.Context, entry *drand.Entry, - epoch abi.ChainEpoch, miner address.Address, worker address.Address, signer types.Signer) (crypto.VRFPi, error) { - randomness, err := electionVRFRandomness(entry, miner, epoch) - if err != nil { - return nil, errors.Wrap(err, "failed to generate election randomness randomness") - } - vrfProof, err := signer.SignBytes(ctx, randomness, worker) - if err != nil { - return nil, errors.Wrap(err, "failed to sign election post randomness") - } - return vrfProof.Data, nil -} - -// GenerateWinningPoSt creates a PoSt proof over the input miner ID and sector infos. 
-func (em ElectionMachine) GenerateWinningPoSt(ctx context.Context, entry *drand.Entry, epoch abi.ChainEpoch, ep postgenerator.PoStGenerator, maddr address.Address, sectors SectorsStateView) ([]block.PoStProof, error) { - entropy, err := encoding.Encode(maddr) - if err != nil { - return nil, err - } - - seed := blake2b.Sum256(entry.Data) - randomness, err := crypto.BlendEntropy(acrypto.DomainSeparationTag_WinningPoStChallengeSeed, seed[:], epoch, entropy) - - if err != nil { - return nil, err - } - poStRandomness := abi.PoStRandomness(randomness) - - minerIDuint64, err := address.IDFromAddress(maddr) - if err != nil { - return nil, err - } - minerID := abi.ActorID(minerIDuint64) - - challengedSectorInfos, err := computeWinningPoStSectorChallenges(ctx, sectors, maddr, poStRandomness) - if err != nil { - return nil, err - } - - posts, err := ep.GenerateWinningPoSt(ctx, minerID, challengedSectorInfos, poStRandomness) - if err != nil { - return nil, err - } - - return block.FromABIPoStProofs(posts...), nil -} - -func (em ElectionMachine) VerifyElectionProof(_ context.Context, entry *drand.Entry, epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, vrfProof crypto.VRFPi) error { - randomness, err := electionVRFRandomness(entry, miner, epoch) - if err != nil { - return errors.Wrap(err, "failed to reproduce election randomness") - } - - return crypto.ValidateBlsSignature(randomness, workerSigner, vrfProof) -} - -// IsWinner returns true if the input challengeTicket wins the election -func (em ElectionMachine) IsWinner(challengeTicket []byte, minerPower, networkPower abi.StoragePower) bool { - // (ChallengeTicket / MaxChallengeTicket) < ExpectedLeadersPerEpoch * (MinerPower / NetworkPower) - // -> - // ChallengeTicket * NetworkPower < ExpectedLeadersPerEpoch * MinerPower * MaxChallengeTicket - - lhs := big.PositiveFromUnsignedBytes(challengeTicket[:]) - lhs = big.Mul(lhs, networkPower) - - rhs := big.Lsh(minerPower, challengeBits) - rhs = big.Mul(rhs, 
big.NewInt(expectedLeadersPerEpoch)) - - return big.Cmp(lhs, rhs) < 0 -} - -// VerifyWinningPoSt verifies a Winning PoSt proof. -func (em ElectionMachine) VerifyWinningPoSt(ctx context.Context, ep EPoStVerifier, seedEntry *drand.Entry, epoch abi.ChainEpoch, proofs []block.PoStProof, mIDAddr address.Address, sectors SectorsStateView) (bool, error) { - if len(proofs) == 0 { - return false, nil - } - - entropy, err := encoding.Encode(mIDAddr) - if err != nil { - return false, err - } - - seed := blake2b.Sum256(seedEntry.Data) - randomness, err := crypto.BlendEntropy(acrypto.DomainSeparationTag_WinningPoStChallengeSeed, seed[:], epoch, entropy) - if err != nil { - return false, err - } - poStRandomness := abi.PoStRandomness(randomness) - - minerIDuint64, err := address.IDFromAddress(mIDAddr) - if err != nil { - return false, err - } - minerID := abi.ActorID(minerIDuint64) - - challengedSectorInfos, err := computeWinningPoStSectorChallenges(ctx, sectors, mIDAddr, poStRandomness) - if err != nil { - return false, err - } - - proofsPrime := make([]abi.PoStProof, len(proofs)) - for idx := range proofsPrime { - proofsPrime[idx] = abi.PoStProof{ - RegisteredProof: proofs[idx].RegisteredProof, - ProofBytes: proofs[idx].ProofBytes, - } - } - - verifyInfo := abi.WinningPoStVerifyInfo{ - Randomness: poStRandomness, - Proofs: proofsPrime, - ChallengedSectors: challengedSectorInfos, - Prover: minerID, - } - return ep.VerifyWinningPoSt(ctx, verifyInfo) -} - -// Loads infos for sectors challenged by a Winning PoSt. 
-func computeWinningPoStSectorChallenges(ctx context.Context, sectors SectorsStateView, mIDAddr address.Address, poStRandomness abi.PoStRandomness) ([]abi.SectorInfo, error) { - provingSet, err := computeProvingSet(ctx, sectors, mIDAddr) - if err != nil { - return nil, err - } - sectorCount, err := provingSet.Count() - if err != nil { - return nil, err - } - - conf, err := sectors.MinerSectorConfiguration(ctx, mIDAddr) - if err != nil { - return nil, err - } - rp, err := conf.SealProofType.RegisteredWinningPoStProof() - if err != nil { - return nil, err - } - - minerIDuint64, err := address.IDFromAddress(mIDAddr) - if err != nil { - return nil, err - } - minerID := abi.ActorID(minerIDuint64) - - challengeIndexes, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(ctx, rp, minerID, poStRandomness, sectorCount) - if err != nil { - return nil, err - } - challengedSectorInfos, err := loadChallengedSectors(ctx, sectors, mIDAddr, provingSet, challengeIndexes) - if err != nil { - return nil, err - } - return challengedSectorInfos, nil -} - -// Computes the set of sectors that may be challenged by Winning PoSt for a miner. -func computeProvingSet(ctx context.Context, sectors SectorsStateView, maddr address.Address) (*abi.BitField, error) { - sectorStates, err := sectors.MinerSectorStates(ctx, maddr) - if err != nil { - return nil, err - } - - pset, err := abi.BitFieldUnion(sectorStates.Deadlines...) - if err != nil { - return nil, err - } - - // Exclude sectors declared faulty. - // Recoveries are a subset of faults, so not needed explicitly here. - pset, err = bitfield.SubtractBitField(pset, sectorStates.Faults) - if err != nil { - return nil, err - } - - // Include new sectors. - // This is to replicate existing incorrect behaviour in Lotus. 
- // https://github.com/filecoin-project/go-filecoin/issues/4141 - pset, err = bitfield.MergeBitFields(pset, sectorStates.NewSectors) - return pset, err -} - -func loadChallengedSectors(ctx context.Context, sectors SectorsStateView, maddr address.Address, provingSet *abi.BitField, challengeIndexes []uint64) ([]abi.SectorInfo, error) { - challengedSectorInfos := make([]abi.SectorInfo, len(challengeIndexes)) - for i, ci := range challengeIndexes { - // TODO: replace Slice()+First() with provingSet.Get(ci) when it exists. - sectorNums, err := provingSet.Slice(ci, 1) - if err != nil { - return nil, err - } - sectorNum, err := sectorNums.First() - if err != nil { - return nil, err - } - si, found, err := sectors.MinerGetSector(ctx, maddr, abi.SectorNumber(sectorNum)) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("no sector %d challenging %d", sectorNum, ci) - } - challengedSectorInfos[i] = abi.SectorInfo{ - RegisteredProof: si.Info.RegisteredProof, - SectorNumber: si.Info.SectorNumber, - SealedCID: si.Info.SealedCID, - } - } - return challengedSectorInfos, nil -} - -func electionVRFRandomness(entry *drand.Entry, miner address.Address, epoch abi.ChainEpoch) (abi.Randomness, error) { - entropy, err := encoding.Encode(miner) - if err != nil { - return nil, errors.Wrapf(err, "failed to encode entropy") - } - seed := blake2b.Sum256(entry.Data) - return crypto.BlendEntropy(acrypto.DomainSeparationTag_ElectionProofProduction, seed[:], epoch, entropy) -} diff --git a/internal/pkg/consensus/expected.go b/internal/pkg/consensus/expected.go deleted file mode 100644 index 56847e6ed6..0000000000 --- a/internal/pkg/consensus/expected.go +++ /dev/null @@ -1,446 +0,0 @@ -package consensus - -import ( - "context" - "time" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - 
cid "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/pkg/errors" - "go.opencensus.io/trace" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics/tracing" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -var ( - // ErrStateRootMismatch is returned when the computed state root doesn't match the expected result. - ErrStateRootMismatch = errors.New("blocks state root does not match computed result") - // ErrUnorderedTipSets is returned when weight and minticket are the same between two tipsets. - ErrUnorderedTipSets = errors.New("trying to order two identical tipsets") - // ErrReceiptRootMismatch is returned when the block's receipt root doesn't match the receipt root computed for the parent tipset. 
- ErrReceiptRootMismatch = errors.New("blocks receipt root does not match parent tip set") -) - -// challengeBits is the number of bits in the challenge ticket's domain -const challengeBits = 256 - -// expectedLeadersPerEpoch is the mean number of leaders per epoch -const expectedLeadersPerEpoch = 5 - -// WinningPoStSectorSetLookback is the past epoch offset for reading the -// winning post sector set -const WinningPoStSectorSetLookback = 10 - -// ElectionPowerTableLookback is the past epoch offset for reading the -// election power values -const ElectionPowerTableLookback = 10 - -// DRANDEpochLookback is the past filecoin epoch offset at which DRAND entries -// in that epoch should be included in a block. -const DRANDEpochLookback = 2 - -// A Processor processes all the messages in a block or tip set. -type Processor interface { - // ProcessTipSet processes all messages in a tip set. - ProcessTipSet(context.Context, state.Tree, vm.Storage, block.TipSet, []vm.BlockMessagesInfo) ([]vm.MessageReceipt, error) -} - -// TicketValidator validates that an input ticket is valid. -type TicketValidator interface { - IsValidTicket(ctx context.Context, base block.TipSetKey, entry *drand.Entry, newPeriod bool, epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, ticket block.Ticket) error -} - -// ElectionValidator validates that an election fairly produced a winner. -type ElectionValidator interface { - IsWinner(challengeTicket []byte, minerPower, networkPower abi.StoragePower) bool - VerifyElectionProof(ctx context.Context, entry *drand.Entry, epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, vrfProof crypto.VRFPi) error - VerifyWinningPoSt(ctx context.Context, ep EPoStVerifier, seedEntry *drand.Entry, epoch abi.ChainEpoch, proofs []block.PoStProof, mIDAddr address.Address, sectors SectorsStateView) (bool, error) -} - -// StateViewer provides views into the chain state. 
-type StateViewer interface { - PowerStateView(root cid.Cid) PowerStateView - FaultStateView(root cid.Cid) FaultStateView -} - -type chainReader interface { - GetTipSet(tsKey block.TipSetKey) (block.TipSet, error) - GetTipSetStateRoot(tsKey block.TipSetKey) (cid.Cid, error) -} - -// Expected implements expected consensus. -type Expected struct { - // ElectionValidator validates election proofs. - ElectionValidator - - // TicketValidator validates ticket generation - TicketValidator - - // cstore is used for loading state trees during message running. - cstore cbor.IpldStore - - // bstore contains data referenced by actors within the state - // during message running. Additionally bstore is used for - // accessing the power table. - bstore blockstore.Blockstore - - // chainState is a reference to the current chain state - chainState chainReader - - // processor is what we use to process messages and pay rewards - processor Processor - - // state produces snapshots - state StateViewer - - blockTime time.Duration - - // postVerifier verifies PoSt proofs and associated data - postVerifier EPoStVerifier - - clock clock.ChainEpochClock - drand drand.IFace -} - -// Ensure Expected satisfies the Protocol interface at compile time. -var _ Protocol = (*Expected)(nil) - -// NewExpected is the constructor for the Expected consenus.Protocol module. -func NewExpected(cs cbor.IpldStore, bs blockstore.Blockstore, processor Processor, state StateViewer, bt time.Duration, - ev ElectionValidator, tv TicketValidator, pv EPoStVerifier, chainState chainReader, clock clock.ChainEpochClock, drand drand.IFace) *Expected { - return &Expected{ - cstore: cs, - blockTime: bt, - bstore: bs, - processor: processor, - state: state, - ElectionValidator: ev, - TicketValidator: tv, - postVerifier: pv, - chainState: chainState, - clock: clock, - drand: drand, - } -} - -// BlockTime returns the block time used by the consensus protocol. 
-func (c *Expected) BlockTime() time.Duration { - return c.blockTime -} - -// RunStateTransition applies the messages in a tipset to a state, and persists that new state. -// It errors if the tipset was not mined according to the EC rules, or if any of the messages -// in the tipset results in an error. -func (c *Expected) RunStateTransition(ctx context.Context, ts block.TipSet, blsMessages [][]*types.UnsignedMessage, secpMessages [][]*types.SignedMessage, - parentWeight big.Int, parentStateRoot cid.Cid, parentReceiptRoot cid.Cid) (root cid.Cid, receipts []vm.MessageReceipt, err error) { - ctx, span := trace.StartSpan(ctx, "Expected.RunStateTransition") - span.AddAttributes(trace.StringAttribute("tipset", ts.String())) - defer tracing.AddErrorEndSpan(ctx, span, &err) - - if err := c.validateMining(ctx, ts, parentStateRoot, blsMessages, secpMessages, parentWeight, parentReceiptRoot); err != nil { - return cid.Undef, []vm.MessageReceipt{}, err - } - - priorState, err := c.loadStateTree(ctx, parentStateRoot) - if err != nil { - return cid.Undef, []vm.MessageReceipt{}, err - } - vms := vm.NewStorage(c.bstore) - var newState state.Tree - newState, receipts, err = c.runMessages(ctx, priorState, vms, ts, blsMessages, secpMessages) - if err != nil { - return cid.Undef, []vm.MessageReceipt{}, err - } - err = vms.Flush() - if err != nil { - return cid.Undef, []vm.MessageReceipt{}, err - } - - root, err = newState.Commit(ctx) - if err != nil { - return cid.Undef, []vm.MessageReceipt{}, err - } - return root, receipts, err -} - -// validateMining checks validity of the ticket, proof, signature and miner -// address of every block in the tipset. 
-func (c *Expected) validateMining(ctx context.Context, - ts block.TipSet, - parentStateRoot cid.Cid, - blsMsgs [][]*types.UnsignedMessage, - secpMsgs [][]*types.SignedMessage, - parentWeight big.Int, - parentReceiptRoot cid.Cid) error { - - keyStateView := c.state.PowerStateView(parentStateRoot) - sigValidator := appstate.NewSignatureValidator(keyStateView) - faultsStateView := c.state.FaultStateView(parentStateRoot) - keyPowerTable := NewPowerTableView(keyStateView, faultsStateView) - - tsHeight, err := ts.Height() - if err != nil { - return errors.Wrap(err, "could not get new tipset's height") - } - - sectorSetAncestor, err := chain.FindTipsetAtEpoch(ctx, ts, tsHeight-WinningPoStSectorSetLookback, c.chainState) - if err != nil { - return errors.Wrap(err, "failed to find sector set lookback ancestor") - } - sectorSetStateRoot, err := c.chainState.GetTipSetStateRoot(sectorSetAncestor.Key()) - if err != nil { - return errors.Wrap(err, "failed to get state root for sectorSet ancestor") - } - sectorSetStateView := c.state.PowerStateView(sectorSetStateRoot) - - electionPowerAncestor, err := chain.FindTipsetAtEpoch(ctx, ts, tsHeight-ElectionPowerTableLookback, c.chainState) - if err != nil { - return errors.Wrap(err, "failed to find election power lookback ancestor") - } - electionPowerStateRoot, err := c.chainState.GetTipSetStateRoot(electionPowerAncestor.Key()) - if err != nil { - return errors.Wrap(err, "failed to get state root for election power ancestor") - } - electionPowerStateView := c.state.PowerStateView(electionPowerStateRoot) - electionPowerTable := NewPowerTableView(electionPowerStateView, faultsStateView) - - for i := 0; i < ts.Len(); i++ { - blk := ts.At(i) - - // confirm block state root matches parent state root - if !parentStateRoot.Equals(blk.StateRoot.Cid) { - return ErrStateRootMismatch - } - - // confirm block receipts match parent receipts - if !parentReceiptRoot.Equals(blk.MessageReceipts.Cid) { - return ErrReceiptRootMismatch - } - - if 
!parentWeight.Equals(blk.ParentWeight) { - return errors.Errorf("block %s has invalid parent weight %d expected %d", blk.Cid().String(), blk.ParentWeight, parentWeight) - } - workerAddr, err := keyPowerTable.WorkerAddr(ctx, blk.Miner) - if err != nil { - return errors.Wrap(err, "failed to read worker address of block miner") - } - workerSignerAddr, err := keyPowerTable.SignerAddress(ctx, workerAddr) - if err != nil { - return errors.Wrapf(err, "failed to convert address, %s, to a signing address", workerAddr.String()) - } - // Validate block signature - if blk.BlockSig == nil { - return errors.Errorf("invalid nil block signature") - } - if err := crypto.ValidateSignature(blk.SignatureData(), workerSignerAddr, *blk.BlockSig); err != nil { - return errors.Wrap(err, "block signature invalid") - } - - // Verify that the BLS signature aggregate is correct - if err := sigValidator.ValidateBLSMessageAggregate(ctx, blsMsgs[i], blk.BLSAggregateSig); err != nil { - return errors.Wrapf(err, "bls message verification failed for block %s", blk.Cid()) - } - - // Verify that all secp message signatures are correct - for i, msg := range secpMsgs[i] { - if err := sigValidator.ValidateMessageSignature(ctx, msg); err != nil { - return errors.Wrapf(err, "invalid signature for secp message %d in block %s", i, blk.Cid()) - } - } - - err = c.validateDRANDEntries(ctx, blk) - if err != nil { - return errors.Wrapf(err, "invalid DRAND entries") - } - - electionEntry, err := c.electionEntry(ctx, blk) - if err != nil { - return errors.Wrapf(err, "failed to get election entry") - } - err = c.VerifyElectionProof(ctx, electionEntry, blk.Height, blk.Miner, workerSignerAddr, blk.ElectionProof.VRFProof) - if err != nil { - return errors.Wrapf(err, "failed to verify election proof") - } - // TODO this is not using nominal power, which must take into account undeclared faults - // TODO the nominal power must be tested against the minimum (power.minerNominalPowerMeetsConsensusMinimum) - // See 
https://github.com/filecoin-project/go-filecoin/issues/3958 - minerPower, err := electionPowerTable.MinerClaimedPower(ctx, blk.Miner) - if err != nil { - return errors.Wrap(err, "failed to read miner claim from power table") - } - networkPower, err := electionPowerTable.NetworkTotalPower(ctx) - if err != nil { - return errors.Wrap(err, "failed to read power table") - } - electionVRFDigest := blk.ElectionProof.VRFProof.Digest() - wins := c.IsWinner(electionVRFDigest[:], minerPower, networkPower) - if !wins { - return errors.Errorf("Block did not win election") - } - - valid, err := c.VerifyWinningPoSt(ctx, c.postVerifier, electionEntry, blk.Height, blk.PoStProofs, blk.Miner, sectorSetStateView) - if err != nil { - return errors.Wrapf(err, "failed verifying winning post") - } - if !valid { - return errors.Errorf("Invalid winning post") - } - - // Ticket was correctly generated by miner - sampleEpoch := blk.Height - miner.ElectionLookback - newPeriod := len(blk.BeaconEntries) > 0 - if err := c.IsValidTicket(ctx, blk.Parents, electionEntry, newPeriod, sampleEpoch, blk.Miner, workerSignerAddr, blk.Ticket); err != nil { - return errors.Wrapf(err, "invalid ticket: %s in block %s", blk.Ticket.String(), blk.Cid()) - } - } - return nil -} - -func (c *Expected) validateDRANDEntries(ctx context.Context, blk *block.Block) error { - targetEpoch := blk.Height - DRANDEpochLookback - parent, err := c.chainState.GetTipSet(blk.Parents) - if err != nil { - return err - } - - numEntries := len(blk.BeaconEntries) - // Note we don't check for genesis condition because first block must include > 0 drand entries - if numEntries == 0 { - prevEntry, err := chain.FindLatestDRAND(ctx, parent, c.chainState) - if err != nil { - return err - } - nextDRANDTime := c.drand.StartTimeOfRound(prevEntry.Round + drand.Round(1)) - if c.clock.EpochAtTime(nextDRANDTime) > targetEpoch { - return nil - } - return errors.New("Block missing required DRAND entry") - } - - lastRound := 
blk.BeaconEntries[numEntries-1].Round - nextDRANDTime := c.drand.StartTimeOfRound(lastRound + 1) - - if !(c.clock.EpochAtTime(nextDRANDTime) > targetEpoch) { - return errors.New("Block does not include all drand entries required") - } - - // Validate that DRAND entries link up - // Detect case where we have just mined with genesis block as parent - parentHeight, err := parent.Height() - if err != nil { - return err - } - // No prevEntry in first block so this is skipped first time around - if parentHeight != abi.ChainEpoch(0) { - prevEntry, err := chain.FindLatestDRAND(ctx, parent, c.chainState) - if err != nil { - return err - } - valid, err := c.drand.VerifyEntry(prevEntry, blk.BeaconEntries[0]) - if err != nil { - return err - } - if !valid { - return errors.Errorf("invalid DRAND link rounds %d and %d", prevEntry.Round, blk.BeaconEntries[0].Round) - } - } - for i := 0; i < numEntries-1; i++ { - valid, err := c.drand.VerifyEntry(blk.BeaconEntries[i], blk.BeaconEntries[i+1]) - if err != nil { - return err - } - if !valid { - return errors.Errorf("invalid DRAND link rounds %d and %d", blk.BeaconEntries[i].Round, blk.BeaconEntries[i+1].Round) - } - } - - return nil - -} - -func (c *Expected) electionEntry(ctx context.Context, blk *block.Block) (*drand.Entry, error) { - if len(blk.BeaconEntries) > 0 { - return blk.BeaconEntries[len(blk.BeaconEntries)-1], nil - } - - parent, err := c.chainState.GetTipSet(blk.Parents) - if err != nil { - return nil, err - } - return chain.FindLatestDRAND(ctx, parent, c.chainState) -} - -// runMessages applies the messages of all blocks within the input -// tipset to the input base state. Messages are extracted from tipset -// blocks sorted by their ticket bytes and run as a single state transition -// for the entire tipset. The output state must be flushed after calling to -// guarantee that the state transitions propagate. -// Messages that fail to apply are dropped on the floor (and no receipt is emitted). 
-func (c *Expected) runMessages(ctx context.Context, st state.Tree, vms vm.Storage, ts block.TipSet, - blsMessages [][]*types.UnsignedMessage, secpMessages [][]*types.SignedMessage) (state.Tree, []vm.MessageReceipt, error) { - msgs := []vm.BlockMessagesInfo{} - - // build message information per block - for i := 0; i < ts.Len(); i++ { - blk := ts.At(i) - - messageCount := len(blsMessages[i]) + len(secpMessages[i]) - if messageCount > block.BlockMessageLimit { - return nil, nil, errors.Errorf("Number of messages in block %s is %d which exceeds block message limit", blk.Cid(), messageCount) - } - - msgInfo := vm.BlockMessagesInfo{ - BLSMessages: blsMessages[i], - SECPMessages: secpMessages[i], - Miner: blk.Miner, - } - - msgs = append(msgs, msgInfo) - } - - // process tipset - receipts, err := c.processor.ProcessTipSet(ctx, st, vms, ts, msgs) - if err != nil { - return nil, nil, errors.Wrap(err, "error validating tipset") - } - - return st, receipts, nil -} - -func (c *Expected) loadStateTree(ctx context.Context, id cid.Cid) (*state.State, error) { - return state.LoadState(ctx, c.cstore, id) -} - -// DefaultStateViewer a state viewer to the power state view interface. -type DefaultStateViewer struct { - *appstate.Viewer -} - -// AsDefaultStateViewer adapts a state viewer to a power state viewer. -func AsDefaultStateViewer(v *appstate.Viewer) DefaultStateViewer { - return DefaultStateViewer{v} -} - -// PowerStateView returns a power state view for a state root. -func (v *DefaultStateViewer) PowerStateView(root cid.Cid) PowerStateView { - return v.Viewer.StateView(root) -} - -// FaultStateView returns a fault state view for a state root. 
-func (v *DefaultStateViewer) FaultStateView(root cid.Cid) FaultStateView { - return v.Viewer.StateView(root) -} diff --git a/internal/pkg/consensus/expected_test.go b/internal/pkg/consensus/expected_test.go deleted file mode 100644 index 3dc6f39e57..0000000000 --- a/internal/pkg/consensus/expected_test.go +++ /dev/null @@ -1,297 +0,0 @@ -package consensus_test - -import ( - "context" - "errors" - "strings" - "testing" - "time" - - "github.com/filecoin-project/go-address" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -type TestChainReader struct{} - -func (reader *TestChainReader) GetTipSet(tsKey block.TipSetKey) (block.TipSet, error) { - return block.UndefTipSet, 
errors.New("TestChainReader unimplemented") -} -func (reader *TestChainReader) GetTipSetStateRoot(tsKey block.TipSetKey) (cid.Cid, error) { - return cid.Undef, errors.New("TestChainReader unimplemented") -} - -// TestExpected_RunStateTransition_validateMining is concerned only with validateMining behavior. -// Fully unit-testing RunStateTransition is difficult due to this requiring that you -// completely set up a valid state tree with a valid matching TipSet. RunStateTransition is tested -// with integration tests (see chain_daemon_test.go for example) -func TestExpected_RunStateTransition_validateMining(t *testing.T) { - tf.UnitTest(t) - t.Skip("requires VM to support state construction by messages") - - ctx := context.Background() - mockSigner, kis := types.NewMockSignersAndKeyInfo(3) - fc := clock.NewFake(time.Unix(1234567890, 0)) - blockTime := 30 * time.Second - propDelay := 5 * time.Second - cl := clock.NewChainClockFromClock(1234567890, blockTime, propDelay, fc) - drand := &drand.Fake{} - - t.Run("passes the validateMining section when given valid mining blocks", func(t *testing.T) { - cistore, bstore := setupCborBlockstore() - genesisBlock, err := gengen.DefaultGenesis(cistore, bstore) - require.NoError(t, err) - - //Set miner actor - - pTipSet := block.RequireNewTipSet(t, genesisBlock) - nextRoot, miners, m2w := setTree(ctx, t, kis, cistore, bstore, genesisBlock.StateRoot.Cid) - - views := consensus.AsDefaultStateViewer(appstate.NewViewer(cistore)) - exp := consensus.NewExpected(cistore, bstore, th.NewFakeProcessor(), &views, th.BlockTimeTest, - &consensus.FakeElectionMachine{}, &consensus.FakeTicketMachine{}, &consensus.TestElectionPoster{}, &TestChainReader{}, cl, drand) - - nextBlocks := requireMakeNBlocks(t, 3, pTipSet, nextRoot, types.EmptyReceiptsCID, miners, m2w, mockSigner) - tipSet := block.RequireNewTipSet(t, nextBlocks...) 
- - emptyBLSMessages, emptyMessages := emptyMessages(len(nextBlocks)) - _, _, err = exp.RunStateTransition(ctx, tipSet, emptyBLSMessages, emptyMessages, - nextBlocks[0].ParentWeight, nextBlocks[0].StateRoot.Cid, nextBlocks[0].MessageReceipts.Cid) - assert.NoError(t, err) - }) - - t.Run("returns nil + mining error when election proof validation fails", func(t *testing.T) { - cistore, bstore := setupCborBlockstore() - genesisBlock, err := gengen.DefaultGenesis(cistore, bstore) - require.NoError(t, err) - - pTipSet := block.RequireNewTipSet(t, genesisBlock) - - miners, minerToWorker := minerToWorkerFromAddrs(ctx, t, state.NewState(cistore), vm.NewStorage(bstore), kis) - views := consensus.AsDefaultStateViewer(appstate.NewViewer(cistore)) - exp := consensus.NewExpected(cistore, bstore, consensus.NewDefaultProcessor(&vm.FakeSyscalls{}, &consensus.FakeChainRandomness{}), &views, th.BlockTimeTest, - &consensus.FailingElectionValidator{}, &consensus.FakeTicketMachine{}, &consensus.TestElectionPoster{}, &TestChainReader{}, cl, drand) - - nextBlocks := requireMakeNBlocks(t, 3, pTipSet, genesisBlock.StateRoot.Cid, types.EmptyReceiptsCID, miners, minerToWorker, mockSigner) - tipSet := block.RequireNewTipSet(t, nextBlocks...) 
- - emptyBLSMessages, emptyMessages := emptyMessages(len(nextBlocks)) - - _, _, err = exp.RunStateTransition(ctx, tipSet, emptyBLSMessages, emptyMessages, genesisBlock.ParentWeight, genesisBlock.StateRoot.Cid, genesisBlock.MessageReceipts.Cid) - require.Error(t, err) - assert.True(t, strings.Contains(err.Error(), "lost election")) - }) - - // TODO: test that the correct tickets are processed for election and ticket generation - - t.Run("fails when bls signature is not valid across bls messages", func(t *testing.T) { - cistore, bstore := setupCborBlockstore() - genesisBlock, err := gengen.DefaultGenesis(cistore, bstore) - require.NoError(t, err) - - miners, minerToWorker := minerToWorkerFromAddrs(ctx, t, state.NewState(cistore), vm.NewStorage(bstore), kis) - views := consensus.AsDefaultStateViewer(appstate.NewViewer(cistore)) - exp := consensus.NewExpected(cistore, bstore, th.NewFakeProcessor(), &views, th.BlockTimeTest, &consensus.FakeElectionMachine{}, &consensus.FakeTicketMachine{}, &consensus.TestElectionPoster{}, &TestChainReader{}, cl, drand) - - pTipSet := block.RequireNewTipSet(t, genesisBlock) - nextBlocks := requireMakeNBlocks(t, 3, pTipSet, genesisBlock.StateRoot.Cid, types.EmptyReceiptsCID, miners, minerToWorker, mockSigner) - tipSet := block.RequireNewTipSet(t, nextBlocks...) 
- - _, emptyMessages := emptyMessages(len(nextBlocks)) - - // Create BLS messages but do not update signature - blsKey := bls.PrivateKeyPublicKey(bls.PrivateKeyGenerate()) - blsAddr, err := address.NewBLSAddress(blsKey[:]) - require.NoError(t, err) - - blsMessages := make([][]*types.UnsignedMessage, tipSet.Len()) - msg := types.NewUnsignedMessage(blsAddr, vmaddr.RequireIDAddress(t, 100), 0, types.NewAttoFILFromFIL(0), builtin.MethodSend, []byte{}) - blsMessages[0] = append(blsMessages[0], msg) - - _, _, err = exp.RunStateTransition(ctx, tipSet, blsMessages, emptyMessages, nextBlocks[0].ParentWeight, nextBlocks[0].StateRoot.Cid, nextBlocks[0].MessageReceipts.Cid) - require.Error(t, err) - assert.Contains(t, err.Error(), "block BLS signature does not validate") - }) - - t.Run("fails when secp message has invalid signature", func(t *testing.T) { - cistore, bstore := setupCborBlockstore() - genesisBlock, err := gengen.DefaultGenesis(cistore, bstore) - require.NoError(t, err) - - miners, minerToWorker := minerToWorkerFromAddrs(ctx, t, state.NewState(cistore), vm.NewStorage(bstore), kis) - views := consensus.AsDefaultStateViewer(appstate.NewViewer(cistore)) - exp := consensus.NewExpected(cistore, bstore, th.NewFakeProcessor(), &views, th.BlockTimeTest, &consensus.FakeElectionMachine{}, &consensus.FakeTicketMachine{}, &consensus.TestElectionPoster{}, &TestChainReader{}, cl, drand) - - pTipSet := block.RequireNewTipSet(t, genesisBlock) - nextBlocks := requireMakeNBlocks(t, 3, pTipSet, genesisBlock.StateRoot.Cid, types.EmptyReceiptsCID, miners, minerToWorker, mockSigner) - tipSet := block.RequireNewTipSet(t, nextBlocks...) 
- - emptyBLSMessages, _ := emptyMessages(len(nextBlocks)) - - // Create secp message with invalid signature - keys := types.MustGenerateKeyInfo(1, 42) - blsAddr, err := address.NewSecp256k1Address(keys[0].PublicKey()) - require.NoError(t, err) - - secpMessages := make([][]*types.SignedMessage, tipSet.Len()) - msg := types.NewUnsignedMessage(blsAddr, vmaddr.RequireIDAddress(t, 100), 0, types.NewAttoFILFromFIL(0), builtin.MethodSend, []byte{}) - smsg := &types.SignedMessage{ - Message: *msg, - Signature: crypto.Signature{ - Type: crypto.SigTypeSecp256k1, - Data: []byte("not a signature"), - }, - } - secpMessages[0] = append(secpMessages[0], smsg) - - _, _, err = exp.RunStateTransition(ctx, tipSet, emptyBLSMessages, secpMessages, nextBlocks[0].ParentWeight, nextBlocks[0].StateRoot.Cid, nextBlocks[0].MessageReceipts.Cid) - require.Error(t, err) - assert.Contains(t, err.Error(), "secp message signature invalid") - }) - - t.Run("returns nil + mining error when ticket validation fails", func(t *testing.T) { - cistore, bstore := setupCborBlockstore() - genesisBlock, err := gengen.DefaultGenesis(cistore, bstore) - require.NoError(t, err) - - miners, minerToWorker := minerToWorkerFromAddrs(ctx, t, state.NewState(cistore), vm.NewStorage(bstore), kis) - views := consensus.AsDefaultStateViewer(appstate.NewViewer(cistore)) - exp := consensus.NewExpected(cistore, bstore, th.NewFakeProcessor(), &views, th.BlockTimeTest, &consensus.FakeElectionMachine{}, &consensus.FailingTicketValidator{}, &consensus.TestElectionPoster{}, &TestChainReader{}, cl, drand) - - pTipSet := block.RequireNewTipSet(t, genesisBlock) - nextBlocks := requireMakeNBlocks(t, 3, pTipSet, genesisBlock.StateRoot.Cid, types.EmptyReceiptsCID, miners, minerToWorker, mockSigner) - tipSet := block.RequireNewTipSet(t, nextBlocks...) 
- - emptyBLSMessages, emptyMessages := emptyMessages(len(nextBlocks)) - - _, _, err = exp.RunStateTransition(ctx, tipSet, emptyBLSMessages, emptyMessages, genesisBlock.ParentWeight, genesisBlock.StateRoot.Cid, genesisBlock.MessageReceipts.Cid) - require.NotNil(t, err) - assert.Contains(t, err.Error(), "invalid ticket") - }) - - t.Run("returns nil + mining error when signature is invalid", func(t *testing.T) { - cistore, bstore := setupCborBlockstore() - genesisBlock, err := gengen.DefaultGenesis(cistore, bstore) - require.NoError(t, err) - - miners, minerToWorker := minerToWorkerFromAddrs(ctx, t, state.NewState(cistore), vm.NewStorage(bstore), kis) - views := consensus.AsDefaultStateViewer(appstate.NewViewer(cistore)) - exp := consensus.NewExpected(cistore, bstore, th.NewFakeProcessor(), &views, th.BlockTimeTest, &consensus.FakeElectionMachine{}, &consensus.FakeTicketMachine{}, &consensus.TestElectionPoster{}, &TestChainReader{}, cl, drand) - - pTipSet := block.RequireNewTipSet(t, genesisBlock) - nextBlocks := requireMakeNBlocks(t, 3, pTipSet, genesisBlock.StateRoot.Cid, types.EmptyReceiptsCID, miners, minerToWorker, mockSigner) - - // Give block 0 an invalid signature - nextBlocks[0].BlockSig = nextBlocks[1].BlockSig - - tipSet := block.RequireNewTipSet(t, nextBlocks...) 
- emptyBLSMessages, emptyMessages := emptyMessages(len(nextBlocks)) - - _, _, err = exp.RunStateTransition(ctx, tipSet, emptyBLSMessages, emptyMessages, nextBlocks[0].ParentWeight, nextBlocks[0].StateRoot.Cid, nextBlocks[0].MessageReceipts.Cid) - assert.EqualError(t, err, "block signature invalid") - }) - - t.Run("returns nil + error when parent weight invalid", func(t *testing.T) { - cistore, bstore := setupCborBlockstore() - genesisBlock, err := gengen.DefaultGenesis(cistore, bstore) - require.NoError(t, err) - - miners, minerToWorker := minerToWorkerFromAddrs(ctx, t, state.NewState(cistore), vm.NewStorage(bstore), kis) - views := consensus.AsDefaultStateViewer(appstate.NewViewer(cistore)) - exp := consensus.NewExpected(cistore, bstore, th.NewFakeProcessor(), &views, th.BlockTimeTest, &consensus.FakeElectionMachine{}, &consensus.FakeTicketMachine{}, &consensus.TestElectionPoster{}, &TestChainReader{}, cl, drand) - - pTipSet := block.RequireNewTipSet(t, genesisBlock) - nextBlocks := requireMakeNBlocks(t, 3, pTipSet, genesisBlock.StateRoot.Cid, types.EmptyReceiptsCID, miners, minerToWorker, mockSigner) - tipSet := block.RequireNewTipSet(t, nextBlocks...) 
- - invalidParentWeight := fbig.NewInt(6) - - emptyBLSMessages, emptyMessages := emptyMessages(len(nextBlocks)) - - _, _, err = exp.RunStateTransition(ctx, tipSet, emptyBLSMessages, emptyMessages, invalidParentWeight, nextBlocks[0].StateRoot.Cid, nextBlocks[0].MessageReceipts.Cid) - assert.Contains(t, err.Error(), "invalid parent weight") - }) -} - -func emptyMessages(numBlocks int) ([][]*types.UnsignedMessage, [][]*types.SignedMessage) { - var emptyBLSMessages [][]*types.UnsignedMessage - var emptyMessages [][]*types.SignedMessage - for i := 0; i < numBlocks; i++ { - emptyBLSMessages = append(emptyBLSMessages, []*types.UnsignedMessage{}) - emptyMessages = append(emptyMessages, []*types.SignedMessage{}) - } - return emptyBLSMessages, emptyMessages -} - -func setupCborBlockstore() (*cborutil.IpldStore, blockstore.Blockstore) { - bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - cis := cborutil.NewIpldStore(bs) - - return cis, bs -} - -// requireMakeNBlocks sets up 3 blocks with 3 owner actors and 3 miner actors and puts them in the state tree. -// the owner actors have associated mockSigners for signing blocks and tickets. 
-func requireMakeNBlocks(t *testing.T, n int, pTipSet block.TipSet, root cid.Cid, receiptRoot cid.Cid, minerAddrs []address.Address, m2w map[address.Address]address.Address, signer types.Signer) []*block.Block { - require.True(t, n <= len(minerAddrs)) - blocks := make([]*block.Block, n) - for i := 0; i < n; i++ { - blocks[i] = th.RequireSignedTestBlockFromTipSet(t, pTipSet, root, receiptRoot, 1, minerAddrs[i], m2w[minerAddrs[i]], signer) - } - return blocks -} - -func minerToWorkerFromAddrs(ctx context.Context, t *testing.T, tree state.Tree, vms vm.Storage, kis []crypto.KeyInfo) ([]address.Address, map[address.Address]address.Address) { - minerAddrs := make([]address.Address, len(kis)) - require.Equal(t, len(kis), len(minerAddrs)) - minerToWorker := make(map[address.Address]address.Address, len(kis)) - for i := 0; i < len(kis); i++ { - addr, err := kis[i].Address() - require.NoError(t, err) - - _, minerAddrs[i] = th.RequireNewMinerActor(ctx, t, tree, vms, addr, 10000, th.RequireRandomPeerID(t), types.ZeroAttoFIL) - - minerToWorker[minerAddrs[i]] = addr - } - return minerAddrs, minerToWorker -} - -func setTree(ctx context.Context, t *testing.T, kis []crypto.KeyInfo, cstore cbor.IpldStore, bstore blockstore.Blockstore, inRoot cid.Cid) (cid.Cid, []address.Address, map[address.Address]address.Address) { - tree, err := state.LoadState(ctx, cstore, inRoot) - require.NoError(t, err) - miners := make([]address.Address, len(kis)) - m2w := make(map[address.Address]address.Address, len(kis)) - vms := vm.NewStorage(bstore) - for i, ki := range kis { - workerAddr, err := ki.Address() - require.NoError(t, err) - _, minerAddr := th.RequireNewMinerActor(ctx, t, tree, vms, workerAddr, 10000, th.RequireRandomPeerID(t), types.ZeroAttoFIL) - miners[i] = minerAddr - m2w[minerAddr] = workerAddr - } - root, err := tree.Commit(ctx) - require.NoError(t, err) - return root, miners, m2w -} diff --git a/internal/pkg/consensus/power_table_view.go b/internal/pkg/consensus/power_table_view.go 
deleted file mode 100644 index ff553d0d6a..0000000000 --- a/internal/pkg/consensus/power_table_view.go +++ /dev/null @@ -1,82 +0,0 @@ -package consensus - -import ( - "context" - - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - - "github.com/filecoin-project/go-filecoin/internal/pkg/state" -) - -// PowerStateView is a view of chain state for election computations, typically at some lookback from the -// immediate parent state. -// This type isn't doing much that the state view doesn't already do, consider removing it. -type PowerStateView interface { - state.AccountStateView - MinerSectorConfiguration(ctx context.Context, maddr addr.Address) (*state.MinerSectorConfiguration, error) - MinerControlAddresses(ctx context.Context, maddr addr.Address) (owner, worker addr.Address, err error) - MinerSectorStates(ctx context.Context, maddr addr.Address) (*state.MinerSectorStates, error) - MinerGetSector(ctx context.Context, maddr addr.Address, sectorNum abi.SectorNumber) (*miner.SectorOnChainInfo, bool, error) - PowerNetworkTotal(ctx context.Context) (*state.NetworkPower, error) - MinerClaimedPower(ctx context.Context, miner addr.Address) (raw, qa abi.StoragePower, err error) -} - -// FaultStateView is a view of chain state for adjustment of miner power claims based on changes since the -// power state's lookback (primarily, the miner ceasing to be registered). -type FaultStateView interface { - MinerExists(ctx context.Context, maddr addr.Address) (bool, error) -} - -// An interface to the network power table for elections. -// Elections use the quality-adjusted power, rather than raw byte power. 
-type PowerTableView struct { - state PowerStateView - faultState FaultStateView -} - -func NewPowerTableView(state PowerStateView, faultState FaultStateView) PowerTableView { - return PowerTableView{ - state: state, - faultState: faultState, - } -} - -// Returns the network's total quality-adjusted power. -func (v PowerTableView) NetworkTotalPower(ctx context.Context) (abi.StoragePower, error) { - total, err := v.state.PowerNetworkTotal(ctx) - if err != nil { - return big.Zero(), err - } - return total.QualityAdjustedPower, nil -} - -// Returns a miner's claimed quality-adjusted power. -func (v PowerTableView) MinerClaimedPower(ctx context.Context, mAddr addr.Address) (abi.StoragePower, error) { - _, qa, err := v.state.MinerClaimedPower(ctx, mAddr) - if err != nil { - return big.Zero(), err - } - // Only return claim if fault state still tracks miner - exists, err := v.faultState.MinerExists(ctx, mAddr) - if err != nil { - return big.Zero(), err - } - if !exists { // miner was slashed - return big.Zero(), nil - } - return qa, nil -} - -// WorkerAddr returns the worker address for a miner actor. -func (v PowerTableView) WorkerAddr(ctx context.Context, mAddr addr.Address) (addr.Address, error) { - _, worker, err := v.state.MinerControlAddresses(ctx, mAddr) - return worker, err -} - -// SignerAddress returns the public key address associated with the given address. 
-func (v PowerTableView) SignerAddress(ctx context.Context, a addr.Address) (addr.Address, error) { - return v.state.AccountSignerAddress(ctx, a) -} diff --git a/internal/pkg/consensus/power_table_view_test.go b/internal/pkg/consensus/power_table_view_test.go deleted file mode 100644 index 4438cb492a..0000000000 --- a/internal/pkg/consensus/power_table_view_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package consensus_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - bstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -func TestTotal(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - numCommittedSectors := uint64(19) - numMiners := 3 - kis := types.MustGenerateBLSKeyInfo(numMiners, 0) - - cst, _, root := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis) - - table := consensus.NewPowerTableView(state.NewView(cst, root), state.NewView(cst, root)) - networkPower, err := table.NetworkTotalPower(ctx) - require.NoError(t, err) - - // TODO: test that the QA power is used when it differs from raw byte power after gengen computes it 
properly - // https://github.com/filecoin-project/go-filecoin/issues/4011 - expected := big.NewIntUnsigned(uint64(constants.DevSectorSize) * numCommittedSectors * uint64(numMiners)) - assert.True(t, expected.Equals(networkPower)) -} - -func TestMiner(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - kis := types.MustGenerateBLSKeyInfo(1, 0) - - numCommittedSectors := uint64(10) - cst, addrs, root := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis) - addr := addrs[0] - - table := consensus.NewPowerTableView(state.NewView(cst, root), state.NewView(cst, root)) - actual, err := table.MinerClaimedPower(ctx, addr) - require.NoError(t, err) - - expected := abi.NewStoragePower(int64(uint64(constants.DevSectorSize) * numCommittedSectors)) - assert.True(t, expected.Equals(actual)) - assert.Equal(t, expected, actual) -} - -func TestNoPowerAfterSlash(t *testing.T) { - tf.UnitTest(t) - // setup lookback state with 3 miners - ctx := context.Background() - numCommittedSectors := uint64(19) - numMiners := 3 - kis := types.MustGenerateBLSKeyInfo(numMiners, 0) - cstPower, addrsPower, rootPower := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis) - cstFaults, _, rootFaults := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis[0:2]) // drop the third key - table := consensus.NewPowerTableView(state.NewView(cstPower, rootPower), state.NewView(cstFaults, rootFaults)) - - // verify that faulted miner claim is 0 power - claim, err := table.MinerClaimedPower(ctx, addrsPower[2]) - require.NoError(t, err) - assert.Equal(t, abi.NewStoragePower(0), claim) -} - -func TestTotalPowerUnaffectedBySlash(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - numCommittedSectors := uint64(19) - numMiners := 3 - kis := types.MustGenerateBLSKeyInfo(numMiners, 0) - cstPower, _, rootPower := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis) - cstFaults, _, rootFaults := 
requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis[0:2]) // drop the third key - table := consensus.NewPowerTableView(state.NewView(cstPower, rootPower), state.NewView(cstFaults, rootFaults)) - - // verify that faulted miner claim is 0 power - total, err := table.NetworkTotalPower(ctx) - require.NoError(t, err) - expected := abi.NewStoragePower(int64(uint64(constants.DevSectorSize) * numCommittedSectors * uint64(numMiners))) - - assert.Equal(t, expected, total) -} - -func requireMinerWithNumCommittedSectors(ctx context.Context, t *testing.T, numCommittedSectors uint64, ownerKeys []crypto.KeyInfo) (*cborutil.IpldStore, []address.Address, cid.Cid) { - r := repo.NewInMemoryRepo() - bs := bstore.NewBlockstore(r.Datastore()) - cst := cborutil.NewIpldStore(bs) - numMiners := len(ownerKeys) - minerConfigs := make([]*gengen.CreateStorageMinerConfig, numMiners) - for i := 0; i < numMiners; i++ { - commCfgs, err := gengen.MakeCommitCfgs(int(numCommittedSectors)) - require.NoError(t, err) - minerConfigs[i] = &gengen.CreateStorageMinerConfig{ - Owner: i, - CommittedSectors: commCfgs, - SealProofType: constants.DevSealProofType, - } - } - - // set up genesis block containing some miners with non-zero power - genCfg := &gengen.GenesisCfg{} - require.NoError(t, gengen.MinerConfigs(minerConfigs)(genCfg)) - require.NoError(t, gengen.NetworkName("ptvtest")(genCfg)) - require.NoError(t, gengen.ImportKeys(ownerKeys, "1000000")(genCfg)) - - info, err := gengen.GenGen(ctx, genCfg, bs) - require.NoError(t, err) - - var genesis block.Block - require.NoError(t, cst.Get(ctx, info.GenesisCid, &genesis)) - retAddrs := make([]address.Address, numMiners) - for i := 0; i < numMiners; i++ { - retAddrs[i] = info.Miners[i].Address - } - return cst, retAddrs, genesis.StateRoot.Cid -} diff --git a/internal/pkg/consensus/processor.go b/internal/pkg/consensus/processor.go deleted file mode 100644 index 6dccc29e6d..0000000000 --- a/internal/pkg/consensus/processor.go +++ /dev/null @@ 
-1,93 +0,0 @@ -package consensus - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "go.opencensus.io/trace" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics/tracing" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -// ApplicationResult contains the result of successfully applying one message. -// ExecutionError might be set and the message can still be applied successfully. -// See ApplyMessage() for details. -type ApplicationResult struct { - Receipt *vm.MessageReceipt - ExecutionError error -} - -// ApplyMessageResult is the result of applying a single message. -type ApplyMessageResult struct { - ApplicationResult // Application-level result, if error is nil. - Failure error // Failure to apply the message - FailureIsPermanent bool // Whether failure is permanent, has no chance of succeeding later. -} - -type ChainRandomness interface { - SampleChainRandomness(ctx context.Context, head block.TipSetKey, tag crypto.DomainSeparationTag, epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) -} - -// DefaultProcessor handles all block processing. -type DefaultProcessor struct { - actors vm.ActorCodeLoader - syscalls vm.SyscallsImpl - rnd ChainRandomness -} - -var _ Processor = (*DefaultProcessor)(nil) - -// NewDefaultProcessor creates a default processor from the given state tree and vms. -func NewDefaultProcessor(syscalls vm.SyscallsImpl, rnd ChainRandomness) *DefaultProcessor { - return NewConfiguredProcessor(vm.DefaultActors, syscalls, rnd) -} - -// NewConfiguredProcessor creates a default processor with custom validation and rewards. 
-func NewConfiguredProcessor(actors vm.ActorCodeLoader, syscalls vm.SyscallsImpl, rnd ChainRandomness) *DefaultProcessor { - return &DefaultProcessor{ - actors: actors, - syscalls: syscalls, - rnd: rnd, - } -} - -// ProcessTipSet computes the state transition specified by the messages in all blocks in a TipSet. -func (p *DefaultProcessor) ProcessTipSet(ctx context.Context, st state.Tree, vms vm.Storage, ts block.TipSet, msgs []vm.BlockMessagesInfo) (results []vm.MessageReceipt, err error) { - ctx, span := trace.StartSpan(ctx, "DefaultProcessor.ProcessTipSet") - span.AddAttributes(trace.StringAttribute("tipset", ts.String())) - defer tracing.AddErrorEndSpan(ctx, span, &err) - - epoch, err := ts.Height() - if err != nil { - return nil, err - } - - parent, err := ts.Parents() - if err != nil { - return nil, err - } - - // Note: since the parent tipset key is now passed explicitly to ApplyTipSetMessages we can refactor to skip - // currying it in to the randomness call here. - rnd := headRandomness{ - chain: p.rnd, - head: parent, - } - v := vm.NewVM(st, &vms, p.syscalls) - - return v.ApplyTipSetMessages(msgs, parent, epoch, &rnd) -} - -// A chain randomness source with a fixed head tipset key. -type headRandomness struct { - chain ChainRandomness - head block.TipSetKey -} - -func (h *headRandomness) Randomness(ctx context.Context, tag crypto.DomainSeparationTag, epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - return h.chain.SampleChainRandomness(ctx, h.head, tag, epoch, entropy) -} diff --git a/internal/pkg/consensus/protocol.go b/internal/pkg/consensus/protocol.go deleted file mode 100644 index 9eec90b659..0000000000 --- a/internal/pkg/consensus/protocol.go +++ /dev/null @@ -1,36 +0,0 @@ -package consensus - -// This interface is (mostly) stateless. All of its methods are -// pure functions that only depend on their inputs. 
- -// Note: State does creep in through the cbor and block stores used to keep state tree and -// actor storage data in the Expected implementation. However those stores -// are global to the filecoin node so accessing the correct state is simple. -// Furthermore these stores are providing content addressed values. -// The output of these interface functions does not change based on the store state -// except for errors in the case the stores do not have a mapping. -import ( - "context" - "time" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// Protocol is an interface defining a blockchain consensus protocol. The -// methods here were arrived at after significant work fitting consensus into -// the system and the implementation level. The method set is not necessarily -// the most theoretically obvious or pleasing and should not be considered -// finalized. -type Protocol interface { - // RunStateTransition returns the state root CID resulting from applying the input ts to the - // prior `stateID`. It returns an error if the transition is invalid. - RunStateTransition(ctx context.Context, ts block.TipSet, blsMsgs [][]*types.UnsignedMessage, secpMsgs [][]*types.SignedMessage, parentWeight fbig.Int, parentStateRoot cid.Cid, parentReceiptRoot cid.Cid) (cid.Cid, []vm.MessageReceipt, error) - - // BlockTime returns the block time used by the consensus protocol. 
- BlockTime() time.Duration -} diff --git a/internal/pkg/consensus/testing.go b/internal/pkg/consensus/testing.go deleted file mode 100644 index 524322a4d0..0000000000 --- a/internal/pkg/consensus/testing.go +++ /dev/null @@ -1,186 +0,0 @@ -package consensus - -import ( - "context" - "fmt" - "testing" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/postgenerator" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// RequireNewTipSet instantiates and returns a new tipset of the given blocks -// and requires that the setup validation succeed. -func RequireNewTipSet(require *require.Assertions, blks ...*block.Block) block.TipSet { - ts, err := block.NewTipSet(blks...) - require.NoError(err) - return ts -} - -// FakeConsensusStateViewer is a fake power state viewer. -type FakeConsensusStateViewer struct { - Views map[cid.Cid]*state.FakeStateView -} - -// PowerStateView returns the state view for a root. -func (f *FakeConsensusStateViewer) PowerStateView(root cid.Cid) PowerStateView { - return f.Views[root] -} - -// FaultStateView returns the state view for a root. -func (f *FakeConsensusStateViewer) FaultStateView(root cid.Cid) FaultStateView { - return f.Views[root] -} - -// FakeMessageValidator is a validator that doesn't validate to simplify message creation in tests. 
-type FakeMessageValidator struct{} - -func (mv *FakeMessageValidator) ValidateSignedMessageSyntax(ctx context.Context, smsg *types.SignedMessage) error { - return nil -} - -func (mv *FakeMessageValidator) ValidateUnsignedMessageSyntax(ctx context.Context, msg *types.UnsignedMessage) error { - return nil -} - -// FakeElectionMachine generates fake election proofs and verifies all proofs -type FakeElectionMachine struct{} - -var _ ElectionValidator = new(FakeElectionMachine) - -// GenerateElectionProof returns a fake randomness -func (fem *FakeElectionMachine) GenerateElectionProof(_ context.Context, _ *drand.Entry, - _ abi.ChainEpoch, _ address.Address, _ address.Address, _ types.Signer) (crypto.VRFPi, error) { - return MakeFakeVRFProofForTest(), nil -} - -// GenerateEPoSt returns a fake post proof -func (fem *FakeElectionMachine) GenerateWinningPoSt(ctx context.Context, entry *drand.Entry, epoch abi.ChainEpoch, ep postgenerator.PoStGenerator, maddr address.Address, sectors SectorsStateView) ([]block.PoStProof, error) { - return []block.PoStProof{{ - RegisteredProof: constants.DevRegisteredWinningPoStProof, - ProofBytes: []byte{0xe}, - }}, nil -} - -func (fem *FakeElectionMachine) IsWinner(challengeTicket []byte, minerPower, networkPower abi.StoragePower) bool { - return true -} - -func (fem *FakeElectionMachine) VerifyElectionProof(_ context.Context, _ *drand.Entry, _ abi.ChainEpoch, _ address.Address, _ address.Address, _ crypto.VRFPi) error { - return nil -} - -func (fem *FakeElectionMachine) VerifyWinningPoSt(ctx context.Context, ep EPoStVerifier, seedEntry *drand.Entry, epoch abi.ChainEpoch, proofs []block.PoStProof, mIDAddr address.Address, sectors SectorsStateView) (bool, error) { - return true, nil -} - -// FakeTicketMachine generates fake tickets and verifies all tickets -type FakeTicketMachine struct{} - -// MakeTicket returns a fake ticket -func (ftm *FakeTicketMachine) MakeTicket(ctx context.Context, base block.TipSetKey, epoch abi.ChainEpoch, miner 
address.Address, entry *drand.Entry, newPeriod bool, worker address.Address, signer types.Signer) (block.Ticket, error) { - return MakeFakeTicketForTest(), nil -} - -// IsValidTicket always returns true -func (ftm *FakeTicketMachine) IsValidTicket(ctx context.Context, base block.TipSetKey, entry *drand.Entry, newPeriod bool, - epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, ticket block.Ticket) error { - return nil -} - -// FailingTicketValidator marks all tickets as invalid -type FailingTicketValidator struct{} - -// IsValidTicket always returns false -func (ftv *FailingTicketValidator) IsValidTicket(ctx context.Context, base block.TipSetKey, entry *drand.Entry, newPeriod bool, - epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, ticket block.Ticket) error { - return fmt.Errorf("invalid ticket") -} - -// FailingElectionValidator marks all election candidates as invalid -type FailingElectionValidator struct{} - -var _ ElectionValidator = new(FailingElectionValidator) - -func (fev *FailingElectionValidator) IsWinner(challengeTicket []byte, minerPower, networkPower abi.StoragePower) bool { - return false -} - -func (fev *FailingElectionValidator) VerifyElectionProof(_ context.Context, _ *drand.Entry, _ abi.ChainEpoch, _ address.Address, _ address.Address, _ crypto.VRFPi) error { - return nil -} - -func (fev *FailingElectionValidator) VerifyWinningPoSt(ctx context.Context, ep EPoStVerifier, seedEntry *drand.Entry, epoch abi.ChainEpoch, proofs []block.PoStProof, mIDAddr address.Address, sectors SectorsStateView) (bool, error) { - return true, nil -} - -// MakeFakeTicketForTest creates a fake ticket -func MakeFakeTicketForTest() block.Ticket { - val := make([]byte, 65) - val[0] = 200 - return block.Ticket{ - VRFProof: crypto.VRFPi(val[:]), - } -} - -// MakeFakeVRFProofForTest creates a fake election proof -func MakeFakeVRFProofForTest() []byte { - proof := make([]byte, 65) - proof[0] = 42 - return proof -} - -// 
MakeFakePoStForTest creates a fake post -func MakeFakePoStsForTest() []block.PoStProof { - return []block.PoStProof{{ - RegisteredProof: constants.DevRegisteredWinningPoStProof, - ProofBytes: []byte{0xe}, - }} -} - -// NFakeSectorInfos returns numSectors fake sector infos -func RequireFakeSectorInfos(t *testing.T, numSectors uint64) []abi.SectorInfo { - var infos []abi.SectorInfo - for i := uint64(0); i < numSectors; i++ { - infos = append(infos, abi.SectorInfo{ - RegisteredProof: constants.DevRegisteredSealProof, - SectorNumber: abi.SectorNumber(i), - SealedCID: types.CidFromString(t, fmt.Sprintf("fake-sector-%d", i)), - }) - } - - return infos -} - -///// Sampler ///// - -// FakeChainRandomness generates deterministic values that are a function of a seed and the provided -// tag, epoch, and entropy (but *not* the chain head key). -type FakeChainRandomness struct { - Seed uint -} - -func (s *FakeChainRandomness) SampleChainRandomness(_ context.Context, _ block.TipSetKey, tag acrypto.DomainSeparationTag, epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - return []byte(fmt.Sprintf("s=%d,e=%d,t=%d,p=%s", s.Seed, epoch, tag, string(entropy))), nil -} - -type FakeSampler struct { - Seed uint -} - -func (s *FakeSampler) SampleTicket(_ context.Context, _ block.TipSetKey, epoch abi.ChainEpoch) (block.Ticket, error) { - return block.Ticket{ - VRFProof: []byte(fmt.Sprintf("s=%d,e=%d", s.Seed, epoch)), - }, nil -} diff --git a/internal/pkg/consensus/testing_poster.go b/internal/pkg/consensus/testing_poster.go deleted file mode 100644 index 3554fe0e64..0000000000 --- a/internal/pkg/consensus/testing_poster.go +++ /dev/null @@ -1,34 +0,0 @@ -package consensus - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/postgenerator" -) - -// TestElectionPoster generates and verifies electoin PoSts -type 
TestElectionPoster struct{} - -var _ EPoStVerifier = new(TestElectionPoster) -var _ postgenerator.PoStGenerator = new(TestElectionPoster) - -// VerifyWinningPoSt returns the validity of the input PoSt proof -func (ep *TestElectionPoster) VerifyWinningPoSt(_ context.Context, _ abi.WinningPoStVerifyInfo) (bool, error) { - return true, nil -} - -// GenerateWinningPoStSectorChallenge determines the challenges used to create a winning PoSt. -func (ep *TestElectionPoster) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { - return nil, nil -} - -// GenerateWinningPoSt creates a post proof for a winning block -func (ep *TestElectionPoster) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - return []abi.PoStProof{{ - RegisteredProof: constants.DevRegisteredWinningPoStProof, - ProofBytes: []byte{0xe}, - }}, nil -} diff --git a/internal/pkg/consensus/ticket.go b/internal/pkg/consensus/ticket.go deleted file mode 100644 index 6d16eff469..0000000000 --- a/internal/pkg/consensus/ticket.go +++ /dev/null @@ -1,83 +0,0 @@ -package consensus - -import ( - "bytes" - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/minio/blake2b-simd" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -type ChainSampler interface { - SampleTicket(ctx context.Context, head block.TipSetKey, epoch abi.ChainEpoch) (block.Ticket, error) 
-} - -// TicketMachine uses a VRF and VDF to generate deterministic, unpredictable -// and time delayed tickets and validates these tickets. -type TicketMachine struct { - sampler ChainSampler -} - -func NewTicketMachine(sampler ChainSampler) *TicketMachine { - return &TicketMachine{sampler: sampler} -} - -// MakeTicket creates a new ticket from a chain and target epoch by running a verifiable -// randomness function on the prior ticket. -func (tm TicketMachine) MakeTicket(ctx context.Context, base block.TipSetKey, epoch abi.ChainEpoch, miner address.Address, entry *drand.Entry, newPeriod bool, worker address.Address, signer types.Signer) (block.Ticket, error) { - randomness, err := tm.ticketVRFRandomness(ctx, base, entry, newPeriod, miner, epoch) - if err != nil { - return block.Ticket{}, errors.Wrap(err, "failed to generate ticket randomness") - } - vrfProof, err := signer.SignBytes(ctx, randomness, worker) - if err != nil { - return block.Ticket{}, errors.Wrap(err, "failed to sign election post randomness") - } - return block.Ticket{ - VRFProof: vrfProof.Data, - }, nil -} - -// IsValidTicket verifies that the ticket's proof of randomness is valid with respect to its parent. 
-func (tm TicketMachine) IsValidTicket(ctx context.Context, base block.TipSetKey, entry *drand.Entry, newPeriod bool, - epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, ticket block.Ticket) error { - randomness, err := tm.ticketVRFRandomness(ctx, base, entry, newPeriod, miner, epoch) - if err != nil { - return errors.Wrap(err, "failed to generate ticket randomness") - } - - return crypto.ValidateBlsSignature(randomness, workerSigner, ticket.VRFProof) -} - -func (tm TicketMachine) ticketVRFRandomness(ctx context.Context, base block.TipSetKey, entry *drand.Entry, newPeriod bool, miner address.Address, epoch abi.ChainEpoch) (abi.Randomness, error) { - entropyBuf := bytes.Buffer{} - minerEntropy, err := encoding.Encode(miner) - if err != nil { - return nil, errors.Wrapf(err, "failed to encode miner entropy") - } - _, err = entropyBuf.Write(minerEntropy) - if err != nil { - return nil, err - } - if !newPeriod { // resample previous ticket and add to entropy - ticket, err := tm.sampler.SampleTicket(ctx, base, epoch) - if err != nil { - return nil, errors.Wrapf(err, "failed to sample previous ticket") - } - _, err = entropyBuf.Write(ticket.VRFProof) - if err != nil { - return nil, err - } - } - seed := blake2b.Sum256(entry.Data) - return crypto.BlendEntropy(acrypto.DomainSeparationTag_TicketProduction, seed[:], epoch, entropyBuf.Bytes()) -} diff --git a/internal/pkg/consensus/ticket_test.go b/internal/pkg/consensus/ticket_test.go deleted file mode 100644 index eba34e3dae..0000000000 --- a/internal/pkg/consensus/ticket_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package consensus_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/drand" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestGenValidTicketChain(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - head := block.NewTipSetKey() // Tipset key is unused by fake randomness - - // Interleave 3 signers - kis := types.MustGenerateBLSKeyInfo(3, 0) - - miner, err := address.NewIDAddress(uint64(1)) - require.NoError(t, err) - signer := types.NewMockSigner(kis) - addr1 := requireAddress(t, &kis[0]) - addr2 := requireAddress(t, &kis[1]) - addr3 := requireAddress(t, &kis[2]) - - schedule := struct { - Addrs []address.Address - }{ - Addrs: []address.Address{addr1, addr1, addr1, addr2, addr3, addr3, addr1, addr2}, - } - - rnd := consensus.FakeSampler{Seed: 0} - tm := consensus.NewTicketMachine(&rnd) - - // Grow the specified ticket chain without error - for i := 0; i < len(schedule.Addrs); i++ { - requireValidTicket(ctx, t, tm, head, abi.ChainEpoch(i), miner, schedule.Addrs[i], signer) - } -} - -func requireValidTicket(ctx context.Context, t *testing.T, tm *consensus.TicketMachine, head block.TipSetKey, epoch abi.ChainEpoch, - miner, worker address.Address, signer types.Signer) { - electionEntry := &drand.Entry{} - newPeriod := false - ticket, err := tm.MakeTicket(ctx, head, epoch, miner, electionEntry, newPeriod, worker, signer) - require.NoError(t, err) - - err = tm.IsValidTicket(ctx, head, electionEntry, newPeriod, epoch, miner, worker, ticket) - require.NoError(t, err) -} - -func TestNextTicketFailsWithInvalidSigner(t *testing.T) { - ctx := context.Background() - head := block.NewTipSetKey() // Tipset key is unused by fake randomness - miner, err := address.NewIDAddress(uint64(1)) - require.NoError(t, err) - - signer, _ := 
types.NewMockSignersAndKeyInfo(1) - badAddr := vmaddr.RequireIDAddress(t, 100) - rnd := consensus.FakeSampler{Seed: 0} - tm := consensus.NewTicketMachine(&rnd) - electionEntry := &drand.Entry{} - newPeriod := false - badTicket, err := tm.MakeTicket(ctx, head, abi.ChainEpoch(1), miner, electionEntry, newPeriod, badAddr, signer) - assert.Error(t, err) - assert.Nil(t, badTicket.VRFProof) -} - -func requireAddress(t *testing.T, ki *crypto.KeyInfo) address.Address { - addr, err := ki.Address() - require.NoError(t, err) - return addr -} diff --git a/internal/pkg/consensus/validation.go b/internal/pkg/consensus/validation.go deleted file mode 100644 index 347e0a5ca2..0000000000 --- a/internal/pkg/consensus/validation.go +++ /dev/null @@ -1,247 +0,0 @@ -package consensus - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -var dropNonAccountCt *metrics.Int64Counter -var dropInsufficientGasCt *metrics.Int64Counter -var dropNonceTooLowCt *metrics.Int64Counter -var dropNonceTooHighCt *metrics.Int64Counter - -var invReceiverUndefCt *metrics.Int64Counter -var invSenderUndefCt *metrics.Int64Counter -var invValueAboveMaxCt *metrics.Int64Counter -var invParamsNilCt *metrics.Int64Counter -var invGasPriceNegativeCt *metrics.Int64Counter -var invGasBelowMinimumCt *metrics.Int64Counter -var invNegativeValueCt 
*metrics.Int64Counter -var invGasAboveBlockLimitCt *metrics.Int64Counter - -// The maximum allowed message value. -var msgMaxValue = types.NewAttoFILFromFIL(2e9) - -// These gas cost values must match those in vm/internal/gascost. -// TODO: Look up gas costs from the same place the VM gets them, keyed by epoch. https://github.com/filecoin-project/go-filecoin/issues/3955 -const onChainMessageBase = gas.Unit(0) -const onChainMessagePerByte = gas.Unit(2) - -func init() { - dropNonAccountCt = metrics.NewInt64Counter("consensus/msg_non_account_sender", "Count of dropped messages with non-account sender") - dropInsufficientGasCt = metrics.NewInt64Counter("consensus/msg_insufficient_gas_err", "Count of dropped messages with insufficient gas") - dropNonceTooLowCt = metrics.NewInt64Counter("consensus/msg_nonce_low_err", "Count of dropped messages with nonce too low") - dropNonceTooHighCt = metrics.NewInt64Counter("consensus/msg_nonce_high_err", "Count of dropped messages with nonce too high") - - invReceiverUndefCt = metrics.NewInt64Counter("consensus/msg_undef_receiver", "Count of") - invSenderUndefCt = metrics.NewInt64Counter("consensus/msg_undef_sender", "Count of") - invValueAboveMaxCt = metrics.NewInt64Counter("consensus/msg_value_max", "Count of") - invParamsNilCt = metrics.NewInt64Counter("consensus/msg_params_nil", "Count of") - invGasPriceNegativeCt = metrics.NewInt64Counter("consensus/msg_gasprice_negative", "Count of") - invGasBelowMinimumCt = metrics.NewInt64Counter("consensus/msg_gaslimit_min", "Count of") - invNegativeValueCt = metrics.NewInt64Counter("consensus/msg_value_negative", "Count of invalid negative messages with negative value") - invGasAboveBlockLimitCt = metrics.NewInt64Counter("consensus/msg_gaslimit_max", "Count of invalid messages with gas above block limit") -} - -// MessageSelectionChecker checks for miner penalties on signed messages -type MessagePenaltyChecker struct { - api penaltyCheckerAPI -} - -// penaltyCheckerAPI allows the validator 
to access latest state -type penaltyCheckerAPI interface { - Head() block.TipSetKey - GetActorAt(ctx context.Context, tipKey block.TipSetKey, addr address.Address) (*actor.Actor, error) -} - -func NewMessagePenaltyChecker(api penaltyCheckerAPI) *MessagePenaltyChecker { - return &MessagePenaltyChecker{ - api: api, - } -} - -// PenaltyCheck checks that a message is semantically valid for processing without -// causing miner penality. It treats any miner penalty condition as an error. -func (v *MessagePenaltyChecker) PenaltyCheck(ctx context.Context, msg *types.UnsignedMessage) error { - fromActor, err := v.api.GetActorAt(ctx, v.api.Head(), msg.From) - if err != nil { - return err - } - // Sender should not be an empty actor - if fromActor == nil || fromActor.Empty() { - return fmt.Errorf("sender %s is missing/empty: %s", msg.From, msg) - } - - // Sender must be an account actor. - if !(builtin.AccountActorCodeID.Equals(fromActor.Code.Cid)) { - dropNonAccountCt.Inc(ctx, 1) - return fmt.Errorf("sender %s is non-account actor with code %s: %s", msg.From, fromActor.Code.Cid, msg) - } - - // Avoid processing messages for actors that cannot pay. - if !canCoverGasLimit(msg, fromActor) { - dropInsufficientGasCt.Inc(ctx, 1) - return fmt.Errorf("insufficient funds from sender %s to cover value and gas cost: %s ", msg.From, msg) - } - - if msg.CallSeqNum < fromActor.CallSeqNum { - dropNonceTooLowCt.Inc(ctx, 1) - return fmt.Errorf("nonce %d lower than expected %d: %s", msg.CallSeqNum, fromActor.CallSeqNum, msg) - } - - if msg.CallSeqNum > fromActor.CallSeqNum { - dropNonceTooHighCt.Inc(ctx, 1) - return fmt.Errorf("nonce %d greater than expected: %d: %s", msg.CallSeqNum, fromActor.CallSeqNum, msg) - } - - return nil -} - -// Check's whether the maximum gas charge + message value is within the actor's balance. -// Note that this is an imperfect test, since nested messages invoked by this one may transfer -// more value from the actor's balance. 
-func canCoverGasLimit(msg *types.UnsignedMessage, actor *actor.Actor) bool { - // balance >= (gasprice*gasLimit + value) - gascost := big.Mul(abi.NewTokenAmount(msg.GasPrice.Int.Int64()), abi.NewTokenAmount(int64(msg.GasLimit))) - expense := big.Add(gascost, abi.NewTokenAmount(msg.Value.Int.Int64())) - return actor.Balance.GreaterThanEqual(expense) -} - -// DefaultMessageSyntaxValidator checks basic conditions independent of current state -type DefaultMessageSyntaxValidator struct{} - -func NewMessageSyntaxValidator() *DefaultMessageSyntaxValidator { - return &DefaultMessageSyntaxValidator{} -} - -// ValidateSignedMessageSyntax validates signed message syntax and state-independent invariants. -// Used for incoming messages over pubsub and secp messages included in blocks. -func (v *DefaultMessageSyntaxValidator) ValidateSignedMessageSyntax(ctx context.Context, smsg *types.SignedMessage) error { - msg := &smsg.Message - var msgLen int - if smsg.Signature.Type == crypto.SigTypeBLS { - enc, err := smsg.Message.Marshal() - if err != nil { - return errors.Wrapf(err, "failed to calculate message size") - } - msgLen = len(enc) - } else { - enc, err := smsg.Marshal() - if err != nil { - return errors.Wrapf(err, "failed to calculate message size") - } - msgLen = len(enc) - } - return v.validateMessageSyntaxShared(ctx, msg, msgLen) -} - -// ValidateUnsignedMessageSyntax validates unisigned message syntax and state-independent invariants. -// Used for bls messages included in blocks. 
-func (v *DefaultMessageSyntaxValidator) ValidateUnsignedMessageSyntax(ctx context.Context, msg *types.UnsignedMessage) error { - enc, err := msg.Marshal() - if err != nil { - return errors.Wrapf(err, "failed to calculate message size") - } - msgLen := len(enc) - return v.validateMessageSyntaxShared(ctx, msg, msgLen) -} - -func (v *DefaultMessageSyntaxValidator) validateMessageSyntaxShared(ctx context.Context, msg *types.UnsignedMessage, msgLen int) error { - if msg.Version != types.MessageVersion { - return fmt.Errorf("version %d, expected %d", msg.Version, types.MessageVersion) - } - - if msg.To.Empty() { - invReceiverUndefCt.Inc(ctx, 1) - return fmt.Errorf("empty receiver: %s", msg) - } - if msg.From.Empty() { - invSenderUndefCt.Inc(ctx, 1) - return fmt.Errorf("empty sender: %s", msg) - } - // The spec calls for validating a non-negative call sequence num, but by - // the time it's decoded into a uint64 the check is already passed - - if msg.Value.LessThan(big.Zero()) { - invNegativeValueCt.Inc(ctx, 1) - return fmt.Errorf("negative value %s: %s", msg.Value, msg) - } - if msg.Value.GreaterThan(msgMaxValue) { - invValueAboveMaxCt.Inc(ctx, 1) - return fmt.Errorf("value %s exceeds max %s: %s", msg.Value, msgMaxValue, msg) - } - // The spec calls for validating a non-negative method num, but by the - // time it's decoded into a uint64 the check is already passed - - if msg.Params == nil { - invParamsNilCt.Inc(ctx, 1) - return fmt.Errorf("nil params (should be empty-array): %s", msg) - } - if msg.GasPrice.LessThan(types.ZeroAttoFIL) { - invGasPriceNegativeCt.Inc(ctx, 1) - return fmt.Errorf("negative gas price %s: %s", msg.GasPrice, msg) - } - // The minimum gas limit ensures the sender has enough balance to pay for inclusion of the message in the chain - // *at all*. Without this, a message could hit out-of-gas but the sender pay nothing. - // NOTE(anorth): this check has been moved to execution time, and the miner is penalized for including - // such a message. 
We can probably remove this. - minMsgGas := onChainMessageBase + onChainMessagePerByte*gas.Unit(msgLen) - if msg.GasLimit < minMsgGas { - invGasBelowMinimumCt.Inc(ctx, 1) - return fmt.Errorf("gas limit %d below minimum %d to cover message size: %s", msg.GasLimit, minMsgGas, msg) - } - if msg.GasLimit > types.BlockGasLimit { - invGasAboveBlockLimitCt.Inc(ctx, 1) - return fmt.Errorf("gas limit %d exceeds block limit %d: %s", msg.GasLimit, types.BlockGasLimit, msg) - } - return nil -} - -// MessageSignatureValidator validates message signatures -type MessageSignatureValidator struct { - api signatureValidatorAPI -} - -// signatureValidatorAPI allows the validator to access state needed for signature checking -type signatureValidatorAPI interface { - Head() block.TipSetKey - AccountStateView(baseKey block.TipSetKey) (state.AccountStateView, error) -} - -func NewMessageSignatureValidator(api signatureValidatorAPI) *MessageSignatureValidator { - return &MessageSignatureValidator{ - api: api, - } -} - -// Validate validates the signed message signature. Errors probably mean the -// validation failed, but possibly indicate a failure to retrieve state. 
-func (v *MessageSignatureValidator) Validate(ctx context.Context, smsg *types.SignedMessage) error { - head := v.api.Head() - view, err := v.api.AccountStateView(head) - if err != nil { - return errors.Wrapf(err, "failed to load state at %v", head) - } - - sigValidator := state.NewSignatureValidator(view) - - // ensure message is properly signed - if err := sigValidator.ValidateMessageSignature(ctx, smsg); err != nil { - return errors.Wrap(err, fmt.Errorf("invalid signature by sender over message data").Error()) - } - return nil -} diff --git a/internal/pkg/consensus/validation_test.go b/internal/pkg/consensus/validation_test.go deleted file mode 100644 index 3cbe0241e7..0000000000 --- a/internal/pkg/consensus/validation_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package consensus_test - -import ( - "context" - "fmt" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/ipfs/go-cid" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var keys = types.MustGenerateKeyInfo(2, 42) -var addresses = make([]address.Address, len(keys)) - -var methodID = abi.MethodNum(21231) - -func init() { - for i, k := range keys { - addr, _ := k.Address() - addresses[i] = addr - } -} - -func TestMessagePenaltyChecker(t *testing.T) { - tf.UnitTest(t) 
- - alice := addresses[0] - bob := addresses[1] - actor := newActor(t, 1000, 100) - api := NewMockIngestionValidatorAPI() - api.ActorAddr = alice - api.Actor = actor - - checker := consensus.NewMessagePenaltyChecker(api) - ctx := context.Background() - - t.Run("valid", func(t *testing.T) { - msg := newMessage(t, alice, bob, 100, 5, 1, 0) - assert.NoError(t, checker.PenaltyCheck(ctx, msg)) - }) - - t.Run("non-account actor fails", func(t *testing.T) { - badActor := newActor(t, 1000, 100) - badActor.Code = e.NewCid(types.CidFromString(t, "somecid")) - msg := newMessage(t, alice, bob, 100, 5, 1, 0) - api := NewMockIngestionValidatorAPI() - api.ActorAddr = alice - api.Actor = badActor - checker := consensus.NewMessagePenaltyChecker(api) - assert.Errorf(t, checker.PenaltyCheck(ctx, msg), "account") - }) - - t.Run("can't cover value", func(t *testing.T) { - msg := newMessage(t, alice, bob, 100, 2000, 1, 0) // lots of value - assert.Errorf(t, checker.PenaltyCheck(ctx, msg), "funds") - - msg = newMessage(t, alice, bob, 100, 5, 100000, 200) // lots of expensive gas - assert.Errorf(t, checker.PenaltyCheck(ctx, msg), "funds") - }) - - t.Run("low nonce", func(t *testing.T) { - msg := newMessage(t, alice, bob, 99, 5, 1, 0) - assert.Errorf(t, checker.PenaltyCheck(ctx, msg), "too low") - }) - - t.Run("high nonce", func(t *testing.T) { - msg := newMessage(t, alice, bob, 101, 5, 1, 0) - assert.Errorf(t, checker.PenaltyCheck(ctx, msg), "too high") - }) -} - -func TestBLSSignatureValidationConfiguration(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - - // create bls address - pubKey := bls.PrivateKeyPublicKey(bls.PrivateKeyGenerate()) - from, err := address.NewBLSAddress(pubKey[:]) - require.NoError(t, err) - - msg := types.NewMeteredMessage(from, addresses[1], 0, types.ZeroAttoFIL, methodID, []byte("params"), types.NewGasPrice(1), gas.NewGas(300)) - unsigned := &types.SignedMessage{Message: *msg} - actor := newActor(t, 1000, 0) - - t.Run("syntax validator does not 
ignore missing signature", func(t *testing.T) { - api := NewMockIngestionValidatorAPI() - api.ActorAddr = from - api.Actor = actor - - validator := consensus.NewMessageSignatureValidator(api) - - err := validator.Validate(ctx, unsigned) - require.Error(t, err) - assert.Contains(t, err.Error(), "invalid signature") - }) -} - -func TestMessageSyntaxValidator(t *testing.T) { - tf.UnitTest(t) - var signer = types.NewMockSigner(keys) - alice := addresses[0] - bob := addresses[1] - - validator := consensus.NewMessageSyntaxValidator() - ctx := context.Background() - - t.Run("Actor not found is not an error", func(t *testing.T) { - msg, err := types.NewSignedMessage(ctx, *newMessage(t, bob, alice, 0, 0, 1, 5000), signer) - require.NoError(t, err) - assert.NoError(t, validator.ValidateSignedMessageSyntax(ctx, msg)) - }) - - t.Run("self send passes", func(t *testing.T) { - msg, err := types.NewSignedMessage(ctx, *newMessage(t, alice, alice, 100, 5, 1, 5000), signer) - require.NoError(t, err) - assert.NoError(t, validator.ValidateSignedMessageSyntax(ctx, msg), "self") - }) - - t.Run("negative value fails", func(t *testing.T) { - msg, err := types.NewSignedMessage(ctx, *newMessage(t, alice, alice, 100, -5, 1, 5000), signer) - require.NoError(t, err) - assert.Errorf(t, validator.ValidateSignedMessageSyntax(ctx, msg), "negative") - }) - - t.Run("block gas limit fails", func(t *testing.T) { - msg, err := types.NewSignedMessage(ctx, *newMessage(t, alice, bob, 100, 5, 1, types.BlockGasLimit+1), signer) - require.NoError(t, err) - assert.Errorf(t, validator.ValidateSignedMessageSyntax(ctx, msg), "block limit") - }) - -} - -func newActor(t *testing.T, balanceAF int, nonce uint64) *actor.Actor { - actor := actor.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(int64(balanceAF)), cid.Undef) - actor.CallSeqNum = nonce - return actor -} - -func newMessage(t *testing.T, from, to address.Address, nonce uint64, valueAF int, - gasPrice int64, gasLimit gas.Unit) *types.UnsignedMessage 
{ - val, ok := types.NewAttoFILFromString(fmt.Sprintf("%d", valueAF), 10) - require.True(t, ok, "invalid attofil") - return types.NewMeteredMessage( - from, - to, - nonce, - val, - methodID, - []byte("params"), - types.NewGasPrice(gasPrice), - gasLimit, - ) -} - -// FakeIngestionValidatorAPI provides a latest state -type FakeIngestionValidatorAPI struct { - ActorAddr address.Address - Actor *actor.Actor -} - -// NewMockIngestionValidatorAPI creates a new FakeIngestionValidatorAPI. -func NewMockIngestionValidatorAPI() *FakeIngestionValidatorAPI { - return &FakeIngestionValidatorAPI{Actor: &actor.Actor{}} -} - -func (api *FakeIngestionValidatorAPI) Head() block.TipSetKey { - return block.NewTipSetKey() -} - -func (api *FakeIngestionValidatorAPI) GetActorAt(ctx context.Context, key block.TipSetKey, a address.Address) (*actor.Actor, error) { - if a == api.ActorAddr { - return api.Actor, nil - } - return &actor.Actor{ - Balance: abi.NewTokenAmount(0), - }, nil -} - -func (api *FakeIngestionValidatorAPI) AccountStateView(baseKey block.TipSetKey) (state.AccountStateView, error) { - return &state.FakeStateView{}, nil -} diff --git a/internal/pkg/consensus/weight_test.go b/internal/pkg/consensus/weight_test.go deleted file mode 100644 index 33282e212c..0000000000 --- a/internal/pkg/consensus/weight_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package consensus_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -func TestWeight(t *testing.T) { - cst := cbor.NewMemCborStore() - ctx := context.Background() - fakeTree := state.NewFromString(t, "test-Weight-StateCid", cst) - fakeRoot, err := fakeTree.Commit(ctx) - require.NoError(t, err) - // We only care about total power for the weight function - // Total is 16, so bitlen is 5, log2b is 4 - viewer := makeStateViewer(fakeRoot, abi.NewStoragePower(16)) - ticket := consensus.MakeFakeTicketForTest() - toWeigh := block.RequireNewTipSet(t, &block.Block{ - ParentWeight: fbig.Zero(), - Ticket: ticket, - }) - sel := consensus.NewChainSelector(cst, &viewer, types.CidFromString(t, "genesisCid")) - - t.Run("basic happy path", func(t *testing.T) { - // 0 + (4*256 + (4*1*1*256/5*2)) - // 1024 + 102 = 1126 - w, err := sel.Weight(ctx, toWeigh, fakeRoot) - assert.NoError(t, err) - assert.Equal(t, fbig.NewInt(1126), w) - }) - - t.Run("total power adjusts as expected", func(t *testing.T) { - asLowerX := makeStateViewer(fakeRoot, abi.NewStoragePower(15)) - asSameX := makeStateViewer(fakeRoot, abi.NewStoragePower(31)) - asHigherX := makeStateViewer(fakeRoot, abi.NewStoragePower(32)) - - // 0 + (3*256) + (3*1*1*256/2*5) = 844 (truncating not rounding division) - selLower := consensus.NewChainSelector(cst, &asLowerX, types.CidFromString(t, "genesisCid")) - fixWeight, err := selLower.Weight(ctx, toWeigh, fakeRoot) - assert.NoError(t, err) - assert.Equal(t, fbig.NewInt(844), fixWeight) - - // Weight is same when total bytes = 16 as when total bytes = 31 - selSame := consensus.NewChainSelector(cst, &asSameX, types.CidFromString(t, "genesisCid")) - fixWeight, err = selSame.Weight(ctx, toWeigh, fakeRoot) - assert.NoError(t, err) - assert.Equal(t, fbig.NewInt(1126), fixWeight) - - // 0 + (5*256) + (5*1*1*256/2*5) = 1408 - selHigher := consensus.NewChainSelector(cst, &asHigherX, types.CidFromString(t, "genesisCid")) - fixWeight, err = selHigher.Weight(ctx, toWeigh, fakeRoot) - 
assert.NoError(t, err) - assert.Equal(t, fbig.NewInt(1408), fixWeight) - }) - - t.Run("non-zero parent weight", func(t *testing.T) { - parentWeight := fbig.NewInt(int64(49)) - toWeighWithParent := block.RequireNewTipSet(t, &block.Block{ - ParentWeight: parentWeight, - Ticket: ticket, - }) - - // 49 + (4*256) + (4*1*1*256/2*5) = 1175 - w, err := sel.Weight(ctx, toWeighWithParent, fakeRoot) - assert.NoError(t, err) - assert.Equal(t, fbig.NewInt(1175), w) - }) - - t.Run("many blocks", func(t *testing.T) { - toWeighThreeBlock := block.RequireNewTipSet(t, - &block.Block{ - ParentWeight: fbig.Zero(), - Ticket: ticket, - Timestamp: 0, - }, - &block.Block{ - ParentWeight: fbig.Zero(), - Ticket: ticket, - Timestamp: 1, - }, - &block.Block{ - ParentWeight: fbig.Zero(), - Ticket: ticket, - Timestamp: 2, - }, - ) - // 0 + (4*256) + (4*3*1*256/2*5) = 1331 - w, err := sel.Weight(ctx, toWeighThreeBlock, fakeRoot) - assert.NoError(t, err) - assert.Equal(t, fbig.NewInt(1331), w) - }) -} - -func makeStateViewer(stateRoot cid.Cid, networkPower abi.StoragePower) consensus.FakeConsensusStateViewer { - return consensus.FakeConsensusStateViewer{ - Views: map[cid.Cid]*appstate.FakeStateView{ - stateRoot: appstate.NewFakeStateView(networkPower, networkPower, 0, 0), - }, - } -} diff --git a/internal/pkg/constants/registered_proofs.go b/internal/pkg/constants/registered_proofs.go deleted file mode 100644 index 02726e305b..0000000000 --- a/internal/pkg/constants/registered_proofs.go +++ /dev/null @@ -1,8 +0,0 @@ -package constants - -import "github.com/filecoin-project/specs-actors/actors/abi" - -var DevRegisteredSealProof = abi.RegisteredProof_StackedDRG2KiBSeal - -var DevRegisteredWinningPoStProof = abi.RegisteredProof_StackedDRG2KiBWinningPoSt -var DevRegisteredWindowPoStProof = abi.RegisteredProof_StackedDRG2KiBWindowPoSt diff --git a/internal/pkg/crypto/crypto.go b/internal/pkg/crypto/crypto.go deleted file mode 100644 index 16c57b847d..0000000000 --- a/internal/pkg/crypto/crypto.go +++ 
/dev/null @@ -1,120 +0,0 @@ -package crypto - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "fmt" - "io" - - secp256k1 "github.com/ipsn/go-secp256k1" - - bls "github.com/filecoin-project/filecoin-ffi" -) - -// -// Abstract SECP and BLS crypto operations. -// - -// PrivateKeyBytes is the size of a serialized private key. -const PrivateKeyBytes = 32 - -// PublicKeyBytes is the size of a serialized public key. -const PublicKeyBytes = 65 - -// PublicKeyForSecpSecretKey returns the public key for this private key. -func PublicKeyForSecpSecretKey(sk []byte) []byte { - x, y := secp256k1.S256().ScalarBaseMult(sk) - return elliptic.Marshal(secp256k1.S256(), x, y) -} - -// SignSecp signs the given message using secp256k1 based cryptography, which must be 32 bytes long. -func SignSecp(sk, msg []byte) ([]byte, error) { - return secp256k1.Sign(msg, sk) -} - -// SignBLS signs the given message with BLS. -func SignBLS(sk, msg []byte) ([]byte, error) { - var privateKey bls.PrivateKey - copy(privateKey[:], sk) - sig := bls.PrivateKeySign(privateKey, msg) - return sig[:], nil -} - -// VerifySecp checks the given signature is a secp256k1 signature and returns true if it is valid. -func VerifySecp(pk, msg, signature []byte) bool { - if len(signature) == 65 { - // Drop the V (1byte) in [R | S | V] style signatures. - // The V (1byte) is the recovery bit and is not apart of the signature verification. - return secp256k1.VerifySignature(pk[:], msg, signature[:len(signature)-1]) - } - - return secp256k1.VerifySignature(pk[:], msg, signature) -} - -// VerifyBLS checks the given signature is valid using BLS cryptography. 
-func VerifyBLS(pubKey, msg, signature []byte) bool { - var blsSig bls.Signature - copy(blsSig[:], signature) - var blsPubKey bls.PublicKey - copy(blsPubKey[:], pubKey) - return bls.Verify(&blsSig, []bls.Digest{bls.Hash(msg)}, []bls.PublicKey{blsPubKey}) -} - -// VerifyBLSAggregate checks the given signature is a valid aggregate signature over all messages and public keys -func VerifyBLSAggregate(pubKeys, msgs [][]byte, signature []byte) bool { - digests := []bls.Digest{} - for _, msg := range msgs { - digests = append(digests, bls.Hash(msg)) - } - - keys := []bls.PublicKey{} - for _, pubKey := range pubKeys { - var blsPubKey bls.PublicKey - copy(blsPubKey[:], pubKey) - keys = append(keys, blsPubKey) - } - - var blsSig bls.Signature - copy(blsSig[:], signature) - return bls.Verify(&blsSig, digests, keys) -} - -// NewSecpKeyFromSeed generates a new key from the given reader. -func NewSecpKeyFromSeed(seed io.Reader) (KeyInfo, error) { - key, err := ecdsa.GenerateKey(secp256k1.S256(), seed) - if err != nil { - return KeyInfo{}, err - } - - privkey := make([]byte, PrivateKeyBytes) - blob := key.D.Bytes() - - // the length is guaranteed to be fixed, given the serialization rules for secp2561k curve points. - copy(privkey[PrivateKeyBytes-len(blob):], blob) - - return KeyInfo{ - PrivateKey: privkey, - SigType: SigTypeSecp256k1, - }, nil -} - -func NewBLSKeyFromSeed(seed io.Reader) (KeyInfo, error) { - var seedBytes bls.PrivateKeyGenSeed - read, err := seed.Read(seedBytes[:]) - if err != nil { - return KeyInfo{}, err - } - if read != len(seedBytes) { - return KeyInfo{}, fmt.Errorf("read only %d bytes of %d required from seed", read, len(seedBytes)) - } - k := bls.PrivateKeyGenerateWithSeed(seedBytes) - return KeyInfo{ - PrivateKey: k[:], - SigType: SigTypeBLS, - }, nil -} - -// EcRecover recovers the public key from a message, signature pair. 
-func EcRecover(msg, signature []byte) ([]byte, error) { - return secp256k1.RecoverPubkey(msg, signature) -} diff --git a/internal/pkg/crypto/crypto_test.go b/internal/pkg/crypto/crypto_test.go deleted file mode 100644 index 10b1e006c1..0000000000 --- a/internal/pkg/crypto/crypto_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package crypto_test - -import ( - "bytes" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestGenerateSecpKey(t *testing.T) { - tf.UnitTest(t) - - token := bytes.Repeat([]byte{42}, 512) - ki, err := crypto.NewSecpKeyFromSeed(bytes.NewReader(token)) - assert.NoError(t, err) - sk := ki.PrivateKey - - assert.Equal(t, len(sk), 32) - - msg := make([]byte, 32) - for i := 0; i < len(msg); i++ { - msg[i] = byte(i) - } - - digest, err := crypto.SignSecp(sk, msg) - assert.NoError(t, err) - assert.Equal(t, len(digest), 65) - pk := crypto.PublicKeyForSecpSecretKey(sk) - - // valid signature - assert.True(t, crypto.VerifySecp(pk, msg, digest)) - - // invalid signature - different message (too short) - assert.False(t, crypto.VerifySecp(pk, msg[3:], digest)) - - // invalid signature - different message - msg2 := make([]byte, 32) - copy(msg2, msg) - msg2[0] = 42 - assert.False(t, crypto.VerifySecp(pk, msg2, digest)) - - // invalid signature - different digest - digest2 := make([]byte, 65) - copy(digest2, digest) - digest2[0] = 42 - assert.False(t, crypto.VerifySecp(pk, msg, digest2)) - - // invalid signature - digest too short - assert.False(t, crypto.VerifySecp(pk, msg, digest[3:])) - assert.False(t, crypto.VerifySecp(pk, msg, digest[:29])) - - // invalid signature - digest too long - digest3 := make([]byte, 70) - copy(digest3, digest) - assert.False(t, crypto.VerifySecp(pk, msg, digest3)) - - recovered, err := 
crypto.EcRecover(msg, digest) - assert.NoError(t, err) - assert.Equal(t, recovered, crypto.PublicKeyForSecpSecretKey(sk)) -} - -func TestBLSSigning(t *testing.T) { - privateKey := bls.PrivateKeyGenerate() - data := []byte("data to be signed") - - signature, err := crypto.SignBLS(privateKey[:], data) - require.NoError(t, err) - - publicKey := bls.PrivateKeyPublicKey(privateKey) - - valid := crypto.VerifyBLS(publicKey[:], data, signature) - require.True(t, valid) - - // invalid signature fails - valid = crypto.VerifyBLS(publicKey[:], data, signature[3:]) - require.False(t, valid) - - // invalid digest fails - valid = crypto.VerifyBLS(publicKey[:], data[3:], signature) - require.False(t, valid) - -} diff --git a/internal/pkg/crypto/keyinfo.go b/internal/pkg/crypto/keyinfo.go deleted file mode 100644 index c26eab9fa4..0000000000 --- a/internal/pkg/crypto/keyinfo.go +++ /dev/null @@ -1,80 +0,0 @@ -package crypto - -import ( - "bytes" - - "github.com/filecoin-project/go-address" - "github.com/pkg/errors" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" -) - -// KeyInfo is a key and its type used for signing. -type KeyInfo struct { - // Private key. - PrivateKey []byte `json:"privateKey"` - // Cryptographic system used to generate private key. - SigType SigType `json:"sigType"` -} - -// Unmarshal decodes raw cbor bytes into KeyInfo. -func (ki *KeyInfo) Unmarshal(b []byte) error { - return encoding.Decode(b, ki) -} - -// Marshal KeyInfo into bytes. -func (ki *KeyInfo) Marshal() ([]byte, error) { - return encoding.Encode(ki) -} - -// Key returns the private key of KeyInfo -func (ki *KeyInfo) Key() []byte { - return ki.PrivateKey -} - -// Type returns the type of curve used to generate the private key -func (ki *KeyInfo) Type() SigType { - return ki.SigType -} - -// Equals returns true if the KeyInfo is equal to other. 
-func (ki *KeyInfo) Equals(other *KeyInfo) bool { - if ki == nil && other == nil { - return true - } - if ki == nil || other == nil { - return false - } - if ki.SigType != other.SigType { - return false - } - - return bytes.Equal(ki.PrivateKey, other.PrivateKey) -} - -// Address returns the address for this keyinfo -func (ki *KeyInfo) Address() (address.Address, error) { - if ki.SigType == SigTypeBLS { - return address.NewBLSAddress(ki.PublicKey()) - } - if ki.SigType == SigTypeSecp256k1 { - return address.NewSecp256k1Address(ki.PublicKey()) - } - return address.Undef, errors.Errorf("can not generate address for unknown crypto system: %d", ki.SigType) -} - -// Returns the public key part as uncompressed bytes. -func (ki *KeyInfo) PublicKey() []byte { - if ki.SigType == SigTypeBLS { - var blsPrivateKey bls.PrivateKey - copy(blsPrivateKey[:], ki.PrivateKey) - publicKey := bls.PrivateKeyPublicKey(blsPrivateKey) - - return publicKey[:] - } - if ki.SigType == SigTypeSecp256k1 { - return PublicKeyForSecpSecretKey(ki.PrivateKey) - } - return []byte{} -} diff --git a/internal/pkg/crypto/keyinfo_test.go b/internal/pkg/crypto/keyinfo_test.go deleted file mode 100644 index 7492858365..0000000000 --- a/internal/pkg/crypto/keyinfo_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package crypto_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestKeyInfoMarshal(t *testing.T) { - tf.UnitTest(t) - - ki := crypto.KeyInfo{ - PrivateKey: []byte{1, 2, 3, 4}, - SigType: crypto.SigTypeSecp256k1, - } - - marshaled, err := ki.Marshal() - assert.NoError(t, err) - - kiBack := &crypto.KeyInfo{} - err = kiBack.Unmarshal(marshaled) - assert.NoError(t, err) - - assert.Equal(t, ki.Key(), kiBack.Key()) - assert.Equal(t, ki.Type(), kiBack.Type()) - assert.True(t, ki.Equals(kiBack)) -} diff --git 
a/internal/pkg/crypto/randomness.go b/internal/pkg/crypto/randomness.go deleted file mode 100644 index 80405d22f4..0000000000 --- a/internal/pkg/crypto/randomness.go +++ /dev/null @@ -1,83 +0,0 @@ -package crypto - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/minio/blake2b-simd" - "github.com/pkg/errors" -) - -type RandomSeed []byte - -///// Chain sampling ///// - -type ChainSampler interface { - Sample(ctx context.Context, epoch abi.ChainEpoch) (RandomSeed, error) -} - -// A sampler for use when computing genesis state (the state that the genesis block points to as parent state). -// There is no chain to sample a seed from. -type GenesisSampler struct { - VRFProof VRFPi -} - -func (g *GenesisSampler) Sample(_ context.Context, epoch abi.ChainEpoch) (RandomSeed, error) { - if epoch > 0 { - return nil, fmt.Errorf("invalid use of genesis sampler for epoch %d", epoch) - } - return MakeRandomSeed(g.VRFProof) -} - -// Computes a random seed from raw ticket bytes. -// A randomness seed is the VRF digest of the minimum ticket of the tipset at or before the requested epoch -func MakeRandomSeed(rawVRFProof VRFPi) (RandomSeed, error) { - digest := rawVRFProof.Digest() - return digest[:], nil -} - -///// Randomness derivation ///// - -// RandomnessSource provides randomness to actors. -type RandomnessSource interface { - Randomness(ctx context.Context, tag crypto.DomainSeparationTag, epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) -} - -// A randomness source that seeds computations with a sample drawn from a chain epoch. 
-type ChainRandomnessSource struct { - Sampler ChainSampler -} - -func (c *ChainRandomnessSource) Randomness(ctx context.Context, tag crypto.DomainSeparationTag, epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - seed, err := c.Sampler.Sample(ctx, epoch) - if err != nil { - return nil, errors.Wrap(err, "failed to sample chain for randomness") - } - return BlendEntropy(tag, seed, epoch, entropy) -} - -func BlendEntropy(tag crypto.DomainSeparationTag, seed RandomSeed, epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - buffer := bytes.Buffer{} - err := binary.Write(&buffer, binary.BigEndian, int64(tag)) - if err != nil { - return nil, errors.Wrap(err, "failed to write tag for randomness") - } - _, err = buffer.Write(seed) - if err != nil { - return nil, errors.Wrap(err, "failed to write seed for randomness") - } - err = binary.Write(&buffer, binary.BigEndian, int64(epoch)) - if err != nil { - return nil, errors.Wrap(err, "failed to write epoch for randomness") - } - _, err = buffer.Write(entropy) - if err != nil { - return nil, errors.Wrap(err, "failed to write entropy for randomness") - } - bufHash := blake2b.Sum256(buffer.Bytes()) - return bufHash[:], nil -} diff --git a/internal/pkg/crypto/signatures.go b/internal/pkg/crypto/signatures.go deleted file mode 100644 index 325edff5e3..0000000000 --- a/internal/pkg/crypto/signatures.go +++ /dev/null @@ -1,86 +0,0 @@ -package crypto - -import ( - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/minio/blake2b-simd" -) - -// -// Address-based signature validation -// - -type Signature = crypto.Signature -type SigType = crypto.SigType - -const ( - SigTypeSecp256k1 = crypto.SigTypeSecp256k1 - SigTypeBLS = crypto.SigTypeBLS -) - -func Sign(data []byte, secretKey []byte, sigtype SigType) (Signature, error) { - var signature []byte - var err error - if sigtype == SigTypeSecp256k1 { - hash := blake2b.Sum256(data) - signature, 
err = SignSecp(secretKey, hash[:]) - } else if sigtype == SigTypeBLS { - signature, err = SignBLS(secretKey, data) - } else { - err = fmt.Errorf("unknown signature type %d", sigtype) - } - return Signature{ - Type: sigtype, - Data: signature, - }, err -} - -// ValidateSignature cryptographically verifies that 'sig' is the signed hash of 'data' with -// the public key belonging to `addr`. -func ValidateSignature(data []byte, addr address.Address, sig Signature) error { - switch addr.Protocol() { - case address.SECP256K1: - if sig.Type != SigTypeSecp256k1 { - return fmt.Errorf("incorrect signature type (%v) for address expected SECP256K1 signature", sig.Type) - } - return ValidateSecpSignature(data, addr, sig.Data) - case address.BLS: - if sig.Type != SigTypeBLS { - return fmt.Errorf("incorrect signature type (%v) for address expected BLS signature", sig.Type) - } - return ValidateBlsSignature(data, addr, sig.Data) - default: - return fmt.Errorf("incorrect address protocol (%v) for signature validation", addr.Protocol()) - } -} - -func ValidateSecpSignature(data []byte, addr address.Address, signature []byte) error { - if addr.Protocol() != address.SECP256K1 { - return fmt.Errorf("address protocol (%v) invalid for SECP256K1 signature verification", addr.Protocol()) - } - hash := blake2b.Sum256(data) - maybePk, err := EcRecover(hash[:], signature) - if err != nil { - return err - } - maybeAddr, err := address.NewSecp256k1Address(maybePk) - if err != nil { - return err - } - if maybeAddr != addr { - return fmt.Errorf("invalid SECP signature") - } - return nil -} - -func ValidateBlsSignature(data []byte, addr address.Address, signature []byte) error { - if addr.Protocol() != address.BLS { - return fmt.Errorf("address protocol (%v) invalid for BLS signature verification", addr.Protocol()) - } - if valid := VerifyBLS(addr.Payload(), data, signature); !valid { - return fmt.Errorf("invalid BLS signature") - } - return nil -} diff --git a/internal/pkg/crypto/vrf.go 
b/internal/pkg/crypto/vrf.go deleted file mode 100644 index 751a85c34a..0000000000 --- a/internal/pkg/crypto/vrf.go +++ /dev/null @@ -1,18 +0,0 @@ -package crypto - -import "github.com/minio/blake2b-simd" - -// VRFPi is the proof output from running a VRF. -type VRFPi []byte - -type ElectionProof struct { - _ struct{} `cbor:",toarray"` - // A proof output by running a VRF on the VRFProof of the parent ticket - VRFProof VRFPi -} - -// Digest returns the digest (hash) of a proof, for use generating challenges etc. -func (p VRFPi) Digest() [32]byte { - proofDigest := blake2b.Sum256(p) - return proofDigest -} diff --git a/internal/pkg/discovery/bootstrap.go b/internal/pkg/discovery/bootstrap.go deleted file mode 100644 index c9888a23dd..0000000000 --- a/internal/pkg/discovery/bootstrap.go +++ /dev/null @@ -1,170 +0,0 @@ -package discovery - -import ( - "context" - "math/rand" - "sync" - "time" - - logging "github.com/ipfs/go-log/v2" - host "github.com/libp2p/go-libp2p-core/host" - inet "github.com/libp2p/go-libp2p-core/network" - peer "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/routing" - dht "github.com/libp2p/go-libp2p-kad-dht" - - "github.com/filecoin-project/go-filecoin/internal/pkg/util/moresync" -) - -var logBootstrap = logging.Logger("net.bootstrap") -var filecoinDHTBootstrapConfig = dht.BootstrapConfig{ - // Recommended initial options from issu #1947 - Queries: 2, - Period: 5 * time.Minute, - Timeout: time.Minute, -} - -// Bootstrapper attempts to keep the p2p host connected to the filecoin network -// by keeping a minimum threshold of connections. If the threshold isn't met it -// connects to a random subset of the bootstrap peers. It does not use peer routing -// to discover new peers. To stop a Bootstrapper cancel the context passed in Start() -// or call Stop(). -type Bootstrapper struct { - // Config - // MinPeerThreshold is the number of connections it attempts to maintain. 
- MinPeerThreshold int - // Peers to connect to if we fall below the threshold. - bootstrapPeers []peer.AddrInfo - // Period is the interval at which it periodically checks to see - // if the threshold is maintained. - Period time.Duration - // ConnectionTimeout is how long to wait before timing out a connection attempt. - ConnectionTimeout time.Duration - - // Dependencies - h host.Host - d inet.Dialer - r routing.Routing - // Does the work. Usually Bootstrapper.bootstrap. Argument is a slice of - // currently-connected peers (so it won't attempt to reconnect). - Bootstrap func([]peer.ID) - - // Bookkeeping - ticker *time.Ticker - ctx context.Context - cancel context.CancelFunc - dhtBootStarted bool - filecoinPeers *moresync.Latch -} - -// NewBootstrapper returns a new Bootstrapper that will attempt to keep connected -// to the filecoin network by connecting to the given bootstrap peers. -func NewBootstrapper(bootstrapPeers []peer.AddrInfo, h host.Host, d inet.Dialer, r routing.Routing, minPeer int, period time.Duration) *Bootstrapper { - b := &Bootstrapper{ - MinPeerThreshold: minPeer, - bootstrapPeers: bootstrapPeers, - Period: period, - ConnectionTimeout: 20 * time.Second, - - h: h, - d: d, - r: r, - - filecoinPeers: moresync.NewLatch(uint(minPeer)), - } - b.Bootstrap = b.bootstrap - return b -} - -// Start starts the Bootstrapper bootstrapping. Cancel `ctx` or call Stop() to stop it. -func (b *Bootstrapper) Start(ctx context.Context) { - b.ctx, b.cancel = context.WithCancel(ctx) - b.ticker = time.NewTicker(b.Period) - - go func() { - defer b.ticker.Stop() - - for { - select { - case <-b.ctx.Done(): - return - case <-b.ticker.C: - b.Bootstrap(b.d.Peers()) - } - } - }() -} - -// Stop stops the Bootstrapper. -func (b *Bootstrapper) Stop() { - if b.cancel != nil { - b.cancel() - } -} - -// bootstrap does the actual work. 
If the number of connected peers -// has fallen below b.MinPeerThreshold it will attempt to connect to -// a random subset of its bootstrap peers. -func (b *Bootstrapper) bootstrap(currentPeers []peer.ID) { - peersNeeded := b.MinPeerThreshold - len(currentPeers) - if peersNeeded < 1 { - return - } - - ctx, cancel := context.WithTimeout(b.ctx, b.ConnectionTimeout) - var wg sync.WaitGroup - defer func() { - wg.Wait() - // After connecting to bootstrap peers, bootstrap the DHT. - // DHT Bootstrap is a persistent process so only do this once. - if !b.dhtBootStarted { - b.dhtBootStarted = true - err := b.bootstrapIpfsRouting() - if err != nil { - logBootstrap.Warnf("got error trying to bootstrap Routing: %s. Peer discovery may suffer.", err.Error()) - } - } - cancel() - }() - - peersAttempted := 0 - for _, i := range rand.Perm(len(b.bootstrapPeers)) { - pinfo := b.bootstrapPeers[i] - // Don't try to connect to an already connected peer. - if hasPID(currentPeers, pinfo.ID) { - continue - } - - wg.Add(1) - go func() { - if err := b.h.Connect(ctx, pinfo); err != nil { - logBootstrap.Errorf("got error trying to connect to bootstrap node %+v: %s", pinfo, err.Error()) - } - wg.Done() - }() - peersAttempted++ - if peersAttempted == peersNeeded { - return - } - } - logBootstrap.Warnf("not enough bootstrap nodes to maintain %d connections (current connections: %d)", b.MinPeerThreshold, len(currentPeers)) -} - -func hasPID(pids []peer.ID, pid peer.ID) bool { - for _, p := range pids { - if p == pid { - return true - } - } - return false -} - -func (b *Bootstrapper) bootstrapIpfsRouting() error { - dht, ok := b.r.(*dht.IpfsDHT) - if !ok { - // No bootstrapping to do exit quietly. 
- return nil - } - - return dht.BootstrapWithConfig(b.ctx, filecoinDHTBootstrapConfig) -} diff --git a/internal/pkg/discovery/bootstrap_test.go b/internal/pkg/discovery/bootstrap_test.go deleted file mode 100644 index 19fd635dbf..0000000000 --- a/internal/pkg/discovery/bootstrap_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package discovery - -import ( - "context" - "sync" - "testing" - "time" - - offroute "github.com/ipfs/go-ipfs-routing/offline" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func panicConnect(_ context.Context, _ peer.AddrInfo) error { panic("shouldn't be called") } -func nopPeers() []peer.ID { return []peer.ID{} } -func panicPeers() []peer.ID { panic("shouldn't be called") } - -type blankValidator struct{} - -func (blankValidator) Validate(_ string, _ []byte) error { return nil } -func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil } - -func TestBootstrapperStartAndStop(t *testing.T) { - tf.UnitTest(t) - - fakeHost := th.NewFakeHost() - fakeDialer := &th.FakeDialer{PeersImpl: nopPeers} - fakeRouter := offroute.NewOfflineRouter(repo.NewInMemoryRepo().Datastore(), blankValidator{}) - - // Check that Start() causes Bootstrap() to be periodically called and - // that canceling the context causes it to stop being called. Do this - // by stubbing out Bootstrap to keep a count of the number of times it - // is called and to cancel its context after several calls. 
- b := NewBootstrapper([]peer.AddrInfo{}, fakeHost, fakeDialer, fakeRouter, 0, 200*time.Millisecond) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // protects callCount - var lk sync.Mutex - callCount := 0 - b.Bootstrap = func([]peer.ID) { - lk.Lock() - defer lk.Unlock() - callCount++ - if callCount == 3 { - - // If b.Period is configured to be a too small, b.ticker will tick - // again before the context's done-channel sees a value. This - // results in a callCount of 4 instead of 3. - cancel() - } - } - - b.Start(ctx) - time.Sleep(1000 * time.Millisecond) - - lk.Lock() - defer lk.Unlock() - assert.Equal(t, 3, callCount) -} - -func TestBootstrapperBootstrap(t *testing.T) { - tf.UnitTest(t) - - t.Run("Doesn't connect if already have enough peers", func(t *testing.T) { - fakeHost := &th.FakeHost{ConnectImpl: panicConnect} - fakeDialer := &th.FakeDialer{PeersImpl: panicPeers} - fakeRouter := offroute.NewOfflineRouter(repo.NewInMemoryRepo().Datastore(), blankValidator{}) - ctx := context.Background() - - b := NewBootstrapper([]peer.AddrInfo{}, fakeHost, fakeDialer, fakeRouter, 1, time.Minute) - currentPeers := []peer.ID{th.RequireRandomPeerID(t)} // Have 1 - b.ctx = ctx - assert.NotPanics(t, func() { b.bootstrap(currentPeers) }) - }) - - var lk sync.Mutex - var connectCount int - countingConnect := func(context.Context, peer.AddrInfo) error { - lk.Lock() - defer lk.Unlock() - connectCount++ - return nil - } - - t.Run("Connects if don't have enough peers", func(t *testing.T) { - fakeHost := &th.FakeHost{ConnectImpl: countingConnect} - lk.Lock() - connectCount = 0 - lk.Unlock() - fakeDialer := &th.FakeDialer{PeersImpl: panicPeers} - fakeRouter := offroute.NewOfflineRouter(repo.NewInMemoryRepo().Datastore(), blankValidator{}) - - bootstrapPeers := []peer.AddrInfo{ - {ID: th.RequireRandomPeerID(t)}, - {ID: th.RequireRandomPeerID(t)}, - } - b := NewBootstrapper(bootstrapPeers, fakeHost, fakeDialer, fakeRouter, 3, time.Minute) - b.ctx = 
context.Background() - currentPeers := []peer.ID{th.RequireRandomPeerID(t)} // Have 1 - b.bootstrap(currentPeers) - time.Sleep(20 * time.Millisecond) - lk.Lock() - assert.Equal(t, 2, connectCount) - lk.Unlock() - }) - - t.Run("Doesn't try to connect to an already connected peer", func(t *testing.T) { - fakeHost := &th.FakeHost{ConnectImpl: countingConnect} - lk.Lock() - connectCount = 0 - lk.Unlock() - fakeDialer := &th.FakeDialer{PeersImpl: panicPeers} - fakeRouter := offroute.NewOfflineRouter(repo.NewInMemoryRepo().Datastore(), blankValidator{}) - - connectedPeerID := th.RequireRandomPeerID(t) - bootstrapPeers := []peer.AddrInfo{ - {ID: connectedPeerID}, - } - - b := NewBootstrapper(bootstrapPeers, fakeHost, fakeDialer, fakeRouter, 2, time.Minute) // Need 2 bootstrap peers. - b.ctx = context.Background() - currentPeers := []peer.ID{connectedPeerID} // Have 1, which is the bootstrap peer. - b.bootstrap(currentPeers) - time.Sleep(20 * time.Millisecond) - lk.Lock() - assert.Equal(t, 0, connectCount) - lk.Unlock() - }) -} diff --git a/internal/pkg/discovery/hello_protocol.go b/internal/pkg/discovery/hello_protocol.go deleted file mode 100644 index 283885eac4..0000000000 --- a/internal/pkg/discovery/hello_protocol.go +++ /dev/null @@ -1,284 +0,0 @@ -package discovery - -import ( - "context" - "fmt" - "io/ioutil" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" - net "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - ma "github.com/multiformats/go-multiaddr" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/metrics" -) - -var log = logging.Logger("/fil/hello") - -// helloProtocolID is the libp2p protocol identifier for the hello protocol. -const helloProtocolID = "/fil/hello/1.0.0" - -var genesisErrCt = metrics.NewInt64Counter("hello_genesis_error", "Number of errors encountered in hello protocol due to incorrect genesis block") -var helloMsgErrCt = metrics.NewInt64Counter("hello_message_error", "Number of errors encountered in hello protocol due to malformed message") - -// HelloMessage is the data structure of a single message in the hello protocol. -type HelloMessage struct { - _ struct{} `cbor:",toarray"` - HeaviestTipSetCids block.TipSetKey - HeaviestTipSetHeight abi.ChainEpoch - HeaviestTipSetWeight fbig.Int - GenesisHash e.Cid -} - -// LatencyMessage is written in response to a hello message for measuring peer -// latency. -type LatencyMessage struct { - _ struct{} `cbor:",toarray"` - TArrival int64 - TSent int64 -} - -// HelloProtocolHandler implements the 'Hello' protocol handler. -// -// Upon connecting to a new node, we send them a message -// containing some information about the state of our chain, -// and receive the same information from them. This is used to -// initiate a chainsync and detect connections to forks. -type HelloProtocolHandler struct { - host host.Host - - genesis cid.Cid - - // peerDiscovered is called when new peers tell us about their chain - peerDiscovered peerDiscoveredCallback - - // is used to retrieve the current heaviest tipset - // for filling out our hello messages. - getHeaviestTipSet getTipSetFunc - - networkName string -} - -type peerDiscoveredCallback func(ci *block.ChainInfo) - -type getTipSetFunc func() (block.TipSet, error) - -// NewHelloProtocolHandler creates a new instance of the hello protocol `Handler` and registers it to -// the given `host.Host`. 
-func NewHelloProtocolHandler(h host.Host, gen cid.Cid, networkName string) *HelloProtocolHandler { - return &HelloProtocolHandler{ - host: h, - genesis: gen, - networkName: networkName, - } -} - -// Register registers the handler with the network. -func (h *HelloProtocolHandler) Register(peerDiscoveredCallback peerDiscoveredCallback, getHeaviestTipSet getTipSetFunc) { - // register callbacks - h.peerDiscovered = peerDiscoveredCallback - h.getHeaviestTipSet = getHeaviestTipSet - - // register a handle for when a new connection against someone is created - h.host.SetStreamHandler(helloProtocolID, h.handleNewStream) - - // register for connection notifications - h.host.Network().Notify((*helloProtocolNotifiee)(h)) -} - -func (h *HelloProtocolHandler) handleNewStream(s net.Stream) { - defer s.Close() // nolint: errcheck - ctx := context.Background() - hello, err := h.receiveHello(ctx, s) - if err != nil { - helloMsgErrCt.Inc(ctx, 1) - log.Debugf("failed to receive hello message:%s", err) - // can't process a hello received in error, but leave this connection - // open because we connections are innocent until proven guilty - // (with bad genesis) - return - } - latencyMsg := &LatencyMessage{TArrival: time.Now().UnixNano()} - - // process the hello message - from := s.Conn().RemotePeer() - ci, err := h.processHelloMessage(from, hello) - switch { - // no error - case err == nil: - // notify the local node of the new `block.ChainInfo` - h.peerDiscovered(ci) - // processing errors - case err == ErrBadGenesis: - log.Debugf("peer genesis cid: %s does not match ours: %s, disconnecting from peer: %s", &hello.GenesisHash, h.genesis, from) - genesisErrCt.Inc(context.Background(), 1) - _ = s.Conn().Close() - return - default: - // Note: we do not know why it failed, but we do not wish to shut down all protocols because of it - log.Error(err) - } - - // Send the latendy message - latencyMsg.TSent = time.Now().UnixNano() - err = h.sendLatency(latencyMsg, s) - if err != nil { - 
log.Error(err) - } - - return -} - -// ErrBadGenesis is the error returned when a mismatch in genesis blocks happens. -var ErrBadGenesis = fmt.Errorf("bad genesis block") - -func (h *HelloProtocolHandler) processHelloMessage(from peer.ID, msg *HelloMessage) (*block.ChainInfo, error) { - if !msg.GenesisHash.Equals(h.genesis) { - return nil, ErrBadGenesis - } - - // Note: both the sender and the source are the sender for the hello messages - return block.NewChainInfo(from, from, msg.HeaviestTipSetCids, msg.HeaviestTipSetHeight), nil -} - -func (h *HelloProtocolHandler) getOurHelloMessage() (*HelloMessage, error) { - heaviest, err := h.getHeaviestTipSet() - if err != nil { - return nil, err - } - height, err := heaviest.Height() - if err != nil { - return nil, err - } - weight, err := heaviest.ParentWeight() - if err != nil { - return nil, err - } - - return &HelloMessage{ - GenesisHash: e.NewCid(h.genesis), - HeaviestTipSetCids: heaviest.Key(), - HeaviestTipSetHeight: height, - HeaviestTipSetWeight: weight, - }, nil -} - -func (h *HelloProtocolHandler) receiveHello(ctx context.Context, s net.Stream) (*HelloMessage, error) { - var hello HelloMessage - // Read cbor bytes from stream into hello message - mr := cborutil.NewMsgReader(s) - err := mr.ReadMsg(&hello) - return &hello, err -} - -func (h *HelloProtocolHandler) receiveLatency(ctx context.Context, s net.Stream) (*LatencyMessage, error) { - var latency LatencyMessage - rawLatency, err := ioutil.ReadAll(s) - if err != nil { - return nil, err - } - err = encoding.Decode(rawLatency, &latency) - if err != nil { - return nil, err - } - return &latency, nil -} - -// sendHello send a hello message on stream `s`. 
-func (h *HelloProtocolHandler) sendHello(s net.Stream) error { - msg, err := h.getOurHelloMessage() - if err != nil { - return err - } - msgRaw, err := encoding.Encode(msg) - if err != nil { - return err - } - n, err := s.Write(msgRaw) - if err != nil { - return err - } - if n != len(msgRaw) { - return fmt.Errorf("could not write all hello message bytes") - } - return nil -} - -func (h *HelloProtocolHandler) sendLatency(msg *LatencyMessage, s net.Stream) error { - msgRaw, err := encoding.Encode(msg) - if err != nil { - return err - } - n, err := s.Write(msgRaw) - if err != nil { - return err - } - if n != len(msgRaw) { - return fmt.Errorf("could not write all latency message bytes") - } - return nil -} - -// Note: hide `net.Notifyee` impl using a new-type -type helloProtocolNotifiee HelloProtocolHandler - -const helloTimeout = time.Second * 10 - -func (hn *helloProtocolNotifiee) asHandler() *HelloProtocolHandler { - return (*HelloProtocolHandler)(hn) -} - -// -// `net.Notifyee` impl for `helloNotify` -// - -func (hn *helloProtocolNotifiee) Connected(n net.Network, c net.Conn) { - // Connected is invoked when a connection is made to a libp2p node. 
- // - // - open stream on connection - // - send HelloMessage` on stream - // - read LatencyMessage response on stream - // - // Terminate the connection if it has a different genesis block - go func() { - // add timeout - ctx, cancel := context.WithTimeout(context.Background(), helloTimeout) - defer cancel() - s, err := hn.asHandler().host.NewStream(ctx, c.RemotePeer(), helloProtocolID) - if err != nil { - // If peer does not do hello keep connection open - return - } - defer func() { _ = s.Close() }() - // send out the hello message - err = hn.asHandler().sendHello(s) - if err != nil { - log.Debugf("failed to send hello handshake to peer %s: %s", c.RemotePeer(), err) - // Don't close connection for failed hello protocol impl - return - } - - // now receive latency message - _, err = hn.asHandler().receiveLatency(ctx, s) - if err != nil { - log.Debugf("failed to receive hello latency msg from peer %s: %s", c.RemotePeer(), err) - return - } - - }() -} - -func (hn *helloProtocolNotifiee) Listen(n net.Network, a ma.Multiaddr) { /* empty */ } -func (hn *helloProtocolNotifiee) ListenClose(n net.Network, a ma.Multiaddr) { /* empty */ } -func (hn *helloProtocolNotifiee) Disconnected(n net.Network, c net.Conn) { /* empty */ } -func (hn *helloProtocolNotifiee) OpenedStream(n net.Network, s net.Stream) { /* empty */ } -func (hn *helloProtocolNotifiee) ClosedStream(n net.Network, s net.Stream) { /* empty */ } diff --git a/internal/pkg/discovery/hello_protocol_test.go b/internal/pkg/discovery/hello_protocol_test.go deleted file mode 100644 index 69b690d3a8..0000000000 --- a/internal/pkg/discovery/hello_protocol_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package discovery_test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - 
"github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/discovery" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -type mockHelloCallback struct { - mock.Mock -} - -func (msb *mockHelloCallback) HelloCallback(ci *block.ChainInfo) { - msb.Called(ci.Sender, ci.Head, ci.Height) -} - -type mockHeaviestGetter struct { - heaviest block.TipSet -} - -func (mhg *mockHeaviestGetter) getHeaviestTipSet() (block.TipSet, error) { - return mhg.heaviest, nil -} - -func TestHelloHandshake(t *testing.T) { - tf.UnitTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - mn, err := mocknet.WithNPeers(ctx, 2) - require.NoError(t, err) - - a := mn.Hosts()[0] - b := mn.Hosts()[1] - - genesisA := &block.Block{} - - heavy1 := block.RequireNewTipSet(t, &block.Block{Height: 2, Ticket: block.Ticket{VRFProof: []byte{0}}}) - heavy2 := block.RequireNewTipSet(t, &block.Block{Height: 3, Ticket: block.Ticket{VRFProof: []byte{1}}}) - - msc1, msc2 := new(mockHelloCallback), new(mockHelloCallback) - hg1, hg2 := &mockHeaviestGetter{heavy1}, &mockHeaviestGetter{heavy2} - - discovery.NewHelloProtocolHandler(a, genesisA.Cid(), "").Register(msc1.HelloCallback, hg1.getHeaviestTipSet) - discovery.NewHelloProtocolHandler(b, genesisA.Cid(), "").Register(msc2.HelloCallback, hg2.getHeaviestTipSet) - - msc1.On("HelloCallback", b.ID(), heavy2.Key(), abi.ChainEpoch(3)).Return() - msc2.On("HelloCallback", a.ID(), heavy1.Key(), abi.ChainEpoch(2)).Return() - - require.NoError(t, mn.LinkAll()) - require.NoError(t, mn.ConnectAllButSelf()) - - require.NoError(t, th.WaitForIt(10, 50*time.Millisecond, func() (bool, error) { - var msc1Done bool - var msc2Done bool - for _, call := range msc1.Calls { - if call.Method == 
"HelloCallback" { - if _, differences := msc1.ExpectedCalls[0].Arguments.Diff(call.Arguments); differences == 0 { - msc1Done = true - break - } - } - } - for _, call := range msc2.Calls { - if call.Method == "HelloCallback" { - if _, differences := msc2.ExpectedCalls[0].Arguments.Diff(call.Arguments); differences == 0 { - msc2Done = true - break - } - } - } - - return msc1Done && msc2Done, nil - })) -} - -func TestHelloBadGenesis(t *testing.T) { - tf.UnitTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - mn, err := mocknet.WithNPeers(ctx, 2) - assert.NoError(t, err) - - a := mn.Hosts()[0] - b := mn.Hosts()[1] - - builder := chain.NewBuilder(t, address.Undef) - - genesisA := builder.AppendBlockOn(block.UndefTipSet) - genesisB := builder.AppendBlockOn(block.UndefTipSet) - - heavy1 := block.RequireNewTipSet(t, &block.Block{Height: 2, Ticket: block.Ticket{VRFProof: []byte{0}}}) - heavy2 := block.RequireNewTipSet(t, &block.Block{Height: 3, Ticket: block.Ticket{VRFProof: []byte{1}}}) - - msc1, msc2 := new(mockHelloCallback), new(mockHelloCallback) - hg1, hg2 := &mockHeaviestGetter{heavy1}, &mockHeaviestGetter{heavy2} - - discovery.NewHelloProtocolHandler(a, genesisA.Cid(), "").Register(msc1.HelloCallback, hg1.getHeaviestTipSet) - discovery.NewHelloProtocolHandler(b, genesisB.Cid(), "").Register(msc2.HelloCallback, hg2.getHeaviestTipSet) - - msc1.On("HelloCallback", mock.Anything, mock.Anything, mock.Anything).Return() - msc2.On("HelloCallback", mock.Anything, mock.Anything, mock.Anything).Return() - - require.NoError(t, mn.LinkAll()) - require.NoError(t, mn.ConnectAllButSelf()) - - time.Sleep(time.Millisecond * 50) - - msc1.AssertNumberOfCalls(t, "HelloCallback", 0) - msc2.AssertNumberOfCalls(t, "HelloCallback", 0) -} - -func TestHelloMultiBlock(t *testing.T) { - tf.UnitTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - mn, err := mocknet.WithNPeers(ctx, 2) - assert.NoError(t, err) - - a := 
mn.Hosts()[0] - b := mn.Hosts()[1] - - builder := chain.NewBuilder(t, address.Undef) - - genesisTipset := builder.NewGenesis() - assert.Equal(t, 1, genesisTipset.Len()) - - heavy1 := builder.AppendOn(genesisTipset, 3) - heavy1 = builder.AppendOn(heavy1, 3) - heavy2 := builder.AppendOn(heavy1, 3) - - msc1, msc2 := new(mockHelloCallback), new(mockHelloCallback) - hg1, hg2 := &mockHeaviestGetter{heavy1}, &mockHeaviestGetter{heavy2} - - discovery.NewHelloProtocolHandler(a, genesisTipset.At(0).Cid(), "").Register(msc1.HelloCallback, hg1.getHeaviestTipSet) - discovery.NewHelloProtocolHandler(b, genesisTipset.At(0).Cid(), "").Register(msc2.HelloCallback, hg2.getHeaviestTipSet) - - msc1.On("HelloCallback", b.ID(), heavy2.Key(), abi.ChainEpoch(3)).Return() - msc2.On("HelloCallback", a.ID(), heavy1.Key(), abi.ChainEpoch(2)).Return() - - assert.NoError(t, mn.LinkAll()) - assert.NoError(t, mn.ConnectAllButSelf()) - - time.Sleep(time.Millisecond * 50) - - msc1.AssertExpectations(t) - msc2.AssertExpectations(t) -} diff --git a/internal/pkg/discovery/noop_discovery.go b/internal/pkg/discovery/noop_discovery.go deleted file mode 100644 index a1daaf63fb..0000000000 --- a/internal/pkg/discovery/noop_discovery.go +++ /dev/null @@ -1,29 +0,0 @@ -package discovery - -import ( - "context" - "time" - - libp2pdisc "github.com/libp2p/go-libp2p-core/discovery" - pstore "github.com/libp2p/go-libp2p-peerstore" // nolint: staticcheck -) - -// NoopDiscovery satisfies the discovery interface without doing anything -type NoopDiscovery struct{} - -// FindPeers returns a dead channel that is always closed -func (sd *NoopDiscovery) FindPeers(ctx context.Context, ns string, opts ...libp2pdisc.Option) (<-chan pstore.PeerInfo, error) { // nolint: staticcheck - closedCh := make(chan pstore.PeerInfo) // nolint: staticcheck - // the output is immediately closed, discovery requests end immediately - // Callstack: - // 
https://github.com/libp2p/go-libp2p-pubsub/blob/55f4ad6eb98b9e617e46641e7078944781abb54c/discovery.go#L157 - // https://github.com/libp2p/go-libp2p-pubsub/blob/55f4ad6eb98b9e617e46641e7078944781abb54c/discovery.go#L287 - // https://github.com/libp2p/go-libp2p-discovery/blob/master/backoffconnector.go#L52 - close(closedCh) - return closedCh, nil -} - -// Advertise does nothing and returns 1 hour. -func (sd *NoopDiscovery) Advertise(ctx context.Context, ns string, opts ...libp2pdisc.Option) (time.Duration, error) { // nolint: staticcheck - return time.Hour, nil -} diff --git a/internal/pkg/discovery/peer_tracker.go b/internal/pkg/discovery/peer_tracker.go deleted file mode 100644 index 70067b860d..0000000000 --- a/internal/pkg/discovery/peer_tracker.go +++ /dev/null @@ -1,141 +0,0 @@ -package discovery - -import ( - "sort" - "sync" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/pkg/errors" -) - -var logPeerTracker = logging.Logger("peer-tracker") - -// PeerTracker is used to record a subset of peers. Its methods are thread safe. -// It is designed to plug directly into libp2p disconnect notifications to -// automatically register dropped connections. -type PeerTracker struct { - // mu protects peers - mu sync.RWMutex - - // self tracks the ID of the peer tracker's owner - self peer.ID - - // peers maps peer.IDs to info about their chains - peers map[peer.ID]*block.ChainInfo - trusted map[peer.ID]struct{} -} - -// NewPeerTracker creates a peer tracker. 
-func NewPeerTracker(self peer.ID, trust ...peer.ID) *PeerTracker { - trustedSet := make(map[peer.ID]struct{}, len(trust)) - for _, t := range trust { - trustedSet[t] = struct{}{} - } - return &PeerTracker{ - peers: make(map[peer.ID]*block.ChainInfo), - trusted: trustedSet, - self: self, - } -} - -// SelectHead returns the chain info from trusted peers with the greatest height. -// An error is returned if no peers are in the tracker. -func (tracker *PeerTracker) SelectHead() (*block.ChainInfo, error) { - heads := tracker.listTrusted() - if len(heads) == 0 { - return nil, errors.New("no peers tracked") - } - sort.Slice(heads, func(i, j int) bool { return heads[i].Height > heads[j].Height }) - return heads[0], nil -} - -// Track adds information about a given peer.ID -func (tracker *PeerTracker) Track(ci *block.ChainInfo) { - tracker.mu.Lock() - defer tracker.mu.Unlock() - - _, tracking := tracker.peers[ci.Sender] - _, trusted := tracker.trusted[ci.Sender] - tracker.peers[ci.Sender] = ci - logPeerTracker.Infow("Track peer", "chainInfo", ci, "new", !tracking, "count", len(tracker.peers), "trusted", trusted) -} - -// Self returns the peer tracker's owner ID -func (tracker *PeerTracker) Self() peer.ID { - return tracker.self -} - -// List returns the chain info of the currently tracked peers (both trusted and untrusted). -// The info tracked by the tracker can change arbitrarily after this is called -- there is no -// guarantee that the peers returned will be tracked when they are used by the caller and no -// guarantee that the chain info is up to date. -func (tracker *PeerTracker) List() []*block.ChainInfo { - tracker.mu.Lock() - defer tracker.mu.Unlock() - - var tracked []*block.ChainInfo - for _, ci := range tracker.peers { - tracked = append(tracked, ci) - } - out := make([]*block.ChainInfo, len(tracked)) - copy(out, tracked) - return out -} - -// Remove removes a peer ID from the tracker. 
-func (tracker *PeerTracker) Remove(pid peer.ID) { - tracker.mu.Lock() - defer tracker.mu.Unlock() - - _, trusted := tracker.trusted[pid] - if _, tracking := tracker.peers[pid]; tracking { - delete(tracker.peers, pid) - if trusted { - logPeerTracker.Warnw("Dropping peer", "peer", pid.Pretty(), "trusted", trusted) - } else { - logPeerTracker.Infow("Dropping peer", "peer", pid.Pretty(), "trusted", trusted) - } - } -} - -// RegisterDisconnect registers a tracker remove operation as a libp2p -// "Disconnected" network event callback. -func (tracker *PeerTracker) RegisterDisconnect(ntwk network.Network) { - notifee := &network.NotifyBundle{} - notifee.DisconnectedF = func(network network.Network, conn network.Conn) { - pid := conn.RemotePeer() - tracker.Remove(pid) - } - ntwk.Notify(notifee) -} - -// trustedPeers returns a slice of peers trusted by the PeerTracker. trustedPeers remain constant after -// the PeerTracker has been initialized. -func (tracker *PeerTracker) trustedPeers() []peer.ID { - var peers []peer.ID - for p := range tracker.trusted { - peers = append(peers, p) - } - return peers -} - -// listTrusted returns the chain info of the trusted tracked peers. The info tracked by the tracker can -// change arbitrarily after this is called -- there is no guarantee that the peers returned will be -// tracked when they are used by the caller and no guarantee that the chain info is up to date. 
-func (tracker *PeerTracker) listTrusted() []*block.ChainInfo { - tracker.mu.Lock() - defer tracker.mu.Unlock() - - var tracked []*block.ChainInfo - for p, ci := range tracker.peers { - if _, trusted := tracker.trusted[p]; trusted { - tracked = append(tracked, ci) - } - } - out := make([]*block.ChainInfo, len(tracked)) - copy(out, tracked) - return out -} diff --git a/internal/pkg/discovery/peer_tracker_test.go b/internal/pkg/discovery/peer_tracker_test.go deleted file mode 100644 index 08c0f5ac77..0000000000 --- a/internal/pkg/discovery/peer_tracker_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package discovery_test - -import ( - "context" - "sort" - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/discovery" - "github.com/libp2p/go-libp2p-core/peer" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestPeerTrackerTracks(t *testing.T) { - tf.UnitTest(t) - - tracker := discovery.NewPeerTracker(peer.ID("")) - pid0 := th.RequireIntPeerID(t, 0) - pid1 := th.RequireIntPeerID(t, 1) - pid3 := th.RequireIntPeerID(t, 3) - pid7 := th.RequireIntPeerID(t, 7) - - ci0 := block.NewChainInfo(pid0, pid0, block.NewTipSetKey(types.CidFromString(t, "somecid")), 6) - ci1 := block.NewChainInfo(pid1, pid1, block.NewTipSetKey(), 0) - ci3 := block.NewChainInfo(pid3, pid3, block.NewTipSetKey(), 0) - ci7 := block.NewChainInfo(pid7, pid7, block.NewTipSetKey(), 0) - - tracker.Track(ci0) - tracker.Track(ci1) - tracker.Track(ci3) - tracker.Track(ci7) - - tracked := tracker.List() - sort.Sort(block.CISlice(tracked)) - expected := []*block.ChainInfo{ci0, ci1, ci3, ci7} - sort.Sort(block.CISlice(expected)) - 
assert.Equal(t, expected, tracked) - -} - -func TestPeerTrackerSelectHead(t *testing.T) { - tf.UnitTest(t) - - pid0 := th.RequireIntPeerID(t, 0) - pid1 := th.RequireIntPeerID(t, 1) - pid2 := th.RequireIntPeerID(t, 2) - pid3 := th.RequireIntPeerID(t, 3) - - ci0 := block.NewChainInfo(pid0, pid0, block.NewTipSetKey(types.CidFromString(t, "somecid0")), 6) - ci1 := block.NewChainInfo(pid1, pid1, block.NewTipSetKey(types.CidFromString(t, "somecid1")), 10) - ci2 := block.NewChainInfo(pid2, pid2, block.NewTipSetKey(types.CidFromString(t, "somecid2")), 7) - ci3 := block.NewChainInfo(pid3, pid3, block.NewTipSetKey(types.CidFromString(t, "somecid3")), 9) - - // trusting pid2 and pid3 - tracker := discovery.NewPeerTracker(pid2, pid3) - tracker.Track(ci0) - tracker.Track(ci1) - tracker.Track(ci2) - tracker.Track(ci3) - - // select the highest head - head, err := tracker.SelectHead() - assert.NoError(t, err) - assert.Equal(t, head.Head, ci3.Head) -} - -func TestPeerTrackerRemove(t *testing.T) { - tf.UnitTest(t) - - tracker := discovery.NewPeerTracker(peer.ID("")) - pid0 := th.RequireIntPeerID(t, 0) - pid1 := th.RequireIntPeerID(t, 1) - pid3 := th.RequireIntPeerID(t, 3) - pid7 := th.RequireIntPeerID(t, 7) - - ci0 := block.NewChainInfo(pid0, pid0, block.NewTipSetKey(types.CidFromString(t, "somecid")), 6) - ci1 := block.NewChainInfo(pid1, pid1, block.NewTipSetKey(), 0) - ci3 := block.NewChainInfo(pid3, pid3, block.NewTipSetKey(), 0) - ci7 := block.NewChainInfo(pid7, pid7, block.NewTipSetKey(), 0) - - tracker.Track(ci0) - tracker.Track(ci1) - tracker.Track(ci3) - tracker.Track(ci7) - - tracker.Remove(pid1) - tracker.Remove(pid3) - tracker.Remove(pid7) - - tracked := tracker.List() - expected := []*block.ChainInfo{ci0} - assert.Equal(t, expected, tracked) -} - -func TestPeerTrackerNetworkDisconnect(t *testing.T) { - tf.UnitTest(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - mn, err := mocknet.FullMeshConnected(ctx, 4) - require.NoError(t, err) - - 
self := mn.Hosts()[0] - a := mn.Hosts()[1] - b := mn.Hosts()[2] - c := mn.Hosts()[3] - - selfID := self.ID() - aID := a.ID() - bID := b.ID() - cID := c.ID() - - aCI := block.NewChainInfo(aID, aID, block.NewTipSetKey(), 0) - bCI := block.NewChainInfo(bID, bID, block.NewTipSetKey(), 0) - - // self is the tracking node - // self tracks peers a and b - // self does not track peer c - tracker := discovery.NewPeerTracker(peer.ID("")) - tracker.Track(aCI) - tracker.Track(bCI) - - // register tracker OnDisconnect callback in self's network - tracker.RegisterDisconnect(self.Network()) - - // disconnect from tracked a and untracked c - require.NoError(t, mn.DisconnectPeers(selfID, aID)) - require.NoError(t, mn.DisconnectPeers(selfID, cID)) - - tracked := tracker.List() - assert.Equal(t, []*block.ChainInfo{bCI}, tracked) -} diff --git a/internal/pkg/drand/drand.go b/internal/pkg/drand/drand.go deleted file mode 100644 index 8cb454f5c5..0000000000 --- a/internal/pkg/drand/drand.go +++ /dev/null @@ -1,26 +0,0 @@ -package drand - -import ( - "context" - "time" -) - -// IFace is the standard inferface for interacting with the drand network -type IFace interface { - ReadEntry(ctx context.Context, drandRound Round) (*Entry, error) - VerifyEntry(parent, child *Entry) (bool, error) - FetchGroupConfig(addresses []string, secure bool, overrideGroupAddrs bool) ([]string, [][]byte, uint64, int, error) - StartTimeOfRound(round Round) time.Time - RoundsInInterval(startTime, endTime time.Time) []Round - FirstFilecoinRound() Round -} - -// Round is a type for recording drand round indexes -type Round uint64 - -// A verifiable entry from a beacon chain, carrying round and randomness information. 
-type Entry struct { - _ struct{} `cbor:",toarray"` - Round Round - Data []byte -} diff --git a/internal/pkg/drand/drand_grpc.go b/internal/pkg/drand/drand_grpc.go deleted file mode 100644 index e7b766f27e..0000000000 --- a/internal/pkg/drand/drand_grpc.go +++ /dev/null @@ -1,263 +0,0 @@ -package drand - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/drand/drand/beacon" - "github.com/drand/drand/core" - "github.com/drand/drand/key" - "github.com/drand/drand/net" - "github.com/drand/kyber" - logging "github.com/ipfs/go-log/v2" -) - -var log = logging.Logger("drand") - -// Address points to a drand server -type Address struct { - address string - secure bool -} - -// NewAddress creates a new address -func NewAddress(a string, secure bool) Address { - return Address{a, secure} -} - -// GRPC is a drand client that can fetch and verify from a public drand network -type GRPC struct { - addresses []Address - client *core.Client - key *key.DistPublic - - // The time of the 0th round of the DRAND chain - genesisTime time.Time - // the time of genesis block of the Filecoin chain - filecoinGenesisTime time.Time - // The DRAND round first included in the filecoin blockchain - firstFilecoin Round - // Duration of a round in this DRAND network - roundTime time.Duration - - // internal state - latestEntry *Entry - cache map[Round]*Entry -} - -var _ IFace = &GRPC{} - -// NewGRPC creates a client that will draw randomness from the given addresses. -// distKeyCoeff are hex encoded strings representing a distributed public key -// Behavior is undefined if provided address do not point to Drand servers in the same group. 
-func NewGRPC(addresses []Address, distKeyCoeff [][]byte, drandGenTime time.Time, filecoinGenTime time.Time, rd time.Duration) (*GRPC, error) { - distKey, err := groupKeycoefficientsToDistPublic(distKeyCoeff) - if err != nil { - return nil, err - } - - grpc := &GRPC{ - addresses: addresses, - client: core.NewGrpcClient(), - key: distKey, - genesisTime: drandGenTime, - filecoinGenesisTime: filecoinGenTime, - // firstFilecoin set in updateFirsFilecoinRound below - roundTime: rd, - cache: make(map[Round]*Entry), - } - err = grpc.updateFirstFilecoinRound() - if err != nil { - return nil, err - } - return grpc, nil -} - -func (d *GRPC) updateFirstFilecoinRound() error { - // First filecoin round is the first drand round before filecoinGenesisTime - searchStart := d.filecoinGenesisTime.Add(-1 * d.roundTime) - results := d.RoundsInInterval(searchStart, d.filecoinGenesisTime) - if len(results) != 1 { - return fmt.Errorf("found %d drand rounds between filecoinGenTime and filecoinGenTime - drandRountDuration, expected 1", len(results)) - } - d.firstFilecoin = results[0] - return nil -} - -// ReadEntry fetches an entry from one of the drand servers (trying them sequentially) and returns the result. -func (d *GRPC) ReadEntry(ctx context.Context, drandRound Round) (*Entry, error) { - if entry, ok := d.cache[drandRound]; ok { - return entry, nil - } - - // try each address, stopping when we have a key - for _, addr := range d.addresses { - if ctx.Err() != nil { // Don't try any more peers after cancellation. 
- return nil, ctx.Err() - } - // The drand client doesn't accept a context, so is un-cancellable :-( - pub, err := d.client.Public(addr.address, d.key, addr.secure, int(drandRound)) - if err != nil { - log.Warnf("Error fetching drand randomness from %s: %s", addr.address, err) - continue - } - - // Because the client.Public() call can't be cancelled by this context, it can return at any time, - // potentially leading to concurrent state updates below racing a new call to into ReadEntry() (because - // the caller thought it was cancelled already). - // This check will mostly, but not completely securely, avoid this. A robust fix requires avoiding - // concurrent calls to here completely, which ultimately arise from the goroutine in the mining scheduler. - // https://github.com/filecoin-project/go-filecoin/issues/4065 - if ctx.Err() != nil { - return nil, ctx.Err() - } - - entry := &Entry{ - Round: drandRound, - Data: pub.GetSignature(), - } - d.updateLocalState(entry) - return entry, nil - } - return nil, errors.New("could not retrieve drand randomess from any address") -} - -func (d *GRPC) updateLocalState(entry *Entry) { - if d.latestEntry == nil { - d.latestEntry = entry - } - if entry.Round > d.latestEntry.Round { - d.latestEntry = entry - } - d.cache[entry.Round] = entry -} - -// VerifyEntry verifies that the child's signature is a valid signature of the previous entry. -func (d *GRPC) VerifyEntry(parent, child *Entry) (bool, error) { - if len(d.key.Coefficients) == 0 { - return false, fmt.Errorf("no dist key configured") - } - msg := beacon.Message(uint64(child.Round), parent.Data) - err := key.Scheme.VerifyRecovered(d.key.Coefficients[0], msg, child.Data) - if err != nil { - return false, err - } - - return true, nil -} - -// FetchGroupConfig Should only be used when switching to a new drand server group. -// Returns hex encoded group key coefficients that can be used to construct a public key. 
-// If overrideGroupAddrs is true, the given set of addresses will be set as the drand nodes. -// Otherwise drand address config will be set from the retrieved group info. The -// override is useful when the the drand server is behind NAT. -func (d *GRPC) FetchGroupConfig(addresses []string, secure bool, overrideGroupAddrs bool) ([]string, [][]byte, uint64, int, error) { - defaultManager := net.NewCertManager() - client := core.NewGrpcClientFromCert(defaultManager) - - // try each address, stopping when we have a key - for _, addr := range addresses { - groupAddrs, keyCoeffs, genesisTime, roundSeconds, err := fetchGroupServer(client, Address{addr, secure}) - if err != nil { - log.Warnf("Error fetching drand group key from %s: %s", addr, err) - continue - } - d.genesisTime = time.Unix(int64(genesisTime), 0) - d.roundTime = time.Duration(roundSeconds) * time.Second - - distKey, err := groupKeycoefficientsToDistPublic(keyCoeffs) - if err != nil { - return nil, nil, 0, 0, err - } - d.key = distKey - - if overrideGroupAddrs { - d.addresses = drandAddresses(addresses, secure) - } else { - d.addresses = drandAddresses(groupAddrs, secure) - } - - err = d.updateFirstFilecoinRound() // this depends on genesis and round time so recalculate - if err != nil { - return nil, nil, 0, 0, err - } - - return groupAddrs, keyCoeffs, genesisTime, roundSeconds, nil - } - return nil, nil, 0, 0, errors.New("Could not retrieve drand group key from any address") -} - -func drandAddresses(addresses []string, secure bool) []Address { - addrs := make([]Address, len(addresses)) - for i, a := range addresses { - addrs[i] = NewAddress(a, secure) - } - return addrs -} - -func fetchGroupServer(client *core.Client, address Address) ([]string, [][]byte, uint64, int, error) { - groupResp, err := client.Group(address.address, address.secure) - if err != nil { - return nil, nil, 0, 0, err - } - - nodes := groupResp.GetNodes() - addrs := make([]string, len(nodes)) - for i, nd := range nodes { - addrs[i] = 
nd.GetAddress() - } - - return addrs, groupResp.DistKey, groupResp.GenesisTime, int(groupResp.Period), nil -} - -func groupKeycoefficientsToDistPublic(coefficients [][]byte) (*key.DistPublic, error) { - pubKey := key.DistPublic{} - pubKey.Coefficients = make([]kyber.Point, len(coefficients)) - for i, k := range coefficients { - pubKey.Coefficients[i] = key.KeyGroup.Point() - err := pubKey.Coefficients[i].UnmarshalBinary(k) - if err != nil { - return nil, err - } - } - return &pubKey, nil -} - -// FirstFilecoinRound returns the configured first drand round included in the filecoin blockchain -func (d *GRPC) FirstFilecoinRound() Round { - return d.firstFilecoin -} - -// StartTimeOfRound returns the time the given DRAND round will start if it is unskipped -func (d *GRPC) StartTimeOfRound(round Round) time.Time { - return d.genesisTime.Add(d.roundTime * time.Duration(round)) -} - -// RoundsInInterval returns all rounds in the given interval. - -func (d *GRPC) RoundsInInterval(startTime, endTime time.Time) []Round { - return roundsInInterval(startTime, endTime, d.StartTimeOfRound, d.roundTime) -} - -func roundsInInterval(startTime, endTime time.Time, startTimeOfRound func(Round) time.Time, roundDuration time.Duration) []Round { - // Find first round after startTime - genesisTime := startTimeOfRound(Round(0)) - truncatedStartRound := Round(startTime.Sub(genesisTime) / roundDuration) - var round Round - if startTimeOfRound(truncatedStartRound).Equal(startTime) { - round = truncatedStartRound - } else { - round = truncatedStartRound + 1 - } - roundTime := startTimeOfRound(round) - var rounds []Round - // Advance a round time until we hit endTime, adding rounds - for roundTime.Before(endTime) { - rounds = append(rounds, round) - round++ - roundTime = startTimeOfRound(round) - } - return rounds -} diff --git a/internal/pkg/drand/testing.go b/internal/pkg/drand/testing.go deleted file mode 100644 index 404ee2d413..0000000000 --- a/internal/pkg/drand/testing.go +++ /dev/null 
@@ -1,65 +0,0 @@ -package drand - -import ( - "context" - "encoding/binary" - "time" - - ffi "github.com/filecoin-project/filecoin-ffi" -) - -const testDRANDRoundDuration = 25 * time.Second - -// Fake is a fake drand utility that reads and validates entries as specified below -type Fake struct { - // Time of round 0 - GenesisTime time.Time - FirstFilecoin Round -} - -var _ IFace = &Fake{} - -// NewFake sets up a fake drand that starts exactly one testDRANDRoundDuration before -// the provided filecoin genesis time. -func NewFake(filecoinGenTime time.Time) *Fake { - drandGenTime := filecoinGenTime.Add(-1 * testDRANDRoundDuration) - return &Fake{ - GenesisTime: drandGenTime, - FirstFilecoin: Round(0), - } -} - -// ReadEntry immediately returns a drand entry with a signature equal to the -// round number -func (d *Fake) ReadEntry(_ context.Context, drandRound Round) (*Entry, error) { - fakeSigData := make([]byte, ffi.SignatureBytes) - binary.PutUvarint(fakeSigData, uint64(drandRound)) - return &Entry{ - Round: drandRound, - Data: fakeSigData, - }, nil -} - -// VerifyEntry always returns true without error -func (d *Fake) VerifyEntry(parent, child *Entry) (bool, error) { - return true, nil -} - -func (d *Fake) StartTimeOfRound(round Round) time.Time { - return d.GenesisTime.Add(testDRANDRoundDuration * time.Duration(round)) -} - -// RoundsInInterval returns the DRAND round numbers within [startTime, endTime) -// startTime inclusive, endTime exclusive. 
-func (d *Fake) RoundsInInterval(startTime, endTime time.Time) []Round { - return roundsInInterval(startTime, endTime, d.StartTimeOfRound, testDRANDRoundDuration) -} - -func (d *Fake) FirstFilecoinRound() Round { - return d.FirstFilecoin -} - -// FetchGroupConfig returns empty group addresses and key coefficients -func (d *Fake) FetchGroupConfig(_ []string, _, _ bool) ([]string, [][]byte, uint64, int, error) { - return []string{}, [][]byte{}, 0, 0, nil -} diff --git a/internal/pkg/enccid/enc_cid.go b/internal/pkg/enccid/enc_cid.go deleted file mode 100644 index f51f844b34..0000000000 --- a/internal/pkg/enccid/enc_cid.go +++ /dev/null @@ -1,128 +0,0 @@ -package enccid - -import ( - "encoding/json" - "fmt" - - cbor "github.com/fxamacker/cbor/v2" - cid "github.com/ipfs/go-cid" - ipldcbor "github.com/ipfs/go-ipld-cbor" -) - -// Cid is a cid wrapper that implements UnmarshalCBOR and MarshalCBOR. -// From ipld-cbor's perspective it is technically a pointer to a cid, because -// it maps `cbor null-val` <==> `cid.Undef` -type Cid struct { - cid.Cid -} - -// Undef wraps cid.Undef -var Undef = NewCid(cid.Undef) - -// NewCid creates an Cid struct from a cid -func NewCid(c cid.Cid) Cid { - return Cid{c} -} - -// MarshalCBOR converts the wrapped cid to bytes -func (w Cid) MarshalCBOR() ([]byte, error) { - // handle undef cid by writing null - // TODO: remove this handling after removing paths that attempt to encode an Undef CID - // This should never appear on chain, and the only usages are tests. 
- // https://github.com/filecoin-project/go-filecoin/issues/3931 - if w.Equals(cid.Undef) { - return []byte{0xf6}, nil - } - - // tag = 42 - tag0 := byte(0xd8) - tag1 := byte(0x2a) - - raw, err := castCidToBytes(w.Cid) - if err != nil { - return nil, err - } - // because we need to do the cbor tag outside the byte string we are forced - // to write the cbor type-len value for a byte string of raw's length - cborLen, err := cbor.Marshal(len(raw)) - if err != nil { - return nil, err - } - cborLen[0] |= 0x40 // flip major type 0 to major type 2 - prefixLen := len(cborLen) + 2 - - result := make([]byte, len(cborLen)+len(raw)+2, len(cborLen)+len(raw)+2) - result[0] = tag0 - result[1] = tag1 - copy(result[2:prefixLen], cborLen) - copy(result[prefixLen:], raw) - - return result, nil -} - -// UnmarshalCBOR fills the wrapped cid according to the cbor encoded bytes -func (w *Cid) UnmarshalCBOR(cborBs []byte) error { - if len(cborBs) == 0 { - return fmt.Errorf("nil bytes does not decode to cid") - } - // check undef cid - if len(cborBs) == 1 { - if cborBs[0] != 0xf6 { - return fmt.Errorf("invalid cbor bytes: %x for cid", cborBs[0]) - } - // this is a pointer to an undefined cid - w.Cid = cid.Undef - return nil - } - - // check tag: - if cborBs[0] != 0xd8 || cborBs[1] != 0x2a { - return fmt.Errorf("ipld cbor tags cids with tag 42 not %x", cborBs[:2]) - } - cborBs = cborBs[2:] - // strip len: - var cidBs []byte - err := cbor.Unmarshal(cborBs, &cidBs) - if err != nil { - return err - } - - w.Cid, err = castBytesToCid(cidBs) - return err -} - -// UnmarshalJSON defers to cid json unmarshalling -func (w *Cid) UnmarshalJSON(jsonBs []byte) error { - return json.Unmarshal(jsonBs, &w.Cid) -} - -// MarshalJSON defers to cid json marshalling -func (w Cid) MarshalJSON() ([]byte, error) { - return json.Marshal(w.Cid) -} - -// This is lifted from go-ipld-cbor but should probably be exported from there. 
-func castBytesToCid(x []byte) (cid.Cid, error) { - if len(x) == 0 { - return cid.Cid{}, ipldcbor.ErrEmptyLink - } - - if x[0] != 0 { - return cid.Cid{}, ipldcbor.ErrInvalidMultibase - } - - c, err := cid.Cast(x[1:]) - if err != nil { - return cid.Cid{}, ipldcbor.ErrInvalidLink - } - - return c, nil -} - -// This is lifted from go-ipld-cbor but should probably be exported from there. -func castCidToBytes(link cid.Cid) ([]byte, error) { - if !link.Defined() { - return nil, ipldcbor.ErrEmptyLink - } - return append([]byte{0}, link.Bytes()...), nil -} diff --git a/internal/pkg/enccid/enccid_test.go b/internal/pkg/enccid/enccid_test.go deleted file mode 100644 index 07935b3fec..0000000000 --- a/internal/pkg/enccid/enccid_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package enccid_test - -import ( - "testing" - - cbor "github.com/fxamacker/cbor/v2" - cid "github.com/ipfs/go-cid" - ipldcbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - . 
"github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestCborRoundTrip(t *testing.T) { - tf.UnitTest(t) - - c, err := constants.DefaultCidBuilder.Sum([]byte("epigram")) - require.NoError(t, err) - w := NewCid(c) - cbytes, err := cbor.Marshal(w) - require.NoError(t, err) - - olcbytes, err := ipldcbor.DumpObject(c) - require.NoError(t, err) - assert.Equal(t, olcbytes, cbytes) - var rtOlC cid.Cid - err = ipldcbor.DecodeInto(olcbytes, &rtOlC) - require.NoError(t, err) - - var newC Cid - err = cbor.Unmarshal(cbytes, &newC) - require.NoError(t, err) - assert.Equal(t, w, newC) -} - -func TestEmptyCid(t *testing.T) { - tf.UnitTest(t) - - nullCid := NewCid(cid.Undef) - cbytes, err := cbor.Marshal(nullCid) - require.NoError(t, err) - - var retUndefCid Cid - err = cbor.Unmarshal(cbytes, &retUndefCid) - require.NoError(t, err) - assert.True(t, retUndefCid.Equals(cid.Undef)) -} - -func TestJSONRoundTrip(t *testing.T) { - tf.UnitTest(t) - - c, err := constants.DefaultCidBuilder.Sum([]byte("epigram")) - require.NoError(t, err) - w := NewCid(c) - - jBs, err := w.MarshalJSON() - require.NoError(t, err) - - var rt Cid - err = rt.UnmarshalJSON(jBs) - require.NoError(t, err) - assert.True(t, rt.Equals(w.Cid)) -} diff --git a/internal/pkg/encoding/encoding.go b/internal/pkg/encoding/encoding.go deleted file mode 100644 index b2fc98cb0b..0000000000 --- a/internal/pkg/encoding/encoding.go +++ /dev/null @@ -1,238 +0,0 @@ -package encoding - -import ( - "fmt" - "io" - "reflect" -) - -// Encodable represents types that can be encoded using this library. -type Encodable interface { - Encode(encoder Encoder) error -} - -// Decodable represents types that can be decoded using this library. -type Decodable interface { - Decode(decoder Decoder) error -} - -// Encoder represents types that can encode values. -type Encoder interface { - // EncodeUint encodes a uint. 
- EncodeUint(obj uint) error - // EncodeUint8 encodes a uint8. - EncodeUint8(obj uint8) error - // EncodeUint16 encodes a uint16. - EncodeUint16(obj uint16) error - // EncodeUint32 encodes a uint32. - EncodeUint32(obj uint32) error - // EncodeUint64 encodes a uint64. - EncodeUint64(obj uint64) error - // EncodeInt encodes a int8. - EncodeInt(obj int) error - // EncodeInt8 encodes a int8. - EncodeInt8(obj int8) error - // EncodeInt16 encodes a int16. - EncodeInt16(obj int16) error - // EncodeInt32 encodes a int32. - EncodeInt32(obj int32) error - // EncodeInt64 encodes a int64. - EncodeInt64(obj int64) error - // EncodeBool encodes a bool. - EncodeBool(obj bool) error - // EncodeString encodes a string. - EncodeString(obj string) error - // EncodeArray encodes an array. - EncodeArray(obj interface{}) error - // EncodeMap encodes a map. - EncodeMap(obj interface{}) error - // EncodeStruct encodes a struct. - EncodeStruct(obj interface{}) error - // Bytes returns the encoded bytes. - Bytes() []byte -} - -// Decoder represents types that can decode values. -type Decoder interface { - // DecodeValue decodes a primitive value. - DecodeValue(obj interface{}) error - // DecodeArray decodes an array. - DecodeArray(obj interface{}) error - // DecodeMap decodes a map. - DecodeMap(obj interface{}) error - // DecodeStruct decodes a struct. - DecodeStruct(obj interface{}) error -} - -type defaultEncoder = FxamackerCborEncoder -type defaultDecoder = FxamackerCborDecoder - -var defaultNewStreamDecoder = FxamackerNewStreamDecoder - -// NewStreamDecoder is a function initializing a new stream decoder -type NewStreamDecoder func(io.Reader) StreamDecoder - -// StreamDecoder wraps a stream of bytes and decodes them into an object -type StreamDecoder interface { - Decode(v interface{}) error -} - -// Encode encodes an object, returning a byte array. 
-func Encode(obj interface{}) ([]byte, error) { - var encoder Encoder = &defaultEncoder{} - return encode(obj, reflect.ValueOf(obj), encoder) -} - -// EncodeWith encodes an object using the encoder provided returning a byte array. -func EncodeWith(obj interface{}, encoder Encoder) ([]byte, error) { - return encode(obj, reflect.ValueOf(obj), encoder) -} - -func encode(obj interface{}, v reflect.Value, encoder Encoder) ([]byte, error) { - var err error - - // if `Encodable`, we are done - if encodable, ok := obj.(Encodable); ok { - if err = encodable.Encode(encoder); err != nil { - return nil, err - } - return encoder.Bytes(), nil - } - - // Note: this -> (v.Convert(reflect.TypeOf(uint64(0))).Interface().(uint64)) - // is because doing `obj.(uint64)` blows up on `type foo uint64` - - switch v.Kind() { - case reflect.Uint: - err = encoder.EncodeUint(v.Convert(reflect.TypeOf(uint(0))).Interface().(uint)) - case reflect.Uint8: - err = encoder.EncodeUint8(v.Convert(reflect.TypeOf(uint8(0))).Interface().(uint8)) - case reflect.Uint16: - err = encoder.EncodeUint16(v.Convert(reflect.TypeOf(uint16(0))).Interface().(uint16)) - case reflect.Uint32: - err = encoder.EncodeUint32(v.Convert(reflect.TypeOf(uint32(0))).Interface().(uint32)) - case reflect.Uint64: - err = encoder.EncodeUint64(v.Convert(reflect.TypeOf(uint64(0))).Interface().(uint64)) - case reflect.Int: - err = encoder.EncodeInt(v.Convert(reflect.TypeOf(int(0))).Interface().(int)) - case reflect.Int8: - err = encoder.EncodeInt8(v.Convert(reflect.TypeOf(int8(0))).Interface().(int8)) - case reflect.Int16: - err = encoder.EncodeInt16(v.Convert(reflect.TypeOf(int16(0))).Interface().(int16)) - case reflect.Int32: - err = encoder.EncodeInt32(v.Convert(reflect.TypeOf(int32(0))).Interface().(int32)) - case reflect.Int64: - err = encoder.EncodeInt64(v.Convert(reflect.TypeOf(int64(0))).Interface().(int64)) - case reflect.Bool: - err = encoder.EncodeBool(v.Convert(reflect.TypeOf(false)).Interface().(bool)) - case 
reflect.String: - err = encoder.EncodeString(v.Convert(reflect.TypeOf("")).Interface().(string)) - case reflect.Slice: - err = encoder.EncodeArray(obj) - case reflect.Array: - err = encoder.EncodeArray(obj) - case reflect.Map: - err = encoder.EncodeMap(obj) - case reflect.Struct: - err = encoder.EncodeStruct(obj) - case reflect.Ptr: - if v.IsNil() { - t := v.Type() - nv := reflect.New(t.Elem()) - return encode(obj, nv, encoder) - } - // navigate the pointer and check the underlying type - return encode(obj, reflect.Indirect(v), encoder) - case reflect.Interface: - // navigate the interface and check the underlying type - return encode(obj, v.Elem(), encoder) - default: - return nil, fmt.Errorf("unsupported type for encoding: %T", obj) - } - - if err != nil { - return nil, err - } - - return encoder.Bytes(), nil -} - -// DecodeWith decodes a decodable type, and populates a pointer to the type. -func DecodeWith(obj interface{}, decoder Decoder) error { - return decode(obj, reflect.ValueOf(obj), decoder) -} - -// Decode decodes a decodable type, and populates a pointer to the type. -func Decode(raw []byte, obj interface{}) error { - var decoder Decoder = &defaultDecoder{ - raw: raw, - } - - return decode(obj, reflect.ValueOf(obj), decoder) -} - -func decode(obj interface{}, v reflect.Value, decoder Decoder) error { - var err error - - // if `Decodable`, we are done - if decodable, ok := obj.(Decodable); ok { - if err = decodable.Decode(decoder); err != nil { - return err - } - return nil - } - k := v.Kind() - switch k { - case reflect.Uint: - return decoder.DecodeValue(obj) - case reflect.Uint8: - return decoder.DecodeValue(obj) - case reflect.Uint16: - return decoder.DecodeValue(obj) - case reflect.Uint32: - return decoder.DecodeValue(obj) - case reflect.Uint64: - return decoder.DecodeValue(obj) - // case uint128: TODO: Big uint? 
- case reflect.Int: - return decoder.DecodeValue(obj) - case reflect.Int8: - return decoder.DecodeValue(obj) - case reflect.Int16: - return decoder.DecodeValue(obj) - case reflect.Int32: - return decoder.DecodeValue(obj) - case reflect.Int64: - // case int128: TODO: Big int? - return decoder.DecodeValue(obj) - case reflect.Bool: - return decoder.DecodeValue(obj) - case reflect.String: - return decoder.DecodeValue(obj) - case reflect.Slice: - return decoder.DecodeArray(obj) - case reflect.Array: - return decoder.DecodeArray(obj) - case reflect.Map: - return decoder.DecodeMap(obj) - case reflect.Struct: - return decoder.DecodeStruct(obj) - case reflect.Ptr: - if v.IsNil() { - t := v.Type() - nv := reflect.New(t.Elem()) - return decode(obj, nv, decoder) - } - return decode(obj, reflect.Indirect(v), decoder) - case reflect.Interface: - return decode(obj, v.Elem(), decoder) - default: - return fmt.Errorf("unsupported type for decoding: %T, kind: %v", obj, k) - } -} - -// StreamDecode decodes a decodable type from a reader. 
-func StreamDecode(r io.Reader, obj interface{}) error { - streamDecoder := defaultNewStreamDecoder(r) - return streamDecoder.Decode(obj) -} diff --git a/internal/pkg/encoding/encoding_test.go b/internal/pkg/encoding/encoding_test.go deleted file mode 100644 index d5ac1b5f32..0000000000 --- a/internal/pkg/encoding/encoding_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package encoding - -import ( - "testing" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "gotest.tools/assert" -) - -func doTestForEncoding(t *testing.T, original interface{}, expectedcalls interface{}) { - tf.UnitTest(t) - - encoder := newTestEncoder() - - out, err := EncodeWith(original, &encoder) - assert.NilError(t, err) - assert.DeepEqual(t, out, []byte{1, 2, 3}) - - assert.DeepEqual(t, encoder.calls, expectedcalls) - assert.DeepEqual(t, encoder.lastobj, original) -} - -func TestEncodingForEncodeUint8(t *testing.T) { - doTestForEncoding(t, uint8(21), []string{"EncodeUint8"}) -} - -func TestEncodingForEncodeUint16(t *testing.T) { - doTestForEncoding(t, uint16(21), []string{"EncodeUint16"}) -} - -func TestEncodingForEncodeUint32(t *testing.T) { - doTestForEncoding(t, uint32(21), []string{"EncodeUint32"}) -} - -func TestEncodingForEncodeUint64(t *testing.T) { - doTestForEncoding(t, uint64(21), []string{"EncodeUint64"}) -} - -func TestEncodingForEncodeInt8(t *testing.T) { - doTestForEncoding(t, int8(21), []string{"EncodeInt8"}) -} - -func TestEncodingForEncodeInt16(t *testing.T) { - doTestForEncoding(t, int16(21), []string{"EncodeInt16"}) -} - -func TestEncodingForEncodeInt32(t *testing.T) { - doTestForEncoding(t, int32(21), []string{"EncodeInt32"}) -} - -func TestEncodingForEncodeInt64(t *testing.T) { - doTestForEncoding(t, int64(21), []string{"EncodeInt64"}) - -} - -func TestEncodingForEncodeBool(t *testing.T) { - doTestForEncoding(t, false, []string{"EncodeBool"}) -} - -func TestEncodingForEncodeString(t *testing.T) { - doTestForEncoding(t, "hello", 
[]string{"EncodeString"}) -} - -func TestEncodingForEncodeArray(t *testing.T) { - doTestForEncoding(t, []uint64{6, 2, 8}, []string{"EncodeArray"}) -} - -func TestEncodingForEncodeMap(t *testing.T) { - doTestForEncoding(t, map[string]uint64{"x": 6, "y": 8}, []string{"EncodeMap"}) -} - -func TestEncodingForEncodeStruct(t *testing.T) { - doTestForEncoding(t, struct { - X uint32 - Y byte - }{X: 6, Y: 8}, []string{"EncodeStruct"}) -} - -func TestEncodingForPointer(t *testing.T) { - obj := struct { - X uint32 - Y byte - }{X: 6, Y: 8} - doTestForEncoding(t, &obj, []string{"EncodeStruct"}) -} - -func TestEncodingForEncodable(t *testing.T) { - tf.UnitTest(t) - - encoder := newTestEncoder() - original := customPoint{X: 6, Y: 8} - - out, err := EncodeWith(original, &encoder) - assert.NilError(t, err) - assert.DeepEqual(t, out, []byte{1, 2, 3}) - - assert.DeepEqual(t, encoder.calls, []string{"EncodeArray"}) - assert.DeepEqual(t, encoder.lastobj, []uint64{6, 8}) -} - -func TestEncodingNil(t *testing.T) { - var obj *defaultPoint - doTestForEncoding(t, obj, []string{"EncodeStruct"}) -} - -func doTestForDecoding(t *testing.T, obj interface{}, expected interface{}, expectedcalls interface{}) { - tf.UnitTest(t) - - decoder := newTestDecoder() - // inject decoded value - decoder.decoded = expected - - err := DecodeWith(obj, &decoder) - assert.NilError(t, err) - - assert.DeepEqual(t, decoder.calls, expectedcalls) -} - -func TestDecodingForDecodeArray(t *testing.T) { - obj := []uint64{} - expected := []uint64{54, 2, 2} - doTestForDecoding(t, &obj, expected, []string{"DecodeArray"}) - assert.DeepEqual(t, obj, expected) -} - -func TestDecodingForMap(t *testing.T) { - obj := map[string]uint64{} - expected := map[string]uint64{"x": 6, "y": 8} - doTestForDecoding(t, &obj, expected, []string{"DecodeMap"}) - assert.DeepEqual(t, obj, expected) -} - -func TestDecodingForStruct(t *testing.T) { - obj := defaultPoint{} - expected := defaultPoint{X: 8, Y: 4} - doTestForDecoding(t, &obj, expected, 
[]string{"DecodeStruct"}) - assert.DeepEqual(t, obj, expected) -} - -func TestDecodingForDecodable(t *testing.T) { - obj := customPoint{} - expected := customPoint{X: 8, Y: 4} - - tf.UnitTest(t) - - decoder := newTestDecoder() - // inject decoded value - decoder.decoded = []uint64{8, 4} - - err := DecodeWith(&obj, &decoder) - assert.NilError(t, err) - - assert.DeepEqual(t, decoder.calls, []string{"DecodeArray"}) - assert.DeepEqual(t, obj, expected) -} - -func TestDecodingForDoublePointerStruct(t *testing.T) { - obj := &defaultPoint{} - expected := defaultPoint{X: 8, Y: 4} - doTestForDecoding(t, &obj, expected, []string{"DecodeStruct"}) - assert.DeepEqual(t, obj, &expected) -} - -func TestDecodingForDoublePointerMap(t *testing.T) { - obj := &map[string]uint64{} - expected := map[string]uint64{"x": 6, "y": 8} - doTestForDecoding(t, &obj, expected, []string{"DecodeMap"}) - assert.DeepEqual(t, obj, &expected) -} - -type testMode int - -const testConstForMode = testMode(iota) - -func TestDecodingForConstValue(t *testing.T) { - obj := testConstForMode - expected := testConstForMode - doTestForDecoding(t, &obj, expected, []string{"DecodeValue"}) - assert.DeepEqual(t, obj, expected) -} - -func TestDecodingOnNil(t *testing.T) { - // Note: this is needed here for reasons. 
- // (answers might be found in CI land) - tf.UnitTest(t) - defer mustPanic(t) - - var obj *map[string]uint64 - expected := map[string]uint64{} - doTestForDecoding(t, &obj, expected, []string{"DecodeValue"}) - assert.DeepEqual(t, obj, expected) -} - -func mustPanic(t *testing.T) { - if r := recover(); r == nil { - t.Fail() - } -} diff --git a/internal/pkg/encoding/fxamacker_cbor.go b/internal/pkg/encoding/fxamacker_cbor.go deleted file mode 100644 index 8d6f04ac2f..0000000000 --- a/internal/pkg/encoding/fxamacker_cbor.go +++ /dev/null @@ -1,184 +0,0 @@ -package encoding - -import ( - "bytes" - "io" - - cbor "github.com/fxamacker/cbor/v2" -) - -// FxamackerCborEncoder is an object encoder that encodes objects based on the CBOR standard. -type FxamackerCborEncoder struct { - b bytes.Buffer -} - -// FxamackerCborDecoder is an object decoder that decodes objects based on the CBOR standard. -type FxamackerCborDecoder struct { - raw []byte -} - -// NewFxamackerCborEncoder creates a new `FxamackerCborEncoder`. -func NewFxamackerCborEncoder() FxamackerCborEncoder { - return FxamackerCborEncoder{} -} - -// NewFxamackerCborDecoder creates a new `FxamackerCborDecoder`. -func NewFxamackerCborDecoder(b []byte) FxamackerCborDecoder { - return FxamackerCborDecoder{ - raw: b, - } -} - -// FxamackerNewStreamDecoder initializes a new fxamacker cbor stream decoder -var FxamackerNewStreamDecoder = cbor.NewDecoder - -// -// FxamackerCborEncoder -// - -// EncodeUint encodes a uint. -func (encoder *FxamackerCborEncoder) EncodeUint(obj uint) error { - return encoder.encodeCbor(obj) -} - -// EncodeUint8 encodes a uint8. -func (encoder *FxamackerCborEncoder) EncodeUint8(obj uint8) error { - return encoder.encodeCbor(obj) -} - -// EncodeUint16 encodes a uint16. -func (encoder *FxamackerCborEncoder) EncodeUint16(obj uint16) error { - return encoder.encodeCbor(obj) -} - -// EncodeUint32 encodes a uint32. 
-func (encoder *FxamackerCborEncoder) EncodeUint32(obj uint32) error { - return encoder.encodeCbor(obj) -} - -// EncodeUint64 encodes a uint64. -func (encoder *FxamackerCborEncoder) EncodeUint64(obj uint64) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt encodes a int. -func (encoder *FxamackerCborEncoder) EncodeInt(obj int) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt8 encodes a int8. -func (encoder *FxamackerCborEncoder) EncodeInt8(obj int8) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt16 encodes a int16. -func (encoder *FxamackerCborEncoder) EncodeInt16(obj int16) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt32 encodes a int32. -func (encoder *FxamackerCborEncoder) EncodeInt32(obj int32) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt64 encodes a int64. -func (encoder *FxamackerCborEncoder) EncodeInt64(obj int64) error { - return encoder.encodeCbor(obj) -} - -// EncodeBool encodes a bool. -func (encoder *FxamackerCborEncoder) EncodeBool(obj bool) error { - return encoder.encodeCbor(obj) -} - -// EncodeString encodes a string. -func (encoder *FxamackerCborEncoder) EncodeString(obj string) error { - return encoder.encodeCbor(obj) -} - -// EncodeArray encodes an array. -func (encoder *FxamackerCborEncoder) EncodeArray(obj interface{}) error { - return encoder.encodeCbor(obj) -} - -// EncodeMap encodes a map. -func (encoder *FxamackerCborEncoder) EncodeMap(obj interface{}) error { - return encoder.encodeCbor(obj) -} - -// EncodeStruct encodes a struct. -func (encoder *FxamackerCborEncoder) EncodeStruct(obj interface{}) error { - return encoder.encodeCbor(obj) -} - -// Bytes returns the encoded bytes. 
-func (encoder FxamackerCborEncoder) Bytes() []byte { - return encoder.b.Bytes() -} - -func (encoder *FxamackerCborEncoder) encodeCbor(obj interface{}) error { - // check for object implementing cborMarshallerStreamed - if m, ok := obj.(cborMarshalerStreamed); ok { - return m.MarshalCBOR(&encoder.b) - } - - // get cbor encoded bytes - raw, err := cbor.Marshal(obj) - if err != nil { - return err - } - - // write to buffer - encoder.b.Write(raw) - - return nil -} - -// -// FxamackerCborDecoder -// - -// DecodeValue decodes an primitive value. -func (decoder *FxamackerCborDecoder) DecodeValue(obj interface{}) error { - return decoder.decodeCbor(obj) -} - -// DecodeArray decodes an array. -func (decoder *FxamackerCborDecoder) DecodeArray(obj interface{}) error { - return decoder.decodeCbor(obj) -} - -// DecodeMap encodes a map. -func (decoder *FxamackerCborDecoder) DecodeMap(obj interface{}) error { - return decoder.decodeCbor(obj) -} - -// DecodeStruct decodes a struct. -func (decoder *FxamackerCborDecoder) DecodeStruct(obj interface{}) error { - - return decoder.decodeCbor(obj) -} - -func (decoder *FxamackerCborDecoder) decodeCbor(obj interface{}) error { - // check for object implementing cborUnmarshallerStreamed - if u, ok := obj.(cborUnmarshalerStreamed); ok { - return u.UnmarshalCBOR(bytes.NewBuffer(decoder.raw)) - } - - // decode the bytes into a cbor object - if err := cbor.Unmarshal(decoder.raw, obj); err != nil { - return err - } - // reset the bytes, nothing left with CBOR - decoder.raw = nil - - return nil -} - -type cborUnmarshalerStreamed interface { - UnmarshalCBOR(io.Reader) error -} - -type cborMarshalerStreamed interface { - MarshalCBOR(io.Writer) error -} diff --git a/internal/pkg/encoding/ipld_cbor.go b/internal/pkg/encoding/ipld_cbor.go deleted file mode 100644 index a1829f7295..0000000000 --- a/internal/pkg/encoding/ipld_cbor.go +++ /dev/null @@ -1,166 +0,0 @@ -package encoding - -import ( - "bytes" - - cbor "github.com/ipfs/go-ipld-cbor" -) - -// 
IpldCborEncoder is an object encoder that encodes objects based on the CBOR standard. -type IpldCborEncoder struct { - b bytes.Buffer -} - -// IpldCborDecoder is an object decoder that decodes objects based on the CBOR standard. -type IpldCborDecoder struct { - raw []byte -} - -// NewIpldCborEncoder creates a new `IpldCborEncoder`. -func NewIpldCborEncoder() IpldCborEncoder { - return IpldCborEncoder{} -} - -// NewIpldCborDecoder creates a new `IpldCborDecoder`. -func NewIpldCborDecoder(b []byte) IpldCborDecoder { - return IpldCborDecoder{ - raw: b, - } -} - -// RegisterIpldCborType registers a type for Cbor encoding/decoding. -func RegisterIpldCborType(i interface{}) { - cbor.RegisterCborType(i) -} - -// -// IpldCborEncoder -// - -// EncodeUint encodes a uint. -func (encoder *IpldCborEncoder) EncodeUint(obj uint) error { - return encoder.encodeCbor(obj) -} - -// EncodeUint8 encodes a uint8. -func (encoder *IpldCborEncoder) EncodeUint8(obj uint8) error { - return encoder.encodeCbor(obj) -} - -// EncodeUint16 encodes a uint16. -func (encoder *IpldCborEncoder) EncodeUint16(obj uint16) error { - return encoder.encodeCbor(obj) -} - -// EncodeUint32 encodes a uint32. -func (encoder *IpldCborEncoder) EncodeUint32(obj uint32) error { - return encoder.encodeCbor(obj) -} - -// EncodeUint64 encodes a uint64. -func (encoder *IpldCborEncoder) EncodeUint64(obj uint64) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt encodes a int. -func (encoder *IpldCborEncoder) EncodeInt(obj int) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt8 encodes a int8. -func (encoder *IpldCborEncoder) EncodeInt8(obj int8) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt16 encodes a int16. -func (encoder *IpldCborEncoder) EncodeInt16(obj int16) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt32 encodes a int32. -func (encoder *IpldCborEncoder) EncodeInt32(obj int32) error { - return encoder.encodeCbor(obj) -} - -// EncodeInt64 encodes a int64. 
-func (encoder *IpldCborEncoder) EncodeInt64(obj int64) error { - return encoder.encodeCbor(obj) -} - -// EncodeBool encodes a bool. -func (encoder *IpldCborEncoder) EncodeBool(obj bool) error { - return encoder.encodeCbor(obj) -} - -// EncodeString encodes a string. -func (encoder *IpldCborEncoder) EncodeString(obj string) error { - return encoder.encodeCbor(obj) -} - -// EncodeArray encodes an array. -func (encoder *IpldCborEncoder) EncodeArray(obj interface{}) error { - return encoder.encodeCbor(obj) -} - -// EncodeMap encodes a map. -func (encoder *IpldCborEncoder) EncodeMap(obj interface{}) error { - return encoder.encodeCbor(obj) -} - -// EncodeStruct encodes a struct. -func (encoder *IpldCborEncoder) EncodeStruct(obj interface{}) error { - return encoder.encodeCbor(obj) -} - -// Bytes returns the encoded bytes. -func (encoder IpldCborEncoder) Bytes() []byte { - return encoder.b.Bytes() -} - -func (encoder *IpldCborEncoder) encodeCbor(obj interface{}) error { - // get cbor encoded bytes - raw, err := cbor.DumpObject(obj) - if err != nil { - return err - } - - // write to buffer - encoder.b.Write(raw) - - return nil -} - -// -// IpldCborDecoder -// - -// DecodeValue encodes an array. -func (decoder *IpldCborDecoder) DecodeValue(obj interface{}) error { - return decoder.decodeCbor(obj) -} - -// DecodeArray encodes an array. -func (decoder *IpldCborDecoder) DecodeArray(obj interface{}) error { - return decoder.decodeCbor(obj) -} - -// DecodeMap encodes a map. -func (decoder *IpldCborDecoder) DecodeMap(obj interface{}) error { - return decoder.decodeCbor(obj) -} - -// DecodeStruct encodes a uint64. 
-func (decoder *IpldCborDecoder) DecodeStruct(obj interface{}) error { - return decoder.decodeCbor(obj) -} - -func (decoder *IpldCborDecoder) decodeCbor(obj interface{}) error { - // decode the bytes into a cbor object - if err := cbor.DecodeInto(decoder.raw, obj); err != nil { - return err - } - // reset the bytes, nothing left with CBOR - decoder.raw = nil - - return nil -} diff --git a/internal/pkg/encoding/ipld_cbor_test.go b/internal/pkg/encoding/ipld_cbor_test.go deleted file mode 100644 index 367d72a351..0000000000 --- a/internal/pkg/encoding/ipld_cbor_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package encoding - -import ( - "bytes" - "reflect" - "testing" - - "gotest.tools/assert" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestIpldCborEncodingEncodeStruct(t *testing.T) { - tf.UnitTest(t) - - var original = &defaultPoint{X: 8, Y: 3} - var encoder = IpldCborEncoder{} - - err := encoder.EncodeStruct(original) - assert.NilError(t, err) - - output := encoder.Bytes() - - var expected = []byte{162, 97, 120, 8, 97, 121, 3} - assert.Assert(t, bytes.Equal(output, expected)) -} - -func TestIpldCborDecodingDecodeStruct(t *testing.T) { - tf.UnitTest(t) - - var input = []byte{162, 97, 120, 8, 97, 121, 3} - - var decoder = NewIpldCborDecoder(input) - - var output = defaultPoint{} - err := decoder.DecodeStruct(&output) - assert.NilError(t, err) - - var expected = defaultPoint{X: 8, Y: 3} - assert.Equal(t, output, expected) -} - -func TestIpldCborEncodeDecodeIsClosed(t *testing.T) { - tf.UnitTest(t) - - original := defaultPoint{X: 8, Y: 3} - - raw, err := Encode(original) - assert.NilError(t, err) - - decoded := defaultPoint{} - - err = Decode(raw, &decoded) - assert.NilError(t, err) - - assert.Assert(t, reflect.DeepEqual(original, decoded)) -} - -func TestIpldCborCustomEncoding(t *testing.T) { - tf.UnitTest(t) - - original := customPoint{X: 8, Y: 3} - - raw, err := Encode(original) - assert.NilError(t, err) - - var expected 
= []byte{130, 8, 3} - assert.Assert(t, bytes.Equal(raw, expected)) -} - -func TestIpldCborCustomDecoding(t *testing.T) { - tf.UnitTest(t) - - var input = []byte{130, 8, 3} - - var output = customPoint{} - err := Decode(input, &output) - assert.NilError(t, err) - - var expected = customPoint{X: 8, Y: 3} - assert.Equal(t, output, expected) -} - -type wrapper uint64 - -func TestIpldCborNewTypeEncoding(t *testing.T) { - tf.UnitTest(t) - var original = wrapper(873) - var encoder = IpldCborEncoder{} - - output, err := EncodeWith(original, &encoder) - assert.NilError(t, err) - - var expected = []byte{25, 3, 105} - assert.Assert(t, bytes.Equal(output, expected)) -} - -func TestIpldCborNewTypeDecoding(t *testing.T) { - tf.UnitTest(t) - - var input = []byte{25, 3, 105} - var decoder = NewIpldCborDecoder(input) - - var output = wrapper(0) - err := DecodeWith(&output, &decoder) - assert.NilError(t, err) - - var expected = wrapper(873) - assert.Equal(t, output, expected) -} diff --git a/internal/pkg/encoding/testing.go b/internal/pkg/encoding/testing.go deleted file mode 100644 index a98faab967..0000000000 --- a/internal/pkg/encoding/testing.go +++ /dev/null @@ -1,212 +0,0 @@ -package encoding - -import "reflect" - -func init() { - RegisterIpldCborType(defaultPoint{}) - RegisterIpldCborType(customPoint{}) -} - -type defaultPoint struct { - X uint64 - Y uint64 -} - -type testEncoder struct { - calls []string - lastobj interface{} -} - -type testDecoder struct { - calls []string - decoded interface{} -} - -func newTestEncoder() testEncoder { - return testEncoder{ - calls: []string{}, - } -} - -func newTestDecoder() testDecoder { - return testDecoder{ - calls: []string{}, - } -} - -// EncodeUint encodes a uint. -func (encoder *testEncoder) EncodeUint(obj uint) error { - encoder.calls = append(encoder.calls, "EncodeUint") - encoder.lastobj = obj - return nil -} - -// EncodeUint8 encodes a uint8. 
-func (encoder *testEncoder) EncodeUint8(obj uint8) error { - encoder.calls = append(encoder.calls, "EncodeUint8") - encoder.lastobj = obj - return nil -} - -// EncodeUint16 encodes a uint16. -func (encoder *testEncoder) EncodeUint16(obj uint16) error { - encoder.calls = append(encoder.calls, "EncodeUint16") - encoder.lastobj = obj - return nil -} - -// EncodeUint32 encodes a uint32. -func (encoder *testEncoder) EncodeUint32(obj uint32) error { - encoder.calls = append(encoder.calls, "EncodeUint32") - encoder.lastobj = obj - return nil -} - -// EncodeUint64 encodes a uint64. -func (encoder *testEncoder) EncodeUint64(obj uint64) error { - encoder.calls = append(encoder.calls, "EncodeUint64") - encoder.lastobj = obj - return nil -} - -// EncodeInt encodes a int. -func (encoder *testEncoder) EncodeInt(obj int) error { - encoder.calls = append(encoder.calls, "EncodeInt") - encoder.lastobj = obj - return nil -} - -// EncodeInt8 encodes a int8. -func (encoder *testEncoder) EncodeInt8(obj int8) error { - encoder.calls = append(encoder.calls, "EncodeInt8") - encoder.lastobj = obj - return nil -} - -// EncodeInt16 encodes a int16. -func (encoder *testEncoder) EncodeInt16(obj int16) error { - encoder.calls = append(encoder.calls, "EncodeInt16") - encoder.lastobj = obj - return nil -} - -// EncodeInt32 encodes a int32. -func (encoder *testEncoder) EncodeInt32(obj int32) error { - encoder.calls = append(encoder.calls, "EncodeInt32") - encoder.lastobj = obj - return nil -} - -// EncodeInt64 encodes a int64. -func (encoder *testEncoder) EncodeInt64(obj int64) error { - encoder.calls = append(encoder.calls, "EncodeInt64") - encoder.lastobj = obj - return nil -} - -// EncodeBoolean encodes a bool. -func (encoder *testEncoder) EncodeBool(obj bool) error { - encoder.calls = append(encoder.calls, "EncodeBool") - encoder.lastobj = obj - return nil -} - -// EncodeString encodes a string. 
-func (encoder *testEncoder) EncodeString(obj string) error { - encoder.calls = append(encoder.calls, "EncodeString") - encoder.lastobj = obj - return nil -} - -// EncodeArray encodes an array. -func (encoder *testEncoder) EncodeArray(obj interface{}) error { - encoder.calls = append(encoder.calls, "EncodeArray") - encoder.lastobj = obj - return nil -} - -// EncodeMap encodes a map. -func (encoder *testEncoder) EncodeMap(obj interface{}) error { - encoder.calls = append(encoder.calls, "EncodeMap") - encoder.lastobj = obj - return nil -} - -// EncodeStruct encodes a uint64. -func (encoder *testEncoder) EncodeStruct(obj interface{}) error { - encoder.calls = append(encoder.calls, "EncodeStruct") - encoder.lastobj = obj - return nil -} - -// Bytes returns the encoded bytes. -func (encoder testEncoder) Bytes() []byte { - return []byte{1, 2, 3} -} - -// DecodeValue encodes an array. -func (decoder *testDecoder) DecodeValue(obj interface{}) error { - decoder.calls = append(decoder.calls, "DecodeValue") - set(reflect.ValueOf(obj), decoder.decoded) - return nil -} - -// DecodeArray encodes an array. -func (decoder *testDecoder) DecodeArray(obj interface{}) error { - decoder.calls = append(decoder.calls, "DecodeArray") - set(reflect.ValueOf(obj), decoder.decoded) - return nil -} - -// DecodeMap encodes a map. -func (decoder *testDecoder) DecodeMap(obj interface{}) error { - decoder.calls = append(decoder.calls, "DecodeMap") - set(reflect.ValueOf(obj), decoder.decoded) - return nil -} - -// EncodeStruct encodes a uint64. 
-func (decoder *testDecoder) DecodeStruct(obj interface{}) error { - decoder.calls = append(decoder.calls, "DecodeStruct") - set(reflect.ValueOf(obj), decoder.decoded) - return nil -} - -func set(v reflect.Value, to interface{}) { - switch v.Kind() { - case reflect.Interface: - v.Elem().Set(reflect.ValueOf(to)) - case reflect.Ptr: - if v.IsNil() { - // Note: we need to figure out how to set things like a pointer to a nil pointer - panic("not supported") - } else { - set(reflect.Indirect(v), to) - } - default: - v.Set(reflect.ValueOf(to)) - } -} - -type customPoint struct { - X uint64 - Y uint64 -} - -func (p customPoint) Encode(encoder Encoder) error { - if err := encoder.EncodeArray([]uint64{p.X, p.Y}); err != nil { - return err - } - - return nil -} - -func (p *customPoint) Decode(decoder Decoder) error { - decoded := []uint64{} - if err := decoder.DecodeArray(&decoded); err != nil { - return err - } - p.X = decoded[0] - p.Y = decoded[1] - return nil -} diff --git a/internal/pkg/encoding/why_cbor.go b/internal/pkg/encoding/why_cbor.go deleted file mode 100644 index fbfbe43e28..0000000000 --- a/internal/pkg/encoding/why_cbor.go +++ /dev/null @@ -1,54 +0,0 @@ -package encoding - -import ( - "bytes" - "fmt" - - cbg "github.com/whyrusleeping/cbor-gen" -) - -// WhyCborEncoder is an object encoder that encodes objects based on the CBOR standard. -type WhyCborEncoder struct { - b *bytes.Buffer -} - -// WhyCborDecoder is an object decoder that decodes objects based on the CBOR standard. -type WhyCborDecoder struct { - b *bytes.Buffer -} - -// -// CborEncoder -// - -// EncodeObject encodes an object. -func (encoder *WhyCborEncoder) EncodeObject(obj Encodable) error { - cborobj, ok := obj.(cbg.CBORMarshaler) - if !ok { - return fmt.Errorf("Object is not a CBORMarshaler") - } - return cborobj.MarshalCBOR(encoder.b) -} - -// IntoBytes returns the encoded bytes. 
-func (encoder WhyCborEncoder) IntoBytes() []byte { - return encoder.b.Bytes() -} - -// -// CborDecoder -// - -// SetBytes sets the initializer internal bytes to match the input. -func (decoder *WhyCborDecoder) SetBytes(raw []byte) { - decoder.b = bytes.NewBuffer(raw) -} - -// DecodeObject decodes an object. -func (decoder WhyCborDecoder) DecodeObject(obj Decodable) error { - cborobj, ok := obj.(cbg.CBORUnmarshaler) - if !ok { - return fmt.Errorf("Object is not a CBORUnmarshaler") - } - return cborobj.UnmarshalCBOR(decoder.b) -} diff --git a/internal/pkg/encoding/why_cbor_test.go b/internal/pkg/encoding/why_cbor_test.go deleted file mode 100644 index b50a9bdc7b..0000000000 --- a/internal/pkg/encoding/why_cbor_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package encoding - -import ( - "bytes" - "fmt" - "io" - "testing" - - "gotest.tools/assert" - - cbg "github.com/whyrusleeping/cbor-gen" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestWhyCborEncodingOutput(t *testing.T) { - tf.UnitTest(t) - - var original = &customPoint{X: 8, Y: 3} - var encoder = WhyCborEncoder{b: bytes.NewBuffer([]byte{})} - - err := encoder.EncodeObject(original) - assert.NilError(t, err) - - output := encoder.IntoBytes() - - var expected = []byte{130, 8, 3} - assert.Assert(t, bytes.Equal(output, expected)) -} - -func TestWhyCborDecodingOutput(t *testing.T) { - tf.UnitTest(t) - - var input = []byte{130, 8, 3} - - var decoder = &WhyCborDecoder{} - decoder.SetBytes(input) - - var output = customPoint{} - err := decoder.DecodeObject(&output) - assert.NilError(t, err) - - var expected = customPoint{X: 8, Y: 3} - assert.Equal(t, output, expected) -} - -func (t *customPoint) MarshalCBOR(w io.Writer) error { - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.t.X (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, t.X)); err != nil { - return err - } - - // t.t.Y (uint64) - if _, err := 
w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, t.Y)); err != nil { - return err - } - return nil -} - -func (t *customPoint) UnmarshalCBOR(br io.Reader) error { - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.t.X (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.X = extra - // t.t.Y (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Y = extra - return nil -} diff --git a/internal/pkg/genesis/genesis.go b/internal/pkg/genesis/genesis.go deleted file mode 100644 index 6f649d49f4..0000000000 --- a/internal/pkg/genesis/genesis.go +++ /dev/null @@ -1,32 +0,0 @@ -package genesis - -import ( - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" -) - -// InitFunc is the signature for function that is used to create a genesis block. -type InitFunc func(cst cbor.IpldStore, bs blockstore.Blockstore) (*block.Block, error) - -// Ticket is the ticket to place in the genesis block header (which can't be derived from a prior ticket), -// used in the evaluation of the messages in the genesis block, -// and *also* the ticket value used when computing the genesis state (the parent state of the genesis block). 
-var Ticket = block.Ticket{ - VRFProof: []byte{ - 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, - 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, - }, -} - -// VM is the view into the VM used during genesis block creation. -type VM interface { - ApplyGenesisMessage(from address.Address, to address.Address, method abi.MethodNum, value abi.TokenAmount, params interface{}, rnd crypto.RandomnessSource) (interface{}, error) - ContextStore() adt.Store -} diff --git a/internal/pkg/journal/testing.go b/internal/pkg/journal/testing.go deleted file mode 100644 index 986db2229a..0000000000 --- a/internal/pkg/journal/testing.go +++ /dev/null @@ -1,62 +0,0 @@ -package journal - -import ( - "sync" - "testing" - "time" - - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" -) - -// NewInMemoryJournal returns a journal backed by an in-memory map. -func NewInMemoryJournal(t *testing.T, clk clock.Clock) Journal { - return &MemoryJournal{ - t: t, - clock: clk, - topics: make(map[string][]entry), - } -} - -// MemoryJournal represents a journal held in memory. -type MemoryJournal struct { - t *testing.T - clock clock.Clock - topicsMu sync.Mutex - topics map[string][]entry -} - -// Topic returns a Writer with the provided `topic`. -func (mj *MemoryJournal) Topic(topic string) Writer { - mr := &MemoryWriter{ - topic: topic, - journal: mj, - } - return mr -} - -type entry struct { - time time.Time - event string - kvs []interface{} -} - -// MemoryWriter writes journal entires in memory. -type MemoryWriter struct { - topic string - journal *MemoryJournal -} - -// Write records an operation and its metadata to a Journal accepting variadic key-value -// pairs. 
-func (mw *MemoryWriter) Write(event string, kvs ...interface{}) { - if len(kvs)%2 != 0 { - mw.journal.t.Fatalf("journal write call has odd number of key values pairs: %d event: %s topic: %s", len(kvs), event, mw.topic) - } - mw.journal.topicsMu.Lock() - mw.journal.topics[mw.topic] = append(mw.journal.topics[mw.topic], entry{ - event: event, - time: mw.journal.clock.Now(), - kvs: kvs, - }) - mw.journal.topicsMu.Unlock() -} diff --git a/internal/pkg/message/handler.go b/internal/pkg/message/handler.go deleted file mode 100644 index 9376aa1179..0000000000 --- a/internal/pkg/message/handler.go +++ /dev/null @@ -1,51 +0,0 @@ -package message - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" -) - -// HeadHandler wires up new head tipset handling to the message inbox and outbox. -type HeadHandler struct { - // Inbox and outbox exported for testing. - Inbox *Inbox - Outbox *Outbox - chain chainProvider - - prevHead block.TipSet -} - -// NewHeadHandler build a new new-head handler. -func NewHeadHandler(inbox *Inbox, outbox *Outbox, chain chainProvider, head block.TipSet) *HeadHandler { - return &HeadHandler{inbox, outbox, chain, head} -} - -// HandleNewHead computes the chain delta implied by a new head and updates the inbox and outbox. 
-func (h *HeadHandler) HandleNewHead(ctx context.Context, newHead block.TipSet) error { - if !newHead.Defined() { - log.Warn("received empty tipset, ignoring") - return nil - } - if newHead.Equals(h.prevHead) { - log.Warnf("received non-new head tipset, ignoring %s", newHead.Key()) - return nil - } - - oldTips, newTips, err := chain.CollectTipsToCommonAncestor(ctx, h.chain, h.prevHead, newHead) - if err != nil { - return errors.Errorf("traversing chain with new head %s, prev %s: %s", newHead.Key(), h.prevHead.Key(), err) - } - if err := h.Outbox.HandleNewHead(ctx, oldTips, newTips); err != nil { - log.Errorf("updating outbound message queue for tipset %s, prev %s: %s", newHead.Key(), h.prevHead.Key(), err) - } - if err := h.Inbox.HandleNewHead(ctx, oldTips, newTips); err != nil { - log.Errorf("updating message pool for tipset %s, prev %s: %s", newHead.Key(), h.prevHead.Key(), err) - } - - h.prevHead = newHead - return nil -} diff --git a/internal/pkg/message/handler_integration_test.go b/internal/pkg/message/handler_integration_test.go deleted file mode 100644 index a2ba019d06..0000000000 --- a/internal/pkg/message/handler_integration_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package message_test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/journal" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - th 
"github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// TestNewHeadHandlerIntegration tests inbox and outbox policy consistency. -func TestNewHeadHandlerIntegration(t *testing.T) { - tf.UnitTest(t) - signer, _ := types.NewMockSignersAndKeyInfo(2) - objournal := journal.NewInMemoryJournal(t, clock.NewFake(time.Unix(1234567890, 0))).Topic("outbox") - sender := signer.Addresses[0] - dest := signer.Addresses[1] - ctx := context.Background() - // Maximum age for a message in the pool/queue. As of August 2019, this is in rounds for the - // outbox queue and non-null tipsets for the inbox pool :-(. - // We generally desire messages to expire from the pool before the queue so that retries are - // accepted. 
- maxAge := uint(10) - gasPrice := types.NewGasPrice(1) - gasUnits := gas.NewGas(1000) - - makeHandler := func(provider *message.FakeProvider, root block.TipSet) *message.HeadHandler { - mpool := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - inbox := message.NewInbox(mpool, maxAge, provider, provider) - queue := message.NewQueue() - publisher := message.NewDefaultPublisher(&message.MockNetworkPublisher{}, mpool) - policy := message.NewMessageQueuePolicy(provider, maxAge) - outbox := message.NewOutbox(signer, &message.FakeValidator{}, queue, publisher, policy, - provider, provider, objournal) - - return message.NewHeadHandler(inbox, outbox, provider, root) - } - - t.Run("test send after reverted message", func(t *testing.T) { - provider := message.NewFakeProvider(t) - root := provider.NewGenesis() - actr := actor.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(0), cid.Undef) - actr.CallSeqNum = 42 - provider.SetHeadAndActor(t, root.Key(), sender, actr) - - handler := makeHandler(provider, root) - outbox := handler.Outbox - inbox := handler.Inbox - - // First, send a message and expect to find it in the message queue and pool. - mid1, donePub1, err := outbox.Send(ctx, sender, dest, types.ZeroAttoFIL, gasPrice, gasUnits, true, abi.MethodNum(9000001), adt.Empty) - require.NoError(t, err) - require.NotNil(t, donePub1) - require.Equal(t, 1, len(outbox.Queue().List(sender))) // Message is in the queue. - pub1Err := <-donePub1 - assert.NoError(t, pub1Err) - msg1, found := inbox.Pool().Get(mid1) - require.True(t, found) // Message is in the pool. - assert.True(t, msg1.Equals(outbox.Queue().List(sender)[0].Msg)) - - // Receive the message in a block. - left := provider.BuildOneOn(root, func(b *chain.BlockBuilder) { - b.AddMessages([]*types.SignedMessage{msg1}, []*types.UnsignedMessage{}) - }) - require.NoError(t, handler.HandleNewHead(ctx, left)) - assert.Equal(t, 0, len(outbox.Queue().List(sender))) // Gone from queue. 
- _, found = inbox.Pool().Get(mid1) - assert.False(t, found) // Gone from pool. - - // Now re-org the chain to un-mine that message. - right := provider.BuildOneOn(root, func(b *chain.BlockBuilder) { - // No messages. - }) - require.NoError(t, handler.HandleNewHead(ctx, right)) - assert.Equal(t, 1, len(outbox.Queue().List(sender))) // Message returns to queue. - _, found = inbox.Pool().Get(mid1) - assert.True(t, found) // Message returns to pool to be mined again. - - // Send another message from the same account. - // First, send a message and expect to find it in the message queue and pool. - mid2, donePub2, err := outbox.Send(ctx, sender, dest, types.ZeroAttoFIL, gasPrice, gasUnits, true, abi.MethodNum(9000002), adt.Empty) - // This case causes the nonce to be wrongly calculated, since the first, now-unmined message - // is not in the outbox, and actor state has not updated, but the message pool already has - // a message with the same nonce. - require.NoError(t, err) - require.NotNil(t, donePub2) - - // Both messages are in the pool. - pub2Err := <-donePub2 - assert.NoError(t, pub2Err) - _, found = inbox.Pool().Get(mid1) - require.True(t, found) - msg2, found := inbox.Pool().Get(mid2) - require.True(t, found) - // Both messages are in the queue too, in the right order. 
- restoredQueue := outbox.Queue().List(sender) - assert.Equal(t, 2, len(restoredQueue)) - assert.True(t, msg1.Equals(restoredQueue[0].Msg)) - assert.True(t, msg2.Equals(restoredQueue[1].Msg)) - }) - - t.Run("ignores empty tipset", func(t *testing.T) { - provider := message.NewFakeProvider(t) - root := provider.NewGenesis() - provider.SetHead(root.Key()) - - handler := makeHandler(provider, root) - err := handler.HandleNewHead(ctx, block.UndefTipSet) - assert.NoError(t, err) - }) - - t.Run("ignores duplicate tipset", func(t *testing.T) { - provider := message.NewFakeProvider(t) - root := provider.NewGenesis() - provider.SetHead(root.Key()) - - handler := makeHandler(provider, root) - err := handler.HandleNewHead(ctx, root) - assert.NoError(t, err) - }) -} diff --git a/internal/pkg/message/inbox.go b/internal/pkg/message/inbox.go deleted file mode 100644 index 6ed07dc811..0000000000 --- a/internal/pkg/message/inbox.go +++ /dev/null @@ -1,153 +0,0 @@ -package message - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// InboxMaxAgeTipsets is maximum age (in non-empty tipsets) to permit messages to stay in the pool after reception. -// It should be a little shorter than the outbox max age so that messages expire from mining -// pools a little before the sender gives up on them. -const InboxMaxAgeTipsets = 6 - -// Inbox maintains a pool of received messages. -type Inbox struct { - // The pool storing received messages. - pool *Pool - // Maximum age of a pool message. - maxAgeTipsets uint - - // Provides tipsets for chain traversal. - chain chainProvider - messageProvider messageProvider -} - -// messageProvider provides message collections given their cid. 
-type messageProvider interface { - LoadMessages(context.Context, cid.Cid) ([]*types.SignedMessage, []*types.UnsignedMessage, error) -} - -// NewInbox constructs a new inbox. -func NewInbox(pool *Pool, maxAgeRounds uint, chain chainProvider, messages messageProvider) *Inbox { - return &Inbox{ - pool: pool, - maxAgeTipsets: maxAgeRounds, - chain: chain, - messageProvider: messages, - } -} - -// Add adds a message to the pool, tagged with the current block height. -// An error probably means the message failed to validate, -// but it could indicate a more serious problem with the system. -func (ib *Inbox) Add(ctx context.Context, msg *types.SignedMessage) (cid.Cid, error) { - head, err := ib.chain.GetTipSet(ib.chain.GetHead()) - if err != nil { - return cid.Undef, err - } - blockTime, err := head.Height() - if err != nil { - return cid.Undef, err - } - - return ib.pool.Add(ctx, msg, blockTime) -} - -// Pool returns the inbox's message pool. -func (ib *Inbox) Pool() *Pool { - return ib.pool -} - -// HandleNewHead updates the message pool in response to a new head tipset. -// This removes messages from the pool that are found in the newly adopted chain and adds back -// those from the removed chain (if any) that do not appear in the new chain. -// The `oldChain` and `newChain` lists are expected in descending height order, and each may be empty. -func (ib *Inbox) HandleNewHead(ctx context.Context, oldChain, newChain []block.TipSet) error { - chainHeight, err := reorgHeight(oldChain, newChain) - if err != nil { - return err - } - - // Add all message from the old tipsets to the message pool, so they can be mined again. 
- for _, tipset := range oldChain { - for i := 0; i < tipset.Len(); i++ { - block := tipset.At(i) - secpMsgs, _, err := ib.messageProvider.LoadMessages(ctx, block.Messages.Cid) - if err != nil { - return err - } - for _, msg := range secpMsgs { - _, err = ib.pool.Add(ctx, msg, chainHeight) - if err != nil { - // Messages from the removed chain are frequently invalidated, e.g. because that - // same message is already mined on the new chain. - log.Debug(err) - } - } - } - } - - // Remove all messages in the new tipsets from the pool, now mined. - // Cid() can error, so collect all the CIDs up front. - var removeCids []cid.Cid - for _, tipset := range newChain { - for i := 0; i < tipset.Len(); i++ { - secpMsgs, _, err := ib.messageProvider.LoadMessages(ctx, tipset.At(i).Messages.Cid) - if err != nil { - return err - } - for _, msg := range secpMsgs { - cid, err := msg.Cid() - if err != nil { - return err - } - removeCids = append(removeCids, cid) - } - } - } - for _, c := range removeCids { - ib.pool.Remove(c) - } - - // prune all messages that have been in the pool too long - if len(newChain) > 0 { - return timeoutMessages(ctx, ib.pool, ib.chain, newChain[0], ib.maxAgeTipsets) - } - return nil -} - -// timeoutMessages removes all messages from the pool that arrived more than maxAgeTipsets tip sets ago. -// Note that we measure the timeout in the number of tip sets we have received rather than a fixed block -// height. This prevents us from prematurely timing messages that arrive during long chains of null blocks. -// Also when blocks fill, the rate of message processing will correspond more closely to rate of tip -// sets than to the expected block time over short timescales. 
-func timeoutMessages(ctx context.Context, pool *Pool, chains chain.TipSetProvider, head block.TipSet, maxAgeTipsets uint) error { - var err error - - var minimumHeight abi.ChainEpoch - itr := chain.IterAncestors(ctx, chains, head) - - // Walk back maxAgeTipsets+1 tipsets to determine lowest block height to prune. - for i := uint(0); err == nil && i <= maxAgeTipsets && !itr.Complete(); i++ { - minimumHeight, err = itr.Value().Height() - if err == nil { - err = itr.Next() - } - } - if err != nil { - return err - } - - // remove all messages added before minimumHeight - for _, cid := range pool.PendingBefore(minimumHeight) { - pool.Remove(cid) - } - - return nil -} diff --git a/internal/pkg/message/inbox_test.go b/internal/pkg/message/inbox_test.go deleted file mode 100644 index ba5940f220..0000000000 --- a/internal/pkg/message/inbox_test.go +++ /dev/null @@ -1,475 +0,0 @@ -package message_test - -import ( - "context" - "fmt" - "strconv" - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestUpdateMessagePool(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - type msgs []*types.SignedMessage - type msgsSet [][]*types.SignedMessage - - var mockSigner, _ = types.NewMockSignersAndKeyInfo(10) - - t.Run("Replace head", func(t *testing.T) { - // Msg pool: [m0, m1], Chain: b[] - // to - // Msg pool: [m0], Chain: b[m1] - chainProvider, parent := newProviderWithGenesis(t) - p := 
message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(2, mockSigner) - requireAdd(t, ib, m[0], m[1]) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, msgsSet{}) - - newChain := requireChainWithMessages(t, chainProvider.Builder, parent, msgsSet{msgs{m[1]}}) - - assert.NoError(t, ib.HandleNewHead(ctx, oldChain, newChain)) - assertPoolEquals(t, p, m[0]) - }) - - t.Run("Replace head with self", func(t *testing.T) { - // Msg pool: [m0, m1], Chain: b[m2] - // to - // Msg pool: [m0, m1], Chain: b[m2] - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(3, mockSigner) - requireAdd(t, ib, m[0], m[1]) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, msgsSet{msgs{m[2]}}) - - assert.NoError(t, ib.HandleNewHead(ctx, oldChain, oldChain)) // sic - assertPoolEquals(t, p, m[0], m[1]) - }) - - t.Run("Replace head with a long chain", func(t *testing.T) { - // Msg pool: [m2, m5], Chain: b[m0, m1] - // to - // Msg pool: [m1], Chain: b[m2, m3] -> b[m4] -> b[m0] -> b[] -> b[m5, m6] - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(7, mockSigner) - requireAdd(t, ib, m[2], m[5]) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, msgsSet{msgs{m[0], m[1]}}) - - newChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{m[2], m[3]}}, - msgsSet{msgs{m[4]}}, - msgsSet{msgs{m[0]}}, - msgsSet{msgs{}}, - msgsSet{msgs{m[5], m[6]}}, - ) - - assert.NoError(t, ib.HandleNewHead(ctx, oldChain, newChain)) - assertPoolEquals(t, 
p, m[1]) - }) - - t.Run("Replace head with multi-block tipset chains", func(t *testing.T) { - // Msg pool: [m2, m5], Chain: {b[m0], b[m1]} - // to - // Msg pool: [m1], Chain: b[m2, m3] -> {b[m4], b[m0], b[], b[]} -> {b[], b[m6,m5]} - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(7, mockSigner) - requireAdd(t, ib, m[2], m[5]) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, msgsSet{msgs{m[0]}, msgs{m[1]}}) - - newChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{m[2], m[3]}}, - msgsSet{msgs{m[4]}, msgs{m[0]}, msgs{}, msgs{}}, - msgsSet{msgs{}, msgs{m[5], m[6]}}, - ) - - assert.NoError(t, ib.HandleNewHead(ctx, oldChain, newChain)) - assertPoolEquals(t, p, m[1]) - }) - - t.Run("Replace internal node (second one)", func(t *testing.T) { - // Msg pool: [m3, m5], Chain: b[m0] -> b[m1] -> b[m2] - // to - // Msg pool: [m1, m2], Chain: b[m0] -> b[m3] -> b[m4, m5] - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(6, mockSigner) - requireAdd(t, ib, m[3], m[5]) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{m[0]}}, - msgsSet{msgs{m[1]}}, - msgsSet{msgs{m[2]}}, - ) - - newChain := requireChainWithMessages(t, chainProvider.Builder, oldChain[0], - msgsSet{msgs{m[3]}}, - msgsSet{msgs{m[4], m[5]}}, - ) - - assert.NoError(t, ib.HandleNewHead(ctx, oldChain[:len(oldChain)-1], newChain)) - assertPoolEquals(t, p, m[1], m[2]) - }) - - t.Run("Replace internal node (second one) with a long chain", func(t *testing.T) { - // Msg pool: [m6], Chain: b[m0] -> b[m1] -> b[m2] - // to - // Msg pool: [m6], Chain: b[m0] -> b[m3] -> b[m4] 
-> b[m5] -> b[m1, m2] - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(7, mockSigner) - requireAdd(t, ib, m[6]) - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{m[0]}}, - msgsSet{msgs{m[1]}}, - msgsSet{msgs{m[2]}}, - ) - - newChain := requireChainWithMessages(t, chainProvider.Builder, oldChain[0], - msgsSet{msgs{m[3]}}, - msgsSet{msgs{m[4]}}, - msgsSet{msgs{m[5]}}, - msgsSet{msgs{m[1], m[2]}}, - ) - - assert.NoError(t, ib.HandleNewHead(ctx, oldChain[:len(oldChain)-1], newChain)) - assertPoolEquals(t, p, m[6]) - }) - - t.Run("Replace internal node with multi-block tipset chains", func(t *testing.T) { - // Msg pool: [m6], Chain: {b[m0], b[m1]} -> b[m2] - // to - // Msg pool: [m6], Chain: {b[m0], b[m1]} -> b[m3] -> b[m4] -> {b[m5], b[m1, m2]} - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(7, mockSigner) - requireAdd(t, ib, m[6]) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{m[0]}, msgs{m[1]}}, - msgsSet{msgs{m[2]}}, - ) - - newChain := requireChainWithMessages(t, chainProvider.Builder, oldChain[0], - msgsSet{msgs{m[3]}}, - msgsSet{msgs{m[4]}}, - msgsSet{msgs{m[5]}, msgs{m[1], m[2]}}, - ) - - assert.NoError(t, ib.HandleNewHead(ctx, oldChain[:len(oldChain)-1], newChain)) - assertPoolEquals(t, p, m[6]) - }) - - t.Run("Replace with same messages in different block structure", func(t *testing.T) { - // Msg pool: [m3, m5], Chain: b[m0] -> b[m1] -> b[m2] - // to - // Msg pool: [m3, m5], Chain: {b[m0], b[m1], b[m2]} - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, 
th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(6, mockSigner) - requireAdd(t, ib, m[3], m[5]) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{m[0]}}, - msgsSet{msgs{m[1]}}, - msgsSet{msgs{m[2]}}, - ) - - newChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{m[0]}, msgs{m[1]}, msgs{m[2]}}, - ) - - assert.NoError(t, ib.HandleNewHead(ctx, oldChain, newChain)) - assertPoolEquals(t, p, m[3], m[5]) - }) - - t.Run("Truncate to internal node", func(t *testing.T) { - // Msg pool: [], Chain: b[m0] -> b[m1] -> b[m2] -> b[m3] - // to - // Msg pool: [m2, m3], Chain: b[m0] -> b[m1] - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - m := types.NewSignedMsgs(4, mockSigner) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{m[0]}}, - msgsSet{msgs{m[1]}}, - msgsSet{msgs{m[2]}}, - msgsSet{msgs{m[3]}}, - ) - - assert.NoError(t, ib.HandleNewHead(ctx, oldChain[:2], nil)) - assertPoolEquals(t, p, m[2], m[3]) - }) - - t.Run("Extend head", func(t *testing.T) { - // Msg pool: [m0, m1], Chain: b[] - // to - // Msg pool: [m0], Chain: b[] -> b[m1, m2] - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(3, mockSigner) - requireAdd(t, ib, m[0], m[1]) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, msgsSet{msgs{}}) - - newChain := requireChainWithMessages(t, chainProvider.Builder, oldChain[0], msgsSet{msgs{m[1], m[2]}}) - - assert.NoError(t, ib.HandleNewHead(ctx, nil, newChain)) - assertPoolEquals(t, p, m[0]) - }) - - t.Run("Extend head with a longer 
chain and more messages", func(t *testing.T) { - // Msg pool: [m2, m5], Chain: b[m0] -> b[m1] - // to - // Msg pool: [], Chain: b[m0] -> b[m1] -> b[m2, m3] -> b[m4] -> b[m5, m6] - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 10, chainProvider, chainProvider) - - m := types.NewSignedMsgs(7, mockSigner) - requireAdd(t, ib, m[2], m[5]) - - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{m[0]}}, - msgsSet{msgs{m[1]}}, - ) - - newChain := requireChainWithMessages(t, chainProvider.Builder, oldChain[0], - msgsSet{msgs{m[2], m[3]}}, - msgsSet{msgs{m[4]}}, - msgsSet{msgs{m[5], m[6]}}, - ) - - assert.NoError(t, ib.HandleNewHead(ctx, nil, newChain)) - assertPoolEquals(t, p) - }) - - t.Run("Messages added to new chain are added at chain height", func(t *testing.T) { - // Msg pool: [m2, m5], Chain: b[m0] -> b[m1] - // to - // Msg pool: [], Chain: b[m0] -> b[m1] -> b[m2, m3] -> b[m4] -> b[m5, m6] - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - ib := message.NewInbox(p, 5, chainProvider, chainProvider) - - m := types.NewSignedMsgs(1, mockSigner) - - // old chain with one tipset containing one message - oldChain := requireChainWithMessages(t, chainProvider.Builder, parent, msgsSet{msgs{m[0]}}) - - // new chain with 10 tipsets and no messages - newChain := requireChainWithMessages(t, chainProvider.Builder, parent, - msgsSet{msgs{}}, msgsSet{msgs{}}, msgsSet{msgs{}}, msgsSet{msgs{}}, msgsSet{msgs{}}, - msgsSet{msgs{}}, msgsSet{msgs{}}, msgsSet{msgs{}}, msgsSet{msgs{}}, msgsSet{msgs{}}, - ) - - // reorg the chain. 
Messages added more than 5 tipsets from the head will be immediately dropped - assert.NoError(t, ib.HandleNewHead(ctx, oldChain, newChain)) - - // expect the first message to still be in the chain because it is added at the new head - assertPoolEquals(t, p, m[0]) - }) - - t.Run("Times out old messages", func(t *testing.T) { - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - maxAge := uint(10) - ib := message.NewInbox(p, maxAge, chainProvider, chainProvider) - - m := types.NewSignedMsgs(maxAge, mockSigner) - - head := requireChainWithMessages(t, chainProvider.Builder, parent, msgsSet{msgs{}})[0] - - // Add a message at each block height until maxAge is reached. - for i := uint(0); i < maxAge; i++ { - // chainProvider's head determines block time at which message is added - chainProvider.SetHead(head.Key()) - - requireAdd(t, ib, m[i]) - - // update pool with tipset that has no messages - next := requireChainWithMessages(t, chainProvider.Builder, head, msgsSet{msgs{}})[0] - assert.NoError(t, ib.HandleNewHead(ctx, nil, []block.TipSet{next})) - - // assert all added messages still in pool - assertPoolEquals(t, p, m[:i+1]...) - - head = next - } - require.Equal(t, abi.ChainEpoch(11), head.At(0).Height) - - // next tipset times out first message only - next := requireChainWithMessages(t, chainProvider.Builder, head, msgsSet{msgs{}})[0] - assert.NoError(t, ib.HandleNewHead(ctx, nil, []block.TipSet{next})) - assertPoolEquals(t, p, m[1:]...) - - // adding a chain of 4 tipsets times out based on final state - newChain := requireChainWithMessages(t, chainProvider.Builder, next, - msgsSet{msgs{}}, - msgsSet{msgs{}}, - msgsSet{msgs{}}, - msgsSet{msgs{}}, - ) - require.Equal(t, abi.ChainEpoch(16), newChain[0].At(0).Height) - assert.NoError(t, ib.HandleNewHead(ctx, nil, newChain)) - assertPoolEquals(t, p, m[5:]...) 
- }) - - t.Run("UnsignedMessage timeout is unaffected by null tipsets", func(t *testing.T) { - chainProvider, parent := newProviderWithGenesis(t) - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - maxAge := uint(10) - ib := message.NewInbox(p, maxAge, chainProvider, chainProvider) - - m := types.NewSignedMsgs(maxAge, mockSigner) - head := requireChainWithMessages(t, chainProvider.Builder, parent, msgsSet{msgs{}})[0] - - // Add a message at each block height until maxAge is reached - for i := uint(0); i < maxAge; i++ { - // chainProvider's head determines block time at which message is added - chainProvider.SetHead(head.Key()) - - requireAdd(t, ib, m[i]) - - // update pool with tipset that has no messages and four - // null blocks - next := chainProvider.BuildOneOn(head, func(bb *chain.BlockBuilder) { - bb.IncHeight(4) // 4 null blocks - }) - - assert.NoError(t, ib.HandleNewHead(ctx, nil, []block.TipSet{next})) - - // assert all added messages still in pool - assertPoolEquals(t, p, m[:i+1]...) - - head = next - } - - // next tipset times out first message only - next := requireChainWithMessages(t, chainProvider.Builder, head, msgsSet{msgs{}})[0] - assert.NoError(t, ib.HandleNewHead(ctx, nil, []block.TipSet{next})) - assertPoolEquals(t, p, m[1:]...) - }) -} - -func newProviderWithGenesis(t *testing.T) (*message.FakeProvider, block.TipSet) { - provider := message.NewFakeProvider(t) - head := provider.Builder.NewGenesis() - provider.SetHead(head.Key()) - return provider, head -} - -func requireAdd(t *testing.T, ib *message.Inbox, msgs ...*types.SignedMessage) { - ctx := context.Background() - for _, m := range msgs { - _, err := ib.Add(ctx, m) - require.NoError(t, err) - } -} - -func msgAsString(msg *types.SignedMessage) string { - // When using NewMessageForTestGetter msg.Method is set - // to "msgN" so we print that (it will correspond - // to a variable of the same name in the tests - // below). 
- return strconv.FormatInt(int64(msg.Message.Method), 10) -} - -func msgsAsString(msgs []*types.SignedMessage) string { - s := "" - for _, m := range msgs { - s = fmt.Sprintf("%s%s ", s, msgAsString(m)) - } - return "[" + s + "]" -} - -// assertPoolEquals returns true if p contains exactly the expected messages. -func assertPoolEquals(t *testing.T, p *message.Pool, expMsgs ...*types.SignedMessage) { - msgs := p.Pending() - if len(msgs) != len(expMsgs) { - assert.Failf(t, "wrong messages in pool", "expMsgs %v, got msgs %v", msgsAsString(expMsgs), msgsAsString(msgs)) - - } - for _, m1 := range expMsgs { - found := false - for _, m2 := range msgs { - if types.SmsgCidsEqual(m1, m2) { - found = true - break - } - } - if !found { - assert.Failf(t, "wrong messages in pool", "expMsgs %v, got msgs %v (msgs doesn't contain %v)", msgsAsString(expMsgs), msgsAsString(msgs), msgAsString(m1)) - } - } -} - -// requireChainWithMessages creates a chain of tipsets containing the given messages -// using the provided chain builder. The builder stores the chain. Note that -// each msgSet argument is a slice of message slices. Each slice of slices -// goes into a successive tipset and each subslice goes into one tipset block. -// Precondition: the root tipset must be defined. The chain of tipsets is -// returned in descending height order (head-first). 
-// TODO: move this onto the builder, #3110 -func requireChainWithMessages(t *testing.T, builder *chain.Builder, root block.TipSet, msgSets ...[][]*types.SignedMessage) []block.TipSet { - var tipSets []block.TipSet - parent := root - require.True(t, parent.Defined()) - - for _, tsMsgSet := range msgSets { - if len(tsMsgSet) == 0 { - parent = builder.BuildOneOn(parent, nil) - } else { - parent = builder.Build(parent, len(tsMsgSet), msgBuild(t, tsMsgSet)) - } - tipSets = append(tipSets, parent) - } - chain.Reverse(tipSets) - return tipSets -} - -// msgBuild takes in the msgSet dictating which messages go on which block of -// a test tipset and returns a build function that adds these messages to the -// correct block using the chain.Builder. -func msgBuild(t *testing.T, msgSet [][]*types.SignedMessage) func(*chain.BlockBuilder, int) { - return func(bb *chain.BlockBuilder, i int) { - require.True(t, i <= len(msgSet)) - bb.AddMessages(msgSet[i], []*types.UnsignedMessage{}) - } -} diff --git a/internal/pkg/message/outbox.go b/internal/pkg/message/outbox.go deleted file mode 100644 index f9f6273b6c..0000000000 --- a/internal/pkg/message/outbox.go +++ /dev/null @@ -1,225 +0,0 @@ -package message - -import ( - "context" - "sync" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/journal" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// Outbox validates and marshals messages for sending and maintains the outbound message queue. 
-// The code arrangement here is not quite right. We probably want to factor out the bits that -// build and sign a message from those that add to the local queue/pool and broadcast it. -// See discussion in -// https://github.com/filecoin-project/go-filecoin/pull/3178#discussion_r311593312 -// and https://github.com/filecoin-project/go-filecoin/issues/3052#issuecomment-513643661 -type Outbox struct { - // Signs messages - signer types.Signer - // Validates messages before sending them. - validator messageValidator - // Holds messages sent from this node but not yet mined. - queue *Queue - // Publishes a signed message to the network. - publisher publisher - // Maintains message queue in response to new tipsets. - policy QueuePolicy - - chains chainProvider - actors actorProvider - - // Protects the "next nonce" calculation to avoid collisions. - nonceLock sync.Mutex - - journal journal.Writer -} - -type messageValidator interface { - // Validate checks a message for validity. - ValidateSignedMessageSyntax(ctx context.Context, msg *types.SignedMessage) error -} - -type actorProvider interface { - // GetActorAt returns the actor state defined by the chain up to some tipset - GetActorAt(ctx context.Context, tipset block.TipSetKey, addr address.Address) (*actor.Actor, error) -} - -type publisher interface { - Publish(ctx context.Context, message *types.SignedMessage, height abi.ChainEpoch, bcast bool) error -} - -var msgSendErrCt = metrics.NewInt64Counter("message_sender_error", "Number of errors encountered while sending a message") - -// NewOutbox creates a new outbox -func NewOutbox(signer types.Signer, validator messageValidator, queue *Queue, - publisher publisher, policy QueuePolicy, chains chainProvider, actors actorProvider, jw journal.Writer) *Outbox { - return &Outbox{ - signer: signer, - validator: validator, - queue: queue, - publisher: publisher, - policy: policy, - chains: chains, - actors: actors, - journal: jw, - } -} - -// Queue returns the outbox's 
outbound message queue. -func (ob *Outbox) Queue() *Queue { - return ob.queue -} - -// Send marshals and sends a message, retaining it in the outbound message queue. -// If bcast is true, the publisher broadcasts the message to the network at the current block height. -func (ob *Outbox) Send(ctx context.Context, from, to address.Address, value types.AttoFIL, - gasPrice types.AttoFIL, gasLimit gas.Unit, bcast bool, method abi.MethodNum, params interface{}) (out cid.Cid, pubErrCh chan error, err error) { - encodedParams, err := encoding.Encode(params) - if err != nil { - return cid.Undef, nil, errors.Wrap(err, "invalid params") - } - - return ob.SendEncoded(ctx, from, to, value, gasPrice, gasLimit, bcast, method, encodedParams) -} - -// SendEncoded sends an encoded message, retaining it in the outbound message queue. -// If bcast is true, the publisher broadcasts the message to the network at the current block height. -func (ob *Outbox) SendEncoded(ctx context.Context, from, to address.Address, value types.AttoFIL, - gasPrice types.AttoFIL, gasLimit gas.Unit, bcast bool, method abi.MethodNum, encodedParams []byte) (out cid.Cid, pubErrCh chan error, err error) { - defer func() { - if err != nil { - msgSendErrCt.Inc(ctx, 1) - } - ob.journal.Write("SendEncoded", - "to", to.String(), "from", from.String(), "value", value.Int.Uint64(), "method", method, - "gasPrice", gasPrice.Int.Uint64(), "gasLimit", uint64(gasLimit), "bcast", bcast, - "encodedParams", encodedParams, "error", err, "cid", out.String()) - }() - - // The spec's message syntax validation rules restricts empty parameters - // to be encoded as an empty byte string not cbor null - if encodedParams == nil { - encodedParams = []byte{} - } - - // Lock to avoid a race inspecting the actor state and message queue to calculate next nonce. 
- ob.nonceLock.Lock() - defer ob.nonceLock.Unlock() - - head := ob.chains.GetHead() - - fromActor, err := ob.actors.GetActorAt(ctx, head, from) - if err != nil { - return cid.Undef, nil, errors.Wrapf(err, "no actor at address %s", from) - } - - nonce, err := nextNonce(fromActor, ob.queue, from) - if err != nil { - return cid.Undef, nil, errors.Wrapf(err, "failed calculating nonce for actor at %s", from) - } - - rawMsg := types.NewMeteredMessage(from, to, nonce, value, method, encodedParams, gasPrice, gasLimit) - signed, err := types.NewSignedMessage(ctx, *rawMsg, ob.signer) - - if err != nil { - return cid.Undef, nil, errors.Wrap(err, "failed to sign message") - } - - // Slightly awkward: it would be better validate before signing but the MeteredMessage construction - // is hidden inside NewSignedMessage. - err = ob.validator.ValidateSignedMessageSyntax(ctx, signed) - if err != nil { - return cid.Undef, nil, errors.Wrap(err, "invalid message") - } - - return sendSignedMsg(ctx, ob, signed, bcast) -} - -// SignedSend send a signed message, retaining it in the outbound message queue. -// If bcast is true, the publisher broadcasts the message to the network at the current block height. -func (ob *Outbox) SignedSend(ctx context.Context, signed *types.SignedMessage, bcast bool) (out cid.Cid, pubErrCh chan error, err error) { - defer func() { - if err != nil { - msgSendErrCt.Inc(ctx, 1) - } - }() - - return sendSignedMsg(ctx, ob, signed, bcast) -} - -// sendSignedMsg add signed message in pool and return cid -func sendSignedMsg(ctx context.Context, ob *Outbox, signed *types.SignedMessage, bcast bool) (cid.Cid, chan error, error) { - head := ob.chains.GetHead() - - height, err := tipsetHeight(ob.chains, head) - if err != nil { - return cid.Undef, nil, errors.Wrap(err, "failed to get block height") - } - - // Add to the local message queue/pool at the last possible moment before broadcasting to network. 
- if err := ob.queue.Enqueue(ctx, signed, uint64(height)); err != nil { - return cid.Undef, nil, errors.Wrap(err, "failed to add message to outbound queue") - } - - var c cid.Cid - if signed.Message.From.Protocol() == address.BLS { - // drop signature before generating Cid to match cid of message retrieved from block. - c, err = signed.Message.Cid() - } else { - c, err = signed.Cid() - } - if err != nil { - return cid.Undef, nil, err - } - pubErrCh := make(chan error) - - go func() { - err = ob.publisher.Publish(ctx, signed, height, bcast) - if err != nil { - log.Errorf("error: %s publishing message %s", err, c.String()) - } - pubErrCh <- err - close(pubErrCh) - }() - - return c, pubErrCh, nil -} - -// HandleNewHead maintains the message queue in response to a new head tipset. -func (ob *Outbox) HandleNewHead(ctx context.Context, oldTips, newTips []block.TipSet) error { - return ob.policy.HandleNewHead(ctx, ob.queue, oldTips, newTips) -} - -// nextNonce returns the next expected nonce value for an account actor. This is the larger -// of the actor's nonce value, or one greater than the largest nonce from the actor found in the message queue. 
-func nextNonce(act *actor.Actor, queue *Queue, address address.Address) (uint64, error) { - actorNonce, err := actor.NextNonce(act) - if err != nil { - return 0, err - } - - poolNonce, found := queue.LargestNonce(address) - if found && poolNonce >= actorNonce { - return poolNonce + 1, nil - } - return actorNonce, nil -} - -func tipsetHeight(provider chainProvider, key block.TipSetKey) (abi.ChainEpoch, error) { - head, err := provider.GetTipSet(key) - if err != nil { - return 0, err - } - return head.Height() -} diff --git a/internal/pkg/message/outbox_test.go b/internal/pkg/message/outbox_test.go deleted file mode 100644 index 21c77023b9..0000000000 --- a/internal/pkg/message/outbox_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package message_test - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/journal" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -func newOutboxTestJournal(t *testing.T) journal.Writer { - return journal.NewInMemoryJournal(t, clock.NewFake(time.Unix(1234567890, 0))).Topic("outbox") -} - -func TestOutbox(t *testing.T) { - tf.UnitTest(t) - - t.Run("invalid 
message rejected", func(t *testing.T) { - w, _ := types.NewMockSignersAndKeyInfo(1) - sender := w.Addresses[0] - queue := message.NewQueue() - publisher := &message.MockPublisher{} - provider := message.NewFakeProvider(t) - bcast := true - - ob := message.NewOutbox(w, message.FakeValidator{RejectMessages: true}, queue, publisher, - message.NullPolicy{}, provider, provider, newOutboxTestJournal(t)) - - cid, _, err := ob.Send(context.Background(), sender, sender, types.NewAttoFILFromFIL(2), types.NewGasPrice(0), gas.NewGas(0), bcast, builtin.MethodSend, adt.Empty) - assert.Errorf(t, err, "for testing") - assert.False(t, cid.Defined()) - }) - - t.Run("send message enqueues and calls Publish, but respects bcast flag for broadcasting", func(t *testing.T) { - w, _ := types.NewMockSignersAndKeyInfo(1) - sender := w.Addresses[0] - toAddr := vmaddr.NewForTestGetter()() - queue := message.NewQueue() - publisher := &message.MockPublisher{} - provider := message.NewFakeProvider(t) - - head := provider.BuildOneOn(block.UndefTipSet, func(b *chain.BlockBuilder) { - b.IncHeight(1000) - }) - actr := actor.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(0), cid.Undef) - actr.CallSeqNum = 42 - provider.SetHeadAndActor(t, head.Key(), sender, actr) - - ob := message.NewOutbox(w, message.FakeValidator{}, queue, publisher, message.NullPolicy{}, provider, provider, newOutboxTestJournal(t)) - require.Empty(t, queue.List(sender)) - require.Nil(t, publisher.Message) - - testCases := []struct { - bcast bool - nonce uint64 - height int - }{{true, actr.CallSeqNum, 1000}, {false, actr.CallSeqNum + 1, 1000}} - - for _, test := range testCases { - _, pubDone, err := ob.Send(context.Background(), sender, toAddr, types.ZeroAttoFIL, types.NewGasPrice(0), gas.NewGas(0), test.bcast, builtin.MethodSend, adt.Empty) - require.NoError(t, err) - assert.Equal(t, uint64(test.height), queue.List(sender)[0].Stamp) - require.NotNil(t, pubDone) - pubErr := <-pubDone - assert.NoError(t, pubErr) - 
require.NotNil(t, publisher.Message) - assert.Equal(t, test.nonce, publisher.Message.Message.CallSeqNum) - assert.Equal(t, abi.ChainEpoch(test.height), publisher.Height) - assert.Equal(t, test.bcast, publisher.Bcast) - } - - }) - t.Run("send message avoids nonce race", func(t *testing.T) { - ctx := context.Background() - msgCount := 20 // number of messages to send - sendConcurrent := 3 // number of of concurrent message sends - - w, _ := types.NewMockSignersAndKeyInfo(1) - sender := w.Addresses[0] - toAddr := vmaddr.NewForTestGetter()() - queue := message.NewQueue() - publisher := &message.MockPublisher{} - provider := message.NewFakeProvider(t) - bcast := true - - head := provider.BuildOneOn(block.UndefTipSet, func(b *chain.BlockBuilder) { - b.IncHeight(1000) - }) - actr := actor.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(0), cid.Undef) - actr.CallSeqNum = 42 - provider.SetHeadAndActor(t, head.Key(), sender, actr) - - s := message.NewOutbox(w, message.FakeValidator{}, queue, publisher, message.NullPolicy{}, provider, provider, newOutboxTestJournal(t)) - - var wg sync.WaitGroup - addTwentyMessages := func(batch int) { - defer wg.Done() - for i := 0; i < msgCount; i++ { - method := abi.MethodNum(batch*10000 + i) - _, _, err := s.Send(ctx, sender, toAddr, types.ZeroAttoFIL, types.NewGasPrice(0), gas.NewGas(0), bcast, method, adt.Empty) - require.NoError(t, err) - } - } - - // Add messages concurrently. 
- for i := 0; i < sendConcurrent; i++ { - wg.Add(1) - go addTwentyMessages(i) - } - wg.Wait() - - enqueued := queue.List(sender) - assert.Equal(t, 60, len(enqueued)) - - // Expect the nonces to be distinct and contiguous - nonces := map[uint64]bool{} - for _, message := range enqueued { - assert.Equal(t, uint64(1000), message.Stamp) - _, found := nonces[message.Msg.Message.CallSeqNum] - require.False(t, found) - nonces[message.Msg.Message.CallSeqNum] = true - } - - for i := 0; i < 60; i++ { - assert.True(t, nonces[actr.CallSeqNum+uint64(i)]) - - } - }) - - t.Run("fails with non-account actor", func(t *testing.T) { - w, _ := types.NewMockSignersAndKeyInfo(1) - sender := w.Addresses[0] - toAddr := vmaddr.NewForTestGetter()() - queue := message.NewQueue() - publisher := &message.MockPublisher{} - provider := message.NewFakeProvider(t) - - head := provider.NewGenesis() - actr := actor.NewActor(builtin.StorageMarketActorCodeID, abi.NewTokenAmount(0), cid.Undef) - provider.SetHeadAndActor(t, head.Key(), sender, actr) - - ob := message.NewOutbox(w, message.FakeValidator{}, queue, publisher, message.NullPolicy{}, provider, provider, newOutboxTestJournal(t)) - - _, _, err := ob.Send(context.Background(), sender, toAddr, types.ZeroAttoFIL, types.NewGasPrice(0), gas.NewGas(0), true, builtin.MethodSend, adt.Empty) - assert.Error(t, err) - assert.Contains(t, err.Error(), "account or empty") - }) -} diff --git a/internal/pkg/message/policy.go b/internal/pkg/message/policy.go deleted file mode 100644 index 9135719da5..0000000000 --- a/internal/pkg/message/policy.go +++ /dev/null @@ -1,130 +0,0 @@ -package message - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/specs-actors/actors/abi" - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// 
OutboxMaxAgeRounds is the maximum age (in consensus rounds) to permit messages to stay in the outbound message queue. -// This should be a little longer than the message pool's timeout so that messages expire from mining -// pools a little before the sending node gives up on them. -const OutboxMaxAgeRounds = 10 - -var log = logging.Logger("message") - -// QueuePolicy manages a message queue state in response to changes on the blockchain. -type QueuePolicy interface { - // HandleNewHead updates a message queue in response to a new chain head. The new head may be based - // directly on the previous head, or it may be based on a prior tipset (aka a re-org). - // - `oldTips` is a list of tipsets that used to be on the main chain but are no longer. - // - `newTips` is a list of tipsets that now form the head of the main chain. - // Both lists are in descending height order, down to but not including the common ancestor tipset. - HandleNewHead(ctx context.Context, target PolicyTarget, oldTips, newTips []block.TipSet) error -} - -// PolicyTarget is outbound queue object on which the policy acts. -type PolicyTarget interface { - RemoveNext(ctx context.Context, sender address.Address, expectedNonce uint64) (msg *types.SignedMessage, found bool, err error) - Requeue(ctx context.Context, msg *types.SignedMessage, stamp uint64) error - ExpireBefore(ctx context.Context, stamp uint64) map[address.Address][]*types.SignedMessage -} - -// DefaultQueuePolicy manages a target message queue state in response to changes on the blockchain. -// Messages are removed from the queue as soon as they appear in a block that's part of a heaviest chain. -// At this point, messages are highly likely to be valid and known to a large number of nodes, -// even if the block ends up as an abandoned fork. -// There is no special handling for re-orgs and messages do not revert to the queue if the block -// ends up childless (in contrast to the message pool). 
-type DefaultQueuePolicy struct { - // Provides messages collections from cids. - messageProvider messageProvider - // Maximum difference in message stamp from current block height before expiring an address's queue - maxAgeRounds uint64 -} - -// NewMessageQueuePolicy returns a new policy which removes mined messages from the queue and expires -// messages older than `maxAgeTipsets` rounds. -func NewMessageQueuePolicy(messages messageProvider, maxAge uint) *DefaultQueuePolicy { - return &DefaultQueuePolicy{messages, uint64(maxAge)} -} - -// HandleNewHead removes from the queue all messages that have now been mined in new blocks. -func (p *DefaultQueuePolicy) HandleNewHead(ctx context.Context, target PolicyTarget, oldTips, newTips []block.TipSet) error { - chainHeight, err := reorgHeight(oldTips, newTips) - if err != nil { - return err - } - - // Remove all messages in the new chain from the queue since they have been mined into blocks. - // Rearrange the tipsets into ascending height order so messages are discovered in nonce order. - chain.Reverse(newTips) - for _, tipset := range newTips { - for i := 0; i < tipset.Len(); i++ { - secpMsgs, _, err := p.messageProvider.LoadMessages(ctx, tipset.At(i).Messages.Cid) - if err != nil { - return err - } - for _, minedMsg := range secpMsgs { - removed, found, err := target.RemoveNext(ctx, minedMsg.Message.From, minedMsg.Message.CallSeqNum) - if err != nil { - return err - } - if found && !minedMsg.Equals(removed) { - log.Warnf("Queued message %v differs from mined message %v with same sender & nonce", removed, minedMsg) - } - // Else if not found, the message was not sent by this node, or has already been removed - // from the queue (e.g. a blockchain re-org). - } - } - } - - // Return messages from the old chain back to the queue. This is necessary so that the next nonce - // implied by the queue+state matches that of the message pool (which will also have the un-mined - // message re-instated). 
- // Note that this will include messages that were never sent by this node since the queue doesn't - // keep track of "allowed" senders. However, messages from other addresses will expire - // harmlessly. - // See discussion in https://github.com/filecoin-project/go-filecoin/issues/3052 - // Traverse these in descending height order. - for _, tipset := range oldTips { - for i := 0; i < tipset.Len(); i++ { - secpMsgs, _, err := p.messageProvider.LoadMessages(ctx, tipset.At(i).Messages.Cid) - if err != nil { - return err - } - for _, restoredMsg := range secpMsgs { - err := target.Requeue(ctx, restoredMsg, uint64(chainHeight)) - if err != nil { - return err - } - } - } - } - - // Expire messages that have been in the queue for too long; they will probably never be mined. - if uint64(chainHeight) >= p.maxAgeRounds { // avoid uint subtraction overflow - expired := target.ExpireBefore(ctx, uint64(chainHeight)-p.maxAgeRounds) - for _, msg := range expired { - log.Warnf("Outbound message %v expired un-mined after %d rounds", msg, p.maxAgeRounds) - } - } - return nil -} - -// reorgHeight returns height of the new chain given only the tipset diff which may be empty -func reorgHeight(oldTips, newTips []block.TipSet) (abi.ChainEpoch, error) { - if len(newTips) > 0 { - return newTips[0].Height() - } else if len(oldTips) > 0 { // A pure rewind is unlikely in practice. - return oldTips[0].Height() - } - // this is a noop reorg. Chain height shouldn't matter. 
- return 0, nil -} diff --git a/internal/pkg/message/policy_test.go b/internal/pkg/message/policy_test.go deleted file mode 100644 index a4b10723e8..0000000000 --- a/internal/pkg/message/policy_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package message_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -// Tests for the outbound message queue policy. -// These tests could use a fake/mock policy target, but it would require some sophistication to -// validate the order of removals, so using a real queue is a bit easier. -func TestMessageQueuePolicy(t *testing.T) { - tf.UnitTest(t) - - // Individual tests share a MessageMaker so not parallel (but quick) - ctx := context.Background() - - keys := types.MustGenerateKeyInfo(2, 42) - mm := vm.NewMessageMaker(t, keys) - - alice := mm.Addresses()[0] - bob := mm.Addresses()[1] - - requireEnqueue := func(q *message.Queue, msg *types.SignedMessage, stamp uint64) *types.SignedMessage { - err := q.Enqueue(ctx, msg, stamp) - require.NoError(t, err) - return msg - } - - t.Run("old block does nothing", func(t *testing.T) { - blocks := chain.NewBuilder(t, alice) - q := message.NewQueue() - policy := message.NewMessageQueuePolicy(blocks, 10) - - fromAlice := mm.NewSignedMessage(alice, 1) - fromBob := mm.NewSignedMessage(bob, 1) - requireEnqueue(q, fromAlice, 100) - requireEnqueue(q, fromBob, 200) - - root := blocks.NewGenesis() // Height = 0 - b1 := blocks.AppendOn(root, 1) - - err := policy.HandleNewHead(ctx, q, nil, []block.TipSet{b1}) - assert.NoError(t, err) - 
assert.Equal(t, qm(fromAlice, 100), q.List(alice)[0]) - assert.Equal(t, qm(fromBob, 200), q.List(bob)[0]) - }) - - t.Run("chain truncation does nothing", func(t *testing.T) { - blocks := chain.NewBuilder(t, alice) - q := message.NewQueue() - policy := message.NewMessageQueuePolicy(blocks, 10) - - fromAlice := mm.NewSignedMessage(alice, 1) - fromBob := mm.NewSignedMessage(bob, 1) - requireEnqueue(q, fromAlice, 100) - requireEnqueue(q, fromBob, 200) - - root := blocks.NewGenesis() // Height = 0 - b1 := blocks.AppendOn(root, 1) - - err := policy.HandleNewHead(ctx, q, []block.TipSet{b1}, []block.TipSet{}) - assert.NoError(t, err) - assert.Equal(t, qm(fromAlice, 100), q.List(alice)[0]) - assert.Equal(t, qm(fromBob, 200), q.List(bob)[0]) - }) - - t.Run("removes mined messages", func(t *testing.T) { - blocks := chain.NewBuilder(t, alice) - q := message.NewQueue() - policy := message.NewMessageQueuePolicy(blocks, 10) - - msgs := []*types.SignedMessage{ - requireEnqueue(q, mm.NewSignedMessage(alice, 1), 100), - requireEnqueue(q, mm.NewSignedMessage(alice, 2), 101), - requireEnqueue(q, mm.NewSignedMessage(alice, 3), 102), - requireEnqueue(q, mm.NewSignedMessage(bob, 1), 100), - } - - assert.Equal(t, qm(msgs[0], 100), q.List(alice)[0]) - assert.Equal(t, qm(msgs[3], 100), q.List(bob)[0]) - - root := blocks.BuildOneOn(block.UndefTipSet, func(b *chain.BlockBuilder) { - b.IncHeight(103) - }) - b1 := blocks.BuildOneOn(root, func(b *chain.BlockBuilder) { - b.AddMessages( - []*types.SignedMessage{msgs[0]}, - []*types.UnsignedMessage{}, - ) - }) - - err := policy.HandleNewHead(ctx, q, nil, []block.TipSet{b1}) - require.NoError(t, err) - assert.Equal(t, qm(msgs[1], 101), q.List(alice)[0]) // First message removed successfully - assert.Equal(t, qm(msgs[3], 100), q.List(bob)[0]) // No change - - // A block with no messages does nothing - b2 := blocks.AppendOn(b1, 1) - err = policy.HandleNewHead(ctx, q, []block.TipSet{}, []block.TipSet{b2}) - require.NoError(t, err) - assert.Equal(t, 
qm(msgs[1], 101), q.List(alice)[0]) - assert.Equal(t, qm(msgs[3], 100), q.List(bob)[0]) - - // Block with both alice and bob's next message - b3 := blocks.BuildOneOn(b2, func(b *chain.BlockBuilder) { - b.AddMessages( - []*types.SignedMessage{msgs[1], msgs[3]}, - []*types.UnsignedMessage{}, - ) - }) - err = policy.HandleNewHead(ctx, q, nil, []block.TipSet{b3}) - require.NoError(t, err) - assert.Equal(t, qm(msgs[2], 102), q.List(alice)[0]) - assert.Empty(t, q.List(bob)) // None left - - // Block with alice's last message - b4 := blocks.BuildOneOn(b3, func(b *chain.BlockBuilder) { - b.AddMessages( - []*types.SignedMessage{msgs[2]}, - []*types.UnsignedMessage{}, - ) - }) - err = policy.HandleNewHead(ctx, q, nil, []block.TipSet{b4}) - require.NoError(t, err) - assert.Empty(t, q.List(alice)) - }) - - t.Run("expires old messages", func(t *testing.T) { - blocks := chain.NewBuilder(t, alice) - messages := blocks - q := message.NewQueue() - policy := message.NewMessageQueuePolicy(messages, 10) - - msgs := []*types.SignedMessage{ - requireEnqueue(q, mm.NewSignedMessage(alice, 1), 100), - requireEnqueue(q, mm.NewSignedMessage(alice, 2), 101), - requireEnqueue(q, mm.NewSignedMessage(alice, 3), 102), - requireEnqueue(q, mm.NewSignedMessage(bob, 1), 200), - } - - assert.Equal(t, qm(msgs[0], 100), q.List(alice)[0]) - assert.Equal(t, qm(msgs[3], 200), q.List(bob)[0]) - - root := blocks.BuildOneOn(block.UndefTipSet, func(b *chain.BlockBuilder) { - b.IncHeight(100) - }) - - // Skip 9 rounds since alice's first message enqueued, so b1 has height 110 - b1 := blocks.BuildOneOn(root, func(b *chain.BlockBuilder) { - b.IncHeight(9) - }) - - err := policy.HandleNewHead(ctx, q, nil, []block.TipSet{b1}) - require.NoError(t, err) - - assert.Equal(t, qm(msgs[0], 100), q.List(alice)[0]) // No change - assert.Equal(t, qm(msgs[3], 200), q.List(bob)[0]) - - b2 := blocks.AppendOn(b1, 1) // Height b1.Height + 1 = 111 - err = policy.HandleNewHead(ctx, q, nil, []block.TipSet{b2}) - require.NoError(t, 
err) - assert.Empty(t, q.List(alice)) // Alice's messages all expired - assert.Equal(t, qm(msgs[3], 200), q.List(bob)[0]) // Bob's remain - }) - - t.Run("fails when messages out of nonce order", func(t *testing.T) { - blocks := chain.NewBuilder(t, alice) - messages := blocks - q := message.NewQueue() - policy := message.NewMessageQueuePolicy(messages, 10) - - msgs := []*types.SignedMessage{ - requireEnqueue(q, mm.NewSignedMessage(alice, 1), 100), - requireEnqueue(q, mm.NewSignedMessage(alice, 2), 101), - requireEnqueue(q, mm.NewSignedMessage(alice, 3), 102), - } - - root := blocks.BuildOneOn(block.UndefTipSet, func(b *chain.BlockBuilder) { - b.IncHeight(100) - }) - - b1 := blocks.BuildOneOn(root, func(b *chain.BlockBuilder) { - b.AddMessages( - []*types.SignedMessage{msgs[1]}, - []*types.UnsignedMessage{}, - ) - }) - err := policy.HandleNewHead(ctx, q, nil, []block.TipSet{b1}) - require.Error(t, err) - assert.Contains(t, err.Error(), "nonce 1, expected 2") - }) -} - -func qm(msg *types.SignedMessage, stamp uint64) *message.Queued { - return &message.Queued{Msg: msg, Stamp: stamp} -} diff --git a/internal/pkg/message/pool.go b/internal/pkg/message/pool.go deleted file mode 100644 index d50d43c0e4..0000000000 --- a/internal/pkg/message/pool.go +++ /dev/null @@ -1,173 +0,0 @@ -package message - -import ( - "context" - "sync" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -var mpSize = metrics.NewInt64Gauge("message_pool_size", "The size of the message pool") - -// PoolValidator defines a validator that ensures a message can go through the pool. 
-type PoolValidator interface { - ValidateSignedMessageSyntax(ctx context.Context, msg *types.SignedMessage) error -} - -// Pool keeps an unordered, de-duplicated set of Messages and supports removal by CID. -// By 'de-duplicated' we mean that insertion of a message by cid that already -// exists is a nop. We use a Pool to store all messages received by this node -// via network or directly created via user command that have yet to be included -// in a block. Messages are removed as they are processed. -// -// Pool is safe for concurrent access. -type Pool struct { - lk sync.RWMutex - - cfg *config.MessagePoolConfig - validator PoolValidator - pending map[cid.Cid]*timedmessage // all pending messages - addressNonces map[addressNonce]bool // set of address nonce pairs used to efficiently validate duplicate nonces -} - -type timedmessage struct { - message *types.SignedMessage - addedAt abi.ChainEpoch -} - -type addressNonce struct { - addr address.Address - nonce uint64 -} - -func newAddressNonce(msg *types.SignedMessage) addressNonce { - return addressNonce{addr: msg.Message.From, nonce: msg.Message.CallSeqNum} -} - -// NewPool constructs a new Pool. -func NewPool(cfg *config.MessagePoolConfig, validator PoolValidator) *Pool { - return &Pool{ - cfg: cfg, - validator: validator, - pending: make(map[cid.Cid]*timedmessage), - addressNonces: make(map[addressNonce]bool), - } -} - -// Add adds a message to the pool, tagged with the block height at which it was received. -// Does nothing if the message is already in the pool. 
-func (pool *Pool) Add(ctx context.Context, msg *types.SignedMessage, height abi.ChainEpoch) (cid.Cid, error) { - pool.lk.Lock() - defer pool.lk.Unlock() - - c, err := msg.Cid() - if err != nil { - return cid.Undef, errors.Wrap(err, "failed to create CID") - } - - // ignore message prior to validation if it is already in pool - _, found := pool.pending[c] - if found { - return c, nil - } - - if err = pool.validateMessage(ctx, msg); err != nil { - return cid.Undef, errors.Wrap(err, "validation error adding message to pool") - } - - pool.pending[c] = &timedmessage{message: msg, addedAt: height} - pool.addressNonces[newAddressNonce(msg)] = true - mpSize.Set(ctx, int64(len(pool.pending))) - return c, nil -} - -// Pending returns all pending messages. -func (pool *Pool) Pending() []*types.SignedMessage { - pool.lk.Lock() - defer pool.lk.Unlock() - - out := make([]*types.SignedMessage, 0, len(pool.pending)) - for _, msg := range pool.pending { - out = append(out, msg.message) - } - - return out -} - -// Get retrieves a message from the pool by CID. -func (pool *Pool) Get(c cid.Cid) (*types.SignedMessage, bool) { - pool.lk.RLock() - defer pool.lk.RUnlock() - value, ok := pool.pending[c] - if !ok { - return nil, ok - } else if value == nil { - panic("Found nil message for CID " + c.String()) - } - return value.message, ok -} - -// Remove removes the message by CID from the pending pool. -func (pool *Pool) Remove(c cid.Cid) { - pool.lk.Lock() - defer pool.lk.Unlock() - msg, ok := pool.pending[c] - if ok { - delete(pool.addressNonces, newAddressNonce(msg.message)) - delete(pool.pending, c) - } - - mpSize.Set(context.TODO(), int64(len(pool.pending))) -} - -// LargestNonce returns the largest nonce used by a message from address in the pool. -// If no messages from address are found, found will be false. 
-func (pool *Pool) LargestNonce(address address.Address) (largest uint64, found bool) { - for _, m := range pool.Pending() { - if m.Message.From == address { - found = true - if m.Message.CallSeqNum > largest { - largest = m.Message.CallSeqNum - } - } - } - return -} - -// PendingBefore returns the CIDs of messages added with height less than `minimumHeight`. -func (pool *Pool) PendingBefore(minimumHeight abi.ChainEpoch) []cid.Cid { - pool.lk.RLock() - defer pool.lk.RUnlock() - - var cids []cid.Cid - for c, msg := range pool.pending { - if msg.addedAt < minimumHeight { - cids = append(cids, c) - } - } - return cids -} - -// validateMessage validates that too many messages aren't added to the pool and the ones that are -// have a high probability of making it through processing. -func (pool *Pool) validateMessage(ctx context.Context, message *types.SignedMessage) error { - if uint(len(pool.pending)) >= pool.cfg.MaxPoolSize { - return errors.Errorf("message pool is full (%d messages)", pool.cfg.MaxPoolSize) - } - - // check that message with this nonce does not already exist - _, found := pool.addressNonces[newAddressNonce(message)] - if found { - return errors.Errorf("message pool contains message with same actor and nonce but different cid") - } - - // check that the message is likely to succeed in processing - return pool.validator.ValidateSignedMessageSyntax(ctx, message) -} diff --git a/internal/pkg/message/pool_test.go b/internal/pkg/message/pool_test.go deleted file mode 100644 index 92a22361da..0000000000 --- a/internal/pkg/message/pool_test.go +++ /dev/null @@ -1,238 +0,0 @@ -package message_test - -import ( - "context" - "sync" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - th 
"github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" -) - -var mockSigner, _ = types.NewMockSignersAndKeyInfo(10) -var newSignedMessage = types.NewSignedMessageForTestGetter(mockSigner) - -func TestMessagePoolAddRemove(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - - pool := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - msg1 := newSignedMessage() - msg2 := mustSetNonce(mockSigner, newSignedMessage(), 1) - - c1, err := msg1.Cid() - assert.NoError(t, err) - c2, err := msg2.Cid() - assert.NoError(t, err) - - assert.Len(t, pool.Pending(), 0) - m, ok := pool.Get(c1) - assert.Nil(t, m) - assert.False(t, ok) - - _, err = pool.Add(ctx, msg1, 0) - assert.NoError(t, err) - assert.Len(t, pool.Pending(), 1) - - _, err = pool.Add(ctx, msg2, 0) - assert.NoError(t, err) - assert.Len(t, pool.Pending(), 2) - - m, ok = pool.Get(c1) - assert.Equal(t, msg1, m) - assert.True(t, ok) - m, ok = pool.Get(c2) - assert.Equal(t, msg2, m) - assert.True(t, ok) - - pool.Remove(c1) - assert.Len(t, pool.Pending(), 1) - pool.Remove(c2) - assert.Len(t, pool.Pending(), 0) -} - -func TestMessagePoolValidate(t *testing.T) { - tf.UnitTest(t) - - t.Run("message pool rejects messages after it reaches its limit", func(t *testing.T) { - // alter the config to have a max size that can be quickly tested - mpoolCfg := config.NewDefaultConfig().Mpool - maxMessagePoolSize := uint(100) - mpoolCfg.MaxPoolSize = maxMessagePoolSize - ctx := context.Background() - pool := message.NewPool(mpoolCfg, th.NewMockMessagePoolValidator()) - - smsgs := types.NewSignedMsgs(maxMessagePoolSize+1, mockSigner) - for _, smsg := range smsgs[:maxMessagePoolSize] { - _, err := pool.Add(ctx, smsg, 0) - require.NoError(t, err) - } - - assert.Len(t, 
pool.Pending(), int(maxMessagePoolSize)) - - // attempt to add one more - _, err := pool.Add(ctx, smsgs[maxMessagePoolSize], 0) - require.Error(t, err) - assert.Contains(t, err.Error(), "message pool is full") - - assert.Len(t, pool.Pending(), int(maxMessagePoolSize)) - }) - - t.Run("validates no two messages are added with same nonce", func(t *testing.T) { - ctx := context.Background() - pool := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - - smsg1 := newSignedMessage() - _, err := pool.Add(ctx, smsg1, 0) - require.NoError(t, err) - - smsg2 := mustSetNonce(mockSigner, newSignedMessage(), smsg1.Message.CallSeqNum) - _, err = pool.Add(ctx, smsg2, 0) - require.Error(t, err) - assert.Contains(t, err.Error(), "message with same actor and nonce") - }) - - t.Run("validates using supplied validator", func(t *testing.T) { - ctx := context.Background() - validator := th.NewMockMessagePoolValidator() - validator.Valid = false - pool := message.NewPool(config.NewDefaultConfig().Mpool, validator) - - smsg1 := mustSetNonce(mockSigner, newSignedMessage(), 0) - _, err := pool.Add(ctx, smsg1, 0) - require.Error(t, err) - assert.Contains(t, err.Error(), "mock validation error") - }) -} - -func TestMessagePoolDedup(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - - pool := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - msg1 := newSignedMessage() - - assert.Len(t, pool.Pending(), 0) - _, err := pool.Add(ctx, msg1, 0) - assert.NoError(t, err) - assert.Len(t, pool.Pending(), 1) - - _, err = pool.Add(ctx, msg1, 0) - assert.NoError(t, err) - assert.Len(t, pool.Pending(), 1) -} - -func TestMessagePoolAsync(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - - count := uint(400) - mpoolCfg := config.NewDefaultConfig().Mpool - mpoolCfg.MaxPoolSize = count - msgs := types.NewSignedMsgs(count, mockSigner) - - pool := message.NewPool(mpoolCfg, th.NewMockMessagePoolValidator()) - var wg 
sync.WaitGroup - - for i := uint(0); i < 4; i++ { - wg.Add(1) - go func(i uint) { - for j := uint(0); j < count/4; j++ { - _, err := pool.Add(ctx, msgs[j+(count/4)*i], 0) - assert.NoError(t, err) - } - wg.Done() - }(i) - } - - wg.Wait() - assert.Len(t, pool.Pending(), int(count)) -} - -func TestLargestNonce(t *testing.T) { - tf.UnitTest(t) - - t.Run("No matches", func(t *testing.T) { - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - - m := types.NewSignedMsgs(2, mockSigner) - reqAdd(t, p, 0, m[0], m[1]) - - _, found := p.LargestNonce(vmaddr.NewForTestGetter()()) - assert.False(t, found) - }) - - t.Run("Match, largest is zero", func(t *testing.T) { - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - - m := types.NewMsgsWithAddrs(1, mockSigner.Addresses) - m[0].CallSeqNum = 0 - - sm, err := types.SignMsgs(mockSigner, m) - require.NoError(t, err) - - reqAdd(t, p, 0, sm...) - - largest, found := p.LargestNonce(m[0].From) - assert.True(t, found) - assert.Equal(t, uint64(0), largest) - }) - - t.Run("Match", func(t *testing.T) { - p := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - - m := types.NewMsgsWithAddrs(3, mockSigner.Addresses) - m[1].CallSeqNum = 1 - m[2].CallSeqNum = 2 - m[2].From = m[1].From - - sm, err := types.SignMsgs(mockSigner, m) - require.NoError(t, err) - - reqAdd(t, p, 0, sm...) 
- - largest, found := p.LargestNonce(m[2].From) - assert.True(t, found) - assert.Equal(t, uint64(2), largest) - }) -} - -func mustSetNonce(signer types.Signer, message *types.SignedMessage, nonce uint64) *types.SignedMessage { - return mustResignMessage(signer, message, func(m *types.UnsignedMessage) { - m.CallSeqNum = nonce - }) -} - -func mustResignMessage(signer types.Signer, message *types.SignedMessage, f func(*types.UnsignedMessage)) *types.SignedMessage { - var msg types.UnsignedMessage - msg = message.Message - f(&msg) - smg, err := signMessage(signer, msg) - if err != nil { - panic("Error signing message") - } - return smg -} - -func signMessage(signer types.Signer, message types.UnsignedMessage) (*types.SignedMessage, error) { - return types.NewSignedMessage(context.TODO(), message, signer) -} - -func reqAdd(t *testing.T, p *message.Pool, height abi.ChainEpoch, msgs ...*types.SignedMessage) { - ctx := context.Background() - for _, m := range msgs { - _, err := p.Add(ctx, m, height) - require.NoError(t, err) - } -} diff --git a/internal/pkg/message/publisher.go b/internal/pkg/message/publisher.go deleted file mode 100644 index b963ee96e4..0000000000 --- a/internal/pkg/message/publisher.go +++ /dev/null @@ -1,46 +0,0 @@ -package message - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// DefaultPublisher adds messages to a message pool and can publish them to its topic. -// This is wiring for message publication from the outbox. -type DefaultPublisher struct { - network networkPublisher - pool *Pool -} - -type networkPublisher interface { - Publish(ctx context.Context, data []byte) error -} - -// NewDefaultPublisher creates a new publisher. 
-func NewDefaultPublisher(pubsub networkPublisher, pool *Pool) *DefaultPublisher { - return &DefaultPublisher{pubsub, pool} -} - -// Publish marshals and publishes a message to the core message pool, and if bcast is true, -// broadcasts it to the network with the publisher's topic. -func (p *DefaultPublisher) Publish(ctx context.Context, message *types.SignedMessage, height abi.ChainEpoch, bcast bool) error { - encoded, err := message.Marshal() - if err != nil { - return errors.Wrap(err, "failed to marshal message") - } - - if _, err := p.pool.Add(ctx, message, height); err != nil { - return errors.Wrap(err, "failed to add message to message pool") - } - - if bcast { - if err = p.network.Publish(ctx, encoded); err != nil { - return errors.Wrap(err, "failed to publish message to network") - } - } - return nil -} diff --git a/internal/pkg/message/publisher_test.go b/internal/pkg/message/publisher_test.go deleted file mode 100644 index 69cfe289e7..0000000000 --- a/internal/pkg/message/publisher_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package message_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -func TestDefaultMessagePublisher_Publish(t *testing.T) { - pool := message.NewPool(config.NewDefaultConfig().Mpool, testhelpers.NewMockMessagePoolValidator()) - - ms, _ := types.NewMockSignersAndKeyInfo(2) - msg := types.NewUnsignedMessage(ms.Addresses[0], ms.Addresses[1], 0, types.ZeroAttoFIL, builtin.MethodSend, []byte{}) - signed, err := types.NewSignedMessage(context.TODO(), *msg, ms) - require.NoError(t, err) - msgCid, err := signed.Cid() - require.NoError(t, err) - encoded, 
e := signed.Marshal() - require.NoError(t, e) - - testCases := []struct { - name string - bcast bool - }{ - {"Msg added to pool and Publish is called when bcast is true", true}, - {"Msg added to pool and Publish is NOT called when bcast is false", false}, - } - - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - mnp := message.MockNetworkPublisher{} - pub := message.NewDefaultPublisher(&mnp, pool) - assert.NoError(t, pub.Publish(context.Background(), signed, 0, test.bcast)) - smsg, ok := pool.Get(msgCid) - assert.True(t, ok) - assert.NotNil(t, smsg) - if test.bcast { - assert.Equal(t, encoded, mnp.Data) - } else { - assert.Nil(t, mnp.Data) - } - }) - } -} diff --git a/internal/pkg/message/queue.go b/internal/pkg/message/queue.go deleted file mode 100644 index fe482dff23..0000000000 --- a/internal/pkg/message/queue.go +++ /dev/null @@ -1,238 +0,0 @@ -package message - -import ( - "context" - "sync" - - "github.com/pkg/errors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -var ( - mqSizeGa = metrics.NewInt64Gauge("message_queue_size", "The size of the message queue") - mqOldestGa = metrics.NewInt64Gauge("message_queue_oldest", "The age of the oldest message in the queue or zero when empty") - mqExpireCt = metrics.NewInt64Counter("message_queue_expire", "The number messages expired from the queue") -) - -// Queue stores an ordered list of messages (per actor) and enforces that their nonces form a contiguous sequence. -// Each message is associated with a "stamp" (an opaque integer), and the queue supports expiring any list -// of messages where the first message has a stamp below some threshold. The relative order of stamps in a queue is -// not enforced. 
-// A message queue is intended to record outbound messages that have been transmitted but not yet appeared in a block, -// where the stamp could be block height. -// Queue is safe for concurrent access. -type Queue struct { - lk sync.RWMutex - // Message queues keyed by sending actor address, in nonce order - queues map[address.Address][]*Queued -} - -// Queued is a message an the stamp it was enqueued with. -type Queued struct { - Msg *types.SignedMessage - Stamp uint64 -} - -// NewQueue constructs a new, empty queue. -func NewQueue() *Queue { - return &Queue{ - queues: make(map[address.Address][]*Queued), - } -} - -// Enqueue appends a new message for an address. If the queue already contains any messages for -// from same address, the new message's nonce must be exactly one greater than the largest nonce -// present. -func (mq *Queue) Enqueue(ctx context.Context, msg *types.SignedMessage, stamp uint64) error { - defer func() { - mqSizeGa.Set(ctx, mq.Size()) - mqOldestGa.Set(ctx, int64(mq.Oldest())) - }() - - mq.lk.Lock() - defer mq.lk.Unlock() - - q := mq.queues[msg.Message.From] - if len(q) > 0 { - nextNonce := q[len(q)-1].Msg.Message.CallSeqNum + 1 - if msg.Message.CallSeqNum != nextNonce { - return errors.Errorf("Invalid nonce in %d in enqueue, expected %d", msg.Message.CallSeqNum, nextNonce) - } - } - mq.queues[msg.Message.From] = append(q, &Queued{msg, stamp}) - return nil -} - -// Requeue prepends a message for an address. If the queue already contains any messages from the -// same address, the message's nonce must be exactly one *less than* the smallest nonce present. 
-func (mq *Queue) Requeue(ctx context.Context, msg *types.SignedMessage, stamp uint64) error { - defer func() { - mqSizeGa.Set(ctx, mq.Size()) - mqOldestGa.Set(ctx, int64(mq.Oldest())) - }() - - mq.lk.Lock() - defer mq.lk.Unlock() - - q := mq.queues[msg.Message.From] - if len(q) > 0 { - prevNonce := q[0].Msg.Message.CallSeqNum - 1 - if msg.Message.CallSeqNum != prevNonce { - return errors.Errorf("Invalid nonce %d in requeue, expected %d", msg.Message.CallSeqNum, prevNonce) - } - } - mq.queues[msg.Message.From] = append([]*Queued{{msg, stamp}}, q...) - return nil -} - -// RemoveNext removes and returns a single message from the queue, if it bears the expected nonce value, with found = true. -// Returns found = false if the queue is empty or the expected nonce is less than any in the queue for that address -// (indicating the message had already been removed). -// Returns an error if the expected nonce is greater than the smallest in the queue. -// The caller may wish to check that the returned message is equal to that expected (not just in nonce value). -func (mq *Queue) RemoveNext(ctx context.Context, sender address.Address, expectedNonce uint64) (msg *types.SignedMessage, found bool, err error) { - defer func() { - mqSizeGa.Set(ctx, mq.Size()) - mqOldestGa.Set(ctx, int64(mq.Oldest())) - }() - - mq.lk.Lock() - defer mq.lk.Unlock() - - q := mq.queues[sender] - if len(q) > 0 { - head := q[0] - if expectedNonce == head.Msg.Message.CallSeqNum { - mq.queues[sender] = q[1:] // pop the head - msg = head.Msg - found = true - } else if expectedNonce > head.Msg.Message.CallSeqNum { - err = errors.Errorf("Next message for %s has nonce %d, expected %d", sender, head.Msg.Message.CallSeqNum, expectedNonce) - } - // else expected nonce was before the head of the queue, already removed - } - return -} - -// Clear removes all messages for a single sender address. -// Returns whether the queue was non-empty before being cleared. 
-func (mq *Queue) Clear(ctx context.Context, sender address.Address) bool { - defer func() { - mqSizeGa.Set(ctx, mq.Size()) - mqOldestGa.Set(ctx, int64(mq.Oldest())) - }() - - mq.lk.Lock() - defer mq.lk.Unlock() - - q := mq.queues[sender] - delete(mq.queues, sender) - return len(q) > 0 -} - -// ExpireBefore clears the queue of any sender where the first message in the queue has a stamp less than `stamp`. -// Returns a map containing any expired address queues. -func (mq *Queue) ExpireBefore(ctx context.Context, stamp uint64) map[address.Address][]*types.SignedMessage { - defer func() { - mqSizeGa.Set(ctx, mq.Size()) - mqOldestGa.Set(ctx, int64(mq.Oldest())) - }() - - mq.lk.Lock() - defer mq.lk.Unlock() - - expired := make(map[address.Address][]*types.SignedMessage) - - for sender, q := range mq.queues { - if len(q) > 0 && q[0].Stamp < stamp { - - // record the number of messages to be expired - mqExpireCt.Inc(ctx, int64(len(q))) - for _, m := range q { - expired[sender] = append(expired[sender], m.Msg) - } - - mq.queues[sender] = []*Queued{} - } - } - return expired -} - -// LargestNonce returns the largest nonce of any message in the queue for an address. -// If the queue for the address is empty, returns (0, false). -func (mq *Queue) LargestNonce(sender address.Address) (largest uint64, found bool) { - mq.lk.RLock() - defer mq.lk.RUnlock() - q := mq.queues[sender] - if len(q) > 0 { - return q[len(q)-1].Msg.Message.CallSeqNum, true - } - return 0, false -} - -// Queues returns the addresses associated with each non-empty queue. -// The order of returned addresses is neither defined nor stable. -func (mq *Queue) Queues() []address.Address { - mq.lk.RLock() - defer mq.lk.RUnlock() - - keys := make([]address.Address, len(mq.queues)) - i := 0 - for k := range mq.queues { - keys[i] = k - i++ - } - return keys -} - -// Size returns the total number of messages in the Queue. 
-func (mq *Queue) Size() int64 { - mq.lk.RLock() - defer mq.lk.RUnlock() - - var l int64 - for _, q := range mq.queues { - l += int64(len(q)) - } - return l -} - -// Oldest returns the oldest message stamp in the Queue. -// Oldest returns 0 if the queue is empty. -// Exported for testing only. -func (mq *Queue) Oldest() (oldest uint64) { - mq.lk.Lock() - defer mq.lk.Unlock() - - if len(mq.queues) == 0 { - return 0 - } - - // max uint64 value - oldest = 1<<64 - 1 - for _, qm := range mq.queues { - for _, m := range qm { - if m.Stamp < oldest { - oldest = m.Stamp - } - } - } - return oldest -} - -// List returns a copy of the list of messages queued for an address. -func (mq *Queue) List(sender address.Address) []*Queued { - mq.lk.RLock() - defer mq.lk.RUnlock() - q := mq.queues[sender] - out := make([]*Queued, len(q)) - for i, qm := range q { - out[i] = &Queued{} - *out[i] = *qm - } - return out -} diff --git a/internal/pkg/message/queue_test.go b/internal/pkg/message/queue_test.go deleted file mode 100644 index d97e5c655e..0000000000 --- a/internal/pkg/message/queue_test.go +++ /dev/null @@ -1,355 +0,0 @@ -package message_test - -import ( - "context" - "math" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -func TestMessageQueue(t *testing.T) { - tf.UnitTest(t) - - // Individual tests share a MessageMaker so not parallel (but quick) - keys := types.MustGenerateKeyInfo(2, 42) - mm := vm.NewMessageMaker(t, keys) - - alice := mm.Addresses()[0] - bob := mm.Addresses()[1] - require.NotEqual(t, alice, bob) - - ctx := context.Background() - - requireEnqueue := func(q *message.Queue, msg *types.SignedMessage, stamp 
uint64) { - err := q.Enqueue(ctx, msg, stamp) - require.NoError(t, err) - } - - requireRequeue := func(q *message.Queue, msg *types.SignedMessage, stamp uint64) { - err := q.Requeue(ctx, msg, stamp) - require.NoError(t, err) - } - - requireRemoveNext := func(q *message.Queue, sender address.Address, expectedNonce uint64) *types.SignedMessage { - msg, found, e := q.RemoveNext(ctx, sender, expectedNonce) - require.True(t, found) - require.NoError(t, e) - return msg - } - - assertLargestNonce := func(q *message.Queue, sender address.Address, expected uint64) { - largest, found := q.LargestNonce(sender) - assert.True(t, found, "no messages") - assert.Equal(t, expected, largest) - } - - assertNoNonce := func(q *message.Queue, sender address.Address) { - _, found := q.LargestNonce(sender) - assert.False(t, found, "unexpected messages") - } - - t.Run("empty queue", func(t *testing.T) { - q := message.NewQueue() - msg, found, err := q.RemoveNext(ctx, alice, 0) - assert.Nil(t, msg) - assert.False(t, found) - assert.NoError(t, err) - - assert.Empty(t, q.ExpireBefore(ctx, math.MaxUint64)) - - nonce, found := q.LargestNonce(alice) - assert.False(t, found) - assert.Zero(t, nonce) - - assert.Empty(t, q.List(alice)) - assert.Empty(t, q.Size()) - }) - - t.Run("add and remove sequence", func(t *testing.T) { - msgs := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 0), - mm.NewSignedMessage(alice, 1), - mm.NewSignedMessage(alice, 2), - } - - q := message.NewQueue() - assert.Equal(t, int64(0), q.Size()) - requireEnqueue(q, msgs[0], 0) - requireEnqueue(q, msgs[1], 0) - requireEnqueue(q, msgs[2], 0) - assert.Equal(t, int64(3), q.Size()) - - msg := requireRemoveNext(q, alice, 0) - assert.Equal(t, msgs[0], msg) - assert.Equal(t, int64(2), q.Size()) - - _, found, err := q.RemoveNext(ctx, alice, 0) // Remove first message again - assert.False(t, found) - assert.NoError(t, err) - assert.Equal(t, int64(2), q.Size()) - - msg = requireRemoveNext(q, alice, 1) - assert.Equal(t, msgs[1], 
msg) - assert.Equal(t, int64(1), q.Size()) - - _, found, err = q.RemoveNext(ctx, alice, 0) // Remove first message yet again - assert.False(t, found) - assert.NoError(t, err) - assert.Equal(t, int64(1), q.Size()) - - msg = requireRemoveNext(q, alice, 2) - assert.Equal(t, msgs[2], msg) - assert.Equal(t, int64(0), q.Size()) - }) - - t.Run("requeue", func(t *testing.T) { - msgs := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 0), - mm.NewSignedMessage(alice, 1), - mm.NewSignedMessage(alice, 2), - } - q := message.NewQueue() - requireEnqueue(q, msgs[0], 0) - requireEnqueue(q, msgs[1], 0) - - // Can't re-queue message with larger nonce - assert.Error(t, q.Requeue(ctx, msgs[2], 0)) - requireEnqueue(q, msgs[2], 0) - - assert.Equal(t, msgs[0], requireRemoveNext(q, alice, 0)) - requireRequeue(q, msgs[0], 0) - - assert.Equal(t, msgs[0], requireRemoveNext(q, alice, 0)) - assert.Equal(t, msgs[1], requireRemoveNext(q, alice, 1)) - assert.Error(t, q.Requeue(ctx, msgs[0], 0)) // Can't re-queue with nonce gap - - requireRequeue(q, msgs[1], 0) - requireRequeue(q, msgs[0], 0) - - assert.Equal(t, msgs[0], requireRemoveNext(q, alice, 0)) - assert.Equal(t, msgs[1], requireRemoveNext(q, alice, 1)) - assert.Equal(t, msgs[2], requireRemoveNext(q, alice, 2)) - - // Queue is empty, can re-queue anything - requireRequeue(q, mm.NewSignedMessage(alice, 3), 0) - }) - - t.Run("invalid nonce sequence", func(t *testing.T) { - msgs := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 0), - mm.NewSignedMessage(alice, 1), - mm.NewSignedMessage(alice, 2), - mm.NewSignedMessage(alice, 3), - } - - q := message.NewQueue() - requireEnqueue(q, msgs[1], 0) - - err := q.Enqueue(ctx, msgs[0], 0) // Prior to existing - assert.Error(t, err) - - err = q.Enqueue(ctx, msgs[1], 0) // Equal to existing - assert.Error(t, err) - - err = q.Enqueue(ctx, msgs[3], 0) // Gap after existing - assert.Error(t, err) - }) - - t.Run("invalid remove sequence", func(t *testing.T) { - msgs := []*types.SignedMessage{ - 
mm.NewSignedMessage(alice, 10), - mm.NewSignedMessage(alice, 11), - } - - q := message.NewQueue() - requireEnqueue(q, msgs[0], 0) - requireEnqueue(q, msgs[1], 0) - - msg, found, err := q.RemoveNext(ctx, alice, 9) // Prior to head - assert.Nil(t, msg) - assert.False(t, found) - require.NoError(t, err) - - msg, found, err = q.RemoveNext(ctx, alice, 11) // After head - assert.False(t, found) - assert.Nil(t, msg) - assert.Error(t, err) - }) - - t.Run("largest nonce", func(t *testing.T) { - msgs := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 0), - mm.NewSignedMessage(alice, 1), - mm.NewSignedMessage(alice, 2), - mm.NewSignedMessage(alice, 3), - } - q := message.NewQueue() - requireEnqueue(q, msgs[0], 0) - assertLargestNonce(q, alice, 0) - requireEnqueue(q, msgs[1], 0) - assertLargestNonce(q, alice, 1) - requireEnqueue(q, msgs[2], 0) - assertLargestNonce(q, alice, 2) - - requireRemoveNext(q, alice, 0) - assertLargestNonce(q, alice, 2) - - requireEnqueue(q, msgs[3], 0) - assertLargestNonce(q, alice, 3) - - requireRemoveNext(q, alice, 1) - requireRemoveNext(q, alice, 2) - assertLargestNonce(q, alice, 3) - - requireRemoveNext(q, alice, 3) // clears queue - assertNoNonce(q, alice) - }) - - t.Run("clear", func(t *testing.T) { - msgs := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 0), - mm.NewSignedMessage(alice, 1), - mm.NewSignedMessage(alice, 2), - } - - q := message.NewQueue() - requireEnqueue(q, msgs[1], 0) - requireEnqueue(q, msgs[2], 0) - assert.Equal(t, int64(2), q.Size()) - assertLargestNonce(q, alice, 2) - q.Clear(ctx, alice) - assert.Equal(t, int64(0), q.Size()) - assertNoNonce(q, alice) - - requireEnqueue(q, msgs[0], 0) - requireEnqueue(q, msgs[1], 0) - assertLargestNonce(q, alice, 1) - }) - - t.Run("independent addresses", func(t *testing.T) { - fromAlice := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 0), - mm.NewSignedMessage(alice, 1), - mm.NewSignedMessage(alice, 2), - } - fromBob := []*types.SignedMessage{ - mm.NewSignedMessage(bob, 
10), - mm.NewSignedMessage(bob, 11), - mm.NewSignedMessage(bob, 12), - } - q := message.NewQueue() - assert.Equal(t, int64(0), q.Size()) - - requireEnqueue(q, fromAlice[0], 0) - assertNoNonce(q, bob) - assert.Equal(t, int64(1), q.Size()) - - requireEnqueue(q, fromBob[0], 0) - assertLargestNonce(q, alice, 0) - assertLargestNonce(q, bob, 10) - assert.Equal(t, int64(2), q.Size()) - - requireEnqueue(q, fromBob[1], 0) - requireEnqueue(q, fromBob[2], 0) - assertLargestNonce(q, bob, 12) - assert.Equal(t, int64(4), q.Size()) - - requireEnqueue(q, fromAlice[1], 0) - requireEnqueue(q, fromAlice[2], 0) - assertLargestNonce(q, alice, 2) - assert.Equal(t, int64(6), q.Size()) - - msg := requireRemoveNext(q, alice, 0) - assert.Equal(t, fromAlice[0], msg) - assert.Equal(t, int64(5), q.Size()) - - msg = requireRemoveNext(q, bob, 10) - assert.Equal(t, fromBob[0], msg) - assert.Equal(t, int64(4), q.Size()) - - q.Clear(ctx, bob) - assertLargestNonce(q, alice, 2) - assertNoNonce(q, bob) - assert.Equal(t, int64(2), q.Size()) - }) - - t.Run("expire before stamp", func(t *testing.T) { - fromAlice := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 0), - mm.NewSignedMessage(alice, 1), - } - fromBob := []*types.SignedMessage{ - mm.NewSignedMessage(bob, 10), - mm.NewSignedMessage(bob, 11), - } - q := message.NewQueue() - - requireEnqueue(q, fromAlice[0], 100) - requireEnqueue(q, fromAlice[1], 101) - requireEnqueue(q, fromBob[0], 200) - requireEnqueue(q, fromBob[1], 201) - - assert.Equal(t, &message.Queued{Msg: fromAlice[0], Stamp: 100}, q.List(alice)[0]) - assert.Equal(t, &message.Queued{Msg: fromBob[0], Stamp: 200}, q.List(bob)[0]) - - expired := q.ExpireBefore(ctx, 0) - assert.Empty(t, expired) - - expired = q.ExpireBefore(ctx, 100) - assert.Empty(t, expired) - - // Alice's whole queue expires as soon as the first one does - expired = q.ExpireBefore(ctx, 101) - assert.Equal(t, map[address.Address][]*types.SignedMessage{ - alice: {fromAlice[0], fromAlice[1]}, - }, expired) - - 
assert.Empty(t, q.List(alice)) - assertNoNonce(q, alice) - assert.Equal(t, &message.Queued{Msg: fromBob[0], Stamp: 200}, q.List(bob)[0]) - assertLargestNonce(q, bob, 11) - - expired = q.ExpireBefore(ctx, 300) - assert.Equal(t, map[address.Address][]*types.SignedMessage{ - bob: {fromBob[0], fromBob[1]}, - }, expired) - - assert.Empty(t, q.List(bob)) - assertNoNonce(q, bob) - }) - - t.Run("oldest is correct", func(t *testing.T) { - fromAlice := []*types.SignedMessage{ - mm.NewSignedMessage(alice, 0), - mm.NewSignedMessage(alice, 1), - } - fromBob := []*types.SignedMessage{ - mm.NewSignedMessage(bob, 10), - mm.NewSignedMessage(bob, 11), - } - q := message.NewQueue() - - assert.Equal(t, uint64(0), q.Oldest()) - - requireEnqueue(q, fromAlice[0], 100) - assert.Equal(t, uint64(100), q.Oldest()) - - requireEnqueue(q, fromAlice[1], 101) - assert.Equal(t, uint64(100), q.Oldest()) - - requireEnqueue(q, fromBob[0], 99) - assert.Equal(t, uint64(99), q.Oldest()) - - requireEnqueue(q, fromBob[1], 1) - assert.Equal(t, uint64(1), q.Oldest()) - - }) -} diff --git a/internal/pkg/message/testing.go b/internal/pkg/message/testing.go deleted file mode 100644 index f9157fce06..0000000000 --- a/internal/pkg/message/testing.go +++ /dev/null @@ -1,128 +0,0 @@ -package message - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" -) - -// FakeProvider is a chain and actor provider for testing. -// The provider extends a chain.Builder for providing tipsets and maintains an explicit head CID. -// The provider can provide an actor for a single (head, address) pair. 
-type FakeProvider struct { - *chain.Builder - t *testing.T - - head block.TipSetKey // Provided by GetHead and expected by others - actors map[address.Address]*actor.Actor -} - -// NewFakeProvider creates a new builder and wraps with a provider. -// The builder may be accessed by `provider.Builder`. -func NewFakeProvider(t *testing.T) *FakeProvider { - builder := chain.NewBuilder(t, address.Address{}) - return &FakeProvider{ - Builder: builder, - t: t, - actors: make(map[address.Address]*actor.Actor)} -} - -// GetHead returns the head tipset key. -func (p *FakeProvider) GetHead() block.TipSetKey { - return p.head -} - -// Head fulfills the ChainReaderAPI interface -func (p *FakeProvider) Head() block.TipSetKey { - return p.GetHead() -} - -// GetActorAt returns the actor corresponding to (key, addr) if they match those last set. -func (p *FakeProvider) GetActorAt(ctx context.Context, key block.TipSetKey, addr address.Address) (*actor.Actor, error) { - if !key.Equals(p.head) { - return nil, errors.Errorf("No such tipset %s, expected %s", key, p.head) - } - a, ok := p.actors[addr] - if !ok { - return nil, xerrors.Errorf("No such address %s", addr.String()) - } - return a, nil -} - -// SetHead sets the head tipset -func (p *FakeProvider) SetHead(head block.TipSetKey) { - _, e := p.GetTipSet(head) - require.NoError(p.t, e) - p.head = head -} - -// SetActor sets an actor to be mocked on chain -func (p *FakeProvider) SetActor(addr address.Address, act *actor.Actor) { - p.actors[addr] = act -} - -// SetHeadAndActor sets the head tipset, along with the from address and actor to be provided. -func (p *FakeProvider) SetHeadAndActor(t *testing.T, head block.TipSetKey, addr address.Address, actor *actor.Actor) { - p.SetHead(head) - p.SetActor(addr, actor) -} - -// MockPublisher is a publisher which just stores the last message published. 
-type MockPublisher struct { - ReturnError error // Error to be returned by Publish() - Message *types.SignedMessage // Message received by Publish() - Height abi.ChainEpoch // Height received by Publish() - Bcast bool // was this broadcast? -} - -// Publish records the message etc for subsequent inspection. -func (p *MockPublisher) Publish(ctx context.Context, message *types.SignedMessage, height abi.ChainEpoch, bcast bool) error { - p.Message = message - p.Height = height - p.Bcast = bcast - return p.ReturnError -} - -// FakeValidator is a validator which configurably accepts or rejects messages. -type FakeValidator struct { - RejectMessages bool -} - -// Validate returns an error only if `RejectMessages` is true. -func (v FakeValidator) ValidateSignedMessageSyntax(ctx context.Context, msg *types.SignedMessage) error { - if v.RejectMessages { - return errors.New("rejected for testing") - } - return nil -} - -// NullPolicy is a policy that does nothing. -type NullPolicy struct { -} - -// HandleNewHead does nothing. -func (NullPolicy) HandleNewHead(ctx context.Context, target PolicyTarget, oldChain, newChain []block.TipSet) error { - return nil -} - -// MockNetworkPublisher records the last message published. -type MockNetworkPublisher struct { - Data []byte -} - -// Publish records the topic and message. -func (p *MockNetworkPublisher) Publish(ctx context.Context, data []byte) error { - p.Data = data - return nil -} diff --git a/internal/pkg/message/util.go b/internal/pkg/message/util.go deleted file mode 100644 index 4d4c11ff84..0000000000 --- a/internal/pkg/message/util.go +++ /dev/null @@ -1,15 +0,0 @@ -package message - -import ( - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" -) - -// chainProvider provides chain access for updating the message pool in response to new heads. 
-type chainProvider interface { - // The TipSetProvider is used only for counting non-null tipsets when expiring messages. We could remove - // this dependency if expiration was based on round number, or if this object maintained a short - // list of non-empty tip heights. - chain.TipSetProvider - GetHead() block.TipSetKey -} diff --git a/internal/pkg/metrics/export.go b/internal/pkg/metrics/export.go deleted file mode 100644 index a93d5e1475..0000000000 --- a/internal/pkg/metrics/export.go +++ /dev/null @@ -1,85 +0,0 @@ -package metrics - -import ( - "net/http" - "time" - - "contrib.go.opencensus.io/exporter/jaeger" - "contrib.go.opencensus.io/exporter/prometheus" - ma "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr-net" - prom "github.com/prometheus/client_golang/prometheus" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" -) - -// RegisterPrometheusEndpoint registers and serves prometheus metrics -func RegisterPrometheusEndpoint(cfg *config.MetricsConfig) error { - if !cfg.PrometheusEnabled { - return nil - } - - // validate config values and marshal to types - interval, err := time.ParseDuration(cfg.ReportInterval) - if err != nil { - log.Errorf("invalid metrics interval: %s", err) - return err - } - - promma, err := ma.NewMultiaddr(cfg.PrometheusEndpoint) - if err != nil { - return err - } - - _, promAddr, err := manet.DialArgs(promma) - if err != nil { - return err - } - - // setup prometheus - registry := prom.NewRegistry() - pe, err := prometheus.NewExporter(prometheus.Options{ - Namespace: "filecoin", - Registry: registry, - }) - if err != nil { - return err - } - - view.RegisterExporter(pe) - view.SetReportingPeriod(interval) - - go func() { - mux := http.NewServeMux() - mux.Handle("/metrics", pe) - if err := http.ListenAndServe(promAddr, mux); err != nil { - log.Errorf("failed to serve /metrics endpoint on %v", err) - } - }() - - return nil -} 
- -// RegisterJaeger registers the jaeger endpoint with opencensus and names the -// tracer `name`. -func RegisterJaeger(name string, cfg *config.TraceConfig) error { - if !cfg.JaegerTracingEnabled { - return nil - } - je, err := jaeger.NewExporter(jaeger.Options{ - CollectorEndpoint: cfg.JaegerEndpoint, - Process: jaeger.Process{ - ServiceName: name, - }, - }) - if err != nil { - return err - } - - trace.RegisterExporter(je) - trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(cfg.ProbabilitySampler)}) - - return nil -} diff --git a/internal/pkg/metrics/heartbeat_test.go b/internal/pkg/metrics/heartbeat_test.go deleted file mode 100644 index 66dd3e404e..0000000000 --- a/internal/pkg/metrics/heartbeat_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package metrics_test - -import ( - "context" - "crypto/rand" - "encoding/json" - "fmt" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p" - "github.com/libp2p/go-libp2p-core/crypto" - "github.com/libp2p/go-libp2p-core/host" - net "github.com/libp2p/go-libp2p-core/network" - ma "github.com/multiformats/go-multiaddr" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" -) - -var testCid cid.Cid - -func init() { - c, err := 
cid.Decode("Qmd52WKRSwrBK5gUaJKawryZQ5by6UbNB8KVW2Zy6JtbyW") - if err != nil { - panic(err) - } - testCid = c -} - -type endpoint struct { - Host host.Host - Address string -} - -func newEndpoint(t *testing.T, port int) endpoint { - priv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, rand.Reader) - if err != nil { - t.Fatal(err) - } - opts := []libp2p.Option{ - libp2p.DisableRelay(), - libp2p.ListenAddrStrings(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port)), - libp2p.Identity(priv), - } - - basicHost, err := libp2p.New(context.Background(), opts...) - if err != nil { - t.Fatal(err) - } - - // Build host multiaddress - hostAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ipfs/%s", basicHost.ID().Pretty())) - - // Now we can build a full multiaddress to reach this host - // by encapsulating both addresses: - addr := basicHost.Addrs()[0] - fullAddr := addr.Encapsulate(hostAddr) - - return endpoint{ - Host: basicHost, - Address: fullAddr.String(), - } -} - -func TestHeartbeatConnectSuccess(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - aggregator := newEndpoint(t, 0) - filecoin := newEndpoint(t, 0) - aggregator.Host.SetStreamHandler(metrics.HeartbeatProtocol, func(c net.Stream) { - }) - - hbs := metrics.NewHeartbeatService( - filecoin.Host, - testCid, - &config.HeartbeatConfig{ - BeatTarget: aggregator.Address, - BeatPeriod: "3s", - ReconnectPeriod: "10s", - Nickname: "BobHoblaw", - }, - func() (block.TipSet, error) { - tipSet := chain.NewBuilder(t, address.Undef).NewGenesis() - return tipSet, nil - }, - ) - - assert.Equal(t, 1, len(aggregator.Host.Peerstore().Peers())) - assert.Contains(t, aggregator.Host.Peerstore().Peers(), aggregator.Host.ID()) - assert.NoError(t, hbs.Connect(ctx)) - assert.Equal(t, 2, len(aggregator.Host.Peerstore().Peers())) - assert.Contains(t, aggregator.Host.Peerstore().Peers(), aggregator.Host.ID()) - assert.Contains(t, aggregator.Host.Peerstore().Peers(), filecoin.Host.ID()) -} - -func TestHeartbeatConnectFailure(t 
*testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - filecoin := newEndpoint(t, 60001) - - hbs := metrics.NewHeartbeatService( - filecoin.Host, - testCid, - &config.HeartbeatConfig{ - BeatTarget: "", - BeatPeriod: "3s", - ReconnectPeriod: "10s", - Nickname: "BobHoblaw", - }, - func() (block.TipSet, error) { - tipSet := chain.NewBuilder(t, address.Undef).NewGenesis() - return tipSet, nil - }, - ) - assert.Error(t, hbs.Connect(ctx)) -} - -func TestHeartbeatRunSuccess(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - // we will use this to stop the run method after making assertions - runCtx, cancel := context.WithCancel(ctx) - - // port 0 to avoid conflicts - aggregator := newEndpoint(t, 0) - filecoin := newEndpoint(t, 0) - - // create a tipset, we will assert on it in the SetStreamHandler method - expHeight := abi.ChainEpoch(444) - expTs := mustMakeTipset(t, expHeight) - - addr, err := address.NewSecp256k1Address([]byte("miner address")) - require.NoError(t, err) - - // The handle method will run the assertions for the test - aggregator.Host.SetStreamHandler(metrics.HeartbeatProtocol, func(s net.Stream) { - defer func() { - require.NoError(t, s.Close()) - }() - - dec := json.NewDecoder(s) - var hb metrics.Heartbeat - require.NoError(t, dec.Decode(&hb)) - - assert.Equal(t, expTs.String(), hb.Head) - assert.Equal(t, abi.ChainEpoch(444), hb.Height) - assert.Equal(t, "BobHoblaw", hb.Nickname) - assert.Equal(t, addr, hb.MinerAddress) - cancel() - }) - - hbs := metrics.NewHeartbeatService( - filecoin.Host, - testCid, - &config.HeartbeatConfig{ - BeatTarget: aggregator.Address, - BeatPeriod: "1s", - ReconnectPeriod: "1s", - Nickname: "BobHoblaw", - }, - func() (block.TipSet, error) { - return expTs, nil - }, - metrics.WithMinerAddressGetter(func() address.Address { - return addr - }), - ) - - require.NoError(t, hbs.Connect(ctx)) - - assert.NoError(t, hbs.Run(runCtx)) - assert.Error(t, runCtx.Err(), context.Canceled.Error()) -} - -func 
mustMakeTipset(t *testing.T, height abi.ChainEpoch) block.TipSet { - ts, err := block.NewTipSet(&block.Block{ - Miner: vmaddr.NewForTestGetter()(), - Ticket: block.Ticket{VRFProof: []byte{0}}, - Parents: block.TipSetKey{}, - ParentWeight: fbig.Zero(), - Height: height, - MessageReceipts: e.NewCid(types.EmptyMessagesCID), - Messages: e.NewCid(types.EmptyTxMetaCID), - }) - if err != nil { - t.Fatal(err) - } - return ts -} diff --git a/internal/pkg/mining/block_generate.go b/internal/pkg/mining/block_generate.go deleted file mode 100644 index 7aa23ce400..0000000000 --- a/internal/pkg/mining/block_generate.go +++ /dev/null @@ -1,202 +0,0 @@ -package mining - -// Block generation is part of the logic of the DefaultWorker. -// 'generate' is that function that actually creates a new block from a base -// TipSet using the DefaultWorker's many utilities. - -import ( - "context" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/pkg/errors" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// Generate returns a new block created from the messages in the pool. 
-func (w *DefaultWorker) Generate( - ctx context.Context, - baseTipSet block.TipSet, - ticket block.Ticket, - electionProof crypto.VRFPi, - nullBlockCount abi.ChainEpoch, - posts []block.PoStProof, - drandEntries []*drand.Entry, -) (*FullBlock, error) { - - generateTimer := time.Now() - defer func() { - log.Infof("[TIMER] DefaultWorker.Generate baseTipset: %s - elapsed time: %s", baseTipSet.String(), time.Since(generateTimer).Round(time.Millisecond)) - }() - - weight, err := w.getWeight(ctx, baseTipSet) - if err != nil { - return nil, errors.Wrap(err, "get weight") - } - - baseHeight, err := baseTipSet.Height() - if err != nil { - return nil, errors.Wrap(err, "get base tip set height") - } - - blockHeight := baseHeight + nullBlockCount + 1 - - // Construct list of message candidates for inclusion. - // These messages will be processed, and those that fail excluded from the block. - pending := w.messageSource.Pending() - mq := NewMessageQueue(pending) - candidateMsgs := orderMessageCandidates(mq.Drain(block.BlockMessageLimit)) - candidateMsgs = w.filterPenalizableMessages(ctx, candidateMsgs) - if len(candidateMsgs) > block.BlockMessageLimit { - return nil, errors.Errorf("too many messages returned from mq.Drain: %d", len(candidateMsgs)) - } - - var blsAccepted []*types.SignedMessage - var secpAccepted []*types.SignedMessage - - // Align the results with the candidate signed messages to accumulate the messages lists - // to include in the block, and handle failed messages. 
- for _, msg := range candidateMsgs { - if msg.Message.From.Protocol() == address.BLS { - blsAccepted = append(blsAccepted, msg) - } else { - secpAccepted = append(secpAccepted, msg) - } - } - - // Create an aggregage signature for messages - unwrappedBLSMessages, blsAggregateSig, err := aggregateBLS(blsAccepted) - if err != nil { - return nil, errors.Wrap(err, "could not aggregate bls messages") - } - - // Persist messages to ipld storage - txMetaCid, err := w.messageStore.StoreMessages(ctx, secpAccepted, unwrappedBLSMessages) - if err != nil { - return nil, errors.Wrap(err, "error persisting messages") - } - - // get tipset state root and receipt root - baseStateRoot, err := w.tsMetadata.GetTipSetStateRoot(baseTipSet.Key()) - if err != nil { - return nil, errors.Wrapf(err, "error retrieving state root for tipset %s", baseTipSet.Key().String()) - } - - baseReceiptRoot, err := w.tsMetadata.GetTipSetReceiptsRoot(baseTipSet.Key()) - if err != nil { - return nil, errors.Wrapf(err, "error retrieving receipt root for tipset %s", baseTipSet.Key().String()) - } - - // Set the block timestamp to be exactly the start of the target epoch, regardless of the current time. - // The real time might actually be much later than this if catching up from a pause in chain progress. 
- epochStartTime := w.clock.StartTimeOfEpoch(blockHeight) - - if drandEntries == nil { - drandEntries = []*drand.Entry{} - } - - if posts == nil { - posts = []block.PoStProof{} - } - - next := &block.Block{ - Miner: w.minerAddr, - Height: blockHeight, - BeaconEntries: drandEntries, - ElectionProof: &crypto.ElectionProof{VRFProof: electionProof}, - Messages: e.NewCid(txMetaCid), - MessageReceipts: e.NewCid(baseReceiptRoot), - Parents: baseTipSet.Key(), - ParentWeight: weight, - PoStProofs: posts, - StateRoot: e.NewCid(baseStateRoot), - Ticket: ticket, - Timestamp: uint64(epochStartTime.Unix()), - BLSAggregateSig: &blsAggregateSig, - } - - view, err := w.api.PowerStateView(baseTipSet.Key()) - if err != nil { - return nil, errors.Wrapf(err, "failed to read state view") - } - _, workerAddr, err := view.MinerControlAddresses(ctx, w.minerAddr) - if err != nil { - return nil, errors.Wrap(err, "failed to read workerAddr during block generation") - } - workerSigningAddr, err := view.AccountSignerAddress(ctx, workerAddr) - if err != nil { - return nil, errors.Wrap(err, "failed to convert worker address to signing address") - } - blockSig, err := w.workerSigner.SignBytes(ctx, next.SignatureData(), workerSigningAddr) - if err != nil { - return nil, errors.Wrap(err, "failed to sign block") - } - next.BlockSig = &blockSig - - return NewfullBlock(next, blsAccepted, secpAccepted), nil -} - -// The resulting output is not empty: it has either a block or an error. 
- -func aggregateBLS(blsMessages []*types.SignedMessage) ([]*types.UnsignedMessage, crypto.Signature, error) { - var sigs []bls.Signature - var unwrappedMsgs []*types.UnsignedMessage - for _, msg := range blsMessages { - // unwrap messages - unwrappedMsgs = append(unwrappedMsgs, &msg.Message) - if msg.Signature.Type != crypto.SigTypeBLS { - return []*types.UnsignedMessage{}, crypto.Signature{}, errors.New("non-BLS message signature") - } - // store message signature as bls signature - blsSig := bls.Signature{} - copy(blsSig[:], msg.Signature.Data) - sigs = append(sigs, blsSig) - } - blsAggregateSig := bls.Aggregate(sigs) - if blsAggregateSig == nil { - return []*types.UnsignedMessage{}, crypto.Signature{}, errors.New("could not aggregate signatures") - } - - return unwrappedMsgs, crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: blsAggregateSig[:], - }, nil - -} - -// When a block is validated, BLS messages are processed first, so for simplicity all BLS -// messages are considered first here too. -func orderMessageCandidates(messages []*types.SignedMessage) []*types.SignedMessage { - blsMessages := []*types.SignedMessage{} - secpMessages := []*types.SignedMessage{} - - for _, m := range messages { - if m.Message.From.Protocol() == address.BLS { - blsMessages = append(blsMessages, m) - } else { - secpMessages = append(secpMessages, m) - } - } - return append(blsMessages, secpMessages...) 
-} - -func (w *DefaultWorker) filterPenalizableMessages(ctx context.Context, messages []*types.SignedMessage) []*types.SignedMessage { - var goodMessages []*types.SignedMessage - for _, msg := range messages { - err := w.penaltyChecker.PenaltyCheck(ctx, &msg.Message) - if err != nil { - mCid, _ := msg.Cid() - log.Debugf("Msg: %s excluded in block because penalized with err %s", mCid, err) - continue - } - goodMessages = append(goodMessages, msg) - } - return goodMessages -} diff --git a/internal/pkg/mining/mqueue.go b/internal/pkg/mining/mqueue.go deleted file mode 100644 index 9f21933008..0000000000 --- a/internal/pkg/mining/mqueue.go +++ /dev/null @@ -1,125 +0,0 @@ -package mining - -import ( - "bytes" - "container/heap" - "sort" - - "github.com/filecoin-project/go-address" - specsbig "github.com/filecoin-project/specs-actors/actors/abi/big" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// MessageQueue is a priority queue of messages from different actors. Messages are ordered -// by decreasing gas price, subject to the constraint that messages from a single actor are -// always in increasing nonce order. -// All messages for a queue are inserted at construction, after which messages may only -// be popped. -// Potential improvements include: -// - deprioritising messages after a gap in nonce value, which can never be mined (see Ethereum) -// - attempting to pack messages into a fixed gas limit (i.e. 0/1 knapsack subject to nonce ordering), -// see https://en.wikipedia.org/wiki/Knapsack_problem -type MessageQueue struct { - // A heap of nonce-ordered queues, one per sender. - senderQueues queueHeap -} - -// NewMessageQueue allocates and initializes a message queue. -func NewMessageQueue(msgs []*types.SignedMessage) MessageQueue { - // Group messages by sender. 
- bySender := make(map[address.Address]nonceQueue) - for _, m := range msgs { - bySender[m.Message.From] = append(bySender[m.Message.From], m) - } - - // Order each sender queue by nonce and initialize heap structure. - addrHeap := make(queueHeap, len(bySender)) - heapIdx := 0 - for _, nq := range bySender { - sort.Slice(nq, func(i, j int) bool { return nq[i].Message.CallSeqNum < nq[j].Message.CallSeqNum }) - addrHeap[heapIdx] = nq - heapIdx++ - } - heap.Init(&addrHeap) - - return MessageQueue{addrHeap} -} - -// Empty tests whether the queue is empty. -func (mq *MessageQueue) Empty() bool { - return len(mq.senderQueues) == 0 -} - -// Pop removes and returns the next message from the queue, returning (nil, false) if none remain. -func (mq *MessageQueue) Pop() (*types.SignedMessage, bool) { - if len(mq.senderQueues) == 0 { - return nil, false - } - // Select actor with best gas price. - bestQueue := &mq.senderQueues[0] - - // Pop first message off that actor's queue - msg := (*bestQueue)[0] - if len(*bestQueue) == 1 { - // If the actor's queue will become empty, remove it from the heap. - heap.Pop(&mq.senderQueues) - } else { - // If the actor's queue still has elements, remove the first and relocate the queue in the heap - // according to the gas price of its next message. - *bestQueue = (*bestQueue)[1:] - heap.Fix(&mq.senderQueues, 0) - } - return msg, true -} - -// Drain removes and returns all messages in a slice. If max is < 0 returns all -func (mq *MessageQueue) Drain(nToPop int) []*types.SignedMessage { - var out []*types.SignedMessage - for msg, hasMore := mq.Pop(); hasMore; msg, hasMore = mq.Pop() { - if nToPop == 0 { - break - } - nToPop-- - out = append(out, msg) - } - return out -} - -// A slice of messages ordered by CallSeqNum (for a single sender). -type nonceQueue []*types.SignedMessage - -// Implements heap.Interface to hold a priority queue of nonce-ordered queues, one per sender. 
-// Heap priority is given by the gas price of the first message for each queue. -// Each sender queue is expected to be ordered by increasing nonce. -// Implementation is simplified from https://golang.org/pkg/container/heap/#example__priorityQueue. -type queueHeap []nonceQueue - -func (pq queueHeap) Len() int { return len(pq) } - -// Less implements Heap.Interface.Less to compare items on gas price and sender address. -func (pq queueHeap) Less(i, j int) bool { - delta := specsbig.Sub(pq[i][0].Message.GasPrice, pq[j][0].Message.GasPrice) - if !delta.IsZero() { - // We want Pop to give us the highest gas price, so use GreaterThan. - return delta.GreaterThan(types.ZeroAttoFIL) - } - // Secondarily order by address to give a stable ordering. - return bytes.Compare(pq[i][0].Message.From.Bytes(), pq[j][0].Message.From.Bytes()) < 0 -} - -func (pq queueHeap) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] -} - -func (pq *queueHeap) Push(x interface{}) { - item := x.(nonceQueue) - *pq = append(*pq, item) -} - -func (pq *queueHeap) Pop() interface{} { - n := len(*pq) - item := (*pq)[n-1] - *pq = (*pq)[0 : n-1] - return item -} diff --git a/internal/pkg/mining/mqueue_test.go b/internal/pkg/mining/mqueue_test.go deleted file mode 100644 index b78aa9893d..0000000000 --- a/internal/pkg/mining/mqueue_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package mining - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -func TestMessageQueueOrder(t *testing.T) { - tf.UnitTest(t) - - var ki = types.MustGenerateKeyInfo(10, 42) - var mockSigner = types.NewMockSigner(ki) - - a0 := mockSigner.Addresses[0] - a1 := mockSigner.Addresses[2] - a2 := mockSigner.Addresses[3] - to := 
mockSigner.Addresses[9] - - sign := func(from address.Address, to address.Address, nonce uint64, units uint64, price int64) *types.SignedMessage { - msg := types.UnsignedMessage{ - From: from, - To: to, - CallSeqNum: nonce, - GasPrice: types.NewGasPrice(price), - GasLimit: gas.NewGas(int64(units)), - } - s, err := types.NewSignedMessage(context.TODO(), msg, &mockSigner) - require.NoError(t, err) - return s - } - - t.Run("empty", func(t *testing.T) { - q := NewMessageQueue([]*types.SignedMessage{}) - assert.True(t, q.Empty()) - msg, ok := q.Pop() - assert.Nil(t, msg) - assert.False(t, ok) - }) - - t.Run("orders by nonce", func(t *testing.T) { - msgs := []*types.SignedMessage{ - // Msgs from a0 are in increasing order. - // Msgs from a1 are in decreasing order. - // Msgs from a2 are out of order. - // Messages from different signers are interleaved. - sign(a0, to, 0, 0, 0), - sign(a1, to, 15, 0, 0), - sign(a2, to, 5, 0, 0), - - sign(a0, to, 1, 0, 0), - sign(a1, to, 2, 0, 0), - sign(a2, to, 7, 0, 0), - - sign(a0, to, 20, 0, 0), - sign(a1, to, 1, 0, 0), - sign(a2, to, 1, 0, 0), - } - - q := NewMessageQueue(msgs) - - lastFromAddr := make(map[address.Address]uint64) - for msg, more := q.Pop(); more == true; msg, more = q.Pop() { - last, seen := lastFromAddr[msg.Message.From] - if seen { - assert.True(t, last <= msg.Message.CallSeqNum) - } - lastFromAddr[msg.Message.From] = msg.Message.CallSeqNum - } - assert.True(t, q.Empty()) - }) - - t.Run("orders by gas price", func(t *testing.T) { - msgs := []*types.SignedMessage{ - sign(a0, to, 0, 0, 2), - sign(a1, to, 0, 0, 3), - sign(a2, to, 0, 0, 1), - } - q := NewMessageQueue(msgs) - expected := []*types.SignedMessage{msgs[1], msgs[0], msgs[2]} - actual := q.Drain(-1) - assert.Equal(t, expected, actual) - assert.True(t, q.Empty()) - }) - - t.Run("nonce overrides gas price", func(t *testing.T) { - msgs := []*types.SignedMessage{ - sign(a0, to, 0, 0, 1), - sign(a0, to, 1, 0, 3), // More valuable but must come after previous 
message from a0 - sign(a2, to, 0, 0, 2), - } - expected := []*types.SignedMessage{msgs[2], msgs[0], msgs[1]} - - q := NewMessageQueue(msgs) - actual := q.Drain(-1) - assert.Equal(t, expected, actual) - assert.True(t, q.Empty()) - }) - - t.Run("only take as many as specified", func(t *testing.T) { - msgs := []*types.SignedMessage{ - sign(a0, to, 0, 0, 2), - sign(a1, to, 0, 0, 3), - sign(a2, to, 0, 0, 1), - } - q := NewMessageQueue(msgs) - expected := []*types.SignedMessage{msgs[1]} - actual := q.Drain(1) - assert.Equal(t, expected, actual) - assert.False(t, q.Empty()) - }) -} diff --git a/internal/pkg/mining/scheduler.go b/internal/pkg/mining/scheduler.go deleted file mode 100644 index 31263962c1..0000000000 --- a/internal/pkg/mining/scheduler.go +++ /dev/null @@ -1,212 +0,0 @@ -package mining - -// The Scheduler listens for new heaviest TipSets and schedules mining work on -// these TipSets. The scheduler is ultimately responsible for informing the -// rest of the system about new blocks mined by the Worker. This is the -// interface to implement if you want to explore an alternate mining strategy. - -import ( - "context" - "sync" - - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// FullBlock is the result of a single mining attempt. It will have a new block header with included messages. -type FullBlock struct { - Header *block.Block - BLSMessages []*types.SignedMessage - SECPMessages []*types.SignedMessage -} - -func NewfullBlock(b *block.Block, BLSMessages, SECPMessages []*types.SignedMessage) *FullBlock { - return &FullBlock{Header: b, BLSMessages: BLSMessages, SECPMessages: SECPMessages} -} - -// Scheduler is the mining interface consumers use. 
-type Scheduler interface { - Start(miningCtx context.Context) (<-chan FullBlock, *sync.WaitGroup) - IsStarted() bool - Pause() - Continue() -} - -// NewScheduler returns a new timingScheduler to schedule mining work on the -// input worker. -func NewScheduler(w Worker, f func() (block.TipSet, error), c clock.ChainEpochClock) Scheduler { - return &timingScheduler{ - worker: w, - pollHeadFunc: f, - chainClock: c, - } -} - -type timingScheduler struct { - // worker contains the actual mining logic. - worker Worker - // pollHeadFunc is the function the scheduler uses to poll for the - // current heaviest tipset - pollHeadFunc func() (block.TipSet, error) - // chainClock measures time and tracks the epoch-time relationship - chainClock clock.ChainEpochClock - - // mu protects skipping - mu sync.Mutex - // skipping tracks whether we should skip mining - skipping bool - - isStarted bool -} - -// Start starts mining taking in a context. -// It returns a channel for reading mining outputs and a waitgroup for teardown. -// It is the callers responsibility to close the out channel only after waiting -// on the waitgroup. -func (s *timingScheduler) Start(miningCtx context.Context) (<-chan FullBlock, *sync.WaitGroup) { - var doneWg sync.WaitGroup - outCh := make(chan FullBlock, 1) - - // loop mining work - doneWg.Add(1) - go func() { - err := s.mineLoop(miningCtx, outCh) - if err != nil { - log.Errorf("Unrecoverable error in mining loop. Mining stoped: %s", err) - close(outCh) - } - doneWg.Done() - }() - s.isStarted = true - - return outCh, &doneWg -} - -func (s *timingScheduler) mineLoop(ctx context.Context, outCh chan FullBlock) error { - // start on epoch boundary - targetEpoch := s.chainClock.WaitNextEpoch(ctx) - if s.isDone(ctx) { - return nil - } - - // The main event loop for the timing scheduler. - // Waits for a new epoch to start, polls the heaviest head, includes the correct number - // of null blocks and starts a mining job async. 
- // - // The scheduler will skip mining jobs if the skipping flag is set - for { - // wait for prop delay after epoch start for parent blocks to arrive - s.chainClock.WaitForEpochPropDelay(ctx, targetEpoch) - if s.isDone(ctx) { - return nil - } - - // our target is now the next epoch - targetEpoch++ - - // continue if we are skipping - if s.isSkipping() { - continue - } - - // Check for a new base tipset, and reset null count if one is found. - base, err := s.pollHeadFunc() - if err != nil { - return errors.Wrap(err, "error polling head from mining scheduler") - } - - baseHeight, err := base.Height() - if err != nil { - log.Errorf("error getting height from base", err) - } - - // block time validation should prevent us from seeing a base height than we expect - if baseHeight >= targetEpoch { - log.Errorf("Scheduled target epoch %d is not greater than base height %d", targetEpoch, baseHeight) - } - - // null count is the number of epochs between the one we are mining and the one now. - nullCount := uint64(targetEpoch-baseHeight) - 1 - - // mine now - block, err := s.worker.Mine(ctx, base, nullCount) - if err != nil { - log.Errorf("Mining failed: %s", err) - continue - } - - // wait until target to send - s.chainClock.WaitForEpoch(ctx, targetEpoch) - if s.isDone(ctx) { - return nil - } - - // send block at epoch boundary if we won - if block != nil { - outCh <- *block - } - } -} - -func (s *timingScheduler) isSkipping() bool { - s.mu.Lock() - defer s.mu.Unlock() - return s.skipping -} - -// IsStarted is called when starting mining to tell whether the scheduler should be -// started -func (s *timingScheduler) IsStarted() bool { - return s.isStarted -} - -// Pause is called to pause the scheduler from running mining work -func (s *timingScheduler) Pause() { - s.mu.Lock() - defer s.mu.Unlock() - s.skipping = true -} - -// Continue is called to unpause the scheduler's mining work -func (s *timingScheduler) Continue() { - s.mu.Lock() - defer s.mu.Unlock() - s.skipping = 
false -} - -// check if context is done. Should be called after every wait. -func (s *timingScheduler) isDone(ctx context.Context) bool { - select { // check for interrupt during waiting - case <-ctx.Done(): - s.isStarted = false - return true - default: - } - return false -} - -// MineOnce mines on a given base until it finds a winner or errors out. -func MineOnce(ctx context.Context, w DefaultWorker, ts block.TipSet) (*FullBlock, error) { - var nullCount uint64 - for { - blk, err := MineOneEpoch(ctx, w, ts, nullCount) - if err != nil { - return nil, err - } - - if blk != nil { - return blk, nil - } - - nullCount++ - } -} - -// MineOneEpoch attempts to mine a block in an epoch and returns the mined block, -// or nil if no block could be mined -func MineOneEpoch(ctx context.Context, w DefaultWorker, ts block.TipSet, nullCount uint64) (*FullBlock, error) { - return w.Mine(ctx, ts, nullCount) -} diff --git a/internal/pkg/mining/scheduler_test.go b/internal/pkg/mining/scheduler_test.go deleted file mode 100644 index 687aa91db2..0000000000 --- a/internal/pkg/mining/scheduler_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package mining_test - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - . 
"github.com/filecoin-project/go-filecoin/internal/pkg/mining" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -const epochDuration = builtin.EpochDurationSeconds -const propDelay = 6 * time.Second - -// Mining loop unit tests - -func TestWorkerCalled(t *testing.T) { - tf.UnitTest(t) - ts := testHead(t) - - called := make(chan struct{}, 1) - w := NewTestWorker(t, func(_ context.Context, workHead block.TipSet, _ uint64) (*FullBlock, error) { - assert.True(t, workHead.Equals(ts)) - called <- struct{}{} - return nil, nil - }) - - fakeClock, chainClock := clock.NewFakeChain(1234567890, epochDuration, propDelay, 1234567890) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - scheduler := NewScheduler(w, headFunc(ts), chainClock) - scheduler.Start(ctx) - fakeClock.BlockUntil(1) - fakeClock.Advance(epochDuration) - fakeClock.Advance(propDelay) - - <-called -} - -func TestCorrectNullBlocksGivenEpoch(t *testing.T) { - tf.UnitTest(t) - ts := testHead(t) - h, err := ts.Height() - require.NoError(t, err) - - fakeClock, chainClock := clock.NewFakeChain(1234567890, epochDuration, propDelay, 1234567890) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Move forward 20 epochs - for i := 0; i < 19; i++ { - fakeClock.Advance(epochDuration) - } - - called := make(chan struct{}, 20) - w := NewTestWorker(t, func(_ context.Context, workHead block.TipSet, nullCount uint64) (*FullBlock, error) { - assert.Equal(t, uint64(h+20), nullCount) - called <- struct{}{} - return nil, nil - }) - - scheduler := NewScheduler(w, headFunc(ts), chainClock) - scheduler.Start(ctx) - fakeClock.BlockUntil(1) - // Move forward 1 epoch for a total of 21 - fakeClock.Advance(epochDuration) - fakeClock.Advance(propDelay) - - <-called -} - -func TestWaitsForEpochStart(t *testing.T) { - // If the scheduler starts partway through an epoch it will wait to mine 
- // until there is a new epoch boundary - tf.UnitTest(t) - ts := testHead(t) - - fakeClock, chainClock := clock.NewFakeChain(1234567890, epochDuration, propDelay, 1234567890) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - genTime := chainClock.Now() - - var wg sync.WaitGroup - wg.Add(1) - waitGroupDoneCh := make(chan struct{}) - go func() { - wg.Wait() - waitGroupDoneCh <- struct{}{} - }() - - called := make(chan struct{}, 1) - expectMiningCall := false - w := NewTestWorker(t, func(_ context.Context, workHead block.TipSet, _ uint64) (*FullBlock, error) { - if !expectMiningCall { - t.Fatal("mining worker called too early") - } - // This doesn't get called until the clock has advanced to prop delay past epoch - assert.Equal(t, genTime.Add(epochDuration).Add(propDelay), chainClock.Now()) - called <- struct{}{} - return nil, nil - }) - - scheduler := NewScheduler(w, headFunc(ts), chainClock) - scheduler.Start(ctx) - - fakeClock.BlockUntil(1) - expectMiningCall = false - fakeClock.Advance(epochDuration) // advance to epoch start - fakeClock.Advance(propDelay / 2) // advance halfway into prop delay - - // advance past propagation delay in next block and expect worker to be called - fakeClock.BlockUntil(1) - expectMiningCall = true - fakeClock.Advance(propDelay / 2) - <-called -} - -func TestSkips(t *testing.T) { - tf.UnitTest(t) - ts := testHead(t) - - fakeClock, chainClock := clock.NewFakeChain(1234567890, epochDuration, propDelay, 1234567890) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var wg sync.WaitGroup - wg.Add(1) - w := NewTestWorker(t, func(_ context.Context, workHead block.TipSet, nullCount uint64) (*FullBlock, error) { - // This should never be reached as the first epoch should skip mining - if nullCount == 0 { - t.Fail() - return nil, nil - } - wg.Done() - return nil, nil - }) - - scheduler := NewScheduler(w, headFunc(ts), chainClock) - scheduler.Pause() - scheduler.Start(ctx) - 
fakeClock.BlockUntil(1) - fakeClock.Advance(epochDuration + propDelay) - fakeClock.BlockUntil(1) - scheduler.Continue() - fakeClock.Advance(epochDuration) - wg.Wait() -} - -// Helper functions - -func testHead(t *testing.T) block.TipSet { - baseBlock := &block.Block{StateRoot: e.NewCid(types.CidFromString(t, "somecid"))} - ts, err := block.NewTipSet(baseBlock) - require.NoError(t, err) - return ts -} - -func headFunc(ts block.TipSet) func() (block.TipSet, error) { - return func() (block.TipSet, error) { - return ts, nil - } -} diff --git a/internal/pkg/mining/testing.go b/internal/pkg/mining/testing.go deleted file mode 100644 index 3493904236..0000000000 --- a/internal/pkg/mining/testing.go +++ /dev/null @@ -1,49 +0,0 @@ -package mining - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -type miningFunc func(runCtx context.Context, base block.TipSet, nullBlkCount uint64) (*FullBlock, error) - -// TestWorker is a worker with a customizable work function to facilitate -// easy testing. -type TestWorker struct { - WorkFunc miningFunc - t *testing.T -} - -// Mine is the TestWorker's Work function. It simply calls the WorkFunc -// field. -func (w *TestWorker) Mine(ctx context.Context, ts block.TipSet, nullBlockCount uint64) (*FullBlock, error) { - require.NotNil(w.t, w.WorkFunc) - return w.WorkFunc(ctx, ts, nullBlockCount) -} - -// NewTestWorker creates a worker that calls the provided input -// function when Mine() is called. -func NewTestWorker(t *testing.T, f miningFunc) *TestWorker { - return &TestWorker{ - WorkFunc: f, - t: t, - } -} - -// NthTicket returns a ticket with a vrf proof equal to a byte slice wrapping -// the input uint8 value. 
-func NthTicket(i uint8) block.Ticket { - return block.Ticket{VRFProof: []byte{i}} -} - -// NoMessageQualifier always returns no error -type NoMessageQualifier struct{} - -func (npc *NoMessageQualifier) PenaltyCheck(_ context.Context, _ *types.UnsignedMessage) error { - return nil -} diff --git a/internal/pkg/mining/worker.go b/internal/pkg/mining/worker.go deleted file mode 100644 index 3167871c8f..0000000000 --- a/internal/pkg/mining/worker.go +++ /dev/null @@ -1,348 +0,0 @@ -package mining - -// The Worker Mines on Input received from a Scheduler. The Worker is -// responsible for generating the necessary proofs, checking for success, -// generating new blocks, and forwarding them out to the wider node. - -import ( - "context" - "time" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - cid "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log/v2" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - "github.com/filecoin-project/go-filecoin/internal/pkg/postgenerator" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -var log = logging.Logger("mining") - -// Worker is the interface called by the Scheduler to run the mining work being -// scheduled. 
-type Worker interface { - Mine(runCtx context.Context, base block.TipSet, nullBlkCount uint64) (*FullBlock, error) -} - -// GetStateTree is a function that gets the aggregate state tree of a TipSet. It's -// its own function to facilitate testing. -type GetStateTree func(context.Context, block.TipSetKey) (state.Tree, error) - -// GetWeight is a function that calculates the weight of a TipSet. Weight is -// expressed as two uint64s comprising a rational number. -type GetWeight func(context.Context, block.TipSet) (big.Int, error) - -// MessageSource provides message candidates for mining into blocks -type MessageSource interface { - // Pending returns a slice of un-mined messages. - Pending() []*types.SignedMessage - // Remove removes a message from the source permanently - Remove(message cid.Cid) -} - -// A MessageApplier processes all the messages in a message pool. -type MessageApplier interface { - // Dragons: add something back or remove -} - -type workerPorcelainAPI interface { - consensus.ChainRandomness - BlockTime() time.Duration - PowerStateView(baseKey block.TipSetKey) (consensus.PowerStateView, error) - FaultsStateView(baseKey block.TipSetKey) (consensus.FaultStateView, error) -} - -type electionUtil interface { - GenerateElectionProof(ctx context.Context, entry *drand.Entry, epoch abi.ChainEpoch, miner address.Address, worker address.Address, signer types.Signer) (crypto.VRFPi, error) - IsWinner(challengeTicket []byte, minerPower, networkPower abi.StoragePower) bool - GenerateWinningPoSt(ctx context.Context, entry *drand.Entry, epoch abi.ChainEpoch, ep postgenerator.PoStGenerator, maddr address.Address, sectors consensus.SectorsStateView) ([]block.PoStProof, error) -} - -// ticketGenerator creates tickets. 
-type ticketGenerator interface { - MakeTicket(ctx context.Context, base block.TipSetKey, epoch abi.ChainEpoch, miner address.Address, entry *drand.Entry, newPeriod bool, worker address.Address, signer types.Signer) (block.Ticket, error) -} - -type tipSetMetadata interface { - GetTipSetStateRoot(key block.TipSetKey) (cid.Cid, error) - GetTipSetReceiptsRoot(key block.TipSetKey) (cid.Cid, error) -} - -type messageMessageQualifier interface { - PenaltyCheck(ctx context.Context, msg *types.UnsignedMessage) error -} - -// DefaultWorker runs a mining job. -type DefaultWorker struct { - api workerPorcelainAPI - - minerAddr address.Address - minerOwnerAddr address.Address - workerSigner types.Signer - - tsMetadata tipSetMetadata - getStateTree GetStateTree - getWeight GetWeight - election electionUtil - ticketGen ticketGenerator - messageSource MessageSource - penaltyChecker messageMessageQualifier - messageStore chain.MessageWriter // nolint: structcheck - blockstore blockstore.Blockstore - clock clock.ChainEpochClock - poster postgenerator.PoStGenerator - chainState chain.TipSetProvider - drand drand.IFace -} - -// WorkerParameters use for NewDefaultWorker parameters -type WorkerParameters struct { - API workerPorcelainAPI - - MinerAddr address.Address - MinerOwnerAddr address.Address - WorkerSigner types.Signer - - // consensus things - TipSetMetadata tipSetMetadata - GetStateTree GetStateTree - MessageQualifier messageMessageQualifier - GetWeight GetWeight - Election electionUtil - TicketGen ticketGenerator - Drand drand.IFace - - // core filecoin things - MessageSource MessageSource - MessageStore chain.MessageWriter - Blockstore blockstore.Blockstore - Clock clock.ChainEpochClock - Poster postgenerator.PoStGenerator - ChainState chain.TipSetProvider -} - -// NewDefaultWorker instantiates a new Worker. 
-func NewDefaultWorker(parameters WorkerParameters) *DefaultWorker { - return &DefaultWorker{ - api: parameters.API, - getStateTree: parameters.GetStateTree, - getWeight: parameters.GetWeight, - messageSource: parameters.MessageSource, - messageStore: parameters.MessageStore, - penaltyChecker: parameters.MessageQualifier, - blockstore: parameters.Blockstore, - minerAddr: parameters.MinerAddr, - minerOwnerAddr: parameters.MinerOwnerAddr, - workerSigner: parameters.WorkerSigner, - election: parameters.Election, - ticketGen: parameters.TicketGen, - tsMetadata: parameters.TipSetMetadata, - clock: parameters.Clock, - poster: parameters.Poster, - chainState: parameters.ChainState, - drand: parameters.Drand, - } -} - -// Mine implements the DefaultWorkers main mining function.. -// The returned bool indicates if this miner created a new block or not. -func (w *DefaultWorker) Mine(ctx context.Context, base block.TipSet, nullBlkCount uint64) (*FullBlock, error) { - log.Info("Worker.Mine") - if !base.Defined() { - log.Warn("Worker.Mine returning because it can't mine on an empty tipset") - return nil, errors.New("bad input tipset with no blocks sent to Mine()") - } - baseEpoch, err := base.Height() - if err != nil { - log.Warnf("Worker.Mine couldn't read base height %s", err) - return nil, err - } - currEpoch := baseEpoch + abi.ChainEpoch(1) + abi.ChainEpoch(nullBlkCount) - - log.Debugf("Mining on tipset %s, at epoch %d with %d null blocks.", base.String(), baseEpoch, nullBlkCount) - if ctx.Err() != nil { - log.Warnf("Worker.Mine returning with ctx error %s", ctx.Err().Error()) - return nil, ctx.Err() - } - - // Read uncached worker address - keyView, err := w.api.PowerStateView(base.Key()) - if err != nil { - return nil, err - } - _, workerAddr, err := keyView.MinerControlAddresses(ctx, w.minerAddr) - if err != nil { - return nil, err - } - - // Look-back for the election ticket. - // The parameter is interpreted as: lookback=1 means parent tipset. 
Subtract one here because the base from - // which the lookback is counted is already the parent, rather than "current" tipset. - // The sampling code will handle this underflowing past the genesis. - lookbackEpoch := currEpoch - miner.ElectionLookback - - workerSignerAddr, err := keyView.AccountSignerAddress(ctx, workerAddr) - if err != nil { - return nil, err - } - - drandEntries, err := w.drandEntriesForEpoch(ctx, base, nullBlkCount) - if err != nil { - log.Errorf("Worker.Mine failed to collect drand entries for block %s", err) - return nil, err - } - - // Determine if we've won election - electionEntry, err := w.electionEntry(ctx, base, drandEntries) - if err != nil { - log.Errorf("Worker.Mine failed to calculate drand entry for election randomness %s", err) - return nil, err - } - - newPeriod := len(drandEntries) > 0 - nextTicket, err := w.ticketGen.MakeTicket(ctx, base.Key(), lookbackEpoch, w.minerAddr, electionEntry, newPeriod, workerSignerAddr, w.workerSigner) - if err != nil { - log.Warnf("Worker.Mine couldn't generate next ticket %s", err) - return nil, err - } - - electionVRFProof, err := w.election.GenerateElectionProof(ctx, electionEntry, currEpoch, w.minerAddr, workerSignerAddr, w.workerSigner) - if err != nil { - log.Errorf("Worker.Mine failed to generate electionVRFProof %s", err) - } - electionVRFDigest := electionVRFProof.Digest() - electionPowerAncestor, err := w.lookbackTipset(ctx, base, nullBlkCount, consensus.ElectionPowerTableLookback) - if err != nil { - log.Errorf("Worker.Mine couldn't get ancestor tipset: %s", err.Error()) - return nil, err - } - electionPowerTable, err := w.getPowerTable(electionPowerAncestor.Key(), base.Key()) - if err != nil { - log.Errorf("Worker.Mine couldn't get snapshot for tipset: %s", err.Error()) - return nil, err - } - networkPower, err := electionPowerTable.NetworkTotalPower(ctx) - if err != nil { - log.Errorf("failed to get network power: %s", err) - return nil, err - } - minerPower, err := 
electionPowerTable.MinerClaimedPower(ctx, w.minerAddr) - if err != nil { - log.Errorf("failed to get power claim for miner: %s", err) - return nil, err - } - wins := w.election.IsWinner(electionVRFDigest[:], minerPower, networkPower) - if !wins { - // no winners we are done - return nil, nil - } - - // we have a winning block - sectorSetAncestor, err := w.lookbackTipset(ctx, base, nullBlkCount, consensus.WinningPoStSectorSetLookback) - if err != nil { - log.Errorf("Worker.Mine couldn't get ancestor tipset: %s", err.Error()) - return nil, err - } - sectorStateView, err := w.api.PowerStateView(sectorSetAncestor.Key()) - if err != nil { - log.Errorf("Worker.Mine couldn't get snapshot for tipset: %s", err.Error()) - return nil, err - } - - posts, err := w.election.GenerateWinningPoSt(ctx, electionEntry, currEpoch, w.poster, w.minerAddr, sectorStateView) - if err != nil { - log.Warnf("Worker.Mine failed to generate post") - return nil, err - } - - return w.Generate(ctx, base, nextTicket, electionVRFProof, abi.ChainEpoch(nullBlkCount), posts, drandEntries) -} - -func (w *DefaultWorker) getPowerTable(powerKey, faultsKey block.TipSetKey) (consensus.PowerTableView, error) { - powerView, err := w.api.PowerStateView(powerKey) - if err != nil { - return consensus.PowerTableView{}, err - } - faultsView, err := w.api.FaultsStateView(faultsKey) - if err != nil { - return consensus.PowerTableView{}, err - } - return consensus.NewPowerTableView(powerView, faultsView), nil -} - -func (w *DefaultWorker) lookbackTipset(ctx context.Context, base block.TipSet, nullBlkCount uint64, lookback uint64) (block.TipSet, error) { - if lookback <= nullBlkCount+1 { // new block looks back to base - return base, nil - } - baseHeight, err := base.Height() - if err != nil { - return block.UndefTipSet, err - } - targetEpoch := abi.ChainEpoch(uint64(baseHeight) + 1 + nullBlkCount - lookback) - - return chain.FindTipsetAtEpoch(ctx, base, targetEpoch, w.chainState) -} - -// drandEntriesForEpoch returns 
the array of drand entries that should be -// included in the next block. The return value maay be nil. -func (w *DefaultWorker) drandEntriesForEpoch(ctx context.Context, base block.TipSet, nullBlkCount uint64) ([]*drand.Entry, error) { - baseHeight, err := base.Height() - if err != nil { - return nil, err - } - // Special case genesis - var rounds []drand.Round - lastTargetEpoch := abi.ChainEpoch(uint64(baseHeight) + nullBlkCount + 1 - consensus.DRANDEpochLookback) - if baseHeight == abi.ChainEpoch(0) { - // no latest entry, targetEpoch undefined as its before genesis - - // There should be a first genesis drand round from time before genesis - // and then we grab everything between this round and genesis time - startTime := w.drand.StartTimeOfRound(w.drand.FirstFilecoinRound()) - endTime := w.clock.StartTimeOfEpoch(lastTargetEpoch + 1) - rounds = w.drand.RoundsInInterval(startTime, endTime) - } else { - latestEntry, err := chain.FindLatestDRAND(ctx, base, w.chainState) - if err != nil { - return nil, err - } - - startTime := w.drand.StartTimeOfRound(latestEntry.Round) - // end of interval is beginning of next epoch after lastTargetEpoch so - // we add 1 to lastTargetEpoch - endTime := w.clock.StartTimeOfEpoch(lastTargetEpoch + 1) - rounds = w.drand.RoundsInInterval(startTime, endTime) - // first round is round of latestEntry so omit the 0th round - rounds = rounds[1:] - } - - entries := make([]*drand.Entry, len(rounds)) - for i, round := range rounds { - entries[i], err = w.drand.ReadEntry(ctx, round) - if err != nil { - return nil, err - } - } - return entries, nil -} - -func (w *DefaultWorker) electionEntry(ctx context.Context, base block.TipSet, drandEntriesInBlock []*drand.Entry) (*drand.Entry, error) { - numEntries := len(drandEntriesInBlock) - if numEntries > 0 { - return drandEntriesInBlock[numEntries-1], nil - } - - return chain.FindLatestDRAND(ctx, base, w.chainState) -} diff --git a/internal/pkg/mining/worker_test.go b/internal/pkg/mining/worker_test.go 
deleted file mode 100644 index 7f6c976f70..0000000000 --- a/internal/pkg/mining/worker_test.go +++ /dev/null @@ -1,737 +0,0 @@ -package mining_test - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - fbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - dag "github.com/ipfs/go-merkledag" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/mining" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -func TestLookbackElection(t *testing.T) { - tf.UnitTest(t) - t.Skip("using legacy vmcontext") - - mockSignerVal, blockSignerAddr := setupSigner() - mockSigner := &mockSignerVal - - builder := chain.NewBuilder(t, address.Undef) - head := builder.NewGenesis() - for i := 1; i < int(miner.ElectionLookback); i++ { - head = builder.AppendOn(head, 1) - } - - st, pool, addrs, bs := sharedSetup(t, mockSignerVal) - getStateTree := func(c context.Context, tsKey block.TipSetKey) (state.Tree, error) { - return st, nil - } - - rnd := &consensus.FakeChainRandomness{Seed: 0} - samp := &consensus.FakeSampler{Seed: 0} - minerAddr := addrs[3] // addr4 in sharedSetup - minerOwnerAddr := addrs[4] // addr5 in sharedSetup - - messages := chain.NewMessageStore(bs) - - t.Run("Election sees ticket lookback ancestors back", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr, rnd), - - MinerAddr: minerAddr, - MinerOwnerAddr: minerOwnerAddr, - WorkerSigner: mockSigner, - - TipSetMetadata: fakeTSMetadata{}, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: consensus.NewElectionMachine(rnd), - TicketGen: consensus.NewTicketMachine(samp), - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: messages, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - - blk, err := worker.Mine(ctx, head, 0) - assert.NoError(t, err) - - expectedTicket := makeExpectedTicket(ctx, t, rnd, mockSigner, head, miner.ElectionLookback, minerAddr, minerOwnerAddr) - assert.Equal(t, expectedTicket, blk.Header.Ticket) - }) -} - -func Test_Mine(t *testing.T) { - tf.UnitTest(t) - t.Skip("using legacy vmcontext") - - mockSignerVal, blockSignerAddr := setupSigner() - mockSigner := &mockSignerVal - - 
newCid := types.NewCidForTestGetter() - stateRoot := newCid() - baseBlock := &block.Block{Height: 0, StateRoot: e.NewCid(stateRoot), Ticket: block.Ticket{VRFProof: []byte{0}}} - tipSet := block.RequireNewTipSet(t, baseBlock) - - st, pool, addrs, bs := sharedSetup(t, mockSignerVal) - getStateTree := func(c context.Context, tsKey block.TipSetKey) (state.Tree, error) { - return st, nil - } - - rnd := &consensus.FakeChainRandomness{Seed: 0} - samp := &consensus.FakeSampler{Seed: 0} - minerAddr := addrs[3] // addr4 in sharedSetup - minerOwnerAddr := addrs[4] // addr5 in sharedSetup - messages := chain.NewMessageStore(bs) - - // TODO #3311: this case isn't testing much. Testing w.Mine further needs a lot more attention. - t.Run("Trivial success case", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr, rnd), - - MinerAddr: minerAddr, - MinerOwnerAddr: minerOwnerAddr, - WorkerSigner: mockSigner, - - TipSetMetadata: fakeTSMetadata{}, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: consensus.NewElectionMachine(rnd), - TicketGen: consensus.NewTicketMachine(samp), - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: messages, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - - blk, err := worker.Mine(ctx, tipSet, 0) - assert.NoError(t, err) - - expectedTicket := makeExpectedTicket(ctx, t, rnd, mockSigner, tipSet, miner.ElectionLookback, minerAddr, minerOwnerAddr) - assert.Equal(t, expectedTicket, blk.Header.Ticket) - }) - - t.Run("Block generation fails", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr, rnd), - - MinerAddr: minerAddr, - 
MinerOwnerAddr: minerOwnerAddr, - WorkerSigner: mockSigner, - - TipSetMetadata: fakeTSMetadata{shouldError: true}, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: consensus.NewElectionMachine(rnd), - TicketGen: consensus.NewTicketMachine(samp), - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: messages, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - - _, err := worker.Mine(ctx, tipSet, 0) - require.Error(t, err) - assert.Contains(t, err.Error(), "test error retrieving state root") - }) - - t.Run("Sent empty tipset", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr, rnd), - - MinerAddr: minerAddr, - MinerOwnerAddr: minerOwnerAddr, - WorkerSigner: mockSigner, - - TipSetMetadata: fakeTSMetadata{}, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: consensus.NewElectionMachine(rnd), - TicketGen: consensus.NewTicketMachine(samp), - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: messages, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - input := block.TipSet{} - _, err := worker.Mine(ctx, input, 0) - assert.EqualError(t, err, "bad input tipset with no blocks sent to Mine()") - }) -} - -func sharedSetupInitial() (cbor.IpldStore, *message.Pool, cid.Cid) { - r := repo.NewInMemoryRepo() - bs := blockstore.NewBlockstore(r.Datastore()) - cst := cborutil.NewIpldStore(bs) - pool := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) - // Install the fake actor so we can execute it. 
- fakeActorCodeCid := builtin.AccountActorCodeID - return cst, pool, fakeActorCodeCid -} - -func sharedSetup(t *testing.T, mockSigner types.MockSigner) ( - state.Tree, *message.Pool, []address.Address, blockstore.Blockstore) { - - cst, pool, _ := sharedSetupInitial() - ctx := context.TODO() - d := datastore.NewMapDatastore() - bs := blockstore.NewBlockstore(d) - vms := vm.NewStorage(bs) - - addr1, addr2, addr3, addr5 := mockSigner.Addresses[0], mockSigner.Addresses[1], mockSigner.Addresses[2], mockSigner.Addresses[4] - _, st := th.RequireMakeStateTree(t, cst, map[address.Address]*actor.Actor{ - // Ensure core.NetworkAddress exists to prevent mining reward failures. - builtin.RewardActorAddr: actor.NewActor(builtin.RewardActorCodeID, abi.NewTokenAmount(1000000), cid.Undef), - }) - - _, addr4 := th.RequireNewMinerActor(ctx, t, st, vms, addr5, 10, th.RequireRandomPeerID(t), types.NewAttoFILFromFIL(10000)) - return st, pool, []address.Address{addr1, addr2, addr3, addr4, addr5}, bs -} - -func makeExpectedTicket(ctx context.Context, t *testing.T, rnd *consensus.FakeChainRandomness, mockSigner *types.MockSigner, - head block.TipSet, lookback abi.ChainEpoch, minerAddr address.Address, minerOwnerAddr address.Address) block.Ticket { - height, err := head.Height() - require.NoError(t, err) - entropy, err := encoding.Encode(minerAddr) - require.NoError(t, err) - seed, err := rnd.SampleChainRandomness(ctx, head.Key(), acrypto.DomainSeparationTag_TicketProduction, height-lookback, entropy) - require.NoError(t, err) - expectedVrfProof, err := mockSigner.SignBytes(ctx, seed, minerOwnerAddr) - require.NoError(t, err) - return block.Ticket{VRFProof: expectedVrfProof.Data} -} - -func TestApplyBLSMessages(t *testing.T) { - tf.UnitTest(t) - t.Skip("using legacy vmcontext") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ki := types.MustGenerateMixedKeyInfo(5, 5) - mockSigner := types.NewMockSigner(ki) - - newCid := types.NewCidForTestGetter() - 
stateRoot := newCid() - baseBlock := &block.Block{Height: 0, StateRoot: e.NewCid(stateRoot), Ticket: block.Ticket{VRFProof: []byte{0}}} - tipSet := block.RequireNewTipSet(t, baseBlock) - - st, pool, addrs, bs := sharedSetup(t, mockSigner) - getStateTree := func(c context.Context, tsKey block.TipSetKey) (state.Tree, error) { - return st, nil - } - - rnd := &consensus.FakeChainRandomness{Seed: 0} - msgStore := chain.NewMessageStore(bs) - - // assert that first two addresses have different protocols - blsAddress := addrs[0] - assert.Equal(t, address.BLS, blsAddress.Protocol()) - secpAddress := addrs[1] - assert.Equal(t, address.SECP256K1, secpAddress.Protocol()) - - // create secp and bls signed messages interleaved - for i := 0; i < 10; i++ { - var addr address.Address - if i%2 == 0 { - addr = blsAddress - } else { - addr = secpAddress - } - smsg := requireSignedMessage(t, &mockSigner, addr, addrs[3], uint64(i/2), types.NewAttoFILFromFIL(1)) - _, err := pool.Add(ctx, smsg, abi.ChainEpoch(0)) - require.NoError(t, err) - } - - worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI((&mockSigner).Addresses[5], rnd), - - MinerAddr: addrs[3], - MinerOwnerAddr: addrs[4], - WorkerSigner: &mockSigner, - - TipSetMetadata: fakeTSMetadata{}, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: &consensus.FakeElectionMachine{}, - TicketGen: &consensus.FakeTicketMachine{}, - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: msgStore, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - - block, err := worker.Mine(ctx, tipSet, 0) - require.NoError(t, err) - - t.Run("messages are divided into bls and secp messages", func(t *testing.T) { - secpMessages, blsMessages, err := msgStore.LoadMessages(ctx, block.Header.Messages.Cid) - require.NoError(t, err) - - assert.Len(t, secpMessages, 5) - assert.Len(t, blsMessages, 5) - - for _, msg := 
range secpMessages { - assert.Equal(t, address.SECP256K1, msg.Message.From.Protocol()) - } - - for _, msg := range blsMessages { - assert.Equal(t, address.BLS, msg.From.Protocol()) - } - }) - - t.Run("all 10 messages are stored", func(t *testing.T) { - secpMessages, blsMessages, err := msgStore.LoadMessages(ctx, block.Header.Messages.Cid) - require.NoError(t, err) - - assert.Len(t, secpMessages, 5) - assert.Len(t, blsMessages, 5) - }) - - t.Run("block bls signature can be used to validate messages", func(t *testing.T) { - digests := []bls.Digest{} - keys := []bls.PublicKey{} - - _, blsMessages, err := msgStore.LoadMessages(ctx, block.Header.Messages.Cid) - require.NoError(t, err) - for _, msg := range blsMessages { - msgBytes, err := msg.Marshal() - require.NoError(t, err) - digests = append(digests, bls.Hash(msgBytes)) - - pubKey := bls.PublicKey{} - copy(pubKey[:], msg.From.Payload()) - keys = append(keys, pubKey) - } - - blsSig := bls.Signature{} - copy(blsSig[:], block.Header.BLSAggregateSig.Data) - valid := bls.Verify(&blsSig, digests, keys) - - assert.True(t, valid) - }) -} - -func requireSignedMessage(t *testing.T, signer types.Signer, from, to address.Address, nonce uint64, value types.AttoFIL) *types.SignedMessage { - msg := types.NewMeteredMessage(from, to, nonce, value, builtin.MethodSend, []byte{}, types.NewAttoFILFromFIL(1), 300) - smsg, err := types.NewSignedMessage(context.TODO(), *msg, signer) - require.NoError(t, err) - return smsg -} - -func TestGenerateMultiBlockTipSet(t *testing.T) { - tf.UnitTest(t) - t.Skip("using legacy vmcontext") - - ctx := context.Background() - - mockSigner, blockSignerAddr := setupSigner() - st, pool, addrs, bs := sharedSetup(t, mockSigner) - getStateTree := func(c context.Context, tsKey block.TipSetKey) (state.Tree, error) { - return st, nil - } - rnd := &consensus.FakeChainRandomness{Seed: 0} - minerAddr := addrs[4] - minerOwnerAddr := addrs[3] - messages := chain.NewMessageStore(bs) - - meta := fakeTSMetadata{} - 
worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr, rnd), - - MinerAddr: minerAddr, - MinerOwnerAddr: minerOwnerAddr, - WorkerSigner: mockSigner, - - TipSetMetadata: meta, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: &consensus.FakeElectionMachine{}, - TicketGen: &consensus.FakeTicketMachine{}, - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: messages, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - - builder := chain.NewBuilder(t, address.Undef) - genesis := builder.NewGenesis() - - parentTipset := builder.AppendManyOn(99, genesis) - baseTipset := builder.AppendOn(parentTipset, 2) - assert.Equal(t, 2, baseTipset.Len()) - - blk, err := worker.Generate(ctx, baseTipset, block.Ticket{VRFProof: []byte{2}}, consensus.MakeFakeVRFProofForTest(), 0, consensus.MakeFakePoStsForTest(), nil) - assert.NoError(t, err) - - txMeta, err := messages.LoadTxMeta(ctx, blk.Header.Messages.Cid) - require.NoError(t, err) - assert.Equal(t, types.EmptyMessagesCID, txMeta.SecpRoot.Cid) - - expectedStateRoot, err := meta.GetTipSetStateRoot(parentTipset.Key()) - require.NoError(t, err) - assert.Equal(t, expectedStateRoot, blk.Header.StateRoot.Cid) - - expectedReceipts, err := meta.GetTipSetReceiptsRoot(parentTipset.Key()) - require.NoError(t, err) - assert.Equal(t, expectedReceipts, blk.Header.MessageReceipts.Cid) - - assert.Equal(t, uint64(101), blk.Header.Height) - assert.Equal(t, fbig.NewInt(120), blk.Header.ParentWeight) - assert.Equal(t, block.Ticket{VRFProof: []byte{2}}, blk.Header.Ticket) -} - -// After calling Generate, do the new block and new state of the message pool conform to our expectations? 
-func TestGeneratePoolBlockResults(t *testing.T) { - tf.UnitTest(t) - t.Skip("using legacy vmcontext") - - ctx := context.Background() - mockSigner, blockSignerAddr := setupSigner() - newCid := types.NewCidForTestGetter() - st, pool, addrs, bs := sharedSetup(t, mockSigner) - - getStateTree := func(c context.Context, tsKey block.TipSetKey) (state.Tree, error) { - return st, nil - } - rnd := &consensus.FakeChainRandomness{Seed: 0} - messages := chain.NewMessageStore(bs) - - worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr, rnd), - - MinerAddr: addrs[4], - MinerOwnerAddr: addrs[3], - WorkerSigner: mockSigner, - - TipSetMetadata: fakeTSMetadata{}, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: &consensus.FakeElectionMachine{}, - TicketGen: &consensus.FakeTicketMachine{}, - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: messages, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - - // addr3 doesn't correspond to an extant account, so this will trigger errAccountNotFound -- a temporary failure. - msg1 := types.NewMeteredMessage(addrs[2], addrs[0], 0, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(1), gas.NewGas(0)) - smsg1, err := types.NewSignedMessage(ctx, *msg1, &mockSigner) - require.NoError(t, err) - - // This is actually okay and should result in a receipt - msg2 := types.NewMeteredMessage(addrs[0], addrs[1], 0, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(1), gas.NewGas(0)) - smsg2, err := types.NewSignedMessage(ctx, *msg2, &mockSigner) - require.NoError(t, err) - - // add the following and then increment the actor nonce at addrs[1], nonceTooLow, a permanent error. 
- msg3 := types.NewMeteredMessage(addrs[1], addrs[0], 0, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(1), gas.NewGas(0)) - smsg3, err := types.NewSignedMessage(ctx, *msg3, &mockSigner) - require.NoError(t, err) - - msg4 := types.NewMeteredMessage(addrs[1], addrs[2], 1, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(1), gas.NewGas(0)) - smsg4, err := types.NewSignedMessage(ctx, *msg4, &mockSigner) - require.NoError(t, err) - - _, err = pool.Add(ctx, smsg1, 0) - assert.NoError(t, err) - _, err = pool.Add(ctx, smsg2, 0) - assert.NoError(t, err) - _, err = pool.Add(ctx, smsg3, 0) - assert.NoError(t, err) - _, err = pool.Add(ctx, smsg4, 0) - assert.NoError(t, err) - - assert.Len(t, pool.Pending(), 4) - - // Set actor nonce past nonce of message in pool. - // Have to do this here to get a permanent error in the pool. - act, actID := th.RequireLookupActor(ctx, t, st, vm.NewStorage(bs), addrs[1]) - require.NoError(t, err) - - act.CallSeqNum = 2 - err = st.SetActor(ctx, actID, act) - require.NoError(t, err) - - stateRoot, err := st.Commit(ctx) - require.NoError(t, err) - - baseBlock := block.Block{ - Parents: block.NewTipSetKey(newCid()), - Height: 100, - StateRoot: e.NewCid(stateRoot), - } - - blk, err := worker.Generate(ctx, block.RequireNewTipSet(t, &baseBlock), block.Ticket{VRFProof: []byte{0}}, consensus.MakeFakeVRFProofForTest(), 0, consensus.MakeFakePoStsForTest(), nil) - assert.NoError(t, err) - - // This is the temporary failure + the good message, - // which will be removed by the node if this block is accepted. - assert.Len(t, pool.Pending(), 2) - assert.Contains(t, pool.Pending(), smsg1) - assert.Contains(t, pool.Pending(), smsg2) - - // message and receipts can be loaded from message store and have - // length 1. 
- msgs, _, err := messages.LoadMessages(ctx, blk.Header.Messages.Cid) - require.NoError(t, err) - assert.Len(t, msgs, 1) // This is the good message -} - -func TestGenerateSetsBasicFields(t *testing.T) { - tf.UnitTest(t) - t.Skip("using legacy vmcontext") - - ctx := context.Background() - mockSigner, blockSignerAddr := setupSigner() - newCid := types.NewCidForTestGetter() - - st, pool, addrs, bs := sharedSetup(t, mockSigner) - - getStateTree := func(c context.Context, tsKey block.TipSetKey) (state.Tree, error) { - return st, nil - } - rnd := &consensus.FakeChainRandomness{Seed: 0} - minerAddr := addrs[3] - minerOwnerAddr := addrs[4] - - messages := chain.NewMessageStore(bs) - - worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr, rnd), - - MinerAddr: minerAddr, - MinerOwnerAddr: minerOwnerAddr, - WorkerSigner: mockSigner, - - TipSetMetadata: fakeTSMetadata{}, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: &consensus.FakeElectionMachine{}, - TicketGen: &consensus.FakeTicketMachine{}, - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: messages, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - - h := abi.ChainEpoch(100) - w := fbig.NewInt(1000) - baseBlock := block.Block{ - Height: h, - ParentWeight: w, - StateRoot: e.NewCid(newCid()), - } - baseTipSet := block.RequireNewTipSet(t, &baseBlock) - ticket := mining.NthTicket(7) - blk, err := worker.Generate(ctx, baseTipSet, ticket, consensus.MakeFakeVRFProofForTest(), 0, consensus.MakeFakePoStsForTest(), nil) - assert.NoError(t, err) - - assert.Equal(t, h+1, blk.Header.Height) - assert.Equal(t, minerAddr, blk.Header.Miner) - assert.Equal(t, ticket, blk.Header.Ticket) - - blk, err = worker.Generate(ctx, baseTipSet, block.Ticket{VRFProof: []byte{0}}, consensus.MakeFakeVRFProofForTest(), 1, consensus.MakeFakePoStsForTest(), nil) - assert.NoError(t, 
err) - - assert.Equal(t, h+2, blk.Header.Height) - assert.Equal(t, fbig.Add(w, fbig.NewInt(10.0)), blk.Header.ParentWeight) - assert.Equal(t, minerAddr, blk.Header.Miner) -} - -func TestGenerateWithoutMessages(t *testing.T) { - tf.UnitTest(t) - t.Skip("using legacy vmcontext") - - ctx := context.Background() - mockSigner, blockSignerAddr := setupSigner() - newCid := types.NewCidForTestGetter() - - st, pool, addrs, bs := sharedSetup(t, mockSigner) - getStateTree := func(c context.Context, tsKey block.TipSetKey) (state.Tree, error) { - return st, nil - } - rnd := &consensus.FakeChainRandomness{Seed: 0} - messages := chain.NewMessageStore(bs) - - worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr, rnd), - - MinerAddr: addrs[4], - MinerOwnerAddr: addrs[3], - WorkerSigner: mockSigner, - - TipSetMetadata: fakeTSMetadata{}, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: &consensus.FakeElectionMachine{}, - TicketGen: &consensus.FakeTicketMachine{}, - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: messages, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - - assert.Len(t, pool.Pending(), 0) - baseBlock := block.Block{ - Parents: block.NewTipSetKey(newCid()), - Height: 100, - StateRoot: e.NewCid(newCid()), - } - blk, err := worker.Generate(ctx, block.RequireNewTipSet(t, &baseBlock), block.Ticket{VRFProof: []byte{0}}, consensus.MakeFakeVRFProofForTest(), 0, consensus.MakeFakePoStsForTest(), nil) - assert.NoError(t, err) - - assert.Len(t, pool.Pending(), 0) // This is the temporary failure. 
- txMeta, err := messages.LoadTxMeta(ctx, blk.Header.Messages.Cid) - require.NoError(t, err) - assert.Equal(t, types.EmptyMessagesCID, txMeta.SecpRoot.Cid) - assert.Equal(t, types.EmptyMessagesCID, txMeta.BLSRoot.Cid) -} - -// If something goes wrong while generating a new block, even as late as when flushing it, -// no block should be returned, and the message pool should not be pruned. -func TestGenerateError(t *testing.T) { - tf.UnitTest(t) - t.Skip("using legacy vmcontext") - - ctx := context.Background() - mockSigner, blockSignerAddr := setupSigner() - newCid := types.NewCidForTestGetter() - - st, pool, addrs, bs := sharedSetup(t, mockSigner) - - getStateTree := func(c context.Context, tsKey block.TipSetKey) (state.Tree, error) { - return st, nil - } - rnd := &consensus.FakeChainRandomness{Seed: 0} - messages := chain.NewMessageStore(bs) - worker := mining.NewDefaultWorker(mining.WorkerParameters{ - API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr, rnd), - - MinerAddr: addrs[4], - MinerOwnerAddr: addrs[3], - WorkerSigner: mockSigner, - - TipSetMetadata: fakeTSMetadata{shouldError: true}, - GetStateTree: getStateTree, - GetWeight: getWeightTest, - Election: &consensus.FakeElectionMachine{}, - TicketGen: &consensus.FakeTicketMachine{}, - - MessageSource: pool, - MessageQualifier: &mining.NoMessageQualifier{}, - Blockstore: bs, - MessageStore: messages, - Clock: clock.NewChainClock(100000000, 30*time.Second, 6*time.Second), - }) - - // This is actually okay and should result in a receipt - msg := types.NewMeteredMessage(addrs[0], addrs[1], 0, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(0), gas.Unit(0)) - smsg, err := types.NewSignedMessage(ctx, *msg, &mockSigner) - require.NoError(t, err) - _, err = pool.Add(ctx, smsg, 0) - require.NoError(t, err) - - assert.Len(t, pool.Pending(), 1) - baseBlock := block.Block{ - Parents: block.NewTipSetKey(newCid()), - Height: 100, - StateRoot: e.NewCid(newCid()), - } - baseTipSet := 
block.RequireNewTipSet(t, &baseBlock) - blk, err := worker.Generate(ctx, baseTipSet, block.Ticket{VRFProof: []byte{0}}, consensus.MakeFakeVRFProofForTest(), 0, consensus.MakeFakePoStsForTest(), nil) - assert.Error(t, err, "boom") - assert.Nil(t, blk.Header) - - assert.Len(t, pool.Pending(), 1) // No messages are removed from the pool. -} - -func getWeightTest(_ context.Context, ts block.TipSet) (fbig.Int, error) { - w, err := ts.ParentWeight() - if err != nil { - return fbig.Zero(), err - } - // consensus.ecV = 10 - return fbig.Add(w, fbig.NewInt(int64(ts.Len()*10))), nil -} - -func setupSigner() (types.MockSigner, address.Address) { - mockSigner, _ := types.NewMockSignersAndKeyInfo(10) - - signerAddr := mockSigner.Addresses[len(mockSigner.Addresses)-1] - return mockSigner, signerAddr -} - -type fakeTSMetadata struct { - shouldError bool -} - -func (tm fakeTSMetadata) GetTipSetStateRoot(key block.TipSetKey) (cid.Cid, error) { - if tm.shouldError { - return cid.Undef, errors.New("test error retrieving state root") - } - return dag.NewRawNode([]byte("state root")).Cid(), nil -} - -func (tm fakeTSMetadata) GetTipSetReceiptsRoot(key block.TipSetKey) (cid.Cid, error) { - return dag.NewRawNode([]byte("receipt root")).Cid(), nil -} diff --git a/internal/pkg/net/address.go b/internal/pkg/net/address.go deleted file mode 100644 index cd4eb1ba3e..0000000000 --- a/internal/pkg/net/address.go +++ /dev/null @@ -1,34 +0,0 @@ -package net - -import ( - "github.com/libp2p/go-libp2p-core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -// PeerAddrsToAddrInfo converts a slice of string peer addresses -// (multiaddr + ipfs peerid) to PeerInfos. 
-func PeerAddrsToAddrInfo(addrs []string) ([]peer.AddrInfo, error) { - var pis []peer.AddrInfo - for _, addr := range addrs { - a, err := ma.NewMultiaddr(addr) - if err != nil { - return nil, err - } - - pinfo, err := peer.AddrInfoFromP2pAddr(a) - if err != nil { - return nil, err - } - pis = append(pis, *pinfo) - } - return pis, nil -} - -// AddrInfoToPeerIDs converts a slice of AddrInfo to a slice of peerID's. -func AddrInfoToPeerIDs(ai []peer.AddrInfo) []peer.ID { - var pis []peer.ID - for _, a := range ai { - pis = append(pis, a.ID) - } - return pis -} diff --git a/internal/pkg/net/address_test.go b/internal/pkg/net/address_test.go deleted file mode 100644 index 8da8004d01..0000000000 --- a/internal/pkg/net/address_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package net - -import ( - "testing" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/stretchr/testify/assert" -) - -func TestPeerAddrsToPeerInfosSuccess(t *testing.T) { - tf.UnitTest(t) - - addrs := []string{ - "/ip4/127.0.0.1/ipfs/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", - "/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", - } - pis, err := PeerAddrsToAddrInfo(addrs) - assert.NoError(t, err) - assert.Equal(t, "QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", pis[0].ID.Pretty()) - assert.Equal(t, "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", pis[1].ID.Pretty()) -} - -func TestPeerAddrsToPeerInfosFailure(t *testing.T) { - tf.UnitTest(t) - - addrs := []string{ - "/ipv4/no/such/address/ipfs/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", - } - _, err := PeerAddrsToAddrInfo(addrs) - assert.Error(t, err) -} diff --git a/internal/pkg/net/blocksub/topic.go b/internal/pkg/net/blocksub/topic.go deleted file mode 100644 index 1bc4e6a464..0000000000 --- a/internal/pkg/net/blocksub/topic.go +++ /dev/null @@ -1,49 +0,0 @@ -package blocksub - -import ( - "fmt" - - "github.com/pkg/errors" - - 
"github.com/filecoin-project/go-filecoin/internal/pkg/block" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// BlockTopic returns the network pubsub topic identifier on which new blocks are announced. -func Topic(networkName string) string { - return fmt.Sprintf("/fil/blocks/%s", networkName) -} - -type Payload struct { - _ struct{} `cbor:",toarray"` - Header block.Block - BLSMsgCids []e.Cid - SECPMsgCids []e.Cid -} - -func MakePayload(header *block.Block, BLSMessages, SECPMessages []*types.SignedMessage) ([]byte, error) { - blsCIDs := make([]e.Cid, len(BLSMessages)) - for i, m := range BLSMessages { - c, err := m.Message.Cid() // CID of the unsigned message - if err != nil { - return nil, errors.Wrapf(err, "failed to create blocksub payload for BLS msg %s", m) - } - blsCIDs[i] = e.NewCid(c) - } - secpCIDs := make([]e.Cid, len(SECPMessages)) - for i, m := range SECPMessages { - c, err := m.Cid() // CID of the signed message - if err != nil { - return nil, errors.Wrapf(err, "failed to create blocksub payload for SECP msg %s", m) - } - secpCIDs[i] = e.NewCid(c) - } - payload := Payload{ - Header: *header, - BLSMsgCids: blsCIDs, - SECPMsgCids: secpCIDs, - } - return encoding.Encode(payload) -} diff --git a/internal/pkg/net/blocksub/validator.go b/internal/pkg/net/blocksub/validator.go deleted file mode 100644 index b53472c3e4..0000000000 --- a/internal/pkg/net/blocksub/validator.go +++ /dev/null @@ -1,62 +0,0 @@ -package blocksub - -import ( - "context" - - "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-pubsub" - - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" -) - -var blockTopicLogger = 
log.Logger("net/block_validator") -var mDecodeBlkFail = metrics.NewInt64Counter("net/pubsub_block_decode_failure", "Number of blocks that fail to decode seen on block pubsub channel") -var mInvalidBlk = metrics.NewInt64Counter("net/pubsub_invalid_block", "Number of blocks that fail syntax validation seen on block pubsub channel") - -// BlockTopicValidator may be registered on go-libp2p-pubsub to validate blocksub messages. -type BlockTopicValidator struct { - validator pubsub.Validator - opts []pubsub.ValidatorOpt -} - -// NewBlockTopicValidator retruns a BlockTopicValidator using `bv` for message validation -func NewBlockTopicValidator(bv consensus.BlockSyntaxValidator, opts ...pubsub.ValidatorOpt) *BlockTopicValidator { - return &BlockTopicValidator{ - opts: opts, - validator: func(ctx context.Context, p peer.ID, msg *pubsub.Message) bool { - var payload Payload - err := encoding.Decode(msg.GetData(), &payload) - if err != nil { - blockTopicLogger.Debugf("failed to decode blocksub payload from peer %s: %s", p.String(), err.Error()) - mDecodeBlkFail.Inc(ctx, 1) - return false - } - if err := bv.ValidateSyntax(ctx, &payload.Header); err != nil { - blockTopicLogger.Debugf("failed to validate block %s from peer %s: %s", payload.Header.Cid().String(), p.String(), err.Error()) - mInvalidBlk.Inc(ctx, 1) - return false - } - // Note: there is no validation here that the BLS and SECP message CIDs included in the payload - // produce the AMT roots referenced in the block header. - // At present, those lists are ignored by chain validation anyway. - // Such a check happens later in block semantic validation, but it would probably be a good idea to do - // it here too. 
https://github.com/filecoin-project/go-filecoin/issues/3903 - return true - }, - } -} - -func (btv *BlockTopicValidator) Topic(network string) string { - return Topic(network) -} - -func (btv *BlockTopicValidator) Validator() pubsub.Validator { - return btv.validator -} - -func (btv *BlockTopicValidator) Opts() []pubsub.ValidatorOpt { - return btv.opts -} diff --git a/internal/pkg/net/blocksub/validator_test.go b/internal/pkg/net/blocksub/validator_test.go deleted file mode 100644 index 7a4ac32111..0000000000 --- a/internal/pkg/net/blocksub/validator_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package blocksub_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - pubsub "github.com/libp2p/go-libp2p-pubsub" - pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/net/blocksub" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" -) - -func TestBlockTopicValidator(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - mbv := th.NewStubBlockValidator() - tv := blocksub.NewBlockTopicValidator(mbv, 
nil) - builder := chain.NewBuilder(t, address.Undef) - pid1 := th.RequireIntPeerID(t, 1) - - goodBlk := builder.BuildOnBlock(nil, func(b *chain.BlockBuilder) {}) - badBlk := builder.BuildOnBlock(nil, func(b *chain.BlockBuilder) { - b.IncHeight(1) - }) - - mbv.StubSyntaxValidationForBlock(badBlk, fmt.Errorf("invalid block")) - - validator := tv.Validator() - - network := "gfctest" - assert.Equal(t, blocksub.Topic(network), tv.Topic(network)) - assert.True(t, validator(ctx, pid1, blkToPubSub(t, goodBlk))) - assert.False(t, validator(ctx, pid1, blkToPubSub(t, badBlk))) - assert.False(t, validator(ctx, pid1, nonBlkPubSubMsg())) -} - -func TestBlockPubSubValidation(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - - // setup a mock network and generate a host - mn := mocknet.New(ctx) - host1, err := mn.GenPeer() - require.NoError(t, err) - - // create a fake clock to trigger block validation failures - now := time.Unix(1234567890, 0) - mclock := clock.NewFake(now) - // block time will be 1 second - blocktime := time.Second * 1 - propDelay := 200 * time.Millisecond - - // setup a block validator and a topic validator - chainClock := clock.NewChainClockFromClock(uint64(now.Unix()), blocktime, propDelay, mclock) - bv := consensus.NewDefaultBlockValidator(chainClock, nil, nil) - btv := blocksub.NewBlockTopicValidator(bv) - - // setup a floodsub instance on the host and register the topic validator - network := "gfctest" - fsub1, err := pubsub.NewFloodSub(ctx, host1, pubsub.WithMessageSigning(false)) - require.NoError(t, err) - err = fsub1.RegisterTopicValidator(btv.Topic(network), btv.Validator(), btv.Opts()...) 
- require.NoError(t, err) - - // subscribe to the block validator topic - top1, err := fsub1.Join(btv.Topic(network)) - require.NoError(t, err) - sub1, err := top1.Subscribe() - require.NoError(t, err) - - // generate a miner address for blocks - miner := vmaddr.NewForTestGetter()() - - mclock.Advance(blocktime) // enter epoch 1 - - // create an invalid block - invalidBlk := &block.Block{ - Height: 1, - Timestamp: uint64(now.Add(time.Second * 60).Unix()), // invalid timestamp, 60 seconds in future - StateRoot: e.NewCid(types.NewCidForTestGetter()()), - Miner: miner, - Ticket: block.Ticket{VRFProof: []byte{0}}, - BlockSig: &crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: []byte{}}, - BLSAggregateSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte{}}, - } - // publish the invalid block - payload := blocksub.Payload{ - Header: *invalidBlk, - BLSMsgCids: nil, - SECPMsgCids: nil, - } - payloadBytes, err := encoding.Encode(payload) - require.NoError(t, err) - err = top1.Publish(ctx, payloadBytes) - assert.NoError(t, err) - - // see FIXME below (#3285) - time.Sleep(time.Millisecond * 100) - - // create a valid block - validTime := chainClock.StartTimeOfEpoch(abi.ChainEpoch(1)) - validBlk := &block.Block{ - Height: 1, - Timestamp: uint64(validTime.Unix()), - StateRoot: e.NewCid(types.NewCidForTestGetter()()), - Miner: miner, - Ticket: block.Ticket{VRFProof: []byte{0}}, - BlockSig: &crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: []byte{}}, - BLSAggregateSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte{}}, - } - // publish the invalid block - payload = blocksub.Payload{ - Header: *validBlk, - BLSMsgCids: nil, - SECPMsgCids: nil, - } - payloadBytes, err = encoding.Encode(payload) - require.NoError(t, err) - err = top1.Publish(ctx, payloadBytes) - assert.NoError(t, err) - - // FIXME: #3285 - // Floodsub makes no guarantees on the order of messages, this means the block we - // get here is nondeterministic. 
For now we do our best to let the invalid block propagate first - // by sleeping (*wince*), but it could be the case that the valid block arrives first - meaning this - // test could pass incorrectly since we don't know if the invalid block is in the channel and we - // have no easy way of checking since Next blocks if the channel is empty. A solution here - // could be to create a metrics registry in the block validator code and assert that it has seen - // one invalid block and one valid block. - // If this test ever flakes we know there is an issue with libp2p since the block validator has - // a test and sine TestBlockTopicValidator tests the plumbing of this code. - // This test should be reimplemented by starting an in-process node using something like GenNode - // refer to #3285 for details. - received, err := sub1.Next(ctx) - assert.NoError(t, err, "Receieved an invalid block over pubsub, seee issue #3285 for help debugging") - - // decode the block from pubsub - var receivedPayload blocksub.Payload - err = encoding.Decode(received.GetData(), &receivedPayload) - require.NoError(t, err) - - // assert this block is the valid one - assert.Equal(t, validBlk.Cid().String(), receivedPayload.Header.Cid().String()) -} - -// convert a types.Block to a pubsub message -func blkToPubSub(t *testing.T, blk *block.Block) *pubsub.Message { - payload := blocksub.Payload{ - Header: *blk, - BLSMsgCids: nil, - SECPMsgCids: nil, - } - data, err := encoding.Encode(&payload) - require.NoError(t, err) - return &pubsub.Message{ - Message: &pubsubpb.Message{ - Data: data, - }, - } -} - -// returns a pubsub message that will not decode to a types.Block -func nonBlkPubSubMsg() *pubsub.Message { - pbm := &pubsubpb.Message{ - Data: []byte("meow"), - } - return &pubsub.Message{ - Message: pbm, - } -} diff --git a/internal/pkg/net/msgsub/topic.go b/internal/pkg/net/msgsub/topic.go deleted file mode 100644 index 634258f877..0000000000 --- a/internal/pkg/net/msgsub/topic.go +++ /dev/null @@ 
-1,9 +0,0 @@ -package msgsub - -import "fmt" - -// MessageTopic returns the network pubsub topic identifier on which new messages are announced. -// The message payload is just a SignedMessage. -func Topic(networkName string) string { - return fmt.Sprintf("/fil/msgs/%s", networkName) -} diff --git a/internal/pkg/net/msgsub/validator.go b/internal/pkg/net/msgsub/validator.go deleted file mode 100644 index 2e34307eb6..0000000000 --- a/internal/pkg/net/msgsub/validator.go +++ /dev/null @@ -1,64 +0,0 @@ -package msgsub - -import ( - "context" - - "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-pubsub" - - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -var messageTopicLogger = log.Logger("net/message_validator") -var mDecodeMsgFail = metrics.NewInt64Counter("net/pubsub_message_decode_failure", "Number of messages that fail to decode seen on message pubsub channel") -var mInvalidMsg = metrics.NewInt64Counter("net/pubsub_invalid_message", "Number of messages that fail syntax validation seen on message pubsub channel") - -// MessageTopicValidator may be registered on go-libp3p-pubsub to validate msgsub payloads. -type MessageTopicValidator struct { - validator pubsub.Validator - opts []pubsub.ValidatorOpt -} - -// NewMessageTopicValidator returns a MessageTopicValidator using the input -// signature and syntax validators. 
-func NewMessageTopicValidator(syntaxVal consensus.MessageSyntaxValidator, sigVal *consensus.MessageSignatureValidator, opts ...pubsub.ValidatorOpt) *MessageTopicValidator { - return &MessageTopicValidator{ - opts: opts, - validator: func(ctx context.Context, p peer.ID, msg *pubsub.Message) bool { - unmarshaled := &types.SignedMessage{} - if err := unmarshaled.Unmarshal(msg.GetData()); err != nil { - messageTopicLogger.Debugf("message from peer: %s failed to decode: %s", p.String(), err.Error()) - mDecodeMsgFail.Inc(ctx, 1) - return false - } - if err := syntaxVal.ValidateSignedMessageSyntax(ctx, unmarshaled); err != nil { - mCid, _ := unmarshaled.Cid() - messageTopicLogger.Debugf("message %s from peer: %s failed to syntax validate: %s", mCid.String(), p.String(), err.Error()) - mInvalidMsg.Inc(ctx, 1) - return false - } - if err := sigVal.Validate(ctx, unmarshaled); err != nil { - mCid, _ := unmarshaled.Cid() - messageTopicLogger.Debugf("message %s from peer: %s failed to signature validate: %s", mCid.String(), p.String(), err.Error()) - mInvalidMsg.Inc(ctx, 1) - return false - } - return true - }, - } -} - -func (mtv *MessageTopicValidator) Topic(network string) string { - return Topic(network) -} - -func (mtv *MessageTopicValidator) Validator() pubsub.Validator { - return mtv.validator -} - -func (mtv *MessageTopicValidator) Opts() []pubsub.ValidatorOpt { - return mtv.opts -} diff --git a/internal/pkg/net/network.go b/internal/pkg/net/network.go deleted file mode 100644 index 05f4469c83..0000000000 --- a/internal/pkg/net/network.go +++ /dev/null @@ -1,183 +0,0 @@ -package net - -import ( - "context" - "fmt" - "sort" - "sync" - - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/metrics" - "github.com/libp2p/go-libp2p-core/peer" - swarm "github.com/libp2p/go-libp2p-swarm" - ma "github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" -) - -// SwarmConnInfo represents details about a single swarm connection. 
-type SwarmConnInfo struct { - Addr string - Peer string - Latency string - Muxer string - Streams []SwarmStreamInfo -} - -// SwarmStreamInfo represents details about a single swarm stream. -type SwarmStreamInfo struct { - Protocol string -} - -func (ci *SwarmConnInfo) Less(i, j int) bool { - return ci.Streams[i].Protocol < ci.Streams[j].Protocol -} - -func (ci *SwarmConnInfo) Len() int { - return len(ci.Streams) -} - -func (ci *SwarmConnInfo) Swap(i, j int) { - ci.Streams[i], ci.Streams[j] = ci.Streams[j], ci.Streams[i] -} - -// SwarmConnInfos represent details about a list of swarm connections. -type SwarmConnInfos struct { - Peers []SwarmConnInfo -} - -func (ci SwarmConnInfos) Less(i, j int) bool { - return ci.Peers[i].Addr < ci.Peers[j].Addr -} - -func (ci SwarmConnInfos) Len() int { - return len(ci.Peers) -} - -func (ci SwarmConnInfos) Swap(i, j int) { - ci.Peers[i], ci.Peers[j] = ci.Peers[j], ci.Peers[i] -} - -// Network is a unified interface for dealing with libp2p -type Network struct { - host host.Host - metrics.Reporter - *Router - *Pinger -} - -// New returns a new Network -func New( - host host.Host, - router *Router, - reporter metrics.Reporter, - pinger *Pinger, -) *Network { - return &Network{ - host: host, - Pinger: pinger, - Reporter: reporter, - Router: router, - } -} - -// GetPeerAddresses gets the current addresses of the node -func (network *Network) GetPeerAddresses() []ma.Multiaddr { - return network.host.Addrs() -} - -// GetPeerID gets the current peer id from libp2p-host -func (network *Network) GetPeerID() peer.ID { - return network.host.ID() -} - -// GetBandwidthStats gets stats on the current bandwidth usage of the network -func (network *Network) GetBandwidthStats() metrics.Stats { - return network.Reporter.GetBandwidthTotals() -} - -// ConnectionResult represents the result of an attempted connection from the -// Connect method. 
-type ConnectionResult struct { - PeerID peer.ID - Err error -} - -// Connect connects to peers at the given addresses. Does not retry. -func (network *Network) Connect(ctx context.Context, addrs []string) (<-chan ConnectionResult, error) { - outCh := make(chan ConnectionResult) - - swrm, ok := network.host.Network().(*swarm.Swarm) - if !ok { - return nil, fmt.Errorf("peerhost network was not a swarm") - } - - pis, err := PeerAddrsToAddrInfo(addrs) - if err != nil { - return nil, err - } - - go func() { - var wg sync.WaitGroup - wg.Add(len(pis)) - - for _, pi := range pis { - go func(pi peer.AddrInfo) { - swrm.Backoff().Clear(pi.ID) - err := network.host.Connect(ctx, pi) - outCh <- ConnectionResult{ - PeerID: pi.ID, - Err: err, - } - wg.Done() - }(pi) - } - - wg.Wait() - close(outCh) - }() - - return outCh, nil -} - -// Peers lists peers currently available on the network -func (network *Network) Peers(ctx context.Context, verbose, latency, streams bool) (*SwarmConnInfos, error) { - if network.host == nil { - return nil, errors.New("node must be online") - } - - conns := network.host.Network().Conns() - - out := SwarmConnInfos{ - Peers: []SwarmConnInfo{}, - } - for _, c := range conns { - pid := c.RemotePeer() - addr := c.RemoteMultiaddr() - - ci := SwarmConnInfo{ - Addr: addr.String(), - Peer: pid.Pretty(), - } - - if verbose || latency { - lat := network.host.Peerstore().LatencyEWMA(pid) - if lat == 0 { - ci.Latency = "n/a" - } else { - ci.Latency = lat.String() - } - } - if verbose || streams { - strs := c.GetStreams() - - for _, s := range strs { - ci.Streams = append(ci.Streams, SwarmStreamInfo{Protocol: string(s.Protocol())}) - } - } - sort.Sort(&ci) - out.Peers = append(out.Peers, ci) - } - - sort.Sort(&out) - return &out, nil -} diff --git a/internal/pkg/net/pinger.go b/internal/pkg/net/pinger.go deleted file mode 100644 index d21f93d032..0000000000 --- a/internal/pkg/net/pinger.go +++ /dev/null @@ -1,37 +0,0 @@ -package net - -import ( - "context" - 
"errors" - - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" -) - -// ErrPingSelf is returned if the pinger is instructed to ping itself. -var ErrPingSelf = errors.New("cannot ping self") - -// Pinger wraps a libp2p ping service. It exists to serve more helpful -// error messages in the case a node is pinging itself. -type Pinger struct { - *ping.PingService - self host.Host -} - -// NewPinger creates a filecoin pinger provided with a pingService and a PID. -func NewPinger(h host.Host, p *ping.PingService) *Pinger { - return &Pinger{ - PingService: p, - self: h, - } -} - -// Ping connects to other nodes on the network to test connections. The -// Pinger will error if the caller Pings the Pinger's self id. -func (p *Pinger) Ping(ctx context.Context, pid peer.ID) (<-chan ping.Result, error) { - if pid == p.self.ID() { - return nil, ErrPingSelf - } - return p.PingService.Ping(ctx, pid), nil -} diff --git a/internal/pkg/net/protocols.go b/internal/pkg/net/protocols.go deleted file mode 100644 index 620ba42a37..0000000000 --- a/internal/pkg/net/protocols.go +++ /dev/null @@ -1,12 +0,0 @@ -package net - -import ( - "fmt" - - "github.com/libp2p/go-libp2p-core/protocol" -) - -// FilecoinDHT is creates a protocol for the filecoin DHT. 
-func FilecoinDHT(network string) protocol.ID { - return protocol.ID(fmt.Sprintf("/fil/kad/%s", network)) -} diff --git a/internal/pkg/net/pubsub/testing.go b/internal/pkg/net/pubsub/testing.go deleted file mode 100644 index 3f00fd0b04..0000000000 --- a/internal/pkg/net/pubsub/testing.go +++ /dev/null @@ -1,100 +0,0 @@ -package pubsub - -import ( - "context" - "sync" - - "github.com/libp2p/go-libp2p-core/peer" -) - -// FakeMessage is a simple pubsub message -type FakeMessage struct { - peerID peer.ID - data []byte -} - -// GetFrom returns the message's sender ID -func (m *FakeMessage) GetFrom() peer.ID { - return m.peerID -} - -// GetData returns the message's payload -func (m *FakeMessage) GetData() []byte { - return m.data -} - -// FakeSubscription is a fake pubsub subscription. -type FakeSubscription struct { - topic string - pending chan Message - err error - cancelled bool - awaitCancel sync.WaitGroup -} - -// NewFakeSubscription builds a new fake subscription to a topic. -func NewFakeSubscription(topic string, bufSize int) *FakeSubscription { - sub := &FakeSubscription{ - topic: topic, - pending: make(chan Message, bufSize), - awaitCancel: sync.WaitGroup{}, - } - sub.awaitCancel.Add(1) - return sub -} - -// Subscription interface - -// Topic returns this subscription's topic. -func (s *FakeSubscription) Topic() string { - return s.topic -} - -// Next returns the next messages from this subscription. -func (s *FakeSubscription) Next(ctx context.Context) (Message, error) { - if s.err != nil { - return nil, s.err - } - select { - case msg := <-s.pending: - return msg, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -// Cancel cancels this subscription, after which no subsequently posted messages will be received. -func (s *FakeSubscription) Cancel() { - if s.cancelled { - panic("subscription already cancelled") - } - s.cancelled = true - s.awaitCancel.Done() -} - -// Manipulators - -// Post posts a new message to this subscription. 
-func (s *FakeSubscription) Post(msg Message) { - if s.err != nil { - panic("subscription has failed") - } - if !s.cancelled { - s.pending <- msg - } -} - -// Fail causes subsequent reads from this subscription to fail. -func (s *FakeSubscription) Fail(err error) { - if err != nil { - panic("error is nil") - } - if !s.cancelled { - s.err = err - } -} - -// AwaitCancellation waits for the subscription to be canceled by the subscriber. -func (s *FakeSubscription) AwaitCancellation() { - s.awaitCancel.Wait() -} diff --git a/internal/pkg/net/pubsub/topic.go b/internal/pkg/net/pubsub/topic.go deleted file mode 100644 index eb954e2f92..0000000000 --- a/internal/pkg/net/pubsub/topic.go +++ /dev/null @@ -1,82 +0,0 @@ -package pubsub - -import ( - "context" - - "github.com/libp2p/go-libp2p-core/peer" - libp2p "github.com/libp2p/go-libp2p-pubsub" -) - -// Topic publishes and subscribes to a libp2p pubsub topic -type Topic struct { - pubsubTopic *libp2p.Topic -} - -// Message defines the common interface for go-filecoin message consumers. -// It's a subset of the go-libp2p-pubsub/pubsub.go Message type. -type Message interface { - GetSource() peer.ID - GetSender() peer.ID - GetData() []byte -} - -type message struct { - inner *libp2p.Message -} - -// Subscription is a handle to a pubsub subscription. -// This matches part of the interface to a libp2p.pubsub.Subscription. -type Subscription interface { - // Topic returns this subscription's topic name - Topic() string - // Next returns the next message from this subscription - Next(ctx context.Context) (Message, error) - // Cancel cancels this subscription - Cancel() -} - -// NewTopic builds a new topic. -func NewTopic(topic *libp2p.Topic) *Topic { - return &Topic{pubsubTopic: topic} -} - -// Subscribe subscribes to a pubsub topic -func (t *Topic) Subscribe() (Subscription, error) { - sub, err := t.pubsubTopic.Subscribe() - return &subscriptionWrapper{sub}, err -} - -// Publish publishes to a pubsub topic. 
It blocks until there is at least one -// peer on the mesh that can receive the publish. -func (t *Topic) Publish(ctx context.Context, data []byte) error { - // return t.pubsubTopic.Publish(ctx, data) - return t.pubsubTopic.Publish(ctx, data, libp2p.WithReadiness(libp2p.MinTopicSize(1))) -} - -// subscriptionWrapper extends a pubsub.Subscription in order to wrap the Message type. -type subscriptionWrapper struct { - *libp2p.Subscription -} - -// Next wraps pubsub.Subscription.Next, implicitly adapting *pubsub.Message to the Message interface. -func (w subscriptionWrapper) Next(ctx context.Context) (Message, error) { - msg, err := w.Subscription.Next(ctx) - if err != nil { - return nil, err - } - return message{ - inner: msg, - }, nil -} - -func (m message) GetSender() peer.ID { - return m.inner.ReceivedFrom -} - -func (m message) GetSource() peer.ID { - return m.inner.GetFrom() -} - -func (m message) GetData() []byte { - return m.inner.GetData() -} diff --git a/internal/pkg/piecemanager/fsm_back_end.go b/internal/pkg/piecemanager/fsm_back_end.go deleted file mode 100644 index ecc4e1a7c7..0000000000 --- a/internal/pkg/piecemanager/fsm_back_end.go +++ /dev/null @@ -1,76 +0,0 @@ -package piecemanager - -import ( - "context" - "io" - - "github.com/pkg/errors" - - "github.com/filecoin-project/specs-actors/actors/abi" - fsm "github.com/filecoin-project/storage-fsm" -) - -var _ PieceManager = new(FiniteStateMachineBackEnd) - -type FiniteStateMachineBackEnd struct { - idc fsm.SectorIDCounter - fsm *fsm.Sealing -} - -func NewFiniteStateMachineBackEnd(fsm *fsm.Sealing, idc fsm.SectorIDCounter) FiniteStateMachineBackEnd { - return FiniteStateMachineBackEnd{ - idc: idc, - fsm: fsm, - } -} - -func (f *FiniteStateMachineBackEnd) SealPieceIntoNewSector(ctx context.Context, dealID abi.DealID, dealStart, dealEnd abi.ChainEpoch, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) error { - sectorNumber, err := f.idc.Next() - if err != nil { - return err - } - - return 
f.fsm.SealPiece(ctx, pieceSize, pieceReader, sectorNumber, fsm.DealInfo{ - DealID: dealID, - DealSchedule: fsm.DealSchedule{ - StartEpoch: dealStart, - EndEpoch: dealEnd, - }, - }) -} - -func (f *FiniteStateMachineBackEnd) PledgeSector(ctx context.Context) error { - return f.fsm.PledgeSector() -} - -func (f *FiniteStateMachineBackEnd) UnsealSector(ctx context.Context, sectorID uint64) (io.ReadCloser, error) { - panic("implement me") -} - -func (f *FiniteStateMachineBackEnd) LocatePieceForDealWithinSector(ctx context.Context, dealID uint64) (sectorID uint64, offset uint64, length uint64, err error) { - sectors, err := f.fsm.ListSectors() - if err != nil { - return 0, 0, 0, errors.Wrap(err, "failed to list sectors") - } - - isEncoded := func(s fsm.SectorState) bool { - return fsm.PreCommit2 <= s && s <= fsm.Proving - } - - for _, sector := range sectors { - offset := uint64(0) - for _, piece := range sector.Pieces { - if piece.DealInfo.DealID == abi.DealID(dealID) { - if !isEncoded(sector.State) { - return 0, 0, 0, errors.Errorf("no encoded replica exists corresponding to deal id: %d", dealID) - } - - return uint64(sector.SectorNumber), offset, uint64(piece.Piece.Size.Unpadded()), nil - } - - offset += uint64(piece.Piece.Size.Unpadded()) - } - } - - return 0, 0, 0, errors.Errorf("no encoded piece could be found corresponding to deal id: %d", dealID) -} diff --git a/internal/pkg/piecemanager/interface.go b/internal/pkg/piecemanager/interface.go deleted file mode 100644 index ac7c38a5b0..0000000000 --- a/internal/pkg/piecemanager/interface.go +++ /dev/null @@ -1,37 +0,0 @@ -package piecemanager - -import ( - "context" - "io" - - "github.com/filecoin-project/specs-actors/actors/abi" -) - -// PieceManager is responsible for sealing pieces into sectors and progressing -// the sector through its lifecycle, including coordinating with the node for -// purposes of creating pre-commit and commit messages. 
-type PieceManager interface { - // SealPieceIntoNewSector writes the provided piece into a sector and fills - // the remaining space in the sector with self deal-data. The now-filled - // sector is encoded and, when the required bits of chain randomness are - // available, committed to the network. This method is fire-and-forget; any - // errors encountered during the pre-commit or commit flows (including - // message creation) are recorded in StorageMining metadata but not exposed - // through this API. - SealPieceIntoNewSector(ctx context.Context, dealID abi.DealID, dealStart, dealEnd abi.ChainEpoch, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) error - - // PledgeSector behaves similarly to SealPieceIntoNewSector, but differs in - // that it does not require a deal having been made on-chain beforehand. It - // provisions a new sector, fills it with self-deal junk, and seals. - PledgeSector(ctx context.Context) error - - // UnsealSector produces a reader to the unsealed bytes associated with the - // provided sector id, or an error if no such sealed sector exists. The - // bytes produced by the Reader will not include any bit-padding. - UnsealSector(ctx context.Context, sectorID uint64) (io.ReadCloser, error) - - // LocatePieceForDealWithinSector produces information about the location of - // a deal's piece within a sealed sector, or an error if that piece does not - // exist within any sealed sectors. 
- LocatePieceForDealWithinSector(ctx context.Context, dealID uint64) (sectorID uint64, offset uint64, length uint64, err error) -} diff --git a/internal/pkg/poster/poster.go b/internal/pkg/poster/poster.go deleted file mode 100644 index 6425a5dab9..0000000000 --- a/internal/pkg/poster/poster.go +++ /dev/null @@ -1,240 +0,0 @@ -package poster - -import ( - "bytes" - "context" - "sync" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-address" - sectorstorage "github.com/filecoin-project/sector-storage" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cst" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/message" - appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/specs-actors/actors/abi" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" -) - -var log = logging.Logger("poster") - -// Poster listens for changes to the chain head and generates and submits a PoSt if one is required. 
-type Poster struct { - postMutex sync.Mutex - postCancel context.CancelFunc - scheduleCancel context.CancelFunc - challenge abi.Randomness - - minerAddr address.Address - outbox *message.Outbox - mgr sectorstorage.SectorManager - chain *cst.ChainStateReadWriter - stateViewer *appstate.Viewer - waiter *msg.Waiter -} - -// NewPoster creates a Poster struct -func NewPoster( - minerAddr address.Address, - outbox *message.Outbox, - mgr sectorstorage.SectorManager, - chain *cst.ChainStateReadWriter, - stateViewer *appstate.Viewer, - waiter *msg.Waiter) *Poster { - - return &Poster{ - minerAddr: minerAddr, - outbox: outbox, - mgr: mgr, - chain: chain, - stateViewer: stateViewer, - waiter: waiter, - challenge: abi.Randomness{}, - } -} - -// HandleNewHead submits a new chain head for possible fallback PoSt. -func (p *Poster) HandleNewHead(ctx context.Context, newHead block.TipSet) error { - return p.startPoStIfNeeded(ctx, newHead) -} - -// StopPoSting stops the posting scheduler if running and any outstanding PoSts. 
-func (p *Poster) StopPoSting() { - p.postMutex.Lock() - defer p.postMutex.Unlock() - - if p.scheduleCancel != nil { - p.postCancel() - - p.scheduleCancel() - p.scheduleCancel = nil - } -} - -func (p *Poster) startPoStIfNeeded(ctx context.Context, newHead block.TipSet) error { - p.postMutex.Lock() - defer p.postMutex.Unlock() - - if p.postCancel != nil { - // already posting - return nil - } - - tipsetHeight, err := newHead.Height() - if err != nil { - return err - } - - root, err := p.chain.GetTipSetStateRoot(ctx, newHead.Key()) - if err != nil { - return err - } - - stateView := p.stateViewer.StateView(root) - index, open, _, challengeAt, err := stateView.MinerDeadlineInfo(ctx, p.minerAddr, tipsetHeight) - if err != nil { - return err - } - - // exit if we haven't yet hit the deadline - if tipsetHeight < open { - return nil - } - - randomness, err := p.getChallenge(ctx, newHead.Key(), challengeAt) - if err != nil { - return err - } - - // If we have not already seen this randomness, either the deadline has changed - // or the chain as reorged to a point prior to the challenge. Either way, - // it is time to start a new PoSt. 
- if bytes.Equal(p.challenge, randomness) { - return nil - } - p.challenge = randomness - - // stop existing PoSt, if one exists - p.cancelPoSt() - - ctx, p.postCancel = context.WithCancel(ctx) - go p.doPoSt(ctx, stateView, index) - - return nil -} - -func (p *Poster) doPoSt(ctx context.Context, stateView *appstate.View, deadlineIndex uint64) { - defer p.safeCancelPoSt() - - minerID, err := address.IDFromAddress(p.minerAddr) - if err != nil { - log.Errorf("Error retrieving miner ID from address %s: %s", p.minerAddr, err) - return - } - - partitions, err := stateView.MinerPartitionIndicesForDeadline(ctx, p.minerAddr, deadlineIndex) - if err != nil { - log.Errorf("Error retrieving partitions for address %s at index %d: %s", p.minerAddr, deadlineIndex, err) - return - } - - // if no partitions, we're done - if len(partitions) == 0 { - return - } - - // Some day we might want to choose a subset of partitions to prove at one time. Today is not that day. - sectors, err := stateView.MinerSectorInfoForDeadline(ctx, p.minerAddr, deadlineIndex, partitions) - if err != nil { - log.Errorf("error retrieving sector info for miner %s partitions at index %d: %s", p.minerAddr, deadlineIndex, err) - return - } - - proofs, err := p.mgr.GenerateWindowPoSt(ctx, abi.ActorID(minerID), sectors, abi.PoStRandomness(p.challenge)) - if err != nil { - log.Errorf("error generating window PoSt: %s", err) - return - } - - _, workerAddr, err := stateView.MinerControlAddresses(ctx, p.minerAddr) - if err != nil { - log.Errorf("could not get miner worker address fro miner %s: %s", p.minerAddr, err) - return - } - - err = p.sendPoSt(ctx, workerAddr, deadlineIndex, partitions, proofs) - if err != nil { - log.Error("error sending window PoSt: ", err) - return - } -} - -func (p *Poster) sendPoSt(ctx context.Context, workerAddr address.Address, index uint64, partitions []uint64, proofs []abi.PoStProof) error { - - windowedPost := &miner.SubmitWindowedPoStParams{ - Deadline: index, - Partitions: 
partitions, - Proofs: proofs, - Skipped: abi.BitField{}, - } - - mcid, errCh, err := p.outbox.Send( - ctx, - workerAddr, - p.minerAddr, - types.ZeroAttoFIL, - types.NewGasPrice(1), - gas.NewGas(10000), - true, - builtin.MethodsMiner.SubmitWindowedPoSt, - windowedPost, - ) - if err != nil { - return err - } - if err := <-errCh; err != nil { - return err - } - - // wait until we see the post on chain at least once - err = p.waiter.Wait(ctx, mcid, msg.DefaultMessageWaitLookback, func(_ *block.Block, _ *types.SignedMessage, recp *vm.MessageReceipt) error { - return nil - }) - if err != nil { - return err - } - - return nil -} - -func (p *Poster) getChallenge(ctx context.Context, head block.TipSetKey, at abi.ChainEpoch) (abi.Randomness, error) { - buf := new(bytes.Buffer) - err := p.minerAddr.MarshalCBOR(buf) - if err != nil { - return nil, err - } - - return p.chain.SampleChainRandomness(ctx, head, acrypto.DomainSeparationTag_WindowedPoStChallengeSeed, at, buf.Bytes()) -} - -func (p *Poster) safeCancelPoSt() { - p.postMutex.Lock() - defer p.postMutex.Unlock() - - p.cancelPoSt() -} - -func (p *Poster) cancelPoSt() { - if p.postCancel != nil { - p.postCancel() - p.postCancel = nil - } -} diff --git a/internal/pkg/postgenerator/interface.go b/internal/pkg/postgenerator/interface.go deleted file mode 100644 index 5f6ed58ab1..0000000000 --- a/internal/pkg/postgenerator/interface.go +++ /dev/null @@ -1,12 +0,0 @@ -package postgenerator - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" -) - -// PoStGenerator defines a method set used to generate PoSts -type PoStGenerator interface { - GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) -} diff --git a/internal/pkg/proofs/testing.go b/internal/pkg/proofs/testing.go deleted file mode 100644 index 407291f78d..0000000000 --- a/internal/pkg/proofs/testing.go +++ /dev/null @@ -1,30 +0,0 @@ -package proofs - 
-import ( - "context" - - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" -) - -// FakeVerifier is a simple mock Verifier for testing. -type FakeVerifier struct { -} - -var _ ffiwrapper.Verifier = (*FakeVerifier)(nil) - -func (f *FakeVerifier) VerifySeal(abi.SealVerifyInfo) (bool, error) { - return true, nil -} - -func (f *FakeVerifier) VerifyWinningPoSt(context.Context, abi.WinningPoStVerifyInfo) (bool, error) { - return true, nil -} - -func (f *FakeVerifier) VerifyWindowPoSt(context.Context, abi.WindowPoStVerifyInfo) (bool, error) { - return true, nil -} - -func (f *FakeVerifier) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) { - return []uint64{}, nil -} diff --git a/internal/pkg/protocol/drand/drand_api.go b/internal/pkg/protocol/drand/drand_api.go deleted file mode 100644 index eefdb98abb..0000000000 --- a/internal/pkg/protocol/drand/drand_api.go +++ /dev/null @@ -1,106 +0,0 @@ -package drand - -import ( - "context" - "encoding/json" - - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" -) - -type Config interface { - ConfigSet(dottedPath string, paramJSON string) error -} - -type API struct { - drand drand.IFace - config Config -} - -// New creates a new API -func New(drand drand.IFace, config Config) *API { - return &API{ - drand: drand, - config: config, - } -} - -// Configure fetches group configuration from a drand server. -// It runs through the list of addrs trying each one to fetch the group config. -// Once the group is retrieved, the node's group key will be set in config. -// If overrideGroupAddrs is true, the given set of addresses will be set as the drand nodes. -// Otherwise drand address config will be set from the retrieved group info. The -// override is useful when the the drand server is behind NAT. 
-// This method assumes all drand nodes are secure or that all of them are not. This -// mis-models the drand config, but is unlikely to be false in practice. -func (api *API) Configure(addrs []string, secure bool, overrideGroupAddrs bool) error { - groupAddrs, keyCoeffs, genesisTime, roundSeconds, err := api.drand.FetchGroupConfig(addrs, secure, overrideGroupAddrs) - if err != nil { - return errors.Wrapf(err, "Could not retrieve drand group from %+v", addrs) - } - - jsonCoeffs, err := json.Marshal(keyCoeffs) - if err != nil { - return errors.New("Could not convert coefficients to json") - } - - err = api.config.ConfigSet("drand.distKey", string(jsonCoeffs)) - if err != nil { - return errors.Wrap(err, "Could not set dist key in config") - } - - if overrideGroupAddrs { - groupAddrs = addrs - } - - jsonAddrs, err := json.Marshal(groupAddrs) - if err != nil { - return errors.New("Could not convert addresses to json") - } - - err = api.config.ConfigSet("drand.addresses", string(jsonAddrs)) - if err != nil { - return errors.Wrap(err, "Could not set drand addresses in config") - } - - jsonSecure, err := json.Marshal(secure) - if err != nil { - return errors.New("Could not convert secure to json") - } - - err = api.config.ConfigSet("drand.secure", string(jsonSecure)) - if err != nil { - return errors.Wrap(err, "Could not set drand secure in config") - } - - jsonStart, err := json.Marshal(genesisTime) - if err != nil { - return errors.Wrap(err, "Could not convert startTimeUnix to json") - } - err = api.config.ConfigSet("drand.startTimeUnix", string(jsonStart)) - if err != nil { - return errors.Wrap(err, "Could not set drand start time unix in config") - } - - jsonRoundSeconds, err := json.Marshal(roundSeconds) - if err != nil { - return errors.Wrap(err, "Could not convert roundSeconds to json") - } - err = api.config.ConfigSet("drand.roundSeconds", string(jsonRoundSeconds)) - if err != nil { - return errors.Wrap(err, "Could not set drand round seconds in config") - } - - 
return nil -} - -// GetEntry retrieves an entry from the drand server -func (api *API) GetEntry(ctx context.Context, round drand.Round) (*drand.Entry, error) { - return api.drand.ReadEntry(ctx, round) -} - -// VerifyEntry verifies that child is a valid entry if its parent is. -func (api *API) VerifyEntry(parent, child *drand.Entry) (bool, error) { - return api.drand.VerifyEntry(parent, child) -} diff --git a/internal/pkg/protocol/mining/mining_api.go b/internal/pkg/protocol/mining/mining_api.go deleted file mode 100644 index 4ffc9c3d71..0000000000 --- a/internal/pkg/protocol/mining/mining_api.go +++ /dev/null @@ -1,110 +0,0 @@ -package mining - -import ( - "context" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - "github.com/filecoin-project/go-filecoin/internal/pkg/mining" - "github.com/pkg/errors" -) - -type miningChainReader interface { - GetHead() block.TipSetKey - GetTipSet(tsKey block.TipSetKey) (block.TipSet, error) -} - -// API provides an interface to the block mining protocol. 
-type API struct { - minerAddress func() (address.Address, error) - addNewBlockFunc func(context.Context, mining.FullBlock) (err error) - chainReader miningChainReader - isMiningFunc func() bool - setupMiningFunc func(context.Context) error - startMiningFunc func(context.Context) error - stopMiningFunc func(context.Context) - getWorkerFunc func(ctx context.Context) (*mining.DefaultWorker, error) - chainClock clock.ChainEpochClock -} - -// New creates a new API instance with the provided deps -func New( - minerAddr func() (address.Address, error), - addNewBlockFunc func(context.Context, mining.FullBlock) (err error), - chainReader miningChainReader, - isMiningFunc func() bool, - setupMiningFunc func(ctx context.Context) error, - startMiningFunc func(context.Context) error, - stopMiningfunc func(context.Context), - getWorkerFunc func(ctx context.Context) (*mining.DefaultWorker, error), - chainClock clock.ChainEpochClock, -) API { - return API{ - minerAddress: minerAddr, - addNewBlockFunc: addNewBlockFunc, - chainReader: chainReader, - isMiningFunc: isMiningFunc, - setupMiningFunc: setupMiningFunc, - startMiningFunc: startMiningFunc, - stopMiningFunc: stopMiningfunc, - getWorkerFunc: getWorkerFunc, - chainClock: chainClock, - } -} - -// MinerAddress returns the mining address the API is using, an error is -// returned if the mining address is not set. -func (a *API) MinerAddress() (address.Address, error) { - return a.minerAddress() -} - -// MiningIsActive calls the node's IsMining function -func (a *API) MiningIsActive() bool { - return a.isMiningFunc() -} - -// MiningOnce mines and returns a single block based on the current chain head. -// It tries each epoch in turn until it finds a winner. 
-func (a *API) MiningOnce(ctx context.Context) (*block.Block, error) { - if a.isMiningFunc() { - return nil, errors.New("Node is already mining") - } - - ts, err := a.chainReader.GetTipSet(a.chainReader.GetHead()) - if err != nil { - return nil, err - } - - miningWorker, err := a.getWorkerFunc(ctx) - if err != nil { - return nil, err - } - - res, err := mining.MineOnce(ctx, *miningWorker, ts) - if err != nil { - return nil, err - } - - if err := a.addNewBlockFunc(ctx, *res); err != nil { - return nil, err - } - - return res.Header, nil -} - -// MiningSetup sets up a storage miner without running repeated tasks like mining -func (a *API) MiningSetup(ctx context.Context) error { - return a.setupMiningFunc(ctx) -} - -// MiningStart calls the node's StartMining function -func (a *API) MiningStart(ctx context.Context) error { - return a.startMiningFunc(ctx) -} - -// MiningStop calls the node's StopMining function -func (a *API) MiningStop(ctx context.Context) { - a.stopMiningFunc(ctx) -} diff --git a/internal/pkg/protocol/mining/mining_api_test.go b/internal/pkg/protocol/mining/mining_api_test.go deleted file mode 100644 index a9d8a32ab2..0000000000 --- a/internal/pkg/protocol/mining/mining_api_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package mining_test - -import ( - "context" - "testing" - - bapi "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/mining" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestTrivialNew(t *testing.T) { - tf.UnitTest(t) - - api, _ := newAPI(t) - require.NotNil(t, api) -} - -func TestAPI_MineOnce(t *testing.T) { - tf.UnitTest(t) - t.Skip("Unskip with fake proofs") - - ctx := context.Background() - api, nd := newAPI(t) - require.NoError(t, nd.Start(ctx)) - 
defer nd.Stop(ctx) - - blk, err := api.MiningOnce(ctx) - require.Nil(t, err) - require.NotNil(t, blk) -} - -func TestMiningAPI_MiningSetup(t *testing.T) { - tf.UnitTest(t) - t.Skip("turn back on once vm integration is complete") - - ctx := context.Background() - api, nd := newAPI(t) - require.NoError(t, nd.Start(ctx)) - defer nd.Stop(ctx) - - require.NoError(t, api.MiningSetup(ctx)) - assert.NotNil(t, nd.PieceManager()) -} - -func TestMiningAPI_MiningStart(t *testing.T) { - tf.UnitTest(t) - t.Skip("turn back on once vm integration is complete") - - ctx := context.Background() - api, nd := newAPI(t) - require.NoError(t, nd.Start(ctx)) - defer nd.Stop(ctx) - - require.NoError(t, api.MiningStart(ctx)) - assert.True(t, nd.IsMining()) - nd.StopMining(ctx) -} - -func TestMiningAPI_MiningIsActive(t *testing.T) { - tf.UnitTest(t) - t.Skip("turn back on once vm integration is complete") - - ctx := context.Background() - api, nd := newAPI(t) - require.NoError(t, nd.Start(ctx)) - defer nd.Stop(ctx) - - require.NoError(t, nd.StartMining(ctx)) - assert.True(t, api.MiningIsActive()) - nd.StopMining(ctx) - assert.False(t, api.MiningIsActive()) - - nd.StopMining(ctx) -} - -func TestMiningAPI_MiningStop(t *testing.T) { - tf.UnitTest(t) - t.Skip("turn back on once vm integration is complete") - - ctx := context.Background() - api, nd := newAPI(t) - require.NoError(t, nd.Start(ctx)) - defer nd.Stop(ctx) - - require.NoError(t, nd.StartMining(ctx)) - api.MiningStop(ctx) - assert.False(t, nd.IsMining()) -} - -func TestMiningAPI_MiningAddress(t *testing.T) { - tf.UnitTest(t) - t.Skip("turn back on once vm integration is complete") - - ctx := context.Background() - api, nd := newAPI(t) - - require.NoError(t, nd.Start(ctx)) - defer nd.Stop(ctx) - - require.NoError(t, nd.StartMining(ctx)) - - maybeAddress, err := api.MinerAddress() - require.NoError(t, err) - minerAddress, err := nd.MiningAddress() - require.NoError(t, err) - - assert.Equal(t, minerAddress, maybeAddress) - - 
nd.StopMining(ctx) -} - -func TestMiningAPI_MiningTogether(t *testing.T) { - tf.UnitTest(t) - t.Skip("Ready to unskip with fake proofs") - - ctx := context.Background() - api, nd := newAPI(t) - require.NoError(t, nd.Start(ctx)) - defer nd.Stop(ctx) - - require.NoError(t, api.MiningStart(ctx)) - assert.True(t, nd.IsMining()) - blk, err := api.MiningOnce(ctx) - require.Nil(t, blk) - require.Contains(t, err.Error(), "Node is already mining") - nd.StopMining(ctx) - blk, err = api.MiningOnce(ctx) - require.Nil(t, err) - require.NotNil(t, blk) -} - -func newAPI(t *testing.T) (bapi.API, *node.Node) { - seed := node.MakeChainSeed(t, node.MakeTestGenCfg(t, 100)) - ctx := context.Background() - builder := test.NewNodeBuilder(t) - builder.WithGenesisInit(seed.GenesisInitFunc) - nd := builder.Build(ctx) - seed.GiveKey(t, nd, 0) - seed.GiveMiner(t, nd, 0) // TODO: go-fil-markets integration - return bapi.New( - nd.MiningAddress, - nd.AddNewBlock, - nd.Chain().ChainReader, - nd.IsMining, - nd.SetupMining, - nd.StartMining, - nd.StopMining, - nd.CreateMiningWorker, - nd.ChainClock, - ), nd -} diff --git a/internal/pkg/protocol/retrieval/api.go b/internal/pkg/protocol/retrieval/api.go deleted file mode 100644 index a901577ff9..0000000000 --- a/internal/pkg/protocol/retrieval/api.go +++ /dev/null @@ -1,11 +0,0 @@ -package retrieval - -import ( - iface "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -// API is the retrieval api for the test environment -type API interface { - Client() iface.RetrievalClient - Provider() iface.RetrievalProvider -} diff --git a/internal/pkg/protocol/retrieval/doc.go b/internal/pkg/protocol/retrieval/doc.go deleted file mode 100644 index f31f24ec14..0000000000 --- a/internal/pkg/protocol/retrieval/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package retrieval implements a very simple retrieval protocol that works on high level like this: -// -// 1. CLIENT opens /fil/retrieval/free/0.0.0 stream to MINER -// 2. 
CLIENT sends MINER a RetrievePieceRequest -// 3. MINER sends CLIENT a RetrievePieceResponse with Status set to Success if it has PieceRef in a sealed sector -// 4. MINER sends CLIENT RetrievePieceChunks until all data associated with PieceRef has been sent -// 5. CLIENT reads RetrievePieceChunk from stream until EOF and then closes stream -package retrieval diff --git a/internal/pkg/protocol/retrieval/retrieval_protocol_test.go b/internal/pkg/protocol/retrieval/retrieval_protocol_test.go deleted file mode 100644 index 27e232defa..0000000000 --- a/internal/pkg/protocol/retrieval/retrieval_protocol_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package retrieval_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test" - "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/retrieval" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -// NOTE: The test TestRetrievalProtocolHappyPath has been deleted due to flakiness. -// Coverage of this feature has been relegated to the functional-tests/retrieval script. 
-// See https://github.com/filecoin-project/go-filecoin/pull/1643 - -func TestRetrievalProtocolPieceNotFound(t *testing.T) { - t.Skip("Skip pending retrieval market shared component") - tf.UnitTest(t) - - //ctx := context.Background() - - //minerNode, _, minerAddr, _ := configureMinerAndClient(t) - - //require.NoError(t, minerNode.StartMining(ctx)) - //defer minerNode.StopMining(ctx) - - //someRandomCid := types.NewCidForTestGetter()() - // - //minerPID, err := minerNode.PorcelainAPI.MinerGetPeerID(ctx, minerAddr) - //require.NoError(t, err) - - //_, err = retrievePieceBytes(ctx, minerNode.RetrievalProtocol.RetrievalProvider, someRandomCid, minerPID, minerAddr) - //require.Error(t, err) -} - -func retrievePieceBytes(ctx context.Context, retrievalAPI *retrieval.API, data cid.Cid, minerPID peer.ID, addr address.Address) ([]byte, error) { // nolint: deadcode - //r, err := retrievalAPI.RetrievePiece(ctx, data, minerPID, addr) - //if err != nil { - // return nil, err - //} - // - //slice, err := ioutil.ReadAll(r) - //if err != nil { - // return nil, err - //} - // - //return slice, nil - return nil, nil -} - -func configureMinerAndClient(t *testing.T) (minerNode *node.Node, clientNode *node.Node, minerAddr address.Address, minerOwnerAddr address.Address) { // nolint: deadcode - ctx := context.Background() - - seed := node.MakeChainSeed(t, node.MakeTestGenCfg(t, 100)) - builder1 := test.NewNodeBuilder(t) - builder1.WithInitOpt(node.PeerKeyOpt(node.PeerKeys[0])) - builder1.WithGenesisInit(seed.GenesisInitFunc) - builder2 := test.NewNodeBuilder(t) - builder2.WithGenesisInit(seed.GenesisInitFunc) - - // make two nodes, one of which is the minerNode (and gets the miner peer key) - minerNode = builder1.Build(ctx) - clientNode = builder2.Build(ctx) - - // give the minerNode node a key and the miner associated with that key - seed.GiveKey(t, minerNode, 0) - minerAddr, minerOwnerAddr = seed.GiveMiner(t, minerNode, 0) - - // give the clientNode node a private key, too - 
seed.GiveKey(t, clientNode, 1) - - // start 'em up - require.NoError(t, minerNode.Start(ctx)) - require.NoError(t, clientNode.Start(ctx)) - - // make sure they're swarmed together (for block propagation) - node.ConnectNodes(t, minerNode, clientNode) - - return -} diff --git a/internal/pkg/protocol/retrieval/types.go b/internal/pkg/protocol/retrieval/types.go deleted file mode 100644 index b95e7c3b74..0000000000 --- a/internal/pkg/protocol/retrieval/types.go +++ /dev/null @@ -1,35 +0,0 @@ -package retrieval - -import ( - "github.com/ipfs/go-cid" -) - -// RetrievePieceStatus communicates a successful (or failed) piece retrieval -type RetrievePieceStatus int - -const ( - // Unset is the default status - Unset = RetrievePieceStatus(iota) - - // Failure indicates that the piece could not be retrieved from the miner - Failure - - // Success means that the piece could be retrieved from the miner - Success -) - -// RetrievePieceRequest represents a retrieval miner's request for content. -type RetrievePieceRequest struct { - PieceRef cid.Cid -} - -// RetrievePieceResponse contains the requested content. -type RetrievePieceResponse struct { - Status RetrievePieceStatus - ErrorMessage string -} - -// RetrievePieceChunk is a subset of bytes for a piece being retrieved. 
-type RetrievePieceChunk struct { - Data []byte -} diff --git a/internal/pkg/protocol/storage/api.go b/internal/pkg/protocol/storage/api.go deleted file mode 100644 index 597f56df5b..0000000000 --- a/internal/pkg/protocol/storage/api.go +++ /dev/null @@ -1,92 +0,0 @@ -package storage - -import ( - "context" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" - "github.com/filecoin-project/specs-actors/actors/abi" -) - -type storage interface { - Client() storagemarket.StorageClient - Provider() (storagemarket.StorageProvider, error) - PieceManager() (piecemanager.PieceManager, error) -} - -// API is the storage API for the test environment -type API struct { - storage storage -} - -// NewAPI creates a new API -func NewAPI(storage storage) *API { - return &API{storage} -} - -// PledgeSector creates a new, empty sector and seals it. -func (api *API) PledgeSector(ctx context.Context) error { - pm, err := api.storage.PieceManager() - if err != nil { - return err - } - - return pm.PledgeSector(ctx) -} - -// AddAsk stores a new price for storage -func (api *API) AddAsk(price abi.TokenAmount, duration abi.ChainEpoch) error { - provider, err := api.storage.Provider() - if err != nil { - return err - } - - return provider.AddAsk(price, duration) -} - -// ListAsks lists all asks for the miner -func (api *API) ListAsks(maddr address.Address) ([]*storagemarket.SignedStorageAsk, error) { - provider, err := api.storage.Provider() - if err != nil { - return nil, err - } - - return provider.ListAsks(maddr), nil -} - -// ProposeStorageDeal proposes a storage deal -func (api *API) ProposeStorageDeal( - ctx context.Context, - addr address.Address, - info *storagemarket.StorageProviderInfo, - data *storagemarket.DataRef, - startEpoch abi.ChainEpoch, - endEpoch abi.ChainEpoch, - price abi.TokenAmount, - collateral abi.TokenAmount, - rt 
abi.RegisteredProof, -) (*storagemarket.ProposeStorageDealResult, error) { - return api.storage.Client().ProposeStorageDeal(ctx, addr, info, data, startEpoch, endEpoch, price, collateral, rt) -} - -// GetStorageDeal retrieves information about an in-progress deal -func (api *API) GetStorageDeal(ctx context.Context, c cid.Cid) (storagemarket.ClientDeal, error) { - return api.storage.Client().GetLocalDeal(ctx, c) -} - -// GetClientDeals retrieves information about a in-progress deals on th miner side -func (api *API) GetClientDeals(ctx context.Context) ([]storagemarket.ClientDeal, error) { - return api.storage.Client().ListLocalDeals(ctx) -} - -// GetProviderDeals retrieves information about a in-progress deals on th miner side -func (api *API) GetProviderDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) { - provider, err := api.storage.Provider() - if err != nil { - return nil, err - } - return provider.ListLocalDeals() -} diff --git a/internal/pkg/repo/fsrepo.go b/internal/pkg/repo/fsrepo.go deleted file mode 100644 index 732ce29a4b..0000000000 --- a/internal/pkg/repo/fsrepo.go +++ /dev/null @@ -1,598 +0,0 @@ -package repo - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - ds "github.com/ipfs/go-datastore" - badgerds "github.com/ipfs/go-ds-badger2" - lockfile "github.com/ipfs/go-fs-lock" - keystore "github.com/ipfs/go-ipfs-keystore" - logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" -) - -const ( - // apiFile is the filename containing the filecoin node's api address. 
- apiFile = "api" - configFilename = "config.json" - tempConfigFilename = ".config.json.temp" - lockFile = "repo.lock" - versionFilename = "version" - walletDatastorePrefix = "wallet" - chainDatastorePrefix = "chain" - dealsDatastorePrefix = "deals" - snapshotStorePrefix = "snapshots" - snapshotFilenamePrefix = "snapshot" -) - -var log = logging.Logger("repo") - -// FSRepo is a repo implementation backed by a filesystem. -type FSRepo struct { - // Path to the repo root directory. - path string - version uint - - // lk protects the config file - lk sync.RWMutex - cfg *config.Config - - ds Datastore - keystore keystore.Keystore - walletDs Datastore - chainDs Datastore - dealsDs Datastore - - // lockfile is the file system lock to prevent others from opening the same repo. - lockfile io.Closer -} - -var _ Repo = (*FSRepo)(nil) - -// InitFSRepo initializes a new repo at the target path with the provided configuration. -// The successful result creates a symlink at targetPath pointing to a sibling directory -// named with a timestamp and repo version number. -// The link path must be empty prior. If the computed actual directory exists, it must be empty. -func InitFSRepo(targetPath string, version uint, cfg *config.Config) error { - linkPath, err := homedir.Expand(targetPath) - if err != nil { - return err - } - - container, basename := filepath.Split(linkPath) - if container == "" { // path contained no separator - container = "./" - } - - dirpath := container + MakeRepoDirName(basename, time.Now(), version, 0) - - exists, err := fileExists(linkPath) - if err != nil { - return errors.Wrapf(err, "error inspecting repo symlink path %s", linkPath) - } else if exists { - return errors.Errorf("refusing to init repo symlink at %s, file exists", linkPath) - } - - // Create the actual directory and then the link to it. 
- if err = InitFSRepoDirect(dirpath, version, cfg); err != nil { - return err - } - if err = os.Symlink(dirpath, linkPath); err != nil { - return err - } - - return nil -} - -// InitFSRepoDirect initializes a new repo at a target path, establishing a provided configuration. -// The target path must not exist, or must reference an empty, read/writable directory. -func InitFSRepoDirect(targetPath string, version uint, cfg *config.Config) error { - repoPath, err := homedir.Expand(targetPath) - if err != nil { - return err - } - - if err := ensureWritableDirectory(repoPath); err != nil { - return errors.Wrap(err, "no writable directory") - } - - empty, err := isEmptyDir(repoPath) - if err != nil { - return errors.Wrapf(err, "failed to list repo directory %s", repoPath) - } - if !empty { - return fmt.Errorf("refusing to initialize repo in non-empty directory %s", repoPath) - } - - if err := WriteVersion(repoPath, version); err != nil { - return errors.Wrap(err, "initializing repo version failed") - } - - if err := initConfig(repoPath, cfg); err != nil { - return errors.Wrap(err, "initializing config file failed") - } - return nil -} - -// OpenFSRepo opens an initialized fsrepo, expecting a specific version. -// The provided path may be to a directory, or a symbolic link pointing at a directory, which -// will be resolved just once at open. -func OpenFSRepo(repoPath string, version uint) (*FSRepo, error) { - repoPath, err := homedir.Expand(repoPath) - if err != nil { - return nil, err - } - - hasConfig, err := hasConfig(repoPath) - if err != nil { - return nil, errors.Wrap(err, "failed to check for repo config") - } - - if !hasConfig { - return nil, errors.Errorf("no repo found at %s; run: 'go-filecoin init [--repodir=%s]'", repoPath, repoPath) - } - - info, err := os.Stat(repoPath) - if err != nil { - return nil, errors.Wrapf(err, "failed to stat repo link %s", repoPath) - } - - // Resolve path if it's a symlink. 
- var actualPath string - if info.IsDir() { - actualPath = repoPath - } else { - actualPath, err = os.Readlink(repoPath) - if err != nil { - return nil, errors.Wrapf(err, "failed to follow repo symlink %s", repoPath) - } - } - - r := &FSRepo{path: actualPath, version: version} - - r.lockfile, err = lockfile.Lock(r.path, lockFile) - if err != nil { - return nil, errors.Wrap(err, "failed to take repo lock") - } - - if err := r.loadFromDisk(); err != nil { - _ = r.lockfile.Close() - return nil, err - } - - return r, nil -} - -// MakeRepoDirName constructs a name for a concrete repo directory, which includes its -// version number and a timestamp. The name will begin with prefix and, if uniqueifier is -// non-zero, end with that (intended as an ordinal for finding a free name). -// E.g. ".filecoin-20190102-140425-012-1 -// This is exported for use by migrations. -func MakeRepoDirName(prefix string, ts time.Time, version uint, uniqueifier uint) string { - name := strings.Join([]string{ - prefix, - ts.Format("20060102-150405"), - fmt.Sprintf("v%03d", version), - }, "-") - if uniqueifier != 0 { - name = name + fmt.Sprintf("-%d", uniqueifier) - } - return name -} - -func (r *FSRepo) loadFromDisk() error { - localVersion, err := r.readVersion() - if err != nil { - return errors.Wrap(err, "failed to read version") - } - - if localVersion < r.version { - return fmt.Errorf("out of date repo version, got %d expected %d. Migrate with tools/migration/go-filecoin-migrate", localVersion, Version) - } - - if localVersion > r.version { - return fmt.Errorf("binary needs update to handle repo version, got %d expected %d. 
Update binary to latest release", localVersion, Version) - } - - if err := r.loadConfig(); err != nil { - return errors.Wrap(err, "failed to load config file") - } - - if err := r.openDatastore(); err != nil { - return errors.Wrap(err, "failed to open datastore") - } - - if err := r.openKeystore(); err != nil { - return errors.Wrap(err, "failed to open keystore") - } - - if err := r.openWalletDatastore(); err != nil { - return errors.Wrap(err, "failed to open wallet datastore") - } - - if err := r.openChainDatastore(); err != nil { - return errors.Wrap(err, "failed to open chain datastore") - } - - if err := r.openDealsDatastore(); err != nil { - return errors.Wrap(err, "failed to open deals datastore") - } - return nil -} - -// Config returns the configuration object. -func (r *FSRepo) Config() *config.Config { - r.lk.RLock() - defer r.lk.RUnlock() - - return r.cfg -} - -// ReplaceConfig replaces the current config with the newly passed in one. -func (r *FSRepo) ReplaceConfig(cfg *config.Config) error { - if err := r.SnapshotConfig(r.Config()); err != nil { - log.Warnf("failed to create snapshot: %s", err.Error()) - } - r.lk.Lock() - defer r.lk.Unlock() - - r.cfg = cfg - tmp := filepath.Join(r.path, tempConfigFilename) - err := os.RemoveAll(tmp) - if err != nil { - return err - } - err = r.cfg.WriteFile(tmp) - if err != nil { - return err - } - return os.Rename(tmp, filepath.Join(r.path, configFilename)) -} - -// SnapshotConfig stores a copy `cfg` in /snapshots/ appending the -// time of snapshot to the filename. -func (r *FSRepo) SnapshotConfig(cfg *config.Config) error { - snapshotFile := filepath.Join(r.path, snapshotStorePrefix, genSnapshotFileName()) - exists, err := fileExists(snapshotFile) - if err != nil { - return errors.Wrap(err, "error checking snapshot file") - } else if exists { - // this should never happen - return fmt.Errorf("file already exists: %s", snapshotFile) - } - return cfg.WriteFile(snapshotFile) -} - -// Datastore returns the datastore. 
-func (r *FSRepo) Datastore() ds.Batching { - return r.ds -} - -// WalletDatastore returns the wallet datastore. -func (r *FSRepo) WalletDatastore() Datastore { - return r.walletDs -} - -// ChainDatastore returns the chain datastore. -func (r *FSRepo) ChainDatastore() Datastore { - return r.chainDs -} - -// DealsDatastore returns the deals datastore. -func (r *FSRepo) DealsDatastore() Datastore { - return r.dealsDs -} - -// Version returns the version of the repo -func (r *FSRepo) Version() uint { - return r.version -} - -// Keystore returns the keystore -func (r *FSRepo) Keystore() keystore.Keystore { - return r.keystore -} - -// Close closes the repo. -func (r *FSRepo) Close() error { - if err := r.ds.Close(); err != nil { - return errors.Wrap(err, "failed to close datastore") - } - - if err := r.walletDs.Close(); err != nil { - return errors.Wrap(err, "failed to close wallet datastore") - } - - if err := r.chainDs.Close(); err != nil { - return errors.Wrap(err, "failed to close chain datastore") - } - - if err := r.dealsDs.Close(); err != nil { - return errors.Wrap(err, "failed to close miner deals datastore") - } - - if err := r.removeAPIFile(); err != nil { - return errors.Wrap(err, "error removing API file") - } - - return r.lockfile.Close() -} - -func (r *FSRepo) removeFile(path string) error { - if err := os.Remove(path); err != nil && !os.IsNotExist(err) { - return err - } - - return nil -} - -func (r *FSRepo) removeAPIFile() error { - return r.removeFile(filepath.Join(r.path, apiFile)) -} - -// Tests whether a repo directory contains the expected config file. 
-func hasConfig(p string) (bool, error) { - configPath := filepath.Join(p, configFilename) - - _, err := os.Lstat(configPath) - switch { - case err == nil: - return true, nil - case os.IsNotExist(err): - return false, nil - default: - return false, err - } -} - -func (r *FSRepo) loadConfig() error { - configFile := filepath.Join(r.path, configFilename) - - cfg, err := config.ReadFile(configFile) - if err != nil { - return errors.Wrapf(err, "failed to read config file at %q", configFile) - } - - r.cfg = cfg - return nil -} - -// readVersion reads the repo's version file (but does not change r.version). -func (r *FSRepo) readVersion() (uint, error) { - content, err := ReadVersion(r.path) - if err != nil { - return 0, err - } - - version, err := strconv.Atoi(content) - if err != nil { - return 0, errors.New("corrupt version file: version is not an integer") - } - - return uint(version), nil -} - -func (r *FSRepo) openDatastore() error { - switch r.cfg.Datastore.Type { - case "badgerds": - ds, err := badgerds.NewDatastore(filepath.Join(r.path, r.cfg.Datastore.Path), badgerOptions()) - if err != nil { - return err - } - r.ds = ds - default: - return fmt.Errorf("unknown datastore type in config: %s", r.cfg.Datastore.Type) - } - - return nil -} - -func (r *FSRepo) openKeystore() error { - ksp := filepath.Join(r.path, "keystore") - - ks, err := keystore.NewFSKeystore(ksp) - if err != nil { - return err - } - - r.keystore = ks - - return nil -} - -func (r *FSRepo) openChainDatastore() error { - ds, err := badgerds.NewDatastore(filepath.Join(r.path, chainDatastorePrefix), badgerOptions()) - if err != nil { - return err - } - - r.chainDs = ds - - return nil -} - -func (r *FSRepo) openWalletDatastore() error { - // TODO: read wallet datastore info from config, use that to open it up - ds, err := badgerds.NewDatastore(filepath.Join(r.path, walletDatastorePrefix), badgerOptions()) - if err != nil { - return err - } - - r.walletDs = ds - - return nil -} - -func (r *FSRepo) 
openDealsDatastore() error { - ds, err := badgerds.NewDatastore(filepath.Join(r.path, dealsDatastorePrefix), badgerOptions()) - if err != nil { - return err - } - - r.dealsDs = ds - - return nil -} - -// WriteVersion writes the given version to the repo version file. -func WriteVersion(p string, version uint) error { - return ioutil.WriteFile(filepath.Join(p, versionFilename), []byte(strconv.Itoa(int(version))), 0644) -} - -// ReadVersion returns the unparsed (string) version -// from the version file in the specified repo. -func ReadVersion(repoPath string) (string, error) { - file, err := ioutil.ReadFile(filepath.Join(repoPath, versionFilename)) - if err != nil { - return "", err - } - return strings.Trim(string(file), "\n"), nil -} - -func initConfig(p string, cfg *config.Config) error { - configFile := filepath.Join(p, configFilename) - exists, err := fileExists(configFile) - if err != nil { - return errors.Wrap(err, "error inspecting config file") - } else if exists { - return fmt.Errorf("config file already exists: %s", configFile) - } - - if err := cfg.WriteFile(configFile); err != nil { - return err - } - - // make the snapshot dir - snapshotDir := filepath.Join(p, snapshotStorePrefix) - return ensureWritableDirectory(snapshotDir) -} - -func genSnapshotFileName() string { - return fmt.Sprintf("%s-%d.json", snapshotFilenamePrefix, time.Now().UTC().UnixNano()) -} - -// Ensures that path points to a read/writable directory, creating it if necessary. -func ensureWritableDirectory(path string) error { - // Attempt to create the requested directory, accepting that something might already be there. - err := os.Mkdir(path, 0775) - - if err == nil { - return nil // Skip the checks below, we just created it. - } else if !os.IsExist(err) { - return errors.Wrapf(err, "failed to create directory %s", path) - } - - // Inspect existing directory. 
- stat, err := os.Stat(path) - if err != nil { - return errors.Wrapf(err, "failed to stat path \"%s\"", path) - } - if !stat.IsDir() { - return errors.Errorf("%s is not a directory", path) - } - if (stat.Mode() & 0600) != 0600 { - return errors.Errorf("insufficient permissions for path %s, got %04o need %04o", path, stat.Mode(), 0600) - } - return nil -} - -// Tests whether the directory at path is empty -func isEmptyDir(path string) (bool, error) { - infos, err := ioutil.ReadDir(path) - if err != nil { - return false, err - } - return len(infos) == 0, nil -} - -func fileExists(file string) (bool, error) { - _, err := os.Stat(file) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -// SetAPIAddr writes the address to the API file. SetAPIAddr expects parameter -// `port` to be of the form `:`. -func (r *FSRepo) SetAPIAddr(maddr string) error { - f, err := os.Create(filepath.Join(r.path, apiFile)) - if err != nil { - return errors.Wrap(err, "could not create API file") - } - - defer f.Close() // nolint: errcheck - - _, err = f.WriteString(maddr) - if err != nil { - // If we encounter an error writing to the API file, - // delete the API file. The error encountered while - // deleting the API file will be returned (if one - // exists) instead of the write-error. - if err := r.removeAPIFile(); err != nil { - return errors.Wrap(err, "failed to remove API file") - } - - return errors.Wrap(err, "failed to write to API file") - } - - return nil -} - -// Path returns the path the fsrepo is at -func (r *FSRepo) Path() (string, error) { - return r.path, nil -} - -// JournalPath returns the path the journal is at. 
-func (r *FSRepo) JournalPath() string { - return fmt.Sprintf("%s/journal.json", r.path) -} - -// APIAddrFromRepoPath returns the api addr from the filecoin repo -func APIAddrFromRepoPath(repoPath string) (string, error) { - repoPath, err := homedir.Expand(repoPath) - if err != nil { - return "", errors.Wrap(err, fmt.Sprintf("can't resolve local repo path %s", repoPath)) - } - return apiAddrFromFile(filepath.Join(repoPath, apiFile)) -} - -// APIAddrFromFile reads the address from the API file at the given path. -// A relevant comment from a similar function at go-ipfs/repo/fsrepo/fsrepo.go: -// This is a concurrent operation, meaning that any process may read this file. -// Modifying this file, therefore, should use "mv" to replace the whole file -// and avoid interleaved read/writes -func apiAddrFromFile(apiFilePath string) (string, error) { - contents, err := ioutil.ReadFile(apiFilePath) - if err != nil { - return "", errors.Wrap(err, "failed to read API file") - } - - return string(contents), nil -} - -// APIAddr reads the FSRepo's api file and returns the api address -func (r *FSRepo) APIAddr() (string, error) { - return apiAddrFromFile(filepath.Join(filepath.Clean(r.path), apiFile)) -} - -func badgerOptions() *badgerds.Options { - result := &badgerds.DefaultOptions - result.Truncate = true - return result -} diff --git a/internal/pkg/repo/fsrepo_test.go b/internal/pkg/repo/fsrepo_test.go deleted file mode 100644 index d3341a0114..0000000000 --- a/internal/pkg/repo/fsrepo_test.go +++ /dev/null @@ -1,391 +0,0 @@ -package repo - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "testing" - - ds "github.com/ipfs/go-datastore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestInitRepoDirect(t *testing.T) { - tf.UnitTest(t) - cfg := 
config.NewDefaultConfig() - - // Inits a repo and opens it (ensuring it is openable) - initAndOpenRepoDirect := func(repoPath string, version uint, cfg *config.Config) (*FSRepo, error) { - if err := InitFSRepoDirect(repoPath, version, cfg); err != nil { - return nil, err - } - return OpenFSRepo(repoPath, version) - } - - t.Run("successfully creates when directory exists", func(t *testing.T) { - dir, err := ioutil.TempDir("", "init") - require.NoError(t, err) - defer RequireRemoveAll(t, dir) - - _, err = initAndOpenRepoDirect(dir, 42, cfg) - assert.NoError(t, err) - checkNewRepoFiles(t, dir, 42) - }) - - t.Run("successfully creates when directory does not exist", func(t *testing.T) { - dir, err := ioutil.TempDir("", "init") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() - - dir = filepath.Join(dir, "nested") - - _, err = initAndOpenRepoDirect(dir, 42, cfg) - assert.NoError(t, err) - checkNewRepoFiles(t, dir, 42) - }) - - t.Run("fails with error if directory is not writeable", func(t *testing.T) { - parentDir, err := ioutil.TempDir("", "init") - require.NoError(t, err) - defer RequireRemoveAll(t, parentDir) - - // make read only dir - dir := filepath.Join(parentDir, "readonly") - err = os.Mkdir(dir, 0444) - assert.NoError(t, err) - assert.False(t, ConfigExists(dir)) - - _, err = initAndOpenRepoDirect(dir, 42, cfg) - assert.Contains(t, err.Error(), "permission") - }) - - t.Run("fails with error if directory not empty", func(t *testing.T) { - dir, err := ioutil.TempDir("", "init") - require.NoError(t, err) - defer RequireRemoveAll(t, dir) - - err = ioutil.WriteFile(filepath.Join(dir, "hi"), []byte("hello"), 0644) - assert.NoError(t, err) - - _, err = initAndOpenRepoDirect(dir, 42, cfg) - assert.Contains(t, err.Error(), "empty") - }) -} - -func TestFSRepoOpen(t *testing.T) { - tf.UnitTest(t) - - t.Run("[fail] repo version newer than binary", func(t *testing.T) { - container, err := ioutil.TempDir("", "") - require.NoError(t, 
err) - defer RequireRemoveAll(t, container) - repoPath := path.Join(container, "repo") - - assert.NoError(t, InitFSRepo(repoPath, 1, config.NewDefaultConfig())) - // set wrong version - assert.NoError(t, WriteVersion(repoPath, 99)) - - _, err = OpenFSRepo(repoPath, 1) - expected := fmt.Sprintf("binary needs update to handle repo version, got 99 expected %d. Update binary to latest release", Version) - assert.EqualError(t, err, expected) - }) - t.Run("[fail] binary version newer than repo", func(t *testing.T) { - container, err := ioutil.TempDir("", "") - require.NoError(t, err) - defer RequireRemoveAll(t, container) - repoPath := path.Join(container, "repo") - - assert.NoError(t, InitFSRepo(repoPath, 1, config.NewDefaultConfig())) - // set wrong version - assert.NoError(t, WriteVersion(repoPath, 0)) - - _, err = OpenFSRepo(repoPath, 1) - expected := fmt.Sprintf("out of date repo version, got 0 expected %d. Migrate with tools/migration/go-filecoin-migrate", Version) - - assert.EqualError(t, err, expected) - }) - t.Run("[fail] version corrupt", func(t *testing.T) { - container, err := ioutil.TempDir("", "") - require.NoError(t, err) - defer RequireRemoveAll(t, container) - repoPath := path.Join(container, "repo") - - assert.NoError(t, InitFSRepo(repoPath, 1, config.NewDefaultConfig())) - // set wrong version - assert.NoError(t, ioutil.WriteFile(filepath.Join(repoPath, versionFilename), []byte("v.8"), 0644)) - - _, err = OpenFSRepo(repoPath, 1) - assert.EqualError(t, err, "failed to read version: corrupt version file: version is not an integer") - }) -} - -func TestFSRepoRoundtrip(t *testing.T) { - tf.UnitTest(t) - - container, err := ioutil.TempDir("", "container") - require.NoError(t, err) - defer RequireRemoveAll(t, container) - - cfg := config.NewDefaultConfig() - cfg.API.Address = "foo" // testing that what we get back isnt just the default - - repoPath := path.Join(container, "repo") - assert.NoError(t, err, InitFSRepo(repoPath, 42, cfg)) - - r, err := 
OpenFSRepo(repoPath, 42) - assert.NoError(t, err) - - assert.Equal(t, cfg, r.Config()) - assert.NoError(t, r.Datastore().Put(ds.NewKey("beep"), []byte("boop"))) - assert.NoError(t, r.Close()) - - r2, err := OpenFSRepo(repoPath, 42) - assert.NoError(t, err) - - val, err := r2.Datastore().Get(ds.NewKey("beep")) - assert.NoError(t, err) - assert.Equal(t, []byte("boop"), val) - - assert.NoError(t, r2.Close()) -} - -func TestFSRepoReplaceAndSnapshotConfig(t *testing.T) { - tf.UnitTest(t) - - container, err := ioutil.TempDir("", "container") - require.NoError(t, err) - defer RequireRemoveAll(t, container) - repoPath := path.Join(container, "repo") - - cfg := config.NewDefaultConfig() - cfg.API.Address = "foo" - assert.NoError(t, err, InitFSRepo(repoPath, 42, cfg)) - - expSnpsht, err := ioutil.ReadFile(filepath.Join(repoPath, configFilename)) - require.NoError(t, err) - - r1, err := OpenFSRepo(repoPath, 42) - assert.NoError(t, err) - - newCfg := config.NewDefaultConfig() - newCfg.API.Address = "bar" - - assert.NoError(t, r1.ReplaceConfig(newCfg)) - assert.Equal(t, "bar", r1.Config().API.Address) - assert.NoError(t, r1.Close()) - - r2, err := OpenFSRepo(repoPath, 42) - assert.NoError(t, err) - assert.Equal(t, "bar", r2.Config().API.Address) - assert.NoError(t, r2.Close()) - - // assert that a single snapshot was created when replacing the config - // get the snapshot file name - snpFiles := getSnapshotFilenames(t, filepath.Join(repoPath, snapshotStorePrefix)) - require.Equal(t, 1, len(snpFiles)) - - snpsht, err := ioutil.ReadFile(filepath.Join(repoPath, snapshotStorePrefix, snpFiles[0])) - require.NoError(t, err) - assert.Equal(t, string(expSnpsht), string(snpsht)) -} - -func TestRepoLock(t *testing.T) { - tf.UnitTest(t) - - container, err := ioutil.TempDir("", "container") - require.NoError(t, err) - defer RequireRemoveAll(t, container) - repoPath := path.Join(container, "repo") - - cfg := config.NewDefaultConfig() - assert.NoError(t, err, InitFSRepo(repoPath, 42, cfg)) - 
- r, err := OpenFSRepo(repoPath, 42) - assert.NoError(t, err) - assert.FileExists(t, filepath.Join(repoPath, lockFile)) - _, err = OpenFSRepo(repoPath, 42) - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to take repo lock") - assert.NoError(t, r.Close()) - - _, err = os.Lstat(filepath.Join(repoPath, lockFile)) - assert.True(t, os.IsNotExist(err)) -} - -func TestRepoLockFail(t *testing.T) { - tf.UnitTest(t) - - container, err := ioutil.TempDir("", "container") - require.NoError(t, err) - defer RequireRemoveAll(t, container) - repoPath := path.Join(container, "repo") - - cfg := config.NewDefaultConfig() - assert.NoError(t, err, InitFSRepo(repoPath, 42, cfg)) - - // set invalid version, to make opening the repo fail - assert.NoError(t, - ioutil.WriteFile(filepath.Join(repoPath, versionFilename), []byte("hello"), 0644), - ) - - _, err = OpenFSRepo(repoPath, 42) - assert.Error(t, err) - - _, err = os.Lstat(filepath.Join(repoPath, lockFile)) - assert.True(t, os.IsNotExist(err)) -} - -func TestRepoAPIFile(t *testing.T) { - tf.UnitTest(t) - - t.Run("APIAddr returns last value written to API file", func(t *testing.T) { - withFSRepo(t, func(r *FSRepo) { - mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") - - addr := mustGetAPIAddr(t, r) - assert.Equal(t, "/ip4/127.0.0.1/tcp/1234", addr) - - mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/4567") - - addr = mustGetAPIAddr(t, r) - assert.Equal(t, "/ip4/127.0.0.1/tcp/4567", addr) - }) - }) - - t.Run("SetAPIAddr is idempotent", func(t *testing.T) { - withFSRepo(t, func(r *FSRepo) { - mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") - - addr := mustGetAPIAddr(t, r) - assert.Equal(t, "/ip4/127.0.0.1/tcp/1234", addr) - - mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") - mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") - mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") - mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") - - addr = mustGetAPIAddr(t, r) - assert.Equal(t, "/ip4/127.0.0.1/tcp/1234", addr) - }) - }) - - t.Run("APIAddr 
fails if called before SetAPIAddr", func(t *testing.T) { - withFSRepo(t, func(r *FSRepo) { - addr, err := r.APIAddr() - assert.Error(t, err) - assert.Equal(t, "", addr) - }) - }) - - t.Run("Close deletes API file", func(t *testing.T) { - withFSRepo(t, func(r *FSRepo) { - mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") - - info, err := os.Stat(filepath.Join(r.path, apiFile)) - assert.NoError(t, err) - assert.Equal(t, apiFile, info.Name()) - - require.NoError(t, r.Close()) - - _, err = os.Stat(filepath.Join(r.path, apiFile)) - assert.Error(t, err) - }) - }) - - t.Run("Close will succeed in spite of missing API file", func(t *testing.T) { - withFSRepo(t, func(r *FSRepo) { - mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") - - err := os.Remove(filepath.Join(r.path, apiFile)) - assert.NoError(t, err) - - assert.NoError(t, r.Close()) - }) - }) - - t.Run("SetAPI fails if unable to create API file", func(t *testing.T) { - withFSRepo(t, func(r *FSRepo) { - // create a file with permission bits that prevent us from truncating - err := ioutil.WriteFile(filepath.Join(r.path, apiFile), []byte("/ip4/127.0.0.1/tcp/9999"), 0000) - assert.NoError(t, err) - - // try to os.Create to same path - will see a failure - err = r.SetAPIAddr("/ip4/127.0.0.1/tcp/1234") - assert.Error(t, err) - }) - }) -} - -func checkNewRepoFiles(t *testing.T, path string, version uint) { - content, err := ioutil.ReadFile(filepath.Join(path, configFilename)) - assert.NoError(t, err) - - t.Log("snapshot path was created during FSRepo Init") - exists, err := fileExists(filepath.Join(path, snapshotStorePrefix)) - assert.NoError(t, err) - assert.True(t, exists) - - // Asserting the exact content here is gonna get old real quick - t.Log("config file matches expected value") - config.SanityCheck(t, string(content)) - - actualVersion, err := ioutil.ReadFile(filepath.Join(path, versionFilename)) - assert.NoError(t, err) - assert.Equal(t, strconv.FormatUint(uint64(version), 10), string(actualVersion)) -} - -func 
getSnapshotFilenames(t *testing.T, dir string) []string { - files, err := ioutil.ReadDir(dir) - require.NoError(t, err) - - var snpFiles []string - for _, f := range files { - if strings.Contains(f.Name(), "snapshot") { - snpFiles = append(snpFiles, f.Name()) - } - } - return snpFiles -} - -func withFSRepo(t *testing.T, f func(*FSRepo)) { - dir, err := ioutil.TempDir("", "") - require.NoError(t, err) - defer RequireRemoveAll(t, dir) - - cfg := config.NewDefaultConfig() - require.NoError(t, err, InitFSRepoDirect(dir, 42, cfg)) - - r, err := OpenFSRepo(dir, 42) - require.NoError(t, err) - - f(r) -} - -func mustGetAPIAddr(t *testing.T, r *FSRepo) string { - addr, err := r.APIAddr() - require.NoError(t, err) - - return addr -} - -func mustSetAPIAddr(t *testing.T, r *FSRepo, addr string) { - require.NoError(t, r.SetAPIAddr(addr)) -} - -func ConfigExists(dir string) bool { - _, err := os.Stat(filepath.Join(dir, "config.json")) - if os.IsNotExist(err) { - return false - } - return err == nil -} diff --git a/internal/pkg/repo/mem.go b/internal/pkg/repo/mem.go deleted file mode 100644 index 0d8c3931b7..0000000000 --- a/internal/pkg/repo/mem.go +++ /dev/null @@ -1,116 +0,0 @@ -package repo - -import ( - "sync" - - "github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - "github.com/ipfs/go-ipfs-keystore" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" -) - -// MemRepo is an in-memory implementation of the Repo interface. 
-type MemRepo struct { - // lk guards the config - lk sync.RWMutex - C *config.Config - D Datastore - Ks keystore.Keystore - W Datastore - Chain Datastore - DealsDs Datastore - version uint - apiAddress string -} - -var _ Repo = (*MemRepo)(nil) - -// NewInMemoryRepo makes a new instance of MemRepo -func NewInMemoryRepo() *MemRepo { - return &MemRepo{ - C: config.NewDefaultConfig(), - D: dss.MutexWrap(datastore.NewMapDatastore()), - Ks: keystore.MutexWrap(keystore.NewMemKeystore()), - W: dss.MutexWrap(datastore.NewMapDatastore()), - Chain: dss.MutexWrap(datastore.NewMapDatastore()), - DealsDs: dss.MutexWrap(datastore.NewMapDatastore()), - version: Version, - } -} - -// Config returns the configuration object. -func (mr *MemRepo) Config() *config.Config { - mr.lk.RLock() - defer mr.lk.RUnlock() - - return mr.C -} - -// ReplaceConfig replaces the current config with the newly passed in one. -func (mr *MemRepo) ReplaceConfig(cfg *config.Config) error { - mr.lk.Lock() - defer mr.lk.Unlock() - - mr.C = cfg - - return nil -} - -// Datastore returns the datastore. -func (mr *MemRepo) Datastore() datastore.Batching { - return mr.D -} - -// Keystore returns the keystore. -func (mr *MemRepo) Keystore() keystore.Keystore { - return mr.Ks -} - -// WalletDatastore returns the wallet datastore. -func (mr *MemRepo) WalletDatastore() Datastore { - return mr.W -} - -// ChainDatastore returns the chain datastore. -func (mr *MemRepo) ChainDatastore() Datastore { - return mr.Chain -} - -// DealsDatastore returns the deals datastore for miners. -func (mr *MemRepo) DealsDatastore() Datastore { - return mr.DealsDs -} - -// Version returns the version of the repo. -func (mr *MemRepo) Version() uint { - return mr.version -} - -// Close deletes the temporary directories which hold staged piece data and -// sealed sectors. -func (mr *MemRepo) Close() error { - return nil -} - -// SetAPIAddr writes the address of the running API to memory. 
-func (mr *MemRepo) SetAPIAddr(addr string) error { - mr.apiAddress = addr - return nil -} - -// APIAddr reads the address of the running API from memory. -func (mr *MemRepo) APIAddr() (string, error) { - return mr.apiAddress, nil -} - -// Path returns the default path. -func (mr *MemRepo) Path() (string, error) { - return paths.GetRepoPath("") -} - -// JournalPath returns a string to satisfy the repo interface. -func (mr *MemRepo) JournalPath() string { - return "in_memory_filecoin_journal_path" -} diff --git a/internal/pkg/repo/repo.go b/internal/pkg/repo/repo.go deleted file mode 100644 index 46afed7894..0000000000 --- a/internal/pkg/repo/repo.go +++ /dev/null @@ -1,58 +0,0 @@ -package repo - -import ( - "github.com/ipfs/go-datastore" - ds "github.com/ipfs/go-datastore" - keystore "github.com/ipfs/go-ipfs-keystore" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" -) - -// Version is the version of repo schema that this code understands. -const Version uint = 2 - -// Datastore is the datastore interface provided by the repo -type Datastore interface { - // NB: there are other more featureful interfaces we could require here, we - // can either force it, or just do hopeful type checks. Not all datastores - // implement every feature. - datastore.Batching -} - -// Repo is a representation of all persistent data in a filecoin node. -type Repo interface { - Config() *config.Config - // ReplaceConfig replaces the current config, with the newly passed in one. - ReplaceConfig(cfg *config.Config) error - - // Datastore is a general storage solution for things like blocks. - Datastore() ds.Batching - Keystore() keystore.Keystore - - // WalletDatastore is a specific storage solution, only used to store sensitive wallet information. - WalletDatastore() Datastore - - // ChainDatastore is a specific storage solution, only used to store already validated chain data. - ChainDatastore() Datastore - - // DealsDatastore holds deals data. 
- DealsDatastore() Datastore - - // SetAPIAddr sets the address of the running API. - SetAPIAddr(string) error - - // APIAddr returns the address of the running API. - APIAddr() (string, error) - - // Version returns the current repo version. - Version() uint - - // Path returns the repo path. - Path() (string, error) - - // JournalPath returns the journal path. - JournalPath() string - - // Close shuts down the repo. - Close() error -} diff --git a/internal/pkg/repo/testing.go b/internal/pkg/repo/testing.go deleted file mode 100644 index c15c54f5ac..0000000000 --- a/internal/pkg/repo/testing.go +++ /dev/null @@ -1,38 +0,0 @@ -package repo - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/require" -) - -// RequireMakeTempDir ensures that a temporary directory is created -func RequireMakeTempDir(t *testing.T, dirname string) string { - newdir, err := ioutil.TempDir("", dirname) - require.NoError(t, err) - return newdir -} - -// RequireRemoveAll ensures that the error condition is checked when we clean up -// after creating a temporary directory. -func RequireRemoveAll(t *testing.T, path string) { - require.NoError(t, os.RemoveAll(path)) -} - -// RequireOpenTempFile is a shortcut for opening a given temp file with a given -// suffix, then returning both a filename and a file pointer. -func RequireOpenTempFile(t *testing.T, suffix string) (*os.File, string) { - file, err := ioutil.TempFile("", suffix) - require.NoError(t, err) - name := file.Name() - return file, name -} - -// RequireReadLink reads a symlink that is expected to resolve successfully. 
-func RequireReadLink(t *testing.T, path string) string { - target, err := os.Readlink(path) - require.NoError(t, err) - return target -} diff --git a/internal/pkg/slashing/check.go b/internal/pkg/slashing/check.go deleted file mode 100644 index aa90b212a6..0000000000 --- a/internal/pkg/slashing/check.go +++ /dev/null @@ -1,137 +0,0 @@ -package slashing - -import ( - "bytes" - "context" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" -) - -type FaultStateView interface { - state.AccountStateView - MinerControlAddresses(ctx context.Context, maddr address.Address) (owner, worker address.Address, err error) -} - -// Chain state required for checking consensus fault reports. -type chainReader interface { - GetTipSet(block.TipSetKey) (block.TipSet, error) -} - -// Checks the validity of reported consensus faults. -type ConsensusFaultChecker struct { - chain chainReader -} - -func NewFaultChecker(chain chainReader) *ConsensusFaultChecker { - return &ConsensusFaultChecker{chain: chain} -} - -// Checks the validity of a consensus fault reported by serialized block headers h1, h2, and optional -// common-ancestor witness h3. 
-func (s *ConsensusFaultChecker) VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, head block.TipSetKey, view FaultStateView) (*runtime.ConsensusFault, error) { - if bytes.Equal(h1, h2) { - return nil, fmt.Errorf("no consensus fault: blocks identical") - } - - var b1, b2, b3 block.Block - innerErr := encoding.Decode(h1, &b1) - if innerErr != nil { - return nil, errors.Wrapf(innerErr, "failed to decode h1") - } - innerErr = encoding.Decode(h2, &b2) - if innerErr != nil { - return nil, errors.Wrapf(innerErr, "failed to decode h2") - } - - // Block syntax is not validated. This implements the strictest check possible, and is also the simplest check - // possible. - // This means that blocks that could never have been included in the chain (e.g. with an empty parent state) - // are still fault-able. - - if b1.Miner != b2.Miner { - return nil, fmt.Errorf("no consensus fault: miners differ") - } - if b1.Height > b2.Height { - return nil, fmt.Errorf("no consensus fault: first block is higher than second") - } - - // Check the basic fault conditions first, defer the (expensive) signature and chain history check until last. - var fault *runtime.ConsensusFault - - // Double-fork mining fault: two blocks at the same epoch. - // It is not necessary to present a common ancestor of the blocks. - if b1.Height == b2.Height { - fault = &runtime.ConsensusFault{ - Target: b1.Miner, - Epoch: b2.Height, - Type: runtime.ConsensusFaultDoubleForkMining, - } - } - // Time-offset mining fault: two blocks with the same parent but different epochs. - // The height check is redundant at time of writing, but included for robustness to future changes to this method. - // The blocks have a common ancestor by definition (the parent). 
- if b1.Parents.Equals(b2.Parents) && b1.Height != b2.Height { - fault = &runtime.ConsensusFault{ - Target: b1.Miner, - Epoch: b2.Height, - Type: runtime.ConsensusFaultTimeOffsetMining, - } - } - // Parent-grinding fault: one block’s parent is a tipset that provably should have included some block but does not. - // The provable case is that two blocks are mined and the later one does not include the - // earlier one as a parent even though it could have. - // B3 must prove that the higher block (B2) could have been included in B1's tipset. - if len(extra) > 0 { - innerErr = encoding.Decode(extra, &b3) - if innerErr != nil { - return nil, errors.Wrapf(innerErr, "failed to decode extra") - } - if b1.Height == b3.Height && b3.Parents.Equals(b1.Parents) && !b2.Parents.Has(b1.Cid()) && b2.Parents.Has(b3.Cid()) { - fault = &runtime.ConsensusFault{ - Target: b1.Miner, - Epoch: b2.Height, - Type: runtime.ConsensusFaultParentGrinding, - } - } - } - - if fault == nil { - return nil, fmt.Errorf("no consensus fault: blocks are ok") - } - - // Expensive validation: signatures. - - err := verifyBlockSignature(ctx, view, b1) - if err != nil { - return nil, err - } - err = verifyBlockSignature(ctx, view, b2) - if err != nil { - return nil, err - } - - return fault, nil -} - -// Checks whether a block header is correctly signed in the context of the parent state to which it refers. 
-func verifyBlockSignature(ctx context.Context, view FaultStateView, blk block.Block) error { - _, worker, err := view.MinerControlAddresses(ctx, blk.Miner) - if err != nil { - panic(errors.Wrapf(err, "failed to inspect miner addresses")) - } - if blk.BlockSig == nil { - return errors.Errorf("no consensus fault: block %s has nil signature", blk.Cid()) - } - err = state.NewSignatureValidator(view).ValidateSignature(ctx, blk.SignatureData(), worker, *blk.BlockSig) - if err != nil { - return errors.Wrapf(err, "no consensus fault: block %s signature invalid", blk.Cid()) - } - return err -} diff --git a/internal/pkg/slashing/monitor.go b/internal/pkg/slashing/monitor.go deleted file mode 100644 index 6ae34a6c62..0000000000 --- a/internal/pkg/slashing/monitor.go +++ /dev/null @@ -1,67 +0,0 @@ -package slashing - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" -) - -// ConsensusFaultDetector detects consensus faults -- misbehavior conditions where a single -// party produces multiple blocks at the same time. 
-type ConsensusFaultDetector struct { - // minerIndex tracks witnessed blocks by miner address and epoch - minerIndex map[address.Address]map[abi.ChainEpoch]*block.Block - // sender sends messages on behalf of the slasher - faultCh chan ConsensusFault -} - -// ConsensusFault is the information needed to submit a consensus fault -type ConsensusFault struct { - // Block1 and Block2 are two distinct blocks from an overlapping interval - // signed by the same miner - Block1, Block2 *block.Block -} - -// NewConsensusFaultDetector returns a fault detector given a fault channel -func NewConsensusFaultDetector(faultCh chan ConsensusFault) *ConsensusFaultDetector { - return &ConsensusFaultDetector{ - minerIndex: make(map[address.Address]map[abi.ChainEpoch]*block.Block), - faultCh: faultCh, - } - -} - -// CheckBlock records a new block and checks for faults -// Preconditions: the signature is already checked and p is the parent -func (detector *ConsensusFaultDetector) CheckBlock(b *block.Block, p block.TipSet) error { - latest := b.Height - parentHeight, err := p.Height() - if err != nil { - return err - } - earliest := parentHeight + 1 - - // Find per-miner index - blockByEpoch, tracked := detector.minerIndex[b.Miner] - if !tracked { - blockByEpoch = make(map[abi.ChainEpoch]*block.Block) - detector.minerIndex[b.Miner] = blockByEpoch - } - - // Add this epoch to the miner's index, emitting any detected faults - for e := earliest; e <= latest; e++ { - collision, tracked := blockByEpoch[e] - if tracked { - // Exact duplicates are not faults - if collision.Cid().Equals(b.Cid()) { - continue - } - // Emit all faults, any special handling of duplicates belongs downstream - detector.faultCh <- ConsensusFault{b, collision} - } - // In case of collision overwrite with most recent - blockByEpoch[e] = b - } - return nil -} diff --git a/internal/pkg/slashing/monitor_test.go b/internal/pkg/slashing/monitor_test.go deleted file mode 100644 index 97ee69a2de..0000000000 --- 
a/internal/pkg/slashing/monitor_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package slashing_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - . "github.com/filecoin-project/go-filecoin/internal/pkg/slashing" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" -) - -func assertEmptyCh(t *testing.T, faultCh chan ConsensusFault) { - select { - case <-faultCh: - t.Fail() - default: - } -} - -func TestNoFaults(t *testing.T) { - tf.UnitTest(t) - addrGetter := vmaddr.NewForTestGetter() - minerAddr1 := addrGetter() - minerAddr2 := addrGetter() - minerAddr3 := addrGetter() - - t.Run("blocks mined by different miners don't slash", func(t *testing.T) { - parentBlock := &block.Block{Height: 42} - parentTipSet := block.RequireNewTipSet(t, parentBlock) - - block1 := &block.Block{Miner: minerAddr1, Height: 43} - block2 := &block.Block{Miner: minerAddr2, Height: 43} - block3 := &block.Block{Miner: minerAddr3, Height: 43} - - faultCh := make(chan ConsensusFault, 1) - cfd := NewConsensusFaultDetector(faultCh) - assert.NoError(t, cfd.CheckBlock(block1, parentTipSet)) - assertEmptyCh(t, faultCh) - assert.NoError(t, cfd.CheckBlock(block2, parentTipSet)) - assertEmptyCh(t, faultCh) - assert.NoError(t, cfd.CheckBlock(block3, parentTipSet)) - assertEmptyCh(t, faultCh) - }) - - t.Run("blocks mined at different heights don't slash", func(t *testing.T) { - parent1Block := &block.Block{Height: 42} - parent1TipSet := block.RequireNewTipSet(t, parent1Block) - block1 := &block.Block{Miner: minerAddr1, Height: 43} - - parent2Block := &block.Block{Height: 55} - parent2TipSet := block.RequireNewTipSet(t, parent2Block) - block2 := &block.Block{Miner: minerAddr1, Height: 56} - - faultCh 
:= make(chan ConsensusFault, 1) - cfd := NewConsensusFaultDetector(faultCh) - assert.NoError(t, cfd.CheckBlock(block1, parent1TipSet)) - assertEmptyCh(t, faultCh) - assert.NoError(t, cfd.CheckBlock(block2, parent2TipSet)) - assertEmptyCh(t, faultCh) - }) - - t.Run("blocks with non-overlapping null intervals don't slash", func(t *testing.T) { - parent1Block := &block.Block{Height: 42} - parent1TipSet := block.RequireNewTipSet(t, parent1Block) - block1 := &block.Block{Miner: minerAddr1, Height: 46} - - parent2TipSet := block.RequireNewTipSet(t, block1) - block2 := &block.Block{Miner: minerAddr1, Height: 56} - - faultCh := make(chan ConsensusFault, 1) - cfd := NewConsensusFaultDetector(faultCh) - assert.NoError(t, cfd.CheckBlock(block1, parent1TipSet)) - assertEmptyCh(t, faultCh) - assert.NoError(t, cfd.CheckBlock(block2, parent2TipSet)) - assertEmptyCh(t, faultCh) - }) - - t.Run("duplicate equal blocks don't slash", func(t *testing.T) { - parentBlock := &block.Block{Height: 42} - parentTipSet := block.RequireNewTipSet(t, parentBlock) - - block := &block.Block{Miner: minerAddr1, Height: 43} - faultCh := make(chan ConsensusFault, 1) - cfd := NewConsensusFaultDetector(faultCh) - assert.NoError(t, cfd.CheckBlock(block, parentTipSet)) - assertEmptyCh(t, faultCh) - assert.NoError(t, cfd.CheckBlock(block, parentTipSet)) - assertEmptyCh(t, faultCh) - }) -} - -func TestFault(t *testing.T) { - tf.UnitTest(t) - addrGetter := vmaddr.NewForTestGetter() - minerAddr1 := addrGetter() - - parentBlock := &block.Block{Height: 42} - parentTipSet := block.RequireNewTipSet(t, parentBlock) - - block1 := &block.Block{Miner: minerAddr1, Height: 43, StateRoot: e.NewCid(types.CidFromString(t, "some-state"))} - block2 := &block.Block{Miner: minerAddr1, Height: 43, StateRoot: e.NewCid(types.CidFromString(t, "some-other-state"))} - - faultCh := make(chan ConsensusFault, 1) - cfd := NewConsensusFaultDetector(faultCh) - assert.NoError(t, cfd.CheckBlock(block1, parentTipSet)) - assertEmptyCh(t, 
faultCh) // no collision here because index is empty - assert.NoError(t, cfd.CheckBlock(block2, parentTipSet)) - fault := <-faultCh - assert.Equal(t, fault.Block1, block2) - assert.Equal(t, fault.Block2, block1) -} - -func TestFaultNullBlocks(t *testing.T) { - tf.UnitTest(t) - addrGetter := vmaddr.NewForTestGetter() - minerAddr1 := addrGetter() - - t.Run("same base", func(t *testing.T) { - parentBlock := &block.Block{Height: 42} - parentTipSet := block.RequireNewTipSet(t, parentBlock) - - block1 := &block.Block{Miner: minerAddr1, Height: 45} - block2 := &block.Block{Miner: minerAddr1, Height: 49} - - faultCh := make(chan ConsensusFault, 3) - cfd := NewConsensusFaultDetector(faultCh) - assert.NoError(t, cfd.CheckBlock(block1, parentTipSet)) - assertEmptyCh(t, faultCh) - assert.NoError(t, cfd.CheckBlock(block2, parentTipSet)) - for i := 0; i < 3; i++ { - fault := <-faultCh - assert.Equal(t, fault.Block1, block2) - assert.Equal(t, fault.Block2, block1) - } - }) - -} diff --git a/internal/pkg/state/signer.go b/internal/pkg/state/signer.go deleted file mode 100644 index 2d4cdebab4..0000000000 --- a/internal/pkg/state/signer.go +++ /dev/null @@ -1,61 +0,0 @@ -package state - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" -) - -type chainHeadTracker interface { - GetHead() block.TipSetKey -} - -// Signer looks up non-signing addresses before signing -type Signer struct { - viewer *TipSetStateViewer - chainHead chainHeadTracker - wallet *wallet.Wallet -} - -// NewSigner creates a new signer -func NewSigner(viewer *TipSetStateViewer, chainHead chainHeadTracker, wallet *wallet.Wallet) *Signer { - return &Signer{ - viewer: viewer, - chainHead: chainHead, - wallet: wallet, - } -} - -// SignBytes creates a signature for the given data using either the given addr or its 
associated signing address -func (s *Signer) SignBytes(ctx context.Context, data []byte, addr address.Address) (crypto.Signature, error) { - signingAddr, err := s.signingAddress(ctx, addr) - if err != nil { - return crypto.Signature{}, err - } - return s.wallet.SignBytes(data, signingAddr) -} - -// HasAddress returns whether this signer can sign with the given address -func (s *Signer) HasAddress(ctx context.Context, addr address.Address) (bool, error) { - signingAddr, err := s.signingAddress(ctx, addr) - if err != nil { - return false, err - } - return s.wallet.HasAddress(signingAddr), nil -} - -func (s *Signer) signingAddress(ctx context.Context, addr address.Address) (address.Address, error) { - if addr.Protocol() == address.BLS || addr.Protocol() == address.SECP256K1 { - // address is already a signing address. return it - return addr, nil - } - - view, err := s.viewer.StateView(s.chainHead.GetHead()) - if err != nil { - return address.Undef, err - } - return view.AccountSignerAddress(ctx, addr) -} diff --git a/internal/pkg/state/sigval.go b/internal/pkg/state/sigval.go deleted file mode 100644 index 796dd6c83d..0000000000 --- a/internal/pkg/state/sigval.go +++ /dev/null @@ -1,71 +0,0 @@ -package state - -import ( - "context" - - addr "github.com/filecoin-project/go-address" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -type AccountStateView interface { - AccountSignerAddress(ctx context.Context, a addr.Address) (addr.Address, error) -} - -// -// SignatureValidator resolves account actor addresses to their pubkey-style address for signature validation. 
-// -type SignatureValidator struct { - state AccountStateView -} - -func NewSignatureValidator(state AccountStateView) *SignatureValidator { - return &SignatureValidator{state: state} -} - -func (v *SignatureValidator) ValidateSignature(ctx context.Context, data []byte, signer addr.Address, sig crypto.Signature) error { - signerAddress, err := v.state.AccountSignerAddress(ctx, signer) - if err != nil { - return errors.Wrapf(err, "failed to load signer address for %v", signer) - } - return crypto.ValidateSignature(data, signerAddress, sig) -} - -func (v *SignatureValidator) ValidateMessageSignature(ctx context.Context, msg *types.SignedMessage) error { - mCid, err := msg.Message.Cid() - if err != nil { - return errors.Wrapf(err, "failed to take cid of message to check signature") - } - - return v.ValidateSignature(ctx, mCid.Bytes(), msg.Message.From, msg.Signature) -} - -func (v *SignatureValidator) ValidateBLSMessageAggregate(ctx context.Context, msgs []*types.UnsignedMessage, sig *crypto.Signature) error { - if sig == nil { - if len(msgs) > 0 { - return errors.New("Invalid empty BLS sig over messages") - } - return nil - } - pubKeys := [][]byte{} - encodedMsgCids := [][]byte{} - for _, msg := range msgs { - signerAddress, err := v.state.AccountSignerAddress(ctx, msg.From) - if err != nil { - return errors.Wrapf(err, "failed to load signer address for %v", msg.From) - } - pubKeys = append(pubKeys, signerAddress.Payload()) - mCid, err := msg.Cid() - if err != nil { - return err - } - encodedMsgCids = append(encodedMsgCids, mCid.Bytes()) - } - - if !crypto.VerifyBLSAggregate(pubKeys, encodedMsgCids, sig.Data) { - return errors.New("BLS signature invalid") - } - return nil -} diff --git a/internal/pkg/state/sigval_test.go b/internal/pkg/state/sigval_test.go deleted file mode 100644 index 5f8cc6cf5a..0000000000 --- a/internal/pkg/state/sigval_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package state - -import ( - "context" - "fmt" - "testing" - - 
"github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -type fakeStateView struct { - keys map[address.Address]address.Address -} - -func (f *fakeStateView) AccountSignerAddress(_ context.Context, a address.Address) (address.Address, error) { - if a.Protocol() == address.SECP256K1 || a.Protocol() == address.BLS { - return a, nil - } - resolved, ok := f.keys[a] - if !ok { - return address.Undef, fmt.Errorf("not found") - } - return resolved, nil - -} - -func TestSignMessageOk(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - - ms, kis := types.NewMockSignersAndKeyInfo(1) - keyAddr, err := kis[0].Address() - require.NoError(t, err) - - t.Run("no resolution", func(t *testing.T) { - v := NewSignatureValidator(&fakeStateView{}) // No resolution needed. - msg := types.NewMeteredMessage(keyAddr, keyAddr, 1, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(0), 0) - smsg, err := types.NewSignedMessage(ctx, *msg, ms) - require.NoError(t, err) - assert.NoError(t, v.ValidateMessageSignature(ctx, smsg)) - }) - t.Run("resolution required", func(t *testing.T) { - idAddress := vmaddr.RequireIDAddress(t, 1) - // Use ID address in message but sign with corresponding key address. 
- state := &fakeStateView{keys: map[address.Address]address.Address{ - idAddress: keyAddr, - }} - v := NewSignatureValidator(state) - msg := types.NewMeteredMessage(idAddress, idAddress, 1, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(0), 0) - msgCid, err := msg.Cid() - require.NoError(t, err) - sig, err := ms.SignBytes(ctx, msgCid.Bytes(), keyAddr) - require.NoError(t, err) - smsg := &types.SignedMessage{ - Message: *msg, - Signature: sig, - } - - assert.NoError(t, v.ValidateMessageSignature(ctx, smsg)) - }) -} - -// Signature is valid but signer does not match From Address. -func TestBadFrom(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - - signer, kis := types.NewMockSignersAndKeyInfo(2) - keyAddr, err := kis[0].Address() - require.NoError(t, err) - otherAddr, err := kis[1].Address() - require.NoError(t, err) - - t.Run("no resolution", func(t *testing.T) { - v := NewSignatureValidator(&fakeStateView{}) - - // Can't use NewSignedMessage constructor as it always signs with msg.From. - msg := types.NewMeteredMessage(keyAddr, keyAddr, 1, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(0), gas.NewGas(0)) - bmsg, err := msg.Marshal() - require.NoError(t, err) - sig, err := signer.SignBytes(ctx, bmsg, otherAddr) // sign with addr != msg.From - require.NoError(t, err) - smsg := &types.SignedMessage{ - Message: *msg, - Signature: sig, - } - assert.Error(t, v.ValidateMessageSignature(ctx, smsg)) - }) - t.Run("resolution required", func(t *testing.T) { - idAddress := vmaddr.RequireIDAddress(t, 1) - // Use ID address in message but sign with corresponding key address. - state := &fakeStateView{keys: map[address.Address]address.Address{ - idAddress: keyAddr, - }} - v := NewSignatureValidator(state) - - // Can't use NewSignedMessage constructor as it always signs with msg.From. 
- msg := types.NewMeteredMessage(idAddress, idAddress, 1, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(0), gas.NewGas(0)) - bmsg, err := msg.Marshal() - require.NoError(t, err) - sig, err := signer.SignBytes(ctx, bmsg, otherAddr) // sign with addr != msg.From (resolved) - require.NoError(t, err) - smsg := &types.SignedMessage{ - Message: *msg, - Signature: sig, - } - assert.Error(t, v.ValidateMessageSignature(ctx, smsg)) - }) -} - -// Signature corrupted. -func TestSignedMessageBadSignature(t *testing.T) { - tf.UnitTest(t) - ctx := context.Background() - - signer, kis := types.NewMockSignersAndKeyInfo(1) - keyAddr, err := kis[0].Address() - require.NoError(t, err) - - v := NewSignatureValidator(&fakeStateView{}) // no resolution needed - msg := types.NewMeteredMessage(keyAddr, keyAddr, 1, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(0), 0) - smsg, err := types.NewSignedMessage(ctx, *msg, signer) - require.NoError(t, err) - - assert.NoError(t, v.ValidateMessageSignature(ctx, smsg)) - smsg.Signature.Data[0] = smsg.Signature.Data[0] ^ 0xFF - assert.Error(t, v.ValidateMessageSignature(ctx, smsg)) -} - -// Message corrupted. 
-func TestSignedMessageCorrupted(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - - signer, kis := types.NewMockSignersAndKeyInfo(1) - keyAddr, err := kis[0].Address() - require.NoError(t, err) - - v := NewSignatureValidator(&fakeStateView{}) // no resolution needed - msg := types.NewMeteredMessage(keyAddr, keyAddr, 1, types.ZeroAttoFIL, builtin.MethodSend, nil, types.NewGasPrice(0), 0) - smsg, err := types.NewSignedMessage(ctx, *msg, signer) - require.NoError(t, err) - - assert.NoError(t, v.ValidateMessageSignature(ctx, smsg)) - smsg.Message.CallSeqNum = uint64(42) - assert.Error(t, v.ValidateMessageSignature(ctx, smsg)) -} diff --git a/internal/pkg/state/testing.go b/internal/pkg/state/testing.go deleted file mode 100644 index 61023cb75c..0000000000 --- a/internal/pkg/state/testing.go +++ /dev/null @@ -1,174 +0,0 @@ -package state - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/pkg/errors" -) - -// FakeStateView is a fake state view. -type FakeStateView struct { - NetworkName string - Power *NetworkPower - Miners map[address.Address]*FakeMinerState -} - -// NewFakeStateView creates a new fake state view. -func NewFakeStateView(rawBytePower, qaPower abi.StoragePower, minerCount, minPowerMinerCount int64) *FakeStateView { - return &FakeStateView{ - Power: &NetworkPower{ - RawBytePower: rawBytePower, - QualityAdjustedPower: qaPower, - MinerCount: minerCount, - MinPowerMinerCount: minPowerMinerCount, - }, - Miners: make(map[address.Address]*FakeMinerState), - } -} - -// FakeMinerState is fake state for a single miner. 
-type FakeMinerState struct { - SectorConfiguration *MinerSectorConfiguration - Owner address.Address - Worker address.Address - PeerID peer.ID - ProvingPeriodStart abi.ChainEpoch - ProvingPeriodEnd abi.ChainEpoch - PoStFailures int - Sectors []miner.SectorOnChainInfo - Deadlines []*abi.BitField - ClaimedRawPower abi.StoragePower - ClaimedQAPower abi.StoragePower - PledgeRequirement abi.TokenAmount - PledgeBalance abi.TokenAmount -} - -// FakeSectorInfo fakes a subset of sector onchain info -type FakeSectorInfo struct { - ID abi.SectorNumber - SealedCID cid.Cid -} - -func (v *FakeStateView) InitNetworkName(_ context.Context) (string, error) { - return v.NetworkName, nil -} - -// MinerSectorConfiguration reports a miner's sector size. -func (v *FakeStateView) MinerSectorConfiguration(ctx context.Context, maddr address.Address) (*MinerSectorConfiguration, error) { - m, ok := v.Miners[maddr] - if !ok { - return nil, errors.Errorf("no miner %s", maddr) - } - return m.SectorConfiguration, nil -} - -// MinerSectorCount reports the number of sectors a miner has pledged -func (v *FakeStateView) MinerSectorCount(ctx context.Context, maddr address.Address) (uint64, error) { - m, ok := v.Miners[maddr] - if !ok { - return 0, errors.Errorf("no miner %s", maddr) - } - - return uint64(len(m.Sectors)), nil -} - -func (v *FakeStateView) MinerSectorStates(_ context.Context, maddr address.Address) (*MinerSectorStates, error) { - m, ok := v.Miners[maddr] - if !ok { - return nil, errors.Errorf("no miner %s", maddr) - } - return &MinerSectorStates{ - Deadlines: m.Deadlines, - Faults: abi.NewBitField(), - Recoveries: abi.NewBitField(), - NewSectors: abi.NewBitField(), - }, nil -} - -func (v *FakeStateView) MinerGetSector(_ context.Context, maddr address.Address, sectorNum abi.SectorNumber) (*miner.SectorOnChainInfo, bool, error) { - m, ok := v.Miners[maddr] - if !ok { - return nil, false, errors.Errorf("no miner %s", maddr) - } - for _, s := range m.Sectors { - if s.Info.SectorNumber == 
sectorNum { - return &s, true, nil - } - } - return nil, false, nil -} - -// MinerControlAddresses reports a miner's control addresses. -func (v *FakeStateView) MinerControlAddresses(_ context.Context, maddr address.Address) (owner, worker address.Address, err error) { - m, ok := v.Miners[maddr] - if !ok { - return address.Undef, address.Undef, errors.Errorf("no miner %s", maddr) - } - return m.Owner, m.Worker, nil -} - -func (v *FakeStateView) MinerExists(_ context.Context, _ address.Address) (bool, error) { - return true, nil -} - -func (v *FakeStateView) MinerPeerID(ctx context.Context, maddr address.Address) (peer.ID, error) { - m, ok := v.Miners[maddr] - if !ok { - return "", errors.Errorf("no miner %s", maddr) - } - return m.PeerID, nil -} - -func (v *FakeStateView) MinerProvingPeriod(ctx context.Context, maddr address.Address) (start abi.ChainEpoch, end abi.ChainEpoch, failureCount int, err error) { - m, ok := v.Miners[maddr] - if !ok { - return 0, 0, 0, errors.Errorf("no miner %s", maddr) - } - return m.ProvingPeriodStart, m.ProvingPeriodEnd, m.PoStFailures, nil -} - -func (v *FakeStateView) AccountSignerAddress(ctx context.Context, a address.Address) (address.Address, error) { - return a, nil -} - -func (v *FakeStateView) PowerNetworkTotal(_ context.Context) (*NetworkPower, error) { - return v.Power, nil -} - -func (v *FakeStateView) MinerClaimedPower(ctx context.Context, miner address.Address) (abi.StoragePower, abi.StoragePower, error) { - m, ok := v.Miners[miner] - if !ok { - return big.Zero(), big.Zero(), errors.Errorf("no miner %s", miner) - } - return m.ClaimedRawPower, m.ClaimedQAPower, nil -} - -func (v *FakeStateView) MinerPledgeCollateral(_ context.Context, maddr address.Address) (locked abi.TokenAmount, total abi.TokenAmount, err error) { - m, ok := v.Miners[maddr] - if !ok { - return big.Zero(), big.Zero(), errors.Errorf("no miner %s", maddr) - } - return m.PledgeRequirement, m.PledgeBalance, nil -} - -func (v *FakeStateView) MinerDeadlines(ctx 
context.Context, maddr address.Address) (*miner.Deadlines, error) { - return nil, nil -} - -func (v *FakeStateView) MinerInfo(ctx context.Context, maddr address.Address) (miner.MinerInfo, error) { - m, ok := v.Miners[maddr] - if !ok { - return miner.MinerInfo{}, errors.Errorf("no miner %s", maddr) - } - return miner.MinerInfo{ - Owner: m.Owner, - Worker: m.Worker, - PeerId: m.PeerID, - }, nil -} diff --git a/internal/pkg/state/tipset_viewer.go b/internal/pkg/state/tipset_viewer.go deleted file mode 100644 index b335d03db7..0000000000 --- a/internal/pkg/state/tipset_viewer.go +++ /dev/null @@ -1,36 +0,0 @@ -package state - -import ( - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" -) - -// Abstracts over a store of blockchain state. -type chainStateChainReader interface { - GetTipSetStateRoot(key block.TipSetKey) (cid.Cid, error) -} - -// TipSetStateViewer loads state views for tipsets. -type TipSetStateViewer struct { - // To get the head tipset state root. - chainReader chainStateChainReader - // To load the tree for the head tipset state root. - cst cbor.IpldStore -} - -// NewTipSetStateViewer constructs a TipSetStateViewer. -func NewTipSetStateViewer(chainReader chainStateChainReader, cst cbor.IpldStore) *TipSetStateViewer { - return &TipSetStateViewer{chainReader, cst} -} - -// StateView creates a state view after the application of a tipset's messages. 
-func (cs TipSetStateViewer) StateView(baseKey block.TipSetKey) (*View, error) { - root, err := cs.chainReader.GetTipSetStateRoot(baseKey) - if err != nil { - return nil, errors.Wrapf(err, "failed to get state root for %s", baseKey.String()) - } - return NewView(cs.cst, root), nil -} diff --git a/internal/pkg/state/view.go b/internal/pkg/state/view.go deleted file mode 100644 index 1c7c5b96b5..0000000000 --- a/internal/pkg/state/view.go +++ /dev/null @@ -1,688 +0,0 @@ -package state - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/sector-storage/ffiwrapper" - - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/account" - notinit "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - paychActor "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" -) - -// Viewer builds state views from state root CIDs. -type Viewer struct { - ipldStore cbor.IpldStore -} - -// NewViewer creates a new state -func NewViewer(store cbor.IpldStore) *Viewer { - return &Viewer{store} -} - -// StateView returns a new state view. 
-func (c *Viewer) StateView(root cid.Cid) *View { - return NewView(c.ipldStore, root) -} - -// View is a read-only interface to a snapshot of application-level actor state. -// This object interprets the actor state, abstracting the concrete on-chain structures so as to -// hide the complications of protocol versions. -// Exported methods on this type avoid exposing concrete state structures (which may be subject to versioning) -// where possible. -type View struct { - ipldStore cbor.IpldStore - root cid.Cid -} - -// NewView creates a new state view -func NewView(store cbor.IpldStore, root cid.Cid) *View { - return &View{ - ipldStore: store, - root: root, - } -} - -// InitNetworkName Returns the network name from the init actor state. -func (v *View) InitNetworkName(ctx context.Context) (string, error) { - initState, err := v.loadInitActor(ctx) - if err != nil { - return "", err - } - return initState.NetworkName, nil -} - -// InitResolveAddress Returns ID address if public key address is given. 
-func (v *View) InitResolveAddress(ctx context.Context, a addr.Address) (addr.Address, error) { - if a.Protocol() == addr.ID { - return a, nil - } - - initState, err := v.loadInitActor(ctx) - if err != nil { - return addr.Undef, err - } - - state := ¬init.State{ - AddressMap: initState.AddressMap, - } - return state.ResolveAddress(v.adtStore(ctx), a) -} - -// Returns public key address if id address is given -func (v *View) AccountSignerAddress(ctx context.Context, a addr.Address) (addr.Address, error) { - if a.Protocol() == addr.SECP256K1 || a.Protocol() == addr.BLS { - return a, nil - } - - accountActorState, err := v.loadAccountActor(ctx, a) - if err != nil { - return addr.Undef, err - } - - return accountActorState.Address, nil -} - -// MinerControlAddresses returns the owner and worker addresses for a miner actor -func (v *View) MinerControlAddresses(ctx context.Context, maddr addr.Address) (owner, worker addr.Address, err error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return addr.Undef, addr.Undef, err - } - return minerState.Info.Owner, minerState.Info.Worker, nil -} - -// MinerPeerID returns the PeerID for a miner actor -func (v *View) MinerPeerID(ctx context.Context, maddr addr.Address) (peer.ID, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return "", err - } - return minerState.Info.PeerId, nil -} - -type MinerSectorConfiguration struct { - SealProofType abi.RegisteredProof - SectorSize abi.SectorSize - WindowPoStPartitionSectors uint64 -} - -// MinerSectorConfiguration returns the sector size for a miner actor -func (v *View) MinerSectorConfiguration(ctx context.Context, maddr addr.Address) (*MinerSectorConfiguration, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return nil, err - } - return &MinerSectorConfiguration{ - SealProofType: minerState.Info.SealProofType, - SectorSize: minerState.Info.SectorSize, - WindowPoStPartitionSectors: 
minerState.Info.WindowPoStPartitionSectors, - }, nil -} - -// MinerSectorCount counts all the on-chain sectors -func (v *View) MinerSectorCount(ctx context.Context, maddr addr.Address) (uint64, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return 0, err - } - sectors, err := v.asArray(ctx, minerState.Sectors) - if err != nil { - return 0, err - } - length := sectors.Length() - return length, nil -} - -// Loads sector info from miner state. -func (v *View) MinerGetSector(ctx context.Context, maddr addr.Address, sectorNum abi.SectorNumber) (*miner.SectorOnChainInfo, bool, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return nil, false, err - } - return minerState.GetSector(v.adtStore(ctx), sectorNum) -} - -// MinerDeadlineInfo returns information relevant to the current proving deadline -func (v *View) MinerDeadlineInfo(ctx context.Context, maddr addr.Address, epoch abi.ChainEpoch) (index uint64, open, close, challenge abi.ChainEpoch, _ error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return 0, 0, 0, 0, err - } - - deadlineInfo := minerState.DeadlineInfo(epoch) - return deadlineInfo.Index, deadlineInfo.Open, deadlineInfo.Close, deadlineInfo.Challenge, nil -} - -// MinerPartitionIndexesForDeadline returns all partitions that need to be proven in the proving period deadline for the given epoch -func (v *View) MinerPartitionIndicesForDeadline(ctx context.Context, maddr addr.Address, deadlineIndex uint64) ([]uint64, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return nil, err - } - - deadlines, err := minerState.LoadDeadlines(v.adtStore(ctx)) - if err != nil { - return nil, err - } - - // compute first partition index - partitionSize := minerState.Info.WindowPoStPartitionSectors - start, sectorCount, err := miner.PartitionsForDeadline(deadlines, partitionSize, deadlineIndex) - if err != nil { - return nil, err - } - - // if deadline contains 
no sectors, return no partitions - if sectorCount == 0 { - return nil, nil - } - - // compute number of partitions - partitionCount, _, err := miner.DeadlineCount(deadlines, partitionSize, deadlineIndex) - if err != nil { - return nil, err - } - - partitions := make([]uint64, partitionCount) - for i := uint64(0); i < partitionCount; i++ { - partitions[i] = start + i - } - - return partitions, err -} - -// MinerSectorInfoForPartitions retrieves sector info for sectors needed to be proven over for the given proving window partitions. -// NOTE: exposes on-chain structures directly because specs-storage requires it. -func (v *View) MinerSectorInfoForDeadline(ctx context.Context, maddr addr.Address, deadlineIndex uint64, partitions []uint64) ([]abi.SectorInfo, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return nil, err - } - - deadlines, err := minerState.LoadDeadlines(v.adtStore(ctx)) - if err != nil { - return nil, err - } - - // This is copied from miner.Actor SubmitWindowedPoSt. It should be logic in miner.State. - partitionSize := minerState.Info.WindowPoStPartitionSectors - partitionsSectors, err := miner.ComputePartitionsSectors(deadlines, partitionSize, deadlineIndex, partitions) - if err != nil { - return nil, err - } - - provenSectors, err := abi.BitFieldUnion(partitionsSectors...) - if err != nil { - return nil, err - } - - // Extract a fault set relevant to the sectors being submitted, for expansion into a map. 
- declaredFaults, err := bitfield.IntersectBitField(provenSectors, minerState.Faults) - if err != nil { - return nil, err - } - - declaredRecoveries, err := bitfield.IntersectBitField(declaredFaults, minerState.Recoveries) - if err != nil { - return nil, err - } - - expectedFaults, err := bitfield.SubtractBitField(declaredFaults, declaredRecoveries) - if err != nil { - return nil, err - } - - nonFaults, err := bitfield.SubtractBitField(provenSectors, expectedFaults) - if err != nil { - return nil, err - } - - empty, err := nonFaults.IsEmpty() - if err != nil { - return nil, err - } - - if empty { - return nil, fmt.Errorf("no non-faulty sectors in partitions %+v", partitions) - } - - // Select a non-faulty sector as a substitute for faulty ones. - goodSectorNo, err := nonFaults.First() - if err != nil { - return nil, err - } - - // Load sector infos for proof - sectors, err := minerState.LoadSectorInfosWithFaultMask(v.adtStore(ctx), provenSectors, expectedFaults, abi.SectorNumber(goodSectorNo)) - if err != nil { - return nil, err - } - - out := make([]abi.SectorInfo, len(sectors)) - for i, sector := range sectors { - out[i] = sector.AsSectorInfo() - } - - return out, nil -} - -// MinerSuccessfulPoSts counts how many successful window PoSts have been made this proving period so far. -func (v *View) MinerSuccessfulPoSts(ctx context.Context, maddr addr.Address) (uint64, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return 0, err - } - - return minerState.PostSubmissions.Count() -} - -// MinerDeadlines returns a bitfield of sectors in a proving period -// NOTE: exposes on-chain structures directly because it's referenced directly by the storage-fsm module. -// This is in conflict with the general goal of the state view of hiding the chain state representations from -// consumers in order to support versioning that representation through protocol upgrades. 
-// See https://github.com/filecoin-project/storage-fsm/issues/13 -func (v *View) MinerDeadlines(ctx context.Context, maddr addr.Address) (*miner.Deadlines, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return nil, err - } - - return minerState.LoadDeadlines(v.adtStore(ctx)) -} - -type MinerSectorStates struct { - Deadlines []*abi.BitField - Faults *abi.BitField - Recoveries *abi.BitField - NewSectors *abi.BitField -} - -func (v *View) MinerSectorStates(ctx context.Context, maddr addr.Address) (*MinerSectorStates, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return nil, err - } - - deadlines, err := minerState.LoadDeadlines(v.adtStore(ctx)) - if err != nil { - return nil, err - } - return &MinerSectorStates{ - Deadlines: deadlines.Due[:], - Faults: minerState.Faults, - Recoveries: minerState.Recoveries, - NewSectors: minerState.NewSectors, - }, nil -} - -// MinerInfo returns information about the next proving period -// NOTE: exposes on-chain structures directly (but not necessary to) -func (v *View) MinerInfo(ctx context.Context, maddr addr.Address) (miner.MinerInfo, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return miner.MinerInfo{}, err - } - - return minerState.Info, err -} - -func (v *View) MinerProvingPeriodStart(ctx context.Context, maddr addr.Address) (abi.ChainEpoch, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return 0, err - } - return minerState.ProvingPeriodStart, nil -} - -// MinerSectorsForEach Iterates over the sectors in a miner's proving set. 
-func (v *View) MinerSectorsForEach(ctx context.Context, maddr addr.Address, - f func(abi.SectorNumber, cid.Cid, abi.RegisteredProof, []abi.DealID) error) error { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return err - } - - sectors, err := v.asArray(ctx, minerState.Sectors) - if err != nil { - return err - } - - // This version for the new actors - var sector miner.SectorOnChainInfo - return sectors.ForEach(§or, func(secnum int64) error { - // Add more fields here as required by new callers. - return f(sector.Info.SectorNumber, sector.Info.SealedCID, sector.Info.RegisteredProof, sector.Info.DealIDs) - }) -} - -// MinerExists Returns true iff the miner exists. -func (v *View) MinerExists(ctx context.Context, maddr addr.Address) (bool, error) { - _, err := v.loadMinerActor(ctx, maddr) - if err == nil { - return true, nil - } - if err == types.ErrNotFound { - return false, nil - } - return false, err -} - -// MinerFaults Returns all sector ids that are faults -func (v *View) MinerFaults(ctx context.Context, maddr addr.Address) ([]uint64, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return nil, err - } - - return minerState.Faults.All(miner.SectorsMax) -} - -// MinerGetPrecommittedSector Looks up info for a miners precommitted sector. -// NOTE: exposes on-chain structures directly for storage FSM API. 
-func (v *View) MinerGetPrecommittedSector(ctx context.Context, maddr addr.Address, sectorNum abi.SectorNumber) (*miner.SectorPreCommitOnChainInfo, bool, error) { - minerState, err := v.loadMinerActor(ctx, maddr) - if err != nil { - return nil, false, err - } - - return minerState.GetPrecommittedSector(v.adtStore(ctx), sectorNum) -} - -// MarketEscrowBalance looks up a token amount in the escrow table for the given address -func (v *View) MarketEscrowBalance(ctx context.Context, addr addr.Address) (found bool, amount abi.TokenAmount, err error) { - marketState, err := v.loadMarketActor(ctx) - if err != nil { - return false, abi.NewTokenAmount(0), err - } - - escrow, err := v.asMap(ctx, marketState.EscrowTable) - if err != nil { - return false, abi.NewTokenAmount(0), err - } - - var value abi.TokenAmount - found, err = escrow.Get(adt.AddrKey(addr), &value) - return -} - -// MarketComputeDataCommitment takes deal ids and uses associated commPs to compute commD for a sector that contains the deals -func (v *View) MarketComputeDataCommitment(ctx context.Context, registeredProof abi.RegisteredProof, dealIDs []abi.DealID) (cid.Cid, error) { - marketState, err := v.loadMarketActor(ctx) - if err != nil { - return cid.Undef, err - } - - deals, err := v.asArray(ctx, marketState.Proposals) - if err != nil { - return cid.Undef, err - } - - // map deals to pieceInfo - pieceInfos := make([]abi.PieceInfo, len(dealIDs)) - for i, id := range dealIDs { - var proposal market.DealProposal - found, err := deals.Get(uint64(id), &proposal) - if err != nil { - return cid.Undef, err - } - - if !found { - return cid.Undef, fmt.Errorf("Could not find deal id %d", id) - } - - pieceInfos[i].PieceCID = proposal.PieceCID - pieceInfos[i].Size = proposal.PieceSize - } - - return ffiwrapper.GenerateUnsealedCID(registeredProof, pieceInfos) -} - -// NOTE: exposes on-chain structures directly for storage FSM interface. 
-func (v *View) MarketDealProposal(ctx context.Context, dealID abi.DealID) (market.DealProposal, error) { - marketState, err := v.loadMarketActor(ctx) - if err != nil { - return market.DealProposal{}, err - } - - deals, err := v.asArray(ctx, marketState.Proposals) - if err != nil { - return market.DealProposal{}, err - } - - var proposal market.DealProposal - found, err := deals.Get(uint64(dealID), &proposal) - if err != nil { - return market.DealProposal{}, err - } - if !found { - return market.DealProposal{}, fmt.Errorf("Could not find deal id %d", dealID) - } - - return proposal, nil -} - -// NOTE: exposes on-chain structures directly for storage FSM and market module interfaces. -func (v *View) MarketDealState(ctx context.Context, dealID abi.DealID) (*market.DealState, bool, error) { - marketState, err := v.loadMarketActor(ctx) - if err != nil { - return nil, false, err - } - - dealStates, err := v.asDealStateArray(ctx, marketState.States) - if err != nil { - return nil, false, err - } - return dealStates.Get(dealID) -} - -// NOTE: exposes on-chain structures directly for market interface. -// The callback receives a pointer to a transient object; take a copy or drop the reference outside the callback. -func (v *View) MarketDealStatesForEach(ctx context.Context, f func(id abi.DealID, state *market.DealState) error) error { - marketState, err := v.loadMarketActor(ctx) - if err != nil { - return err - } - - dealStates, err := v.asDealStateArray(ctx, marketState.States) - if err != nil { - return err - } - - var ds market.DealState - return dealStates.ForEach(&ds, func(dealId int64) error { - return f(abi.DealID(dealId), &ds) - }) -} - -type NetworkPower struct { - RawBytePower abi.StoragePower - QualityAdjustedPower abi.StoragePower - MinerCount int64 - MinPowerMinerCount int64 -} - -// Returns the storage power actor's values for network total power. 
-func (v *View) PowerNetworkTotal(ctx context.Context) (*NetworkPower, error) { - st, err := v.loadPowerActor(ctx) - if err != nil { - return nil, err - } - return &NetworkPower{ - RawBytePower: st.TotalRawBytePower, - QualityAdjustedPower: st.TotalQualityAdjPower, - MinerCount: st.MinerCount, - MinPowerMinerCount: st.NumMinersMeetingMinPower, - }, nil -} - -// Returns the power of a miner's committed sectors. -func (v *View) MinerClaimedPower(ctx context.Context, miner addr.Address) (raw, qa abi.StoragePower, err error) { - minerResolved, err := v.InitResolveAddress(ctx, miner) - if err != nil { - return big.Zero(), big.Zero(), err - } - powerState, err := v.loadPowerActor(ctx) - if err != nil { - return big.Zero(), big.Zero(), err - } - claim, err := v.loadPowerClaim(ctx, powerState, minerResolved) - if err != nil { - return big.Zero(), big.Zero(), err - } - return claim.RawBytePower, claim.QualityAdjPower, nil -} - -// PaychActorParties returns the From and To addresses for the given payment channel -func (v *View) PaychActorParties(ctx context.Context, paychAddr addr.Address) (from, to addr.Address, err error) { - a, err := v.loadActor(ctx, paychAddr) - if err != nil { - return addr.Undef, addr.Undef, err - } - var state paychActor.State - err = v.ipldStore.Get(ctx, a.Head.Cid, &state) - if err != nil { - return addr.Undef, addr.Undef, err - } - return state.From, state.To, nil -} - -func (v *View) loadPowerClaim(ctx context.Context, powerState *power.State, miner addr.Address) (*power.Claim, error) { - claims, err := v.asMap(ctx, powerState.Claims) - if err != nil { - return nil, err - } - - var claim power.Claim - found, err := claims.Get(adt.AddrKey(miner), &claim) - if err != nil { - return nil, err - } - if !found { - return nil, types.ErrNotFound - } - return &claim, nil -} - -func (v *View) loadInitActor(ctx context.Context) (*notinit.State, error) { - actr, err := v.loadActor(ctx, builtin.InitActorAddr) - if err != nil { - return nil, err - } - var 
state notinit.State - err = v.ipldStore.Get(ctx, actr.Head.Cid, &state) - return &state, err -} - -func (v *View) loadMinerActor(ctx context.Context, address addr.Address) (*miner.State, error) { - resolvedAddr, err := v.InitResolveAddress(ctx, address) - if err != nil { - return nil, err - } - actr, err := v.loadActor(ctx, resolvedAddr) - if err != nil { - return nil, err - } - var state miner.State - err = v.ipldStore.Get(ctx, actr.Head.Cid, &state) - return &state, err -} - -func (v *View) loadPowerActor(ctx context.Context) (*power.State, error) { - actr, err := v.loadActor(ctx, builtin.StoragePowerActorAddr) - if err != nil { - return nil, err - } - var state power.State - err = v.ipldStore.Get(ctx, actr.Head.Cid, &state) - return &state, err -} - -func (v *View) loadMarketActor(ctx context.Context) (*market.State, error) { - actr, err := v.loadActor(ctx, builtin.StorageMarketActorAddr) - if err != nil { - return nil, err - } - var state market.State - err = v.ipldStore.Get(ctx, actr.Head.Cid, &state) - return &state, err -} - -func (v *View) loadAccountActor(ctx context.Context, a addr.Address) (*account.State, error) { - resolvedAddr, err := v.InitResolveAddress(ctx, a) - if err != nil { - return nil, err - } - actr, err := v.loadActor(ctx, resolvedAddr) - if err != nil { - return nil, err - } - var state account.State - err = v.ipldStore.Get(ctx, actr.Head.Cid, &state) - return &state, err -} - -func (v *View) loadActor(ctx context.Context, address addr.Address) (*actor.Actor, error) { - tree, err := v.asMap(ctx, v.root) - if err != nil { - return nil, err - } - - var actr actor.Actor - found, err := tree.Get(adt.AddrKey(address), &actr) - if !found { - return nil, types.ErrNotFound - } - - return &actr, err -} - -func (v *View) adtStore(ctx context.Context) adt.Store { - return StoreFromCbor(ctx, v.ipldStore) -} - -func (v *View) asArray(ctx context.Context, root cid.Cid) (*adt.Array, error) { - return adt.AsArray(v.adtStore(ctx), root) -} - -func (v 
*View) asMap(ctx context.Context, root cid.Cid) (*adt.Map, error) { - return adt.AsMap(v.adtStore(ctx), root) -} - -func (v *View) asDealStateArray(ctx context.Context, root cid.Cid) (*market.DealMetaArray, error) { - return market.AsDealStateArray(v.adtStore(ctx), root) -} - -func (v *View) asBalanceTable(ctx context.Context, root cid.Cid) (*adt.BalanceTable, error) { - return adt.AsBalanceTable(v.adtStore(ctx), root) -} - -// StoreFromCbor wraps a cbor ipldStore for ADT access. -func StoreFromCbor(ctx context.Context, ipldStore cbor.IpldStore) adt.Store { - return &cstore{ctx, ipldStore} -} - -type cstore struct { - ctx context.Context - cbor.IpldStore -} - -func (s *cstore) Context() context.Context { - return s.ctx -} diff --git a/internal/pkg/testhelpers/consensus.go b/internal/pkg/testhelpers/consensus.go deleted file mode 100644 index ad0f637b32..0000000000 --- a/internal/pkg/testhelpers/consensus.go +++ /dev/null @@ -1,174 +0,0 @@ -package testhelpers - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - cid "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -// RequireSignedTestBlockFromTipSet creates a block with a valid signature by -// the passed in miner work and a Miner field set to the minerAddr. 
-func RequireSignedTestBlockFromTipSet(t *testing.T, baseTipSet block.TipSet, stateRootCid cid.Cid, receiptRootCid cid.Cid, height abi.ChainEpoch, minerAddr address.Address, minerWorker address.Address, signer types.Signer) *block.Block { - ticket := consensus.MakeFakeTicketForTest() - emptyBLSSig := crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: (*bls.Aggregate([]bls.Signature{}))[:], - } - - b := &block.Block{ - Miner: minerAddr, - Ticket: ticket, - Parents: baseTipSet.Key(), - ParentWeight: types.Uint64ToBig(uint64(height * 10000)), - Height: height, - StateRoot: e.NewCid(stateRootCid), - MessageReceipts: e.NewCid(receiptRootCid), - BLSAggregateSig: &emptyBLSSig, - } - sig, err := signer.SignBytes(context.TODO(), b.SignatureData(), minerWorker) - require.NoError(t, err) - b.BlockSig = &sig - - return b -} - -// FakeBlockValidator passes everything as valid -type FakeBlockValidator struct{} - -// NewFakeBlockValidator createas a FakeBlockValidator that passes everything as valid. -func NewFakeBlockValidator() *FakeBlockValidator { - return &FakeBlockValidator{} -} - -// ValidateHeaderSemantic does nothing. -func (fbv *FakeBlockValidator) ValidateHeaderSemantic(ctx context.Context, child *block.Block, parents block.TipSet) error { - return nil -} - -// ValidateSyntax does nothing. 
-func (fbv *FakeBlockValidator) ValidateSyntax(ctx context.Context, blk *block.Block) error { - return nil -} - -// ValidateMessagesSyntax does nothing -func (fbv *FakeBlockValidator) ValidateMessagesSyntax(ctx context.Context, messages []*types.SignedMessage) error { - return nil -} - -// ValidateUnsignedMessagesSyntax does nothing -func (fbv *FakeBlockValidator) ValidateUnsignedMessagesSyntax(ctx context.Context, messages []*types.UnsignedMessage) error { - return nil -} - -// ValidateReceiptsSyntax does nothing -func (fbv *FakeBlockValidator) ValidateReceiptsSyntax(ctx context.Context, receipts []vm.MessageReceipt) error { - return nil -} - -// StubBlockValidator is a mockable block validator. -type StubBlockValidator struct { - syntaxStubs map[cid.Cid]error - semanticStubs map[cid.Cid]error -} - -// NewStubBlockValidator creates a StubBlockValidator that allows errors to configured -// for blocks passed to the Validate* methods. -func NewStubBlockValidator() *StubBlockValidator { - return &StubBlockValidator{ - syntaxStubs: make(map[cid.Cid]error), - semanticStubs: make(map[cid.Cid]error), - } -} - -// ValidateHeaderSemantic returns nil or error for stubbed block `child`. -func (mbv *StubBlockValidator) ValidateSemantic(ctx context.Context, child *block.Block, parents *block.TipSet, _ uint64) error { - return mbv.semanticStubs[child.Cid()] -} - -// ValidateSyntax return nil or error for stubbed block `blk`. -func (mbv *StubBlockValidator) ValidateSyntax(ctx context.Context, blk *block.Block) error { - return mbv.syntaxStubs[blk.Cid()] -} - -// StubSyntaxValidationForBlock stubs an error when the ValidateSyntax is called -// on the with the given block. -func (mbv *StubBlockValidator) StubSyntaxValidationForBlock(blk *block.Block, err error) { - mbv.syntaxStubs[blk.Cid()] = err -} - -// StubSemanticValidationForBlock stubs an error when the ValidateHeaderSemantic is called -// on the with the given child block. 
-func (mbv *StubBlockValidator) StubSemanticValidationForBlock(child *block.Block, err error) { - mbv.semanticStubs[child.Cid()] = err -} - -// NewFakeProcessor creates a processor with a test validator and test rewarder -func NewFakeProcessor() *consensus.DefaultProcessor { - return consensus.NewConfiguredProcessor(vm.DefaultActors, &vm.FakeSyscalls{}, &consensus.FakeChainRandomness{}) -} - -// ApplyTestMessage sends a message directly to the vm, bypassing message -// validation -func ApplyTestMessage(st state.Tree, store vm.Storage, msg *types.UnsignedMessage, bh abi.ChainEpoch) (*consensus.ApplicationResult, error) { - return applyTestMessageWithAncestors(vm.DefaultActors, st, store, msg, bh, nil) -} - -// ApplyTestMessageWithActors sends a message directly to the vm with a given set of builtin actors -func ApplyTestMessageWithActors(actors vm.ActorCodeLoader, st state.Tree, store vm.Storage, msg *types.UnsignedMessage, bh abi.ChainEpoch) (*consensus.ApplicationResult, error) { - return applyTestMessageWithAncestors(actors, st, store, msg, bh, nil) -} - -// ApplyTestMessageWithGas uses the FakeBlockRewarder but the default SignedMessageValidator -func ApplyTestMessageWithGas(actors vm.ActorCodeLoader, st state.Tree, store vm.Storage, msg *types.UnsignedMessage, bh abi.ChainEpoch, minerOwner address.Address) (*consensus.ApplicationResult, error) { - applier := consensus.NewConfiguredProcessor(actors, &vm.FakeSyscalls{}, &consensus.FakeChainRandomness{}) - return newMessageApplier(msg, applier, st, store, bh, minerOwner, nil) -} - -func newMessageApplier(msg *types.UnsignedMessage, processor *consensus.DefaultProcessor, st state.Tree, vms vm.Storage, bh abi.ChainEpoch, minerOwner address.Address, ancestors []block.TipSet) (*consensus.ApplicationResult, error) { - return nil, nil -} - -// CreateAndApplyTestMessageFrom wraps the given parameters in a message and calls ApplyTestMessage. 
-func CreateAndApplyTestMessageFrom(t *testing.T, st state.Tree, vms vm.Storage, from address.Address, to address.Address, val, bh uint64, method abi.MethodNum, ancestors []block.TipSet, params ...interface{}) (*consensus.ApplicationResult, error) { - t.Helper() - - pdata, err := encoding.Encode(params) - if err != nil { - panic(err) - } - msg := types.NewUnsignedMessage(from, to, 0, types.NewAttoFILFromFIL(val), method, pdata) - return applyTestMessageWithAncestors(vm.DefaultActors, st, vms, msg, abi.ChainEpoch(bh), ancestors) -} - -// CreateAndApplyTestMessage wraps the given parameters in a message and calls -// CreateAndApplyTestMessageFrom sending the message from address.TestAddress -func CreateAndApplyTestMessage(t *testing.T, st state.Tree, vms vm.Storage, to address.Address, val, bh uint64, method abi.MethodNum, ancestors []block.TipSet, params ...interface{}) (*consensus.ApplicationResult, error) { - return CreateAndApplyTestMessageFrom(t, st, vms, address.TestAddress, to, val, bh, method, ancestors, params...) 
-} - -func applyTestMessageWithAncestors(actors vm.ActorCodeLoader, st state.Tree, store vm.Storage, msg *types.UnsignedMessage, bh abi.ChainEpoch, ancestors []block.TipSet) (*consensus.ApplicationResult, error) { - msg.GasPrice = types.NewGasPrice(1) - msg.GasLimit = gas.NewGas(300) - - ta := newTestApplier(actors) - return newMessageApplier(msg, ta, st, store, bh, address.Undef, ancestors) -} - -func newTestApplier(actors vm.ActorCodeLoader) *consensus.DefaultProcessor { - return consensus.NewConfiguredProcessor(actors, &vm.FakeSyscalls{}, &consensus.FakeChainRandomness{}) -} diff --git a/internal/pkg/testhelpers/core.go b/internal/pkg/testhelpers/core.go deleted file mode 100644 index 3e114be4f5..0000000000 --- a/internal/pkg/testhelpers/core.go +++ /dev/null @@ -1,94 +0,0 @@ -package testhelpers - -import ( - "context" - "errors" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -// RequireMakeStateTree takes a map of addresses to actors and stores them on -// the state tree, requiring that all its steps succeed. -func RequireMakeStateTree(t *testing.T, cst cbor.IpldStore, acts map[address.Address]*actor.Actor) (cid.Cid, *state.State) { - ctx := context.Background() - tree := state.NewState(cst) - - for addr, act := range acts { - err := tree.SetActor(ctx, addr, act) - require.NoError(t, err) - } - - c, err := tree.Commit(ctx) - require.NoError(t, err) - - return c, tree -} - -// RequireNewMinerActor creates a new miner actor with the given owner, pledge, and collateral, -// and requires that its steps succeed. 
-func RequireNewMinerActor(ctx context.Context, t *testing.T, st state.Tree, vms vm.Storage, owner address.Address, pledge uint64, pid peer.ID, coll types.AttoFIL) (*actor.Actor, address.Address) { - // Dragons: re-write using the new actor states structures directly - - return nil, address.Undef -} - -// RequireLookupActor converts the given address to an id address before looking up the actor in the state tree -func RequireLookupActor(ctx context.Context, t *testing.T, st state.Tree, vms vm.Storage, actorAddr address.Address) (*actor.Actor, address.Address) { - // Dragons: delete, nothing outside the vm should be concerned about actor id indexes - - return nil, address.Undef -} - -// RequireNewFakeActor instantiates and returns a new fake actor and requires -// that its steps succeed. -func RequireNewFakeActor(t *testing.T, vms vm.Storage, addr address.Address, codeCid cid.Cid) *actor.Actor { - return RequireNewFakeActorWithTokens(t, vms, addr, codeCid, types.NewAttoFILFromFIL(100)) -} - -// RequireNewFakeActorWithTokens instantiates and returns a new fake actor and requires -// that its steps succeed. -func RequireNewFakeActorWithTokens(t *testing.T, vms vm.Storage, addr address.Address, codeCid cid.Cid, amt types.AttoFIL) *actor.Actor { - - return nil -} - -// RequireNewInitActor instantiates and returns a new init actor -func RequireNewInitActor(t *testing.T, vms vm.Storage) *actor.Actor { - - return nil -} - -// RequireRandomPeerID returns a new libp2p peer ID or panics. 
-func RequireRandomPeerID(t *testing.T) peer.ID { - pid, err := RandPeerID() - require.NoError(t, err) - return pid -} - -// MockMessagePoolValidator is a mock validator -type MockMessagePoolValidator struct { - Valid bool -} - -// NewMockMessagePoolValidator creates a MockMessagePoolValidator -func NewMockMessagePoolValidator() *MockMessagePoolValidator { - return &MockMessagePoolValidator{Valid: true} -} - -// Validate returns true if the mock validator is set to validate the message -func (v *MockMessagePoolValidator) ValidateSignedMessageSyntax(ctx context.Context, msg *types.SignedMessage) error { - if v.Valid { - return nil - } - return errors.New("mock validation error") -} diff --git a/internal/pkg/testhelpers/iptbtester/genesis.go b/internal/pkg/testhelpers/iptbtester/genesis.go deleted file mode 100644 index 2d89bd4b18..0000000000 --- a/internal/pkg/testhelpers/iptbtester/genesis.go +++ /dev/null @@ -1,169 +0,0 @@ -package iptbtester - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/require" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" -) - -// GenesisInfo chains require information to start a single node with funds -type GenesisInfo struct { - GenesisFile string - KeyFile string - WalletAddress string - MinerAddress string - SectorsDir string - PresealedSectorDir string -} - -type idResult struct { - ID string -} - -// RequireGenesisFromSetup constructs the required information and files to build a single -// filecoin node from a genesis configuration file. 
The GenesisInfo can be used with MustImportGenesisMiner -func RequireGenesisFromSetup(t *testing.T, dir string, setupPath string) *GenesisInfo { - cfg := ReadSetup(t, setupPath) - return RequireGenesis(t, dir, cfg) -} - -// RequireGenerateGenesis constructs the required information and files to build a single -// filecoin node with the provided funds. The GenesisInfo can be used with MustImportGenesisMiner -func RequireGenerateGenesis(t *testing.T, funds int64, dir string, genesisTime time.Time) *GenesisInfo { - // Setup, generate a genesis and key file - commCfgs, err := gengen.MakeCommitCfgs(1) - require.NoError(t, err) - cfg := &gengen.GenesisCfg{ - Seed: 0, - KeysToGen: 1, - PreallocatedFunds: []string{ - strconv.FormatInt(funds, 10), - }, - Miners: []*gengen.CreateStorageMinerConfig{ - { - Owner: 0, - CommittedSectors: commCfgs, - SealProofType: constants.DevSealProofType, - }, - }, - Network: "gfctest", - Time: uint64(genesisTime.Unix()), - } - - return RequireGenesis(t, dir, cfg) -} - -// ReadSetup reads genesis config from a setup file -func ReadSetup(t *testing.T, setupPath string) *gengen.GenesisCfg { - configFile, err := os.Open(setupPath) - if err != nil { - t.Errorf("failed to open config file %s: %s", setupPath, err) - } - defer configFile.Close() // nolint: errcheck - - var cfg gengen.GenesisCfg - if err := json.NewDecoder(configFile).Decode(&cfg); err != nil { - t.Errorf("failed to parse config: %s", err) - } - return &cfg -} - -// RequireGenesis generates a genesis block and metadata from config -func RequireGenesis(t *testing.T, dir string, cfg *gengen.GenesisCfg) *GenesisInfo { - genfile, err := ioutil.TempFile(dir, "genesis.*.car") - if err != nil { - t.Fatal(err) - } - - keyfile, err := ioutil.TempFile(dir, "wallet.*.key") - if err != nil { - t.Fatal(err) - } - - info, err := gengen.GenGenesisCar(cfg, genfile) - if err != nil { - t.Fatal(err) - } - - minerCfg := info.Miners[0] - minerKeyIndex := minerCfg.Owner - - var wsr 
commands.WalletSerializeResult - wsr.KeyInfo = append(wsr.KeyInfo, info.Keys[minerKeyIndex]) - if err := json.NewEncoder(keyfile).Encode(wsr); err != nil { - t.Fatal(err) - } - - walletAddr, err := info.Keys[minerKeyIndex].Address() - if err != nil { - t.Fatal(err) - } - - minerAddr := minerCfg.Address - - return &GenesisInfo{ - GenesisFile: genfile.Name(), - KeyFile: keyfile.Name(), - WalletAddress: walletAddr.String(), - MinerAddress: minerAddr.String(), - } -} - -// MustImportGenesisMiner configures a node from the GenesisInfo and starts it mining. -// The node should already be initialized with the GenesisFile, and be should started. -func MustImportGenesisMiner(tn *TestNode, gi *GenesisInfo) { - ctx := context.Background() - - tn.MustRunCmd(ctx, "go-filecoin", "config", "mining.minerAddress", fmt.Sprintf("\"%s\"", gi.MinerAddress)) - - tn.MustRunCmd(ctx, "go-filecoin", "wallet", "import", gi.KeyFile) - - tn.MustRunCmd(ctx, "go-filecoin", "config", "wallet.defaultAddress", fmt.Sprintf("\"%s\"", gi.WalletAddress)) - - // Get node id - id := idResult{} - tn.MustRunCmdJSON(ctx, &id, "go-filecoin", "id") - - // Update miner - tn.MustRunCmd(ctx, "go-filecoin", "miner", "update-peerid", "--from="+gi.WalletAddress, "--gas-price=1", "--gas-limit=300", gi.MinerAddress, id.ID) -} - -// MustInitWithGenesis init TestNode, passing in the `--genesisfile` flag, by calling MustInit -func (tn *TestNode) MustInitWithGenesis(ctx context.Context, genesisinfo *GenesisInfo, args ...string) *TestNode { - genesisfileFlag := fmt.Sprintf("--genesisfile=%s", genesisinfo.GenesisFile) - args = append(args, genesisfileFlag) - - if genesisinfo.KeyFile != "" { - keyfileFlag := fmt.Sprintf("--wallet-keyfile=%s", genesisinfo.KeyFile) - args = append(args, keyfileFlag) - } - - if genesisinfo.MinerAddress != "" { - minerActorAddressFlag := fmt.Sprintf("--miner-actor-address=%s", genesisinfo.MinerAddress) - args = append(args, minerActorAddressFlag) - } - - if genesisinfo.PresealedSectorDir != "" 
{ - presealedSectorDirFlag := fmt.Sprintf("--presealed-sectordir=%s", genesisinfo.PresealedSectorDir) - args = append(args, presealedSectorDirFlag) - } - - if genesisinfo.SectorsDir != "" { - sectorDirFlag := fmt.Sprintf("--sectordir=%s", genesisinfo.SectorsDir) - args = append(args, sectorDirFlag) - } - - tn.MustInit(ctx, args...) - return tn -} diff --git a/internal/pkg/testhelpers/iptbtester/wrapper.go b/internal/pkg/testhelpers/iptbtester/wrapper.go deleted file mode 100644 index b4654067ae..0000000000 --- a/internal/pkg/testhelpers/iptbtester/wrapper.go +++ /dev/null @@ -1,221 +0,0 @@ -package iptbtester - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "reflect" - "testing" - - "github.com/stretchr/testify/require" - - logging "github.com/ipfs/go-log/v2" - testbedi "github.com/ipfs/iptb/testbed/interfaces" - - iptb "github.com/ipfs/iptb/testbed" - - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - - localplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/local" -) - -var log = logging.Logger("iptbtester") - -func init() { - _, err := iptb.RegisterPlugin(iptb.IptbPlugin{ - From: "", - NewNode: localplugin.NewNode, - PluginName: localplugin.PluginName, - BuiltIn: true, - }, false) - - if err != nil { - panic(err) - } -} - -// NewIPTBTestbed creates an iptb testebed of size `count`. "localfilecoin" or "dockerfilecoin" may be passed -// for `type`. -func NewIPTBTestbed(count int, typ, dir string, attrs map[string]string) (iptb.Testbed, error) { - log.Infof("Creating IPTB Testbed. 
count: %d, type: %s, dir: %s", count, typ, dir) - tbd, err := ioutil.TempDir("", dir) - if err != nil { - return nil, err - } - - testbed := iptb.NewTestbed(tbd) - - nodeSpecs, err := iptb.BuildSpecs(testbed.Dir(), count, typ, attrs) - if err != nil { - return nil, err - } - - if err := iptb.WriteNodeSpecs(testbed.Dir(), nodeSpecs); err != nil { - return nil, err - } - - return &testbed, nil -} - -// TestNode is a wrapper around an iptb core node interface. -type TestNode struct { - iptb.Testbed - testbedi.Core - - T *testing.T -} - -// NewTestNodes returns `count` TestNodes, and error is returned if a failure is -// encoundered. -func NewTestNodes(t *testing.T, count int, attrs map[string]string) ([]*TestNode, error) { - - if attrs == nil { - attrs = make(map[string]string) - } - - if _, ok := attrs[localplugin.AttrFilecoinBinary]; !ok { - binaryPath, err := th.GetFilecoinBinary() - if err != nil { - return nil, err - } - - attrs[localplugin.AttrFilecoinBinary] = binaryPath - } - - // create a testbed - tb, err := NewIPTBTestbed(count, "localfilecoin", "iptb-testnode", attrs) - if err != nil { - return nil, err - } - - // get the nodes from the testbed - nodes, err := tb.Nodes() - if err != nil { - return nil, err - } - - // we should fail if and ERROR is written to the daemons stderr - - var testnodes []*TestNode - for _, n := range nodes { - - tn := &TestNode{ - Testbed: tb, - Core: n, - T: t, - } - testnodes = append(testnodes, tn) - } - return testnodes, nil -} - -// MustInit inits TestNode, passing `args` to the init command. testing.Fatal is called if initing fails, or exits with -// and exitcode > 0. -func (tn *TestNode) MustInit(ctx context.Context, args ...string) *TestNode { - tn.T.Logf("TestNode[%s] Init with args: %s", tn.String(), args) - out, err := tn.Init(ctx, args...) - // Did IPTB fail to function correctly? 
- if err != nil { - stdout := new(bytes.Buffer) - _, err := stdout.ReadFrom(out.Stdout()) - require.NoError(tn.T, err) - tn.T.Logf("STDOUT:\n%s\n\n", stdout.String()) - stderr := new(bytes.Buffer) - _, err = stderr.ReadFrom(out.Stderr()) - require.NoError(tn.T, err) - tn.T.Logf("STDERR:\n%s\n\n", stderr.String()) - tn.T.Fatalf("IPTB init function failed: %s", err) - } - // did the command exit with nonstandard exit code? - if out.ExitCode() > 0 { - stdout := new(bytes.Buffer) - _, err := stdout.ReadFrom(out.Stdout()) - require.NoError(tn.T, err) - tn.T.Logf("STDOUT:\n%s\n\n", stdout.String()) - stderr := new(bytes.Buffer) - _, err = stderr.ReadFrom(out.Stderr()) - require.NoError(tn.T, err) - tn.T.Fatalf("TestNode command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - return tn -} - -// MustStart starts TestNode, testing.Fatal is called if starting fails, or exits with -// and exitcode > 0. -func (tn *TestNode) MustStart(ctx context.Context, args ...string) *TestNode { - tn.T.Logf("TestNode[%s] Start with args: %s", tn.String(), args) - out, err := tn.Start(ctx, true, args...) - if err != nil { - tn.T.Fatalf("IPTB start function failed: %s", err) - } - // did the command exit with nonstandard exit code? - if out.ExitCode() > 0 { - tn.T.Fatalf("TestNode command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - return tn -} - -// MustStop stops TestNode, testing.Fatal is called if stopping fails. -func (tn *TestNode) MustStop(ctx context.Context) { - tn.T.Logf("TestNode[%s] Stop", tn.String()) - if err := tn.Stop(ctx); err != nil { - tn.T.Fatalf("IPTB stop function failed: %s", err) - } -} - -// MustConnect will connect TestNode to TestNode `peer`, testing.Fatal will be called -// if connecting fails. 
-func (tn *TestNode) MustConnect(ctx context.Context, peer *TestNode) { - tn.T.Logf("TestNode[%s] Connect to peer: %s", tn.String(), peer.String()) - if err := tn.Connect(ctx, peer); err != nil { - tn.T.Fatalf("IPTB connect function failed: %s", err) - } -} - -// MustRunCmd runs `args` against TestNode. MustRunCmd returns stderr and stdout after running the command successfully. -// If the command exits with an exitcode > 0, the MustRunCmd will call testing.Fatal and print the error. -func (tn *TestNode) MustRunCmd(ctx context.Context, args ...string) (stdout, stderr string) { - tn.T.Logf("TestNode[%s] RunCmd with args: %s", tn.String(), args) - out, err := tn.RunCmd(ctx, nil, args...) - if err != nil { - tn.T.Fatalf("IPTB runCmd function failed: %s", err) - } - - stdo, err := ioutil.ReadAll(out.Stdout()) - if err != nil { - tn.T.Fatal("Failed to read stdout") - } - stdos := string(stdo) - stde, err := ioutil.ReadAll(out.Stderr()) - if err != nil { - tn.T.Fatal("Failed to read stderr") - } - stdes := string(stde) - - // did the command exit with nonstandard exit code? - if out.ExitCode() > 0 { - tn.T.Fatalf("TestNode command: %s, exited with non-zero exitcode: %d\nStdout: %s\nStderr:%s\n", out.Args(), out.ExitCode(), stdos, stdes) - } - - return stdos, stdes -} - -// MustRunCmdJSON runs `args` against TestNode. The '--enc=json' flag is appened to the command specified by `args`, -// the result of the command is marshaled into `expOut`. -func (tn *TestNode) MustRunCmdJSON(ctx context.Context, expOut interface{}, args ...string) { - args = append(args, "--enc=json") - out, err := tn.RunCmd(ctx, nil, args...) - if err != nil { - tn.T.Fatalf("IPTB runCmd function failed: %s", err) - } - // did the command exit with nonstandard exit code? 
- if out.ExitCode() > 0 { - tn.T.Fatalf("TestNode command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - - dec := json.NewDecoder(out.Stdout()) - if err := dec.Decode(expOut); err != nil { - tn.T.Fatalf("Failed to decode output from command: %s to struct: %s", out.Args(), reflect.TypeOf(expOut).Name()) - } -} diff --git a/internal/pkg/testhelpers/mining.go b/internal/pkg/testhelpers/mining.go deleted file mode 100644 index 7737106f8b..0000000000 --- a/internal/pkg/testhelpers/mining.go +++ /dev/null @@ -1,116 +0,0 @@ -package testhelpers - -import ( - "context" - "crypto/rand" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// BlockTimeTest is the block time used by workers during testing. -const BlockTimeTest = time.Second - -// FakeWorkerPorcelainAPI implements the WorkerPorcelainAPI> -type FakeWorkerPorcelainAPI struct { - blockTime time.Duration - stateView *state.FakeStateView - rnd consensus.ChainRandomness -} - -// NewDefaultFakeWorkerPorcelainAPI returns a FakeWorkerPorcelainAPI. 
-func NewDefaultFakeWorkerPorcelainAPI(signer address.Address, rnd consensus.ChainRandomness) *FakeWorkerPorcelainAPI { - return &FakeWorkerPorcelainAPI{ - blockTime: BlockTimeTest, - stateView: &state.FakeStateView{ - Power: &state.NetworkPower{ - RawBytePower: big.NewInt(1), - QualityAdjustedPower: big.NewInt(1), - MinerCount: 0, - MinPowerMinerCount: 0, - }, - Miners: map[address.Address]*state.FakeMinerState{}, - }, - rnd: rnd, - } -} - -// NewFakeWorkerPorcelainAPI produces an api suitable to use as the worker's porcelain api. -func NewFakeWorkerPorcelainAPI(rnd consensus.ChainRandomness, totalPower uint64, minerToWorker map[address.Address]address.Address) *FakeWorkerPorcelainAPI { - f := &FakeWorkerPorcelainAPI{ - blockTime: BlockTimeTest, - stateView: &state.FakeStateView{ - Power: &state.NetworkPower{ - RawBytePower: big.NewIntUnsigned(totalPower), - QualityAdjustedPower: big.NewIntUnsigned(totalPower), - MinerCount: 0, - MinPowerMinerCount: 0, - }, - Miners: map[address.Address]*state.FakeMinerState{}, - }, - rnd: rnd, - } - for k, v := range minerToWorker { - f.stateView.Miners[k] = &state.FakeMinerState{ - Owner: v, - Worker: v, - ClaimedRawPower: big.Zero(), - ClaimedQAPower: big.Zero(), - PledgeRequirement: big.Zero(), - PledgeBalance: big.Zero(), - } - } - return f -} - -// BlockTime returns the blocktime FakeWorkerPorcelainAPI is configured with. -func (t *FakeWorkerPorcelainAPI) BlockTime() time.Duration { - return t.blockTime -} - -// PowerStateView returns the state view. 
-func (t *FakeWorkerPorcelainAPI) PowerStateView(_ block.TipSetKey) (consensus.PowerStateView, error) { - return t.stateView, nil -} - -func (t *FakeWorkerPorcelainAPI) FaultsStateView(_ block.TipSetKey) (consensus.FaultStateView, error) { - return t.stateView, nil -} - -func (t *FakeWorkerPorcelainAPI) SampleChainRandomness(ctx context.Context, head block.TipSetKey, tag acrypto.DomainSeparationTag, - epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - return t.rnd.SampleChainRandomness(ctx, head, tag, epoch, entropy) -} - -// MakeCommitment creates a random commitment. -func MakeCommitment() []byte { - return MakeRandomBytes(32) -} - -// MakeCommitments creates three random commitments for constructing a -// types.Commitments. -func MakeCommitments() types.Commitments { - comms := types.Commitments{} - copy(comms.CommD[:], MakeCommitment()[:]) - copy(comms.CommR[:], MakeCommitment()[:]) - copy(comms.CommRStar[:], MakeCommitment()[:]) - return comms -} - -// MakeRandomBytes generates a randomized byte slice of size 'size' -func MakeRandomBytes(size int) []byte { - comm := make([]byte, size) - if _, err := rand.Read(comm); err != nil { - panic(err) - } - - return comm -} diff --git a/internal/pkg/testhelpers/net.go b/internal/pkg/testhelpers/net.go deleted file mode 100644 index 68f1a922f3..0000000000 --- a/internal/pkg/testhelpers/net.go +++ /dev/null @@ -1,209 +0,0 @@ -package testhelpers - -import ( - "context" - "crypto/rand" - "encoding/binary" - "fmt" - "testing" - "time" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/event" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/mux" - inet "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/peerstore" - "github.com/libp2p/go-libp2p-core/protocol" - ma 
"github.com/multiformats/go-multiaddr" - mh "github.com/multiformats/go-multihash" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -var _ host.Host = &FakeHost{} - -// FakeHost is a test host.Host -type FakeHost struct { - ConnectImpl func(context.Context, peer.AddrInfo) error -} - -// NewFakeHost constructs a FakeHost with no other parameters needed -func NewFakeHost() host.Host { - nopfunc := func(_ context.Context, _ peer.AddrInfo) error { return nil } - return &FakeHost{ConnectImpl: nopfunc} -} - -// minimal implementation of host.Host interface - -func (fh *FakeHost) Addrs() []ma.Multiaddr { panic("not implemented") } // nolint: golint -func (fh *FakeHost) Close() error { panic("not implemented") } // nolint: golint -func (fh *FakeHost) ConnManager() connmgr.ConnManager { panic("not implemented") } // nolint: golint -func (fh *FakeHost) Connect(ctx context.Context, pi peer.AddrInfo) error { // nolint: golint - return fh.ConnectImpl(ctx, pi) -} -func (fh *FakeHost) EventBus() event.Bus { panic("not implemented") } //nolint: golint -func (fh *FakeHost) ID() peer.ID { panic("not implemented") } // nolint: golint -func (fh *FakeHost) Network() inet.Network { panic("not implemented") } // nolint: golint -func (fh *FakeHost) Mux() protocol.Switch { panic("not implemented") } // nolint: golint -func (fh *FakeHost) Peerstore() peerstore.Peerstore { panic("not implemented") } // nolint: golint -func (fh *FakeHost) RemoveStreamHandler(protocol.ID) { panic("not implemented") } // nolint: golint -func (fh *FakeHost) SetStreamHandler(protocol.ID, inet.StreamHandler) { panic("not implemented") } // nolint: golint -func (fh *FakeHost) SetStreamHandlerMatch(protocol.ID, func(string) bool, inet.StreamHandler) { // nolint: golint - panic("not implemented") -} - -// NewStream is required for the host.Host interface; returns a new FakeStream. 
-func (fh *FakeHost) NewStream(context.Context, peer.ID, ...protocol.ID) (inet.Stream, error) { // nolint: golint - return newFakeStream(), nil -} - -var _ inet.Dialer = &FakeDialer{} - -// FakeDialer is a test inet.Dialer -type FakeDialer struct { - PeersImpl func() []peer.ID -} - -// Minimal implementation of the inet.Dialer interface - -// Peers returns a fake inet.Dialer PeersImpl -func (fd *FakeDialer) Peers() []peer.ID { - return fd.PeersImpl() -} -func (fd *FakeDialer) Peerstore() peerstore.Peerstore { panic("not implemented") } // nolint: golint -func (fd *FakeDialer) LocalPeer() peer.ID { panic("not implemented") } // nolint: golint -func (fd *FakeDialer) DialPeer(context.Context, peer.ID) (inet.Conn, error) { panic("not implemented") } // nolint: golint -func (fd *FakeDialer) ClosePeer(peer.ID) error { panic("not implemented") } // nolint: golint -func (fd *FakeDialer) Connectedness(peer.ID) inet.Connectedness { panic("not implemented") } // nolint: golint -func (fd *FakeDialer) Conns() []inet.Conn { panic("not implemented") } // nolint: golint -func (fd *FakeDialer) ConnsToPeer(peer.ID) []inet.Conn { panic("not implemented") } // nolint: golint -func (fd *FakeDialer) Notify(inet.Notifiee) { panic("not implemented") } // nolint: golint -func (fd *FakeDialer) StopNotify(inet.Notifiee) { panic("not implemented") } // nolint: golint - -// fakeStream is a test inet.Stream -type fakeStream struct { - _ mux.MuxedStream - pid protocol.ID -} - -var _ inet.Stream = &fakeStream{} - -func newFakeStream() fakeStream { return fakeStream{} } - -// Minimal implementation of the inet.Stream interface - -func (fs fakeStream) Protocol() protocol.ID { return fs.pid } // nolint: golint -func (fs fakeStream) SetProtocol(id protocol.ID) { fs.pid = id } // nolint: golint -func (fs fakeStream) Stat() inet.Stat { panic("not implemented") } // nolint: golint -func (fs fakeStream) Conn() inet.Conn { panic("not implemented") } // nolint: golint -func (fs fakeStream) Write(_ []byte) 
(int, error) { return 1, nil } // nolint: golint -func (fs fakeStream) Read(_ []byte) (int, error) { return 1, nil } // nolint: golint -func (fs fakeStream) Close() error { return nil } // nolint: golint -func (fs fakeStream) Reset() error { return nil } // nolint: golint -func (fs fakeStream) SetDeadline(_ time.Time) error { return nil } // nolint: golint -func (fs fakeStream) SetReadDeadline(_ time.Time) error { return nil } // nolint: golint -func (fs fakeStream) SetWriteDeadline(_ time.Time) error { return nil } // nolint: golint - -// RandPeerID is a libp2p random peer ID generator. -// These peer.ID generators were copied from libp2p/go-testutil. We didn't bring in the -// whole repo as a dependency because we only need this small bit. However if we find -// ourselves using more and more pieces we should just take a dependency on it. -func RandPeerID() (peer.ID, error) { - buf := make([]byte, 16) - if n, err := rand.Read(buf); n != 16 || err != nil { - if n != 16 && err == nil { - err = errors.New("couldnt read 16 random bytes") - } - panic(err) - } - h, _ := mh.Sum(buf, mh.SHA2_256, -1) - return peer.ID(h), nil -} - -// RequireIntPeerID takes in an integer and creates a unique peer id for it. -func RequireIntPeerID(t *testing.T, i int64) peer.ID { - buf := make([]byte, 16) - n := binary.PutVarint(buf, i) - h, err := mh.Sum(buf[:n], mh.IDENTITY, -1) - require.NoError(t, err) - pid, err := peer.IDFromBytes(h) - require.NoError(t, err) - return pid -} - -// TestFetcher is an object with the same method set as Fetcher plus a method -// for adding blocks to the source. It is used to implement an object that -// behaves like Fetcher but does not go to the network for use in tests. -type TestFetcher struct { - sourceBlocks map[string]*block.Block // sourceBlocks maps block cid strings to blocks. -} - -// NewTestFetcher returns a TestFetcher with no source blocks. 
-func NewTestFetcher() *TestFetcher { - return &TestFetcher{ - sourceBlocks: make(map[string]*block.Block), - } -} - -// AddSourceBlocks adds the input blocks to the fetcher source. -func (f *TestFetcher) AddSourceBlocks(blocks ...*block.Block) { - for _, block := range blocks { - f.sourceBlocks[block.Cid().String()] = block - } -} - -// FetchTipSets fetchs the tipset at `tsKey` from the network using the fetchers `sourceBlocks`. -func (f *TestFetcher) FetchTipSets(ctx context.Context, tsKey block.TipSetKey, from peer.ID, done func(t block.TipSet) (bool, error)) ([]block.TipSet, error) { - var out []block.TipSet - cur := tsKey - for { - res, err := f.GetBlocks(ctx, cur.ToSlice()) - if err != nil { - return nil, err - } - - ts, err := block.NewTipSet(res...) - if err != nil { - return nil, err - } - - out = append(out, ts) - ok, err := done(ts) - if err != nil { - return nil, err - } - if ok { - break - } - - cur, err = ts.Parents() - if err != nil { - return nil, err - } - - } - - return out, nil -} - -// FetchTipSetHeaders fetches the tipset at `tsKey` but not messages -func (f *TestFetcher) FetchTipSetHeaders(ctx context.Context, tsKey block.TipSetKey, from peer.ID, done func(t block.TipSet) (bool, error)) ([]block.TipSet, error) { - return f.FetchTipSets(ctx, tsKey, from, done) -} - -// GetBlocks returns any blocks in the source with matching cids. 
-func (f *TestFetcher) GetBlocks(ctx context.Context, cids []cid.Cid) ([]*block.Block, error) { - var ret []*block.Block - for _, c := range cids { - if block, ok := f.sourceBlocks[c.String()]; ok { - ret = append(ret, block) - } else { - return nil, fmt.Errorf("failed to fetch block: %s", c.String()) - } - } - return ret, nil -} diff --git a/internal/pkg/testhelpers/testflags/flags.go b/internal/pkg/testhelpers/testflags/flags.go deleted file mode 100644 index d0d067530c..0000000000 --- a/internal/pkg/testhelpers/testflags/flags.go +++ /dev/null @@ -1,87 +0,0 @@ -package testflags - -import ( - "flag" - "testing" -) - -// Test enablement flags -// Only run unit and integration tests by default, all others require their flags to be set. -var integrationTest = flag.Bool("integration", true, "Run the integration go tests") -var unitTest = flag.Bool("unit", true, "Run the unit go tests") -var functionalTest = flag.Bool("functional", false, "Run the functional go tests") -var sectorBuilderTest = flag.Bool("sectorbuilder", false, "Run the sector builder tests") -var deploymentTest = flag.String("deployment", "", "Run the deployment tests against a network") -var binaryPath = flag.String("binary-path", "", "Run forked processes tests using provided binary") - -// BinaryPath will return the path to the user provided binary. The call is expected to check if -// the return path points to an actual file. If the user did not provide a value an empty string -// will be returned along with a false for the second return value. -func BinaryPath() (string, bool) { - if len(*binaryPath) == 0 { - return "", false - } - - return *binaryPath, true -} - -// DeploymentTest will run the test its called from iff the `-deployment` flag -// is passed when calling `go test`. Otherwise the test will be skipped. DeploymentTest -// will run the test its called from in parallel. -// The network under test will be returned. 
-func DeploymentTest(t *testing.T) string { - if len(*deploymentTest) == 0 { - t.SkipNow() - } - t.Parallel() - - return *deploymentTest -} - -// FunctionalTest will run the test its called from iff the `-functional` flag -// is passed when calling `go test`. Otherwise the test will be skipped. FunctionalTest -// will run the test its called from in parallel. -func FunctionalTest(t *testing.T) { - if !*functionalTest { - t.SkipNow() - } - t.Parallel() -} - -// IntegrationTest will run the test its called from iff the `-integration` flag -// is passed when calling `go test`. Otherwise the test will be skipped. IntegrationTest -// will run the test its called from in parallel. -func IntegrationTest(t *testing.T) { - if !*integrationTest { - t.SkipNow() - } - //t.Parallel() -} - -// UnitTest will run the test its called from iff the `-unit` or `-short` flag -// is passed when calling `go test`. Otherwise the test will be skipped. UnitTest -// will run the test its called from in parallel. -func UnitTest(t *testing.T) { - if !*unitTest && !testing.Short() { - t.SkipNow() - } - // t.Parallel() -} - -// BadUnitTestWithSideEffects will run the test its called from iff the -// `-unit` or `-short` flag is passed when calling `go test`. Otherwise the test -// will be skipped. BadUnitTestWithSideEffects will run the test its called -// serially. Tests that use this flag are bad an should feel bad. -func BadUnitTestWithSideEffects(t *testing.T) { - if !*unitTest && !testing.Short() { - t.SkipNow() - } -} - -// SectorBuilderTest will run the test its called from iff the `-sectorbuilder` flag -// is passed when calling `go test`. Otherwise the test will be skipped. 
-func SectorBuilderTest(t *testing.T) { - if !*sectorBuilderTest { - t.SkipNow() - } -} diff --git a/internal/pkg/testhelpers/util.go b/internal/pkg/testhelpers/util.go deleted file mode 100644 index d98e2ddbaa..0000000000 --- a/internal/pkg/testhelpers/util.go +++ /dev/null @@ -1,95 +0,0 @@ -package testhelpers - -import ( - "fmt" - "net" - "os" - "sync" - "time" - - "github.com/filecoin-project/go-filecoin/build/project" - "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -// GetFreePort gets a free port from the kernel -// Credit: https://github.com/phayes/freeport -func GetFreePort() (int, error) { - addr, err := net.ResolveTCPAddr("tcp", "0.0.0.0:0") - if err != nil { - return 0, err - } - - l, err := net.ListenTCP("tcp", addr) - if err != nil { - return 0, err - } - defer l.Close() // nolint: errcheck - return l.Addr().(*net.TCPAddr).Port, nil -} - -// MustGetFilecoinBinary returns the path where the filecoin binary will be if it has been built and panics otherwise. -func MustGetFilecoinBinary() string { - path, err := GetFilecoinBinary() - if err != nil { - panic(err) - } - - return path -} - -// GetFilecoinBinary returns the path where the filecoin binary will be if it has been built -func GetFilecoinBinary() (string, error) { - bin, provided := testflags.BinaryPath() - if !provided { - bin = project.Root("go-filecoin") - } - - _, err := os.Stat(bin) - if err != nil { - return "", err - } - - if os.IsNotExist(err) { - return "", err - } - - return bin, nil -} - -// WaitForIt waits until the given callback returns true. -func WaitForIt(count int, delay time.Duration, cb func() (bool, error)) error { - var done bool - var err error - for i := 0; i < count; i++ { - done, err = cb() - if err != nil { - return err - } - if done { - break - } - time.Sleep(delay) - } - - if !done { - return fmt.Errorf("timeout waiting for it") - } - - return nil -} - -// WaitTimeout waits for the waitgroup for the specified max timeout. 
-// Returns true if waiting timed out. -func WaitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { - c := make(chan struct{}) - go func() { - defer close(c) - wg.Wait() - }() - select { - case <-c: - return false // completed normally - case <-time.After(timeout): - return true // timed out - } -} diff --git a/internal/pkg/types/atto_fil.go b/internal/pkg/types/atto_fil.go deleted file mode 100644 index f069737611..0000000000 --- a/internal/pkg/types/atto_fil.go +++ /dev/null @@ -1,104 +0,0 @@ -package types - -import ( - "fmt" - "math/big" - "strings" - - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/specs-actors/actors/abi" - specsabi "github.com/filecoin-project/specs-actors/actors/abi" - specsbig "github.com/filecoin-project/specs-actors/actors/abi/big" -) - -var attoPower = 18 -var tenToTheEighteen = specsbig.Exp(specsbig.NewInt(10), specsbig.NewInt(18)) - -// ZeroAttoFIL is the zero value for an AttoFIL, exported for consistency in construction of AttoFILs -var ZeroAttoFIL = specsbig.Zero() - -// AttoFIL represents a signed multi-precision integer quantity of -// attofilecoin (atto is metric for 10**-18). The zero value for -// AttoFIL represents the value 0. -// -// Reasons for embedding a big.Int instead of *big.Int: -// - We don't have check for nil in every method that does calculations. -// - Serialization "symmetry" when serializing AttoFIL{}. -type AttoFIL = specsabi.TokenAmount - -// NewAttoFIL allocates and returns a new AttoFIL set to x. -func NewAttoFIL(x *big.Int) AttoFIL { - return specsbig.Int{Int: x} -} - -// NewAttoFILFromFIL returns a new AttoFIL representing a quantity -// of attofilecoin equal to x filecoin. 
-func NewAttoFILFromFIL(x uint64) AttoFIL { - xAsBigInt := specsbig.NewIntUnsigned(x) - return specsbig.Mul(xAsBigInt, tenToTheEighteen) -} - -var tenToTheEighteenTokens = specsbig.Exp(specsbig.NewInt(10), specsbig.NewInt(18)) - -// NewAttoTokenFromToken should be moved when we cleanup the types -// Dragons: clean up and likely move to specs-actors -func NewAttoTokenFromToken(x uint64) abi.TokenAmount { - xAsBigInt := abi.NewTokenAmount(0) - xAsBigInt.SetUint64(x) - return specsbig.Mul(xAsBigInt, tenToTheEighteenTokens) -} - -// NewAttoFILFromBytes allocates and returns a new AttoFIL set -// to the value of buf as the bytes of a big-endian unsigned integer. -func NewAttoFILFromBytes(buf []byte) (AttoFIL, error) { - var af AttoFIL - err := encoding.Decode(buf, &af) - if err != nil { - return af, err - } - return af, nil -} - -// NewAttoFILFromFILString allocates a new AttoFIL set to the value of s filecoin, -// interpreted as a decimal in base 10, and returns it and a boolean indicating success. -func NewAttoFILFromFILString(s string) (AttoFIL, bool) { - splitNumber := strings.Split(s, ".") - // If '.' is absent from string, add an empty string to become the decimal part - if len(splitNumber) == 1 { - splitNumber = append(splitNumber, "") - } - intPart := splitNumber[0] - decPart := splitNumber[1] - // A decimal part longer than 18 digits should be an error - if len(decPart) > attoPower || len(splitNumber) > 2 { - return ZeroAttoFIL, false - } - // The decimal is right padded with 0's if it less than 18 digits long - for len(decPart) < attoPower { - decPart += "0" - } - - return NewAttoFILFromString(intPart+decPart, 10) -} - -// NewAttoFILFromString allocates a new AttoFIL set to the value of s attofilecoin, -// interpreted in the given base, and returns it and a boolean indicating success. 
-func NewAttoFILFromString(s string, base int) (AttoFIL, bool) { - out := specsbig.NewInt(0) - _, isErr := out.Int.SetString(s, base) - return out, isErr -} - -// BigToUint64 converts a big Int to a uint64. It will error if -// the Int is too big to fit into 64 bits or is negative -func BigToUint64(bi specsbig.Int) (uint64, error) { - if !bi.Int.IsUint64() { - return 0, fmt.Errorf("Int: %s could not be represented as uint64", bi.String()) - } - return bi.Uint64(), nil -} - -// Uint64ToBig converts a uint64 to a big Int. Precodition: don't overflow int64. -func Uint64ToBig(u uint64) specsbig.Int { - return specsbig.NewInt(int64(u)) -} diff --git a/internal/pkg/types/atto_fil_test.go b/internal/pkg/types/atto_fil_test.go deleted file mode 100644 index e454a2eb49..0000000000 --- a/internal/pkg/types/atto_fil_test.go +++ /dev/null @@ -1,311 +0,0 @@ -package types - -import ( - "encoding/json" - "math/big" - "math/rand" - "testing" - "time" - - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - specsbig "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func BigIntFromString(s string) big.Int { - bigInt, _ := new(big.Int).SetString(s, 10) - return *bigInt -} - -func TestFILToAttoFIL(t *testing.T) { - tf.UnitTest(t) - - x := NewAttoFILFromFIL(2) - v := big.NewInt(10) - v = v.Exp(v, big.NewInt(18), nil) - v = v.Mul(v, big.NewInt(2)) - assert.True(t, NewAttoFIL(v).Equals(x)) -} - -func TestAttoFILCreation(t *testing.T) { - tf.UnitTest(t) - - a := NewAttoFILFromFIL(123) - assert.IsType(t, AttoFIL{}, a) - - ab, err := encoding.Encode(a) - require.NoError(t, err) - b, err := NewAttoFILFromBytes(ab) - require.NoError(t, err) - assert.Equal(t, a, b) - - as := a.String() - assert.Equal(t, as, "123000000000000000000") - c, ok := NewAttoFILFromString(as, 10) - assert.True(t, ok) - 
assert.Equal(t, a, c) - - _, ok = NewAttoFILFromFILString("asdf") - assert.False(t, ok) -} - -func TestZeroAttoFIL(t *testing.T) { - tf.UnitTest(t) - - z := NewAttoFILFromFIL(0) - assert.True(t, ZeroAttoFIL.Equals(z)) -} - -func TestAttoFILComparison(t *testing.T) { - tf.UnitTest(t) - - a := NewAttoFILFromFIL(123) - b := NewAttoFILFromFIL(123) - c := NewAttoFILFromFIL(456) - - t.Run("handles comparison", func(t *testing.T) { - assert.True(t, a.Equals(b)) - assert.True(t, b.Equals(a)) - - assert.False(t, a.Equals(c)) - assert.False(t, c.Equals(a)) - - assert.True(t, a.LessThan(c)) - assert.True(t, a.LessThanEqual(c)) - assert.True(t, c.GreaterThan(a)) - assert.True(t, c.GreaterThanEqual(a)) - assert.True(t, a.GreaterThanEqual(b)) - assert.True(t, a.LessThanEqual(b)) - }) - - t.Run("treats ZeroAttoFIL as zero", func(t *testing.T) { - d := specsbig.Sub(ZeroAttoFIL, a) - zeroValue := NewAttoFILFromFIL(0) - - assert.True(t, zeroValue.Equals(ZeroAttoFIL)) - assert.True(t, ZeroAttoFIL.Equals(zeroValue)) - assert.True(t, d.LessThan(zeroValue)) - assert.True(t, zeroValue.GreaterThan(d)) - assert.True(t, c.GreaterThan(zeroValue)) - assert.True(t, zeroValue.LessThan(c)) - }) -} - -func TestAttoFILAddition(t *testing.T) { - tf.UnitTest(t) - - a := NewAttoFILFromFIL(123) - b := NewAttoFILFromFIL(456) - - t.Run("handles addition", func(t *testing.T) { - aStr := a.String() - bStr := b.String() - sum := specsbig.Add(a, b) - - assert.Equal(t, NewAttoFILFromFIL(579), sum) - - // Storage is not reused - assert.NotEqual(t, &a, &sum) - assert.NotEqual(t, &b, &sum) - - // Values have not changed. 
- assert.Equal(t, aStr, a.String()) - assert.Equal(t, bStr, b.String()) - }) - - t.Run("treats ZeroAttoFIL as zero", func(t *testing.T) { - assert.True(t, specsbig.Add(ZeroAttoFIL, a).Equals(a)) - assert.True(t, specsbig.Add(a, ZeroAttoFIL).Equals(a)) - }) -} - -func TestAttoFILSubtraction(t *testing.T) { - tf.UnitTest(t) - - a := NewAttoFILFromFIL(456) - b := NewAttoFILFromFIL(123) - - t.Run("handles subtraction", func(t *testing.T) { - aStr := a.String() - bStr := b.String() - delta := specsbig.Sub(a, b) - - assert.Equal(t, delta, NewAttoFILFromFIL(333)) - - // Storage is not reused - assert.NotEqual(t, &a, &delta) - assert.NotEqual(t, &b, &delta) - - // Values have not changed. - assert.Equal(t, aStr, a.String()) - assert.Equal(t, bStr, b.String()) - }) - - t.Run("treats ZeroAttoFIL as zero", func(t *testing.T) { - assert.True(t, specsbig.Sub(a, ZeroAttoFIL).Equals(a)) - assert.True(t, specsbig.Sub(ZeroAttoFIL, ZeroAttoFIL).Equals(ZeroAttoFIL)) - }) -} - -func TestAttoFILCborMarshaling(t *testing.T) { - tf.UnitTest(t) - - t.Run("CBOR decode(encode(AttoFIL)) == identity(AttoFIL)", func(t *testing.T) { - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - - for i := 0; i < 100; i++ { - preEncode := NewAttoFILFromFIL(rng.Uint64()) - postDecode := AttoFIL{} - - out, err := encoding.Encode(preEncode) - assert.NoError(t, err) - - err = encoding.Decode(out, &postDecode) - assert.NoError(t, err) - - assert.True(t, preEncode.Equals(postDecode), "pre: %s post: %s", preEncode.String(), postDecode.String()) - } - }) - t.Run("CBOR encodes zero val as ZeroAttoFIL", func(t *testing.T) { - var np AttoFIL - - out, err := encoding.Encode(np) - assert.NoError(t, err) - - out2, err := encoding.Encode(ZeroAttoFIL) - assert.NoError(t, err) - - assert.Equal(t, out, out2) - }) -} - -func TestAttoFILJsonMarshaling(t *testing.T) { - tf.UnitTest(t) - - t.Run("JSON unmarshal(marshal(AttoFIL)) == identity(AttoFIL)", func(t *testing.T) { - rng := 
rand.New(rand.NewSource(time.Now().UnixNano())) - - for i := 0; i < 100; i++ { - toBeMarshaled := NewAttoFILFromFIL(rng.Uint64()) - - marshaled, err := json.Marshal(toBeMarshaled) - assert.NoError(t, err) - - var unmarshaled AttoFIL - err = json.Unmarshal(marshaled, &unmarshaled) - assert.NoError(t, err) - - assert.True(t, toBeMarshaled.Equals(unmarshaled), "should be equal - toBeMarshaled: %s unmarshaled: %s)", toBeMarshaled.String(), unmarshaled.String()) - } - }) - - t.Run("unmarshal(marshal(AttoFIL)) == AttoFIL for decimal FIL", func(t *testing.T) { - toBeMarshaled, _ := NewAttoFILFromFILString("912129289198393.123456789012345678") - - marshaled, err := json.Marshal(toBeMarshaled) - assert.NoError(t, err) - - var unmarshaled AttoFIL - err = json.Unmarshal(marshaled, &unmarshaled) - assert.NoError(t, err) - - assert.True(t, toBeMarshaled.Equals(unmarshaled), "should be equal - toBeMarshaled: %s unmarshaled: %s)", toBeMarshaled.String(), unmarshaled.String()) - }) - - t.Run("cannot JSON marshall nil as *AttoFIL", func(t *testing.T) { - var np *AttoFIL - - out, err := json.Marshal(np) - assert.NoError(t, err) - - out2, err := json.Marshal(ZeroAttoFIL) - assert.NoError(t, err) - - assert.NotEqual(t, out, out2) - }) -} - -func TestAttoFILIsZero(t *testing.T) { - tf.UnitTest(t) - - p := NewAttoFILFromFIL(100) // positive - z := NewAttoFILFromFIL(0) // zero - n := specsbig.Sub(NewAttoFILFromFIL(0), p) // negative - - t.Run("returns true if zero token", func(t *testing.T) { - assert.True(t, z.IsZero()) - assert.True(t, ZeroAttoFIL.IsZero()) - }) - - t.Run("returns false if greater than zero token", func(t *testing.T) { - assert.False(t, p.IsZero()) - }) - - t.Run("returns false if less than zero token", func(t *testing.T) { - assert.False(t, n.IsZero()) - }) -} - -func TestString(t *testing.T) { - tf.UnitTest(t) - - // A very large number of attoFIL - attoFIL, _ := new(big.Int).SetString("912129289198393123456789012345678", 10) - assert.Equal(t, 
"912129289198393123456789012345678", NewAttoFIL(attoFIL).String()) - - // A multiple of 1000 attoFIL - attoFIL, _ = new(big.Int).SetString("9123372036854775000", 10) - assert.Equal(t, "9123372036854775000", NewAttoFIL(attoFIL).String()) - - // Less than 10^18 attoFIL - attoFIL, _ = new(big.Int).SetString("36854775878", 10) - assert.Equal(t, "36854775878", NewAttoFIL(attoFIL).String()) - - // A multiple of 100 attFIL that is less than 10^18 - attoFIL, _ = new(big.Int).SetString("36854775800", 10) - assert.Equal(t, "36854775800", NewAttoFIL(attoFIL).String()) - - // A number of attFIL that is an integer number of FIL - attoFIL, _ = new(big.Int).SetString("123000000000000000000", 10) - assert.Equal(t, "123000000000000000000", NewAttoFIL(attoFIL).String()) -} - -func TestNewAttoFILFromFILString(t *testing.T) { - tf.UnitTest(t) - - t.Run("parses legitimate values correctly", func(t *testing.T) { - attoFIL, _ := NewAttoFILFromFILString(".12345") - assert.Equal(t, BigIntFromString("123450000000000000"), *attoFIL.Int) - - attoFIL, _ = NewAttoFILFromFILString("000000.000000") - assert.Equal(t, BigIntFromString("0"), *attoFIL.Int) - - attoFIL, _ = NewAttoFILFromFILString("0000.12345") - assert.Equal(t, BigIntFromString("123450000000000000"), *attoFIL.Int) - - attoFIL, _ = NewAttoFILFromFILString("12345.0") - assert.Equal(t, BigIntFromString("12345000000000000000000"), *attoFIL.Int) - - attoFIL, _ = NewAttoFILFromFILString("12345") - assert.Equal(t, BigIntFromString("12345000000000000000000"), *attoFIL.Int) - }) - - t.Run("rejects nonsense values", func(t *testing.T) { - _, ok := NewAttoFILFromFILString("notanumber") - assert.False(t, ok) - - _, ok = NewAttoFILFromFILString("384042.wat") - assert.False(t, ok) - - _, ok = NewAttoFILFromFILString("78wat") - assert.False(t, ok) - - _, ok = NewAttoFILFromFILString("1234567890abcde") - assert.False(t, ok) - - _, ok = NewAttoFILFromFILString("127.0.0.1") - assert.False(t, ok) - }) -} diff --git a/internal/pkg/types/commitments.go 
b/internal/pkg/types/commitments.go deleted file mode 100644 index 0e44090802..0000000000 --- a/internal/pkg/types/commitments.go +++ /dev/null @@ -1,35 +0,0 @@ -package types - -// Commitments is a struct containing the replica and data commitments produced -// when sealing a sector. -type Commitments struct { - CommD *CommD - CommR *CommR - CommRStar *CommRStar -} - -// PoStChallengeSeedBytesLen is the number of bytes in the Proof of SpaceTime challenge seed. -const PoStChallengeSeedBytesLen uint = 32 - -// CommitmentBytesLen is the number of bytes in a CommR, CommD, CommP, and CommRStar. -const CommitmentBytesLen uint = 32 - -// PoStChallengeSeed is an input to the proof-of-spacetime generation and verification methods. -type PoStChallengeSeed [PoStChallengeSeedBytesLen]byte - -// CommR is the merkle root of the replicated data. It is an output of the -// sector sealing (PoRep) process. -type CommR [CommitmentBytesLen]byte - -// CommD is the merkle root of the original user data. It is an output of the -// sector sealing (PoRep) process. -type CommD [CommitmentBytesLen]byte - -// CommP is the merkle root of a piece of data included within the original user data. It is -// generated by the client, and the miner must generated a piece inclusion proof from CommP -// to CommD. -type CommP [CommitmentBytesLen]byte - -// CommRStar is a hash of intermediate layers. It is an output of the sector -// sealing (PoRep) process. -type CommRStar [CommitmentBytesLen]byte diff --git a/internal/pkg/types/commitments_test.go b/internal/pkg/types/commitments_test.go deleted file mode 100644 index 1737ea84c9..0000000000 --- a/internal/pkg/types/commitments_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package types_test - -import ( - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - . 
"github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/util/convert" - - "github.com/stretchr/testify/assert" -) - -func TestEncodingZeroVal(t *testing.T) { - t.Skip("cbor fix") - comms := Commitments{} - data, err := encoding.Encode(comms) - assert.NoError(t, err) - var newComms Commitments - err = encoding.Decode(data, &newComms) - assert.NoError(t, err) -} - -func TestEncoding(t *testing.T) { - t.Skip("cbor fix") - var comms Commitments - - commR := CommR(convert.To32ByteArray([]byte{0xf})) - commD := CommD(convert.To32ByteArray([]byte{0xa})) - commRStar := CommRStar(convert.To32ByteArray([]byte{0xc})) - - comms.CommR = &commR - comms.CommD = &commD - comms.CommRStar = &commRStar - - data, err := encoding.Encode(comms) - assert.NoError(t, err) - var newComms Commitments - err = encoding.Decode(data, &newComms) - assert.NoError(t, err) -} diff --git a/internal/pkg/types/message.go b/internal/pkg/types/message.go deleted file mode 100644 index dfc0f1f751..0000000000 --- a/internal/pkg/types/message.go +++ /dev/null @@ -1,203 +0,0 @@ -package types - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "math/big" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/specs-actors/actors/abi" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - ipld "github.com/ipfs/go-ipld-format" - errPkg "github.com/pkg/errors" - typegen "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - 
-const MessageVersion = 0 - -// BlockGasLimit is the maximum amount of gas that can be used to execute messages in a single block. -var BlockGasLimit = gas.NewGas(100e6) - -// EmptyMessagesCID is the cid of an empty collection of messages. -var EmptyMessagesCID cid.Cid - -// EmptyReceiptsCID is the cid of an empty collection of receipts. -var EmptyReceiptsCID cid.Cid - -// EmptyTxMetaCID is the cid of a TxMeta wrapping empty cids -var EmptyTxMetaCID cid.Cid - -func init() { - tmpCst := cborutil.NewIpldStore(blockstore.NewBlockstore(datastore.NewMapDatastore())) - emptyAMTCid, err := amt.FromArray(context.Background(), tmpCst, []typegen.CBORMarshaler{}) - if err != nil { - panic("could not create CID for empty AMT") - } - EmptyMessagesCID = emptyAMTCid - EmptyReceiptsCID = emptyAMTCid - EmptyTxMetaCID, err = tmpCst.Put(context.Background(), TxMeta{SecpRoot: e.NewCid(EmptyMessagesCID), BLSRoot: e.NewCid(EmptyMessagesCID)}) - if err != nil { - panic("could not create CID for empty TxMeta") - } -} - -// UnsignedMessage is an exchange of information between two actors modeled -// as a function call. -type UnsignedMessage struct { - // control field for encoding struct as an array - _ struct{} `cbor:",toarray"` - - Version int64 `json:"version"` - - To address.Address `json:"to"` - From address.Address `json:"from"` - // When receiving a message from a user account the nonce in - // the message must match the expected nonce in the from actor. - // This prevents replay attacks. - CallSeqNum uint64 `json:"callSeqNum"` - - Value AttoFIL `json:"value"` - - GasPrice AttoFIL `json:"gasPrice"` - GasLimit gas.Unit `json:"gasLimit"` - - Method abi.MethodNum `json:"method"` - Params []byte `json:"params"` - // Pay attention to Equals() if updating this struct. -} - -// NewUnsignedMessage creates a new message. 
-func NewUnsignedMessage(from, to address.Address, nonce uint64, value AttoFIL, method abi.MethodNum, params []byte) *UnsignedMessage { - return &UnsignedMessage{ - Version: MessageVersion, - To: to, - From: from, - CallSeqNum: nonce, - Value: value, - Method: method, - Params: params, - } -} - -// NewMeteredMessage adds gas price and gas limit to the message -func NewMeteredMessage(from, to address.Address, nonce uint64, value AttoFIL, method abi.MethodNum, params []byte, price AttoFIL, limit gas.Unit) *UnsignedMessage { - return &UnsignedMessage{ - Version: MessageVersion, - To: to, - From: from, - CallSeqNum: nonce, - Value: value, - GasPrice: price, - GasLimit: limit, - Method: method, - Params: params, - } -} - -// Unmarshal a message from the given bytes. -func (msg *UnsignedMessage) Unmarshal(b []byte) error { - return encoding.Decode(b, msg) -} - -// Marshal the message into bytes. -func (msg *UnsignedMessage) Marshal() ([]byte, error) { - return encoding.Encode(msg) -} - -// ToNode converts the Message to an IPLD node. -func (msg *UnsignedMessage) ToNode() (ipld.Node, error) { - data, err := encoding.Encode(msg) - if err != nil { - return nil, err - } - c, err := constants.DefaultCidBuilder.Sum(data) - if err != nil { - return nil, err - } - - blk, err := blocks.NewBlockWithCid(data, c) - if err != nil { - return nil, err - } - obj, err := cbor.DecodeBlock(blk) - if err != nil { - return nil, err - } - - return obj, nil -} - -// Cid returns the canonical CID for the message. -// TODO: can we avoid returning an error? -func (msg *UnsignedMessage) Cid() (cid.Cid, error) { - obj, err := msg.ToNode() - if err != nil { - return cid.Undef, errPkg.Wrap(err, "failed to marshal to cbor") - } - - return obj.Cid(), nil -} - -// OnChainLen returns the amount of bytes used to represent the message on chain. 
-func (msg *UnsignedMessage) OnChainLen() int { - bits, err := encoding.Encode(msg) - if err != nil { - panic(err) - } - return len(bits) -} - -func (msg *UnsignedMessage) String() string { - errStr := "(error encoding Message)" - cid, err := msg.Cid() - if err != nil { - return errStr - } - js, err := json.MarshalIndent(msg, "", " ") - if err != nil { - return errStr - } - return fmt.Sprintf("Message cid=[%v]: %s", cid, string(js)) -} - -// Equals tests whether two messages are equal -func (msg *UnsignedMessage) Equals(other *UnsignedMessage) bool { - return msg.To == other.To && - msg.From == other.From && - msg.CallSeqNum == other.CallSeqNum && - msg.Value.Equals(other.Value) && - msg.Method == other.Method && - msg.GasPrice.Equals(other.GasPrice) && - msg.GasLimit == other.GasLimit && - bytes.Equal(msg.Params, other.Params) -} - -// NewGasPrice constructs a gas price (in AttoFIL) from the given number. -func NewGasPrice(price int64) AttoFIL { - return NewAttoFIL(big.NewInt(price)) -} - -// TxMeta tracks the merkleroots of both secp and bls messages separately -type TxMeta struct { - _ struct{} `cbor:",toarray"` - BLSRoot e.Cid `json:"blsRoot"` - SecpRoot e.Cid `json:"secpRoot"` -} - -// String returns a readable printing string of TxMeta -func (m TxMeta) String() string { - return fmt.Sprintf("secp: %s, bls: %s", m.SecpRoot.String(), m.BLSRoot.String()) -} diff --git a/internal/pkg/types/message_test.go b/internal/pkg/types/message_test.go deleted file mode 100644 index 9587ca8bf0..0000000000 --- a/internal/pkg/types/message_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package types - -import ( - "reflect" - "testing" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -func TestMessageMarshal(t *testing.T) { - tf.UnitTest(t) - - addrGetter := vmaddr.NewForTestGetter() - msg := NewMeteredMessage( - addrGetter(), - addrGetter(), - 42, - NewAttoFILFromFIL(17777), - builtin.MethodSend, - []byte("foobar"), - NewAttoFILFromFIL(3), - gas.NewGas(4), - ) - - // This check requests that you add a non-zero value for new fields above, - // then update the field count below. - require.Equal(t, 10, reflect.TypeOf(*msg).NumField()) - - marshalled, err := msg.Marshal() - assert.NoError(t, err) - - msgBack := UnsignedMessage{} - assert.False(t, msg.Equals(&msgBack)) - - err = msgBack.Unmarshal(marshalled) - assert.NoError(t, err) - - assert.Equal(t, msg.Version, msgBack.Version) - assert.Equal(t, msg.To, msgBack.To) - assert.Equal(t, msg.From, msgBack.From) - assert.Equal(t, msg.Value, msgBack.Value) - assert.Equal(t, msg.Method, msgBack.Method) - assert.Equal(t, msg.Params, msgBack.Params) - assert.Equal(t, msg.GasLimit, msgBack.GasLimit) - assert.Equal(t, msg.GasPrice, msgBack.GasPrice) - assert.True(t, msg.Equals(&msgBack)) -} - -func TestMessageCid(t *testing.T) { - tf.UnitTest(t) - - addrGetter := vmaddr.NewForTestGetter() - - msg1 := NewUnsignedMessage( - addrGetter(), - addrGetter(), - 0, - NewAttoFILFromFIL(999), - builtin.MethodSend, - nil, - ) - - msg2 := NewUnsignedMessage( - addrGetter(), - addrGetter(), - 0, - NewAttoFILFromFIL(4004), - builtin.MethodSend, - nil, - ) - - c1, err := msg1.Cid() - assert.NoError(t, err) - c2, err := msg2.Cid() - assert.NoError(t, err) - - assert.NotEqual(t, c1.String(), c2.String()) -} - -func TestMessageString(t *testing.T) { - tf.UnitTest(t) - - addrGetter := vmaddr.NewForTestGetter() - - msg := NewUnsignedMessage( - addrGetter(), - addrGetter(), - 0, - NewAttoFILFromFIL(999), - builtin.MethodSend, - nil, - ) - - cid, err := msg.Cid() - require.NoError(t, err) - - got := msg.String() - assert.Contains(t, got, cid.String()) -} diff 
--git a/internal/pkg/types/not_found.go b/internal/pkg/types/not_found.go deleted file mode 100644 index de9476b87d..0000000000 --- a/internal/pkg/types/not_found.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -import "fmt" - -// ErrNotFound is not the error you are looking for. -var ErrNotFound = fmt.Errorf("Not found") diff --git a/internal/pkg/types/signed_message.go b/internal/pkg/types/signed_message.go deleted file mode 100644 index 1c6cf203bd..0000000000 --- a/internal/pkg/types/signed_message.go +++ /dev/null @@ -1,123 +0,0 @@ -package types - -import ( - "context" - "encoding/json" - "fmt" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - ipld "github.com/ipfs/go-ipld-format" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" -) - -// SignedMessage contains a message and its signature -// TODO do not export these fields as it increases the chances of producing a -// `SignedMessage` with an empty signature. -type SignedMessage struct { - // control field for encoding struct as an array - _ struct{} `cbor:",toarray"` - - Message UnsignedMessage `json:"meteredMessage"` - Signature crypto.Signature `json:"signature"` - // Pay attention to Equals() if updating this struct. -} - -// NewSignedMessage accepts a message `msg` and a signer `s`. NewSignedMessage returns a `SignedMessage` containing -// a signature derived from the serialized `msg` and `msg.From` -// NOTE: this method can only sign message with From being a public-key type address, not an ID address. -// We should deprecate this and move to more explicit signing via an address resolver. 
-func NewSignedMessage(ctx context.Context, msg UnsignedMessage, s Signer) (*SignedMessage, error) { - msgCid, err := msg.Cid() - if err != nil { - return nil, err - } - - sig, err := s.SignBytes(ctx, msgCid.Bytes(), msg.From) - if err != nil { - return nil, err - } - - return &SignedMessage{ - Message: msg, - Signature: sig, - }, nil -} - -// Unmarshal a SignedMessage from the given bytes. -func (smsg *SignedMessage) Unmarshal(b []byte) error { - return encoding.Decode(b, smsg) -} - -// Marshal the SignedMessage into bytes. -func (smsg *SignedMessage) Marshal() ([]byte, error) { - return encoding.Encode(smsg) -} - -// Cid returns the canonical CID for the SignedMessage. -func (smsg *SignedMessage) Cid() (cid.Cid, error) { - obj, err := smsg.ToNode() - if err != nil { - return cid.Undef, errors.Wrap(err, "failed to marshal to cbor") - } - - return obj.Cid(), nil -} - -// ToNode converts the SignedMessage to an IPLD node. -func (smsg *SignedMessage) ToNode() (ipld.Node, error) { - data, err := encoding.Encode(smsg) - if err != nil { - return nil, err - } - c, err := constants.DefaultCidBuilder.Sum(data) - if err != nil { - return nil, err - } - - blk, err := blocks.NewBlockWithCid(data, c) - if err != nil { - return nil, err - } - obj, err := cbor.DecodeBlock(blk) - if err != nil { - return nil, err - } - - return obj, nil - -} - -// OnChainLen returns the amount of bytes used to represent the message on chain. -// TODO we can save this redundant encoding if we plumbed the size through from when the message was originally decoded from the network. 
-func (smsg *SignedMessage) OnChainLen() int { - bits, err := encoding.Encode(smsg) - if err != nil { - panic(err) - } - return len(bits) -} - -func (smsg *SignedMessage) String() string { - errStr := "(error encoding SignedMessage)" - cid, err := smsg.Cid() - if err != nil { - return errStr - } - js, err := json.MarshalIndent(smsg, "", " ") - if err != nil { - return errStr - } - return fmt.Sprintf("SignedMessage cid=[%v]: %s", cid, string(js)) -} - -// Equals tests whether two signed messages are equal. -func (smsg *SignedMessage) Equals(other *SignedMessage) bool { - return smsg.Message.Equals(&other.Message) && - smsg.Signature.Equals(&other.Signature) -} diff --git a/internal/pkg/types/signed_message_test.go b/internal/pkg/types/signed_message_test.go deleted file mode 100644 index 1d94e55c18..0000000000 --- a/internal/pkg/types/signed_message_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package types - -import ( - "context" - "reflect" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -var mockSigner = NewMockSigner(MustGenerateKeyInfo(1, 42)) - -func TestSignedMessageString(t *testing.T) { - tf.UnitTest(t) - - smsg := makeMessage(t, mockSigner, 42) - cid, err := smsg.Cid() - require.NoError(t, err) - - got := smsg.String() - assert.Contains(t, got, cid.String()) -} - -func TestSignedMessageMarshal(t *testing.T) { - tf.UnitTest(t) - - smsg := makeMessage(t, mockSigner, 42) - - marshalled, err := smsg.Marshal() - assert.NoError(t, err) - - smsgBack := SignedMessage{} - assert.False(t, smsg.Equals(&smsgBack)) - - err = smsgBack.Unmarshal(marshalled) - assert.NoError(t, err) - - assert.Equal(t, smsg.Message, smsgBack.Message) - assert.Equal(t, smsg.Signature, 
smsgBack.Signature) - assert.True(t, smsg.Equals(&smsgBack)) -} - -func TestSignedMessageCid(t *testing.T) { - tf.UnitTest(t) - - smsg1 := makeMessage(t, mockSigner, 41) - smsg2 := makeMessage(t, mockSigner, 42) - - c1, err := smsg1.Cid() - assert.NoError(t, err) - c2, err := smsg2.Cid() - assert.NoError(t, err) - - assert.NotEqual(t, c1.String(), c2.String()) - -} - -func TestSignedMessageCidToNode(t *testing.T) { - tf.UnitTest(t) - - smsg := makeMessage(t, mockSigner, 41) - - c, err := smsg.Cid() - require.NoError(t, err) - - n, err := smsg.ToNode() - require.NoError(t, err) - - assert.Equal(t, c, n.Cid()) - -} - -func makeMessage(t *testing.T, signer MockSigner, nonce uint64) *SignedMessage { - newAddr, err := address.NewSecp256k1Address([]byte("receiver")) - require.NoError(t, err) - - msg := NewMeteredMessage( - signer.Addresses[0], - newAddr, - nonce, - NewAttoFILFromFIL(2), - abi.MethodNum(2352), - []byte("params"), - NewGasPrice(1000), - gas.NewGas(100)) - smsg, err := NewSignedMessage(context.TODO(), *msg, &signer) - require.NoError(t, err) - - // This check requests that you add a non-zero value for new fields above, - // then update the field count below. - require.Equal(t, 3, reflect.TypeOf(*smsg).NumField()) - - return smsg -} diff --git a/internal/pkg/types/signer.go b/internal/pkg/types/signer.go deleted file mode 100644 index a18da4989d..0000000000 --- a/internal/pkg/types/signer.go +++ /dev/null @@ -1,14 +0,0 @@ -package types - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/crypto" -) - -// Signer signs data with a private key obtained internally from a provided address. 
-type Signer interface { - SignBytes(ctx context.Context, data []byte, addr address.Address) (crypto.Signature, error) - HasAddress(ctx context.Context, addr address.Address) (bool, error) -} diff --git a/internal/pkg/types/testing.go b/internal/pkg/types/testing.go deleted file mode 100644 index 90f3be758c..0000000000 --- a/internal/pkg/types/testing.go +++ /dev/null @@ -1,348 +0,0 @@ -package types - -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// MockSigner implements the Signer interface -type MockSigner struct { - AddrKeyInfo map[address.Address]crypto.KeyInfo - Addresses []address.Address - PubKeys [][]byte -} - -// NewMockSigner returns a new mock signer, capable of signing data with -// keys (addresses derived from) in keyinfo -func NewMockSigner(kis []crypto.KeyInfo) MockSigner { - var ms MockSigner - ms.AddrKeyInfo = make(map[address.Address]crypto.KeyInfo) - for _, k := range kis { - // extract public key - pub := k.PublicKey() - - var newAddr address.Address - var err error - if k.SigType == crypto.SigTypeSecp256k1 { - newAddr, err = address.NewSecp256k1Address(pub) - } else if k.SigType == crypto.SigTypeBLS { - newAddr, err = address.NewBLSAddress(pub) - } - if err != nil { - panic(err) - } - ms.Addresses = append(ms.Addresses, newAddr) - ms.AddrKeyInfo[newAddr] = k - ms.PubKeys = append(ms.PubKeys, pub) - } - return ms -} - -// NewMockSignersAndKeyInfo is a convenience function to generate a mock -// 
signers with some keys. -func NewMockSignersAndKeyInfo(numSigners int) (MockSigner, []crypto.KeyInfo) { - ki := MustGenerateKeyInfo(numSigners, 42) - signer := NewMockSigner(ki) - return signer, ki -} - -// MustGenerateMixedKeyInfo produces m bls keys and n secp keys. -// BLS and Secp will be interleaved. The keys will be valid, but not deterministic. -func MustGenerateMixedKeyInfo(m int, n int) []crypto.KeyInfo { - info := []crypto.KeyInfo{} - for m > 0 && n > 0 { - if m > 0 { - ki, err := crypto.NewBLSKeyFromSeed(rand.Reader) - if err != nil { - panic(err) - } - info = append(info, ki) - m-- - } - - if n > 0 { - ki, err := crypto.NewSecpKeyFromSeed(rand.Reader) - if err != nil { - panic(err) - } - info = append(info, ki) - n-- - } - } - return info -} - -// MustGenerateBLSKeyInfo produces n distinct BLS keyinfos. -func MustGenerateBLSKeyInfo(n int, seed byte) []crypto.KeyInfo { - token := bytes.Repeat([]byte{seed}, 512) - var keyinfos []crypto.KeyInfo - for i := 0; i < n; i++ { - token[0] = byte(i) - ki, err := crypto.NewBLSKeyFromSeed(bytes.NewReader(token)) - if err != nil { - panic(err) - } - keyinfos = append(keyinfos, ki) - } - return keyinfos -} - -// MustGenerateKeyInfo generates `n` distinct keyinfos using seed `seed`. -// The result is deterministic (for stable tests), don't use this for real keys! -func MustGenerateKeyInfo(n int, seed byte) []crypto.KeyInfo { - token := bytes.Repeat([]byte{seed}, 512) - var keyinfos []crypto.KeyInfo - for i := 0; i < n; i++ { - token[0] = byte(i) - ki, err := crypto.NewSecpKeyFromSeed(bytes.NewReader(token)) - if err != nil { - panic(err) - } - keyinfos = append(keyinfos, ki) - } - return keyinfos -} - -// SignBytes cryptographically signs `data` using the Address `addr`. 
-func (ms MockSigner) SignBytes(_ context.Context, data []byte, addr address.Address) (crypto.Signature, error) { - ki, ok := ms.AddrKeyInfo[addr] - if !ok { - return crypto.Signature{}, errors.New("unknown address") - } - return crypto.Sign(data, ki.Key(), ki.SigType) -} - -// HasAddress returns whether the signer can sign with this address -func (ms MockSigner) HasAddress(_ context.Context, addr address.Address) (bool, error) { - return true, nil -} - -// GetAddressForPubKey looks up a KeyInfo address associated with a given PublicKeyForSecpSecretKey for a MockSigner -func (ms MockSigner) GetAddressForPubKey(pk []byte) (address.Address, error) { - var addr address.Address - - for _, ki := range ms.AddrKeyInfo { - testPk := ki.PublicKey() - - if bytes.Equal(testPk, pk) { - addr, err := ki.Address() - if err != nil { - return addr, errors.New("could not fetch address") - } - return addr, nil - } - } - return addr, errors.New("public key not found in wallet") -} - -// NewSignedMessageForTestGetter returns a closure that returns a SignedMessage unique to that invocation. -// The message is unique wrt the closure returned, not globally. You can use this function -// in tests instead of manually creating messages -- it both reduces duplication and gives us -// exactly one place to create valid messages for tests if messages require validation in the -// future. 
-// TODO support chosing from address -func NewSignedMessageForTestGetter(ms MockSigner) func() *SignedMessage { - i := 0 - return func() *SignedMessage { - s := fmt.Sprintf("smsg%d", i) - i++ - newAddr, err := address.NewSecp256k1Address([]byte(s + "-to")) - if err != nil { - panic(err) - } - msg := NewMeteredMessage( - ms.Addresses[0], // from needs to be an address from the signer - newAddr, - 0, - ZeroAttoFIL, - builtin.MethodSend, - []byte("params"), - ZeroAttoFIL, - gas.Zero, - ) - smsg, err := NewSignedMessage(context.TODO(), *msg, &ms) - if err != nil { - panic(err) - } - return smsg - } -} - -// Type-related test helpers. - -// CidFromString generates Cid from string input -func CidFromString(t *testing.T, input string) cid.Cid { - c, err := constants.DefaultCidBuilder.Sum([]byte(input)) - require.NoError(t, err) - return c -} - -// NewCidForTestGetter returns a closure that returns a Cid unique to that invocation. -// The Cid is unique wrt the closure returned, not globally. You can use this function -// in tests. -func NewCidForTestGetter() func() cid.Cid { - i := 31337 - return func() cid.Cid { - obj, err := cbor.WrapObject([]int{i}, constants.DefaultHashFunction, -1) - if err != nil { - panic(err) - } - i++ - return obj.Cid() - } -} - -// NewMessageForTestGetter returns a closure that returns a message unique to that invocation. -// The message is unique wrt the closure returned, not globally. You can use this function -// in tests instead of manually creating messages -- it both reduces duplication and gives us -// exactly one place to create valid messages for tests if messages require validation in the -// future. 
-func NewMessageForTestGetter() func() *UnsignedMessage { - i := 0 - return func() *UnsignedMessage { - s := fmt.Sprintf("msg%d", i) - i++ - from, err := address.NewSecp256k1Address([]byte(s + "-from")) - if err != nil { - panic(err) - } - to, err := address.NewSecp256k1Address([]byte(s + "-to")) - if err != nil { - panic(err) - } - return NewUnsignedMessage( - from, - to, - 0, - ZeroAttoFIL, - abi.MethodNum(10000+i), - nil) - } -} - -// NewMsgs returns n messages. The messages returned are unique to this invocation -// but are not unique globally (ie, a second call to NewMsgs will return the same -// set of messages). -func NewMsgs(n int) []*UnsignedMessage { - newMsg := NewMessageForTestGetter() - msgs := make([]*UnsignedMessage, n) - for i := 0; i < n; i++ { - msgs[i] = newMsg() - msgs[i].CallSeqNum = uint64(i) - } - return msgs -} - -// NewSignedMsgs returns n signed messages. The messages returned are unique to this invocation -// but are not unique globally (ie, a second call to NewSignedMsgs will return the same -// set of messages). 
-func NewSignedMsgs(n uint, ms MockSigner) []*SignedMessage { - var err error - newMsg := NewMessageForTestGetter() - smsgs := make([]*SignedMessage, n) - for i := uint(0); i < n; i++ { - msg := newMsg() - msg.From = ms.Addresses[0] - msg.CallSeqNum = uint64(i) - msg.GasPrice = ZeroAttoFIL // NewGasPrice(1) - msg.GasLimit = gas.NewGas(0) - smsgs[i], err = NewSignedMessage(context.TODO(), *msg, ms) - if err != nil { - panic(err) - } - } - return smsgs -} - -// SignMsgs returns a slice of signed messages where the original messages -// are `msgs`, if signing one of the `msgs` fails an error is returned -func SignMsgs(ms MockSigner, msgs []*UnsignedMessage) ([]*SignedMessage, error) { - var smsgs []*SignedMessage - for _, m := range msgs { - s, err := NewSignedMessage(context.TODO(), *m, &ms) - if err != nil { - return nil, err - } - smsgs = append(smsgs, s) - } - return smsgs, nil -} - -// MsgCidsEqual returns true if the message cids are equal. It panics if -// it can't get their cid. -func MsgCidsEqual(m1, m2 *UnsignedMessage) bool { - m1Cid, err := m1.Cid() - if err != nil { - panic(err) - } - m2Cid, err := m2.Cid() - if err != nil { - panic(err) - } - return m1Cid.Equals(m2Cid) -} - -// SmsgCidsEqual returns true if the SignedMessage cids are equal. It panics if -// it can't get their cid. -func SmsgCidsEqual(m1, m2 *SignedMessage) bool { - m1Cid, err := m1.Cid() - if err != nil { - panic(err) - } - m2Cid, err := m2.Cid() - if err != nil { - panic(err) - } - return m1Cid.Equals(m2Cid) -} - -// NewMsgsWithAddrs returns a slice of `n` messages who's `From` field's are pulled -// from `a`. This method should be used when the addresses returned are to be signed -// at a later point. 
-func NewMsgsWithAddrs(n int, a []address.Address) []*UnsignedMessage { - if n > len(a) { - panic("cannot create more messages than there are addresess for") - } - newMsg := NewMessageForTestGetter() - msgs := make([]*UnsignedMessage, n) - for i := 0; i < n; i++ { - msgs[i] = newMsg() - msgs[i].From = a[i] - } - return msgs -} - -// HasCid allows two values with CIDs to be compared. -type HasCid interface { - Cid() cid.Cid -} - -// AssertHaveSameCid asserts that two values have identical CIDs. -func AssertHaveSameCid(t *testing.T, m HasCid, n HasCid) { - if !m.Cid().Equals(n.Cid()) { - assert.Fail(t, "CIDs don't match", "not equal %v %v", m.Cid(), n.Cid()) - } -} - -// AssertCidsEqual asserts that two CIDS are identical. -func AssertCidsEqual(t *testing.T, m cid.Cid, n cid.Cid) { - if !m.Equals(n) { - assert.Fail(t, "CIDs don't match", "not equal %v %v", m, n) - } -} diff --git a/internal/pkg/types/testing_test.go b/internal/pkg/types/testing_test.go deleted file mode 100644 index a487b5d2a3..0000000000 --- a/internal/pkg/types/testing_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestCidForTestGetter(t *testing.T) { - tf.UnitTest(t) - - newCid := NewCidForTestGetter() - c1 := newCid() - c2 := newCid() - assert.False(t, c1.Equals(c2)) - assert.False(t, c1.Equals(CidFromString(t, "somecid"))) // Just in case. 
-} - -func TestNewMessageForTestGetter(t *testing.T) { - tf.UnitTest(t) - - newMsg := NewMessageForTestGetter() - m1 := newMsg() - c1, _ := m1.Cid() - m2 := newMsg() - c2, _ := m2.Cid() - assert.False(t, c1.Equals(c2)) -} diff --git a/internal/pkg/version/protocol_version_table.go b/internal/pkg/version/protocol_version_table.go deleted file mode 100644 index da2d771dd5..0000000000 --- a/internal/pkg/version/protocol_version_table.go +++ /dev/null @@ -1,119 +0,0 @@ -package version - -import ( - "sort" - "strings" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/pkg/errors" -) - -// protocolVersion specifies that a particular protocol version goes into effect at a particular block height -type protocolVersion struct { - Version uint64 - EffectiveAt abi.ChainEpoch -} - -// ProtocolVersionTable is a data structure capable of specifying which protocol versions are active at which block heights. -// It must be constructed with the ProtocolVersionTableBuilder which enforces that the table has at least one -// entry at block height zero and that all the versions are sorted. -type ProtocolVersionTable struct { - versions []protocolVersion -} - -// VersionAt returns the protocol versions at the given block height for this PVT's network. -func (pvt *ProtocolVersionTable) VersionAt(height abi.ChainEpoch) (uint64, error) { - // find index of first version that is not yet active (or len(versions) if they are all active. - idx := sort.Search(len(pvt.versions), func(i int) bool { - return height < pvt.versions[i].EffectiveAt - }) - - // providing a height less than the first version is an error - if idx == 0 { - if len(pvt.versions) == 0 { - return 0, errors.Errorf("no protocol versions") - } - return 0, errors.Errorf("chain height %d is less than effective start of first version %d", - height, pvt.versions[0].EffectiveAt) - } - - // return the version just prior to the index to get the last version in effect. 
- return pvt.versions[idx-1].Version, nil -} - -// ProtocolVersionTableBuilder constructs a protocol version table -type ProtocolVersionTableBuilder struct { - network string - versions protocolVersionsByEffectiveAt -} - -// NewProtocolVersionTableBuilder creates a new ProtocolVersionTable that only tracks versions for the given network -func NewProtocolVersionTableBuilder(network string) *ProtocolVersionTableBuilder { - // ignore anything following a dash (including the dash) - networkPrefix := strings.Split(network, "-")[0] - - return &ProtocolVersionTableBuilder{ - network: networkPrefix, - versions: []protocolVersion{}, - } -} - -// Add configures an version for a network. If the network doesn't match the current network, this version will be ignored. -func (pvtb *ProtocolVersionTableBuilder) Add(network string, version uint64, effectiveAt abi.ChainEpoch) *ProtocolVersionTableBuilder { - // ignore version if not part of our network - if network != pvtb.network { - return pvtb - } - - protocolVersion := protocolVersion{ - Version: version, - EffectiveAt: effectiveAt, - } - - pvtb.versions = append(pvtb.versions, protocolVersion) - - return pvtb -} - -// Build constructs a protocol version table populated with properly sorted versions. -// It is an error to build whose first version is not at block height 0. 
-func (pvtb *ProtocolVersionTableBuilder) Build() (*ProtocolVersionTable, error) { - // sort versions in place - sort.Sort(pvtb.versions) - - // copy to insure an Add doesn't alter the table - versions := make([]protocolVersion, len(pvtb.versions)) - copy(versions, pvtb.versions) - - // enforce that the current network has an entry at block height zero - if len(versions) == 0 { - return nil, errors.Errorf("no protocol versions specified for network %s", pvtb.network) - } - if versions[0].EffectiveAt != abi.ChainEpoch(0) { - return nil, errors.Errorf("no protocol version at genesis for network %s", pvtb.network) - } - - // enforce that version numbers increase monotonically with effective at - lastVersion := versions[0].Version - for _, version := range versions[1:] { - if version.Version <= lastVersion { - return nil, errors.Errorf("protocol version %d effective at %d is not greater than previous version, %d", - version.Version, version.EffectiveAt, lastVersion) - } - lastVersion = version.Version - } - - return &ProtocolVersionTable{versions: versions}, nil -} - -// sort methods for protocolVersion slice -type protocolVersionsByEffectiveAt []protocolVersion - -func (a protocolVersionsByEffectiveAt) Len() int { return len(a) } -func (a protocolVersionsByEffectiveAt) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a protocolVersionsByEffectiveAt) Less(i, j int) bool { - if a[i].EffectiveAt == a[j].EffectiveAt { - return a[i].Version < a[j].Version - } - return a[i].EffectiveAt < a[j].EffectiveAt -} diff --git a/internal/pkg/version/protocol_version_table_test.go b/internal/pkg/version/protocol_version_table_test.go deleted file mode 100644 index 62598ab09d..0000000000 --- a/internal/pkg/version/protocol_version_table_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package version - -import ( - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf 
"github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - - "testing" -) - -const network = "testnetwork" - -func TestUpgradeTable(t *testing.T) { - tf.UnitTest(t) - - t.Run("add single upgrade", func(t *testing.T) { - version := uint64(3) - put, err := NewProtocolVersionTableBuilder(network). - Add(network, version, abi.ChainEpoch(0)). - Build() - require.NoError(t, err) - - versionAtHeight, err := put.VersionAt(abi.ChainEpoch(0)) - require.NoError(t, err) - - assert.Equal(t, version, versionAtHeight) - - versionAtHeight, err = put.VersionAt(abi.ChainEpoch(1000)) - require.NoError(t, err) - - assert.Equal(t, version, versionAtHeight) - }) - - t.Run("finds correct version", func(t *testing.T) { - // add out of order and expect table to sort - put, err := NewProtocolVersionTableBuilder(network). - Add(network, 2, abi.ChainEpoch(20)). - Add(network, 4, abi.ChainEpoch(40)). - Add(network, 3, abi.ChainEpoch(30)). - Add(network, 1, abi.ChainEpoch(10)). - Add(network, 0, abi.ChainEpoch(0)). - Build() - require.NoError(t, err) - - for i := uint64(0); i < 50; i++ { - version, err := put.VersionAt(abi.ChainEpoch(i)) - require.NoError(t, err) - - assert.Equal(t, i/10, version) - } - }) - - t.Run("constructing a table with no versions is an error", func(t *testing.T) { - _, err := NewProtocolVersionTableBuilder(network).Build() - require.Error(t, err) - assert.Contains(t, err.Error(), "no protocol versions specified for network testnetwork") - }) - - t.Run("constructing a table with no version at genesis is an error", func(t *testing.T) { - _, err := NewProtocolVersionTableBuilder(network). - Add(network, 2, abi.ChainEpoch(20)). - Build() - require.Error(t, err) - assert.Contains(t, err.Error(), "no protocol version at genesis for network testnetwork") - }) - - t.Run("ignores versions from wrong network", func(t *testing.T) { - otherNetwork := "othernetwork" - - put, err := NewProtocolVersionTableBuilder(network). - Add(network, 0, abi.ChainEpoch(0)). 
- Add(otherNetwork, 1, abi.ChainEpoch(10)). - Add(otherNetwork, 2, abi.ChainEpoch(20)). - Add(network, 3, abi.ChainEpoch(30)). - Add(otherNetwork, 4, abi.ChainEpoch(40)). - Build() - require.NoError(t, err) - - for i := uint64(0); i < 50; i++ { - version, err := put.VersionAt(abi.ChainEpoch(i)) - require.NoError(t, err) - - expectedVersion := uint64(0) - if i >= 30 { - expectedVersion = 3 - } - assert.Equal(t, expectedVersion, version) - } - }) - - t.Run("version table name can be a prefix of network name", func(t *testing.T) { - network := "localnet-270a8688-1b23-4508-b675-444cb1e6f05d" - versionName := "localnet" - - put, err := NewProtocolVersionTableBuilder(network). - Add(versionName, 0, abi.ChainEpoch(0)). - Add(versionName, 1, abi.ChainEpoch(10)). - Build() - require.NoError(t, err) - - for i := uint64(0); i < 20; i++ { - version, err := put.VersionAt(abi.ChainEpoch(i)) - require.NoError(t, err) - - expectedVersion := uint64(0) - if i >= 10 { - expectedVersion = 1 - } - assert.Equal(t, expectedVersion, version) - } - }) - - t.Run("does not permit the same version number twice", func(t *testing.T) { - _, err := NewProtocolVersionTableBuilder(network). - Add(network, 0, abi.ChainEpoch(0)). - Add(network, 1, abi.ChainEpoch(10)). - Add(network, 2, abi.ChainEpoch(20)). - Add(network, 2, abi.ChainEpoch(30)). // wrong - Add(network, 4, abi.ChainEpoch(40)). - Build() - require.Error(t, err) - assert.Contains(t, err.Error(), "protocol version 2 effective at 30 is not greater than previous version, 2") - }) - - t.Run("does not permit version numbers to decline", func(t *testing.T) { - _, err := NewProtocolVersionTableBuilder(network). - Add(network, 4, abi.ChainEpoch(0)). - Add(network, 3, abi.ChainEpoch(10)). - Add(network, 2, abi.ChainEpoch(20)). - Add(network, 1, abi.ChainEpoch(30)). - Add(network, 0, abi.ChainEpoch(40)). 
- Build() - require.Error(t, err) - assert.Contains(t, err.Error(), "protocol version 3 effective at 10 is not greater than previous version, 4") - }) -} diff --git a/internal/pkg/version/protocol_versions.go b/internal/pkg/version/protocol_versions.go deleted file mode 100644 index af63897a7e..0000000000 --- a/internal/pkg/version/protocol_versions.go +++ /dev/null @@ -1,24 +0,0 @@ -package version - -import ( - "github.com/filecoin-project/specs-actors/actors/abi" -) - -// TEST is the network name for internal tests -const TEST = "gfctest" - -// Protocol0 is the first protocol version -const Protocol0 = 0 - -// ConfigureProtocolVersions configures all protocol upgrades for all known networks. -// TODO: support arbitrary network names at "latest" protocol version so that only coordinated -// network upgrades need to be represented here. See #3491. -func ConfigureProtocolVersions(network string) (*ProtocolVersionTable, error) { - return NewProtocolVersionTableBuilder(network). - Add("alpha2", Protocol0, abi.ChainEpoch(0)). - Add("interop", Protocol0, abi.ChainEpoch(0)). - Add("localnet", Protocol0, abi.ChainEpoch(0)). - Add("testnet", Protocol0, abi.ChainEpoch(0)). - Add(TEST, Protocol0, abi.ChainEpoch(0)). - Build() -} diff --git a/internal/pkg/vm/actor/actor.go b/internal/pkg/vm/actor/actor.go deleted file mode 100644 index b9fcfe5a64..0000000000 --- a/internal/pkg/vm/actor/actor.go +++ /dev/null @@ -1,108 +0,0 @@ -// Package actor implements tooling to write and manipulate actors in go. -package actor - -import ( - "fmt" - "io" - "io/ioutil" - - fxamackercbor "github.com/fxamacker/cbor/v2" - "github.com/ipfs/go-cid" - - "github.com/pkg/errors" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" -) - -// DefaultGasCost is default gas cost for the actor calls. 
-const DefaultGasCost = 100 - -// Actor is the central abstraction of entities in the system. -// -// Both individual accounts, as well as contracts (user & system level) are -// represented as actors. An actor has the following core functionality implemented on a system level: -// - track a Filecoin balance, using the `Balance` field -// - execute code stored in the `Code` field -// - read & write memory -// - replay protection, using the `Nonce` field -// -// Value sent to a non-existent address will be tracked as an empty actor that has a Balance but -// nil Code and Memory. You must nil check Code cids before comparing them. -// -// More specific capabilities for individual accounts or contract specific must be implemented -// inside the code. -// -// Not safe for concurrent access. -type Actor struct { - _ struct{} `cbor:",toarray"` - // Code is a CID of the VM code for this actor's implementation (or a constant for actors implemented in Go code). - // Code may be nil for an uninitialized actor (which exists because it has received a balance). - Code e.Cid - // Head is the CID of the root of the actor's state tree. - Head e.Cid - // CallSeqNum is the number expected on the next message from this actor. - // Messages are processed in strict, contiguous order. - CallSeqNum uint64 - // Balance is the amount of attoFIL in the actor's account. - Balance abi.TokenAmount -} - -// NewActor constructs a new actor. -func NewActor(code cid.Cid, balance abi.TokenAmount, head cid.Cid) *Actor { - return &Actor{ - Code: e.NewCid(code), - CallSeqNum: 0, - Balance: balance, - Head: e.NewCid(head), - } -} - -// Empty tests whether the actor's code is defined. -func (a *Actor) Empty() bool { - return !a.Code.Defined() -} - -// IncrementSeqNum increments the seq number. -func (a *Actor) IncrementSeqNum() { - a.CallSeqNum = a.CallSeqNum + 1 -} - -// UnmarshalCBOR must implement cbg.Unmarshaller to insert this into a hamt. 
-func (a *Actor) UnmarshalCBOR(r io.Reader) error { - bs, err := ioutil.ReadAll(r) - if err != nil { - return err - } - return fxamackercbor.Unmarshal(bs, a) -} - -// MarshalCBOR must implement cbg.Marshaller to insert this into a hamt. -func (a *Actor) MarshalCBOR(w io.Writer) error { - bs, err := fxamackercbor.Marshal(a) - if err != nil { - return err - } - _, err = w.Write(bs) - return err -} - -// Format implements fmt.Formatter. -func (a *Actor) Format(f fmt.State, c rune) { - f.Write([]byte(fmt.Sprintf("<%s (%p); balance: %v; nonce: %d>", builtin.ActorNameByCode(a.Code.Cid), a, a.Balance, a.CallSeqNum))) // nolint: errcheck -} - -// NextNonce returns the nonce value for an account actor, which is the nonce expected on the -// next message to be sent from that actor. -// Returns zero for a nil actor, which is the value expected on the first message. -func NextNonce(actor *Actor) (uint64, error) { - if actor == nil { - return 0, nil - } - if !(actor.Empty() || actor.Code.Equals(builtin.AccountActorCodeID)) { - return 0, errors.New("next nonce only defined for account or empty actors") - } - return actor.CallSeqNum, nil -} diff --git a/internal/pkg/vm/actor/actor_test.go b/internal/pkg/vm/actor/actor_test.go deleted file mode 100644 index e9ca1e0e6f..0000000000 --- a/internal/pkg/vm/actor/actor_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package actor_test - -import ( - "fmt" - "testing" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - - . 
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestActorFormat(t *testing.T) { - tf.UnitTest(t) - - accountActor := NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(5), cid.Undef) - - formatted := fmt.Sprintf("%v", accountActor) - assert.Contains(t, formatted, "account") - assert.Contains(t, formatted, "balance: 5") - assert.Contains(t, formatted, "nonce: 0") - - minerActor := NewActor(builtin.StorageMinerActorCodeID, abi.NewTokenAmount(5), cid.Undef) - formatted = fmt.Sprintf("%v", minerActor) - assert.Contains(t, formatted, "miner") - - storageMarketActor := NewActor(builtin.StorageMarketActorCodeID, abi.NewTokenAmount(5), cid.Undef) - formatted = fmt.Sprintf("%v", storageMarketActor) - assert.Contains(t, formatted, "market") -} diff --git a/internal/pkg/vm/actor/builtin/default.go b/internal/pkg/vm/actor/builtin/default.go deleted file mode 100644 index e19696d334..0000000000 --- a/internal/pkg/vm/actor/builtin/default.go +++ /dev/null @@ -1,35 +0,0 @@ -package builtin - -import ( - specs "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/account" - "github.com/filecoin-project/specs-actors/actors/builtin/cron" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/reward" - "github.com/filecoin-project/specs-actors/actors/builtin/system" - 
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/dispatch" -) - -// DefaultActors is list of all actors that ship with Filecoin. -// They are indexed by their CID. -// Dragons: add the rest of the actors -var DefaultActors = dispatch.NewBuilder(). - Add(specs.InitActorCodeID, &init_.Actor{}). - Add(specs.AccountActorCodeID, &account.Actor{}). - Add(specs.MultisigActorCodeID, &multisig.Actor{}). - Add(specs.PaymentChannelActorCodeID, &paych.Actor{}). - Add(specs.StoragePowerActorCodeID, &power.Actor{}). - Add(specs.StorageMarketActorCodeID, &market.Actor{}). - Add(specs.StorageMinerActorCodeID, &miner.Actor{}). - Add(specs.SystemActorCodeID, &system.Actor{}). - Add(specs.RewardActorCodeID, &reward.Actor{}). - Add(specs.CronActorCodeID, &cron.Actor{}). - Add(specs.VerifiedRegistryActorCodeID, &verifreg.Actor{}). - Build() diff --git a/internal/pkg/vm/address/testing.go b/internal/pkg/vm/address/testing.go deleted file mode 100644 index 4e97e7cd5c..0000000000 --- a/internal/pkg/vm/address/testing.go +++ /dev/null @@ -1,31 +0,0 @@ -package address - -import ( - "fmt" - "testing" - - "github.com/filecoin-project/go-address" -) - -func RequireIDAddress(t *testing.T, i int) address.Address { - a, err := address.NewIDAddress(uint64(i)) - if err != nil { - t.Fatalf("failed to make address: %v", err) - } - return a -} - -// NewForTestGetter returns a closure that returns an address unique to that invocation. -// The address is unique wrt the closure returned, not globally. 
-func NewForTestGetter() func() address.Address { - i := 0 - return func() address.Address { - s := fmt.Sprintf("address%d", i) - i++ - newAddr, err := address.NewSecp256k1Address([]byte(s)) - if err != nil { - panic(err) - } - return newAddr - } -} diff --git a/internal/pkg/vm/gas/gas.go b/internal/pkg/vm/gas/gas.go deleted file mode 100644 index 6436f8994e..0000000000 --- a/internal/pkg/vm/gas/gas.go +++ /dev/null @@ -1,32 +0,0 @@ -package gas - -import ( - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" -) - -// Unit is the unit of gas. -// This type is signed by design; it is possible for operations to consume negative gas. -type Unit int64 - -// Zero is the zero value for Gas. -var Zero = NewGas(0) - -// SystemGasLimit is the maximum gas for implicit system messages. -var SystemGasLimit = NewGas(1000000000000000000) // 10^18 - -// NewGas creates a gas value object. -func NewGas(value int64) Unit { - return Unit(value) -} - -// AsBigInt returns the internal value as a `big.Int` -func (gas Unit) AsBigInt() big.Int { - return big.NewInt(int64(gas)) -} - -// ToTokens returns the cost of the gas given the price. -func (gas Unit) ToTokens(price abi.TokenAmount) abi.TokenAmount { - // cost = gas * price - return big.Mul(gas.AsBigInt(), price) -} diff --git a/internal/pkg/vm/internal/dispatch/dispatch.go b/internal/pkg/vm/internal/dispatch/dispatch.go deleted file mode 100644 index e38b79d539..0000000000 --- a/internal/pkg/vm/internal/dispatch/dispatch.go +++ /dev/null @@ -1,114 +0,0 @@ -package dispatch - -import ( - "fmt" - "reflect" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/ipfs/go-cid" -) - -// Actor is the interface all actors have to implement. -type Actor interface { - // Exports has a list of method available on the actor. 
- Exports() []interface{} -} - -// Dispatcher allows for dynamic method dispatching on an actor. -type Dispatcher interface { - // Dispatch will call the given method on the actor and pass the arguments. - // - // - The `ctx` argument will be coerced to the type the method expects in its first argument. - // - If arg1 is `[]byte`, it will attempt to decode the value based on second argument in the target method. - Dispatch(method abi.MethodNum, ctx interface{}, arg1 interface{}) (interface{}, error) - // Signature is a helper function that returns the signature for a given method. - // - // Note: This is intended to be used by tests and tools. - Signature(method abi.MethodNum) (MethodSignature, error) -} - -type actorDispatcher struct { - code cid.Cid - actor Actor -} - -type method interface { - Call(in []reflect.Value) []reflect.Value - Type() reflect.Type -} - -var _ Dispatcher = (*actorDispatcher)(nil) - -// Dispatch implements `Dispatcher`. -func (d *actorDispatcher) Dispatch(methodNum abi.MethodNum, ctx interface{}, arg1 interface{}) (interface{}, error) { - // get method signature - m, err := d.signature(methodNum) - if err != nil { - return nil, err - } - - // build args to pass to the method - args := []reflect.Value{ - // the ctx will be automatically coerced - reflect.ValueOf(ctx), - } - - // Dragons: simplify this to arginterface - if arg1 == nil { - args = append(args, m.ArgNil()) - } else if raw, ok := arg1.([]byte); ok { - obj, err := m.ArgInterface(raw) - if err != nil { - return nil, err - } - args = append(args, reflect.ValueOf(obj)) - } else if raw, ok := arg1.(runtime.CBORBytes); ok { - obj, err := m.ArgInterface(raw) - if err != nil { - return nil, err - } - args = append(args, reflect.ValueOf(obj)) - } else { - args = append(args, reflect.ValueOf(arg1)) - } - - // invoke the method - out := m.method.Call(args) - - // Note: we only support single objects being returned - if len(out) > 1 { - return nil, fmt.Errorf("actor method returned more 
than one object. method: %d, code: %s", methodNum, d.code) - } - - // method returns unit - // Note: we need to check for `IsNill()` here because Go doesnt work if you do `== nil` on the interface - if len(out) == 0 || (out[0].Kind() != reflect.Struct && out[0].IsNil()) { - return nil, nil - } - - // forward return - return out[0].Interface(), nil -} - -func (d *actorDispatcher) signature(methodID abi.MethodNum) (*methodSignature, error) { - exports := d.actor.Exports() - - // get method entry - methodIdx := (uint64)(methodID) - if len(exports) < (int)(methodIdx) { - return nil, fmt.Errorf("Method undefined. method: %d, code: %s", methodID, d.code) - } - entry := exports[methodIdx] - if entry == nil { - return nil, fmt.Errorf("Method undefined. method: %d, code: %s", methodID, d.code) - } - - ventry := reflect.ValueOf(entry) - return &methodSignature{method: ventry}, nil -} - -// Signature implements `Dispatcher`. -func (d *actorDispatcher) Signature(methodNum abi.MethodNum) (MethodSignature, error) { - return d.signature(methodNum) -} diff --git a/internal/pkg/vm/internal/dispatch/loader.go b/internal/pkg/vm/internal/dispatch/loader.go deleted file mode 100644 index 860734eeec..0000000000 --- a/internal/pkg/vm/internal/dispatch/loader.go +++ /dev/null @@ -1,42 +0,0 @@ -package dispatch - -import ( - "fmt" - - "github.com/ipfs/go-cid" -) - -// CodeLoader allows you to load an actor's code based on its id an epoch. -type CodeLoader struct { - actors map[cid.Cid]Actor -} - -// GetActorImpl returns executable code for an actor by code cid at a specific protocol version -func (cl CodeLoader) GetActorImpl(code cid.Cid) (Dispatcher, error) { - actor, ok := cl.actors[code] - if !ok { - return nil, fmt.Errorf("Actor code not found. code: %s", code) - } - return &actorDispatcher{code: code, actor: actor}, nil -} - -// CodeLoaderBuilder helps you build a CodeLoader. 
-type CodeLoaderBuilder struct { - actors map[cid.Cid]Actor -} - -// NewBuilder creates a builder to generate a builtin.Actor data structure -func NewBuilder() *CodeLoaderBuilder { - return &CodeLoaderBuilder{actors: map[cid.Cid]Actor{}} -} - -// Add lets you add an actor dispatch table for a given version. -func (b *CodeLoaderBuilder) Add(code cid.Cid, actor Actor) *CodeLoaderBuilder { - b.actors[code] = actor - return b -} - -// Build builds the code loader. -func (b *CodeLoaderBuilder) Build() CodeLoader { - return CodeLoader{actors: b.actors} -} diff --git a/internal/pkg/vm/internal/dispatch/signature.go b/internal/pkg/vm/internal/dispatch/signature.go deleted file mode 100644 index 5bb59912f4..0000000000 --- a/internal/pkg/vm/internal/dispatch/signature.go +++ /dev/null @@ -1,70 +0,0 @@ -package dispatch - -import ( - "bytes" - "reflect" - - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/pkg/errors" -) - -// MethodSignature wraps a specific method and allows you to encode/decodes input/output bytes into concrete types. -type MethodSignature interface { - // ArgNil returns a nil interface for the typed argument expected by the actor method. - ArgNil() reflect.Value - // ArgInterface returns the typed argument expected by the actor method. - ArgInterface(argBytes []byte) (interface{}, error) - // ReturnInterface returns the methods typed return. 
- ReturnInterface(returnBytes []byte) (interface{}, error) -} - -type methodSignature struct { - method method -} - -var _ MethodSignature = (*methodSignature)(nil) - -func (ms *methodSignature) ArgNil() reflect.Value { - t := ms.method.Type().In(1) - v := reflect.New(t) - return v.Elem() -} - -func (ms *methodSignature) ArgInterface(argBytes []byte) (interface{}, error) { - // decode arg1 (this is the payload for the actor method) - t := ms.method.Type().In(1) - v := reflect.New(t) - - // This would be better fixed in then encoding library. - obj := v.Elem().Interface() - if _, ok := obj.(runtime.CBORUnmarshaler); ok { - buf := bytes.NewBuffer(argBytes) - auxv := reflect.New(t.Elem()) - obj = auxv.Interface() - - unmarsh := obj.(runtime.CBORUnmarshaler) - if err := unmarsh.UnmarshalCBOR(buf); err != nil { - return nil, err - } - return unmarsh, nil - } - - if err := encoding.Decode(argBytes, v.Interface()); err != nil { - return nil, errors.Wrap(err, "failed to decode bytes as method argument") - } - - // dereference the extra pointer created by `reflect.New()` - return v.Elem().Interface(), nil -} - -func (ms *methodSignature) ReturnInterface(returnBytes []byte) (interface{}, error) { - // decode arg1 (this is the payload for the actor method) - t := ms.method.Type().Out(0) - v := reflect.New(t) - if err := encoding.Decode(returnBytes, v.Interface()); err != nil { - return nil, errors.Wrap(err, "failed to decode return bytes for method") - } - - return v.Interface(), nil -} diff --git a/internal/pkg/vm/internal/dispatch/signature_test.go b/internal/pkg/vm/internal/dispatch/signature_test.go deleted file mode 100644 index bd1c05105e..0000000000 --- a/internal/pkg/vm/internal/dispatch/signature_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package dispatch - -import ( - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - tf 
"github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -type fakeActor struct{} - -type SimpleParams struct { - Name string -} - -type SimpleReturn struct { - someValue uint64 -} - -func (*fakeActor) simpleMethod(ctx interface{}, params SimpleParams) SimpleReturn { - return SimpleReturn{someValue: 3} -} - -func (*fakeActor) pointerParam(ctx interface{}, params *SimpleParams) SimpleReturn { - return SimpleReturn{someValue: 3} -} - -func (*fakeActor) pointerReturn(ctx interface{}, params SimpleParams) *SimpleReturn { - return &SimpleReturn{someValue: 3} -} - -func (*fakeActor) noParams(ctx interface{}) SimpleReturn { - return SimpleReturn{someValue: 3} -} - -func (*fakeActor) noReturn(ctx interface{}, params *SimpleParams) { - /* empty */ -} - -func (*fakeActor) minimalist(ctx interface{}) { - /* empty */ -} - -func TestArgInterface(t *testing.T) { - tf.UnitTest(t) - - fa := fakeActor{} - - params := SimpleParams{Name: "tester"} - setup := func(method interface{}) (methodSignature, []byte) { - s := methodSignature{method: reflect.ValueOf(method)} - - encodedParams, err := encoding.Encode(params) - assert.NoError(t, err) - - return s, encodedParams - } - - assertArgInterface := func(s methodSignature, encodedParams []byte) interface{} { - ret, err := s.ArgInterface(encodedParams) - assert.NoError(t, err) - assert.NotNil(t, ret) - return ret - } - - t.Run("simpleMethod", func(t *testing.T) { - s, encodedParams := setup(fa.simpleMethod) - - ret := assertArgInterface(s, encodedParams) - - v, ok := ret.(SimpleParams) - assert.True(t, ok) - assert.Equal(t, params.Name, v.Name) - }) - - t.Run("pointerParam", func(t *testing.T) { - s, encodedParams := setup(fa.pointerParam) - - ret := assertArgInterface(s, encodedParams) - - v, ok := ret.(*SimpleParams) - assert.True(t, ok) - assert.Equal(t, params.Name, v.Name) - }) - - t.Run("noParams", func(t *testing.T) { - // Dragons: not supported, must panic - }) -} diff --git 
a/internal/pkg/vm/internal/errors/vminternal.go b/internal/pkg/vm/internal/errors/vminternal.go deleted file mode 100644 index 3ca7522e51..0000000000 --- a/internal/pkg/vm/internal/errors/vminternal.go +++ /dev/null @@ -1,10 +0,0 @@ -// Package internal has all the things vm and only vm need. -// -// This contents can be slowly placed on the vm internal. -package internal - -const ( - // ErrInsufficientGas indicates that an actor did not have sufficient gas to run a message - // Dragons: remove when new actors come in - ErrInsufficientGas = 36 -) diff --git a/internal/pkg/vm/internal/gascost/gascost.go b/internal/pkg/vm/internal/gascost/gascost.go deleted file mode 100644 index fc3245b5ec..0000000000 --- a/internal/pkg/vm/internal/gascost/gascost.go +++ /dev/null @@ -1,89 +0,0 @@ -package gascost - -import ( - "fmt" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/message" - "github.com/filecoin-project/specs-actors/actors/abi" -) - -// Pricelist provides prices for operations in the VM. -// -// Note: this interface should be APPEND ONLY since last chain checkpoint -type Pricelist interface { - // OnChainMessage returns the gas used for storing a message of a given size in the chain. - OnChainMessage(msgSize int) gas.Unit - // OnChainReturnValue returns the gas used for storing the response of a message in the chain. - OnChainReturnValue(receipt *message.Receipt) gas.Unit - - // OnMethodInvocation returns the gas used when invoking a method. 
- OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) gas.Unit - - // OnIpldGet returns the gas used for storing an object - OnIpldGet(dataSize int) gas.Unit - // OnIpldPut returns the gas used for storing an object - OnIpldPut(dataSize int) gas.Unit - - // OnCreateActor returns the gas used for creating an actor - OnCreateActor() gas.Unit - // OnDeleteActor returns the gas used for deleting an actor - OnDeleteActor() gas.Unit - - OnVerifySignature(sigType crypto.SigType, planTextSize int) (gas.Unit, error) - OnHashing(dataSize int) gas.Unit - OnComputeUnsealedSectorCid(proofType abi.RegisteredProof, pieces *[]abi.PieceInfo) gas.Unit - OnVerifySeal(info abi.SealVerifyInfo) gas.Unit - OnVerifyPoSt(info abi.WindowPoStVerifyInfo) gas.Unit - OnVerifyConsensusFault() gas.Unit -} - -var prices = map[abi.ChainEpoch]Pricelist{ - abi.ChainEpoch(0): &pricelistV0{ - // These message base/byte values must match those in message validation. - onChainMessageBase: gas.Zero, - onChainMessagePerByte: gas.NewGas(2), - onChainReturnValuePerByte: gas.NewGas(8), - sendBase: gas.NewGas(5), - sendTransferFunds: gas.NewGas(5), - sendInvokeMethod: gas.NewGas(10), - ipldGetBase: gas.NewGas(10), - ipldGetPerByte: gas.NewGas(1), - ipldPutBase: gas.NewGas(20), - ipldPutPerByte: gas.NewGas(2), - createActorBase: gas.NewGas(40), // IPLD put + 20 - createActorExtra: gas.NewGas(500), - deleteActor: gas.NewGas(-500), // -createActorExtra - // Dragons: this cost is not persistable, create a LinearCost{a,b} struct that has a `.Cost(x) -> ax + b` - verifySignature: map[crypto.SigType]func(gas.Unit) gas.Unit{ - crypto.SigTypeBLS: func(x gas.Unit) gas.Unit { return gas.NewGas(3)*x + gas.NewGas(2) }, - crypto.SigTypeSecp256k1: func(x gas.Unit) gas.Unit { return gas.NewGas(3)*x + gas.NewGas(2) }, - }, - hashingBase: gas.NewGas(5), - hashingPerByte: gas.NewGas(2), - computeUnsealedSectorCidBase: gas.NewGas(100), - verifySealBase: gas.NewGas(2000), - verifyPostBase: gas.NewGas(700), - 
verifyConsensusFault: gas.NewGas(10), - }, -} - -// PricelistByEpoch finds the latest prices for the given epoch -func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { - // since we are storing the prices as map or epoch to price - // we need to get the price with the highest epoch that is lower or equal to the `epoch` arg - bestEpoch := abi.ChainEpoch(0) - bestPrice := prices[bestEpoch] - for e, pl := range prices { - // if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch` - if e > bestEpoch && e <= epoch { - bestEpoch = e - bestPrice = pl - } - } - if bestPrice == nil { - panic(fmt.Sprintf("bad setup: no gas prices available for epoch %d", epoch)) - } - return bestPrice -} diff --git a/internal/pkg/vm/internal/gascost/pricelistV0.go b/internal/pkg/vm/internal/gascost/pricelistV0.go deleted file mode 100644 index 657497f21a..0000000000 --- a/internal/pkg/vm/internal/gascost/pricelistV0.go +++ /dev/null @@ -1,173 +0,0 @@ -package gascost - -import ( - "fmt" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/message" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" -) - -type pricelistV0 struct { - /////////////////////////////////////////////////////////////////////////// - // System operations - /////////////////////////////////////////////////////////////////////////// - - // Gas cost charged to the originator of an on-chain message (regardless of - // whether it succeeds or fails in application) is given by: - // OnChainMessageBase + len(serialized message)*OnChainMessagePerByte - // Together, these account for the cost of message propagation and validation, - // up to but excluding any actual processing by the VM. - // This is the cost a block producer burns when including an invalid message. 
- onChainMessageBase gas.Unit - onChainMessagePerByte gas.Unit - - // Gas cost charged to the originator of a non-nil return value produced - // by an on-chain message is given by: - // len(return value)*OnChainReturnValuePerByte - onChainReturnValuePerByte gas.Unit - - // Gas cost for any message send execution(including the top-level one - // initiated by an on-chain message). - // This accounts for the cost of loading sender and receiver actors and - // (for top-level messages) incrementing the sender's sequence number. - // Load and store of actor sub-state is charged separately. - sendBase gas.Unit - - // Gas cost charged, in addition to SendBase, if a message send - // is accompanied by any nonzero currency amount. - // Accounts for writing receiver's new balance (the sender's state is - // already accounted for). - sendTransferFunds gas.Unit - - // Gas cost charged, in addition to SendBase, if a message invokes - // a method on the receiver. - // Accounts for the cost of loading receiver code and method dispatch. - sendInvokeMethod gas.Unit - - // Gas cost (Base + len*PerByte) for any Get operation to the IPLD store - // in the runtime VM context. - ipldGetBase gas.Unit - ipldGetPerByte gas.Unit - - // Gas cost (Base + len*PerByte) for any Put operation to the IPLD store - // in the runtime VM context. - // - // Note: these costs should be significantly higher than the costs for Get - // operations, since they reflect not only serialization/deserialization - // but also persistent storage of chain data. - ipldPutBase gas.Unit - ipldPutPerByte gas.Unit - - // Gas cost for creating a new actor (via InitActor's Exec method). - // - // Note: this costs assume that the extra will be partially or totally refunded while - // the base is covering for the put. - createActorBase gas.Unit - createActorExtra gas.Unit - - // Gas cost for deleting an actor. - // - // Note: this partially refunds the create cost to incentivise the deletion of the actors. 
- deleteActor gas.Unit - - verifySignature map[crypto.SigType]func(gas.Unit) gas.Unit - - hashingBase gas.Unit - hashingPerByte gas.Unit - - computeUnsealedSectorCidBase gas.Unit - verifySealBase gas.Unit - verifyPostBase gas.Unit - verifyConsensusFault gas.Unit -} - -var _ Pricelist = (*pricelistV0)(nil) - -// OnChainMessage returns the gas used for storing a message of a given size in the chain. -func (pl *pricelistV0) OnChainMessage(msgSize int) gas.Unit { - return pl.onChainMessageBase + pl.onChainMessagePerByte*gas.Unit(msgSize) -} - -// OnChainReturnValue returns the gas used for storing the response of a message in the chain. -func (pl *pricelistV0) OnChainReturnValue(receipt *message.Receipt) gas.Unit { - return gas.Unit(len(receipt.ReturnValue)) * pl.onChainReturnValuePerByte -} - -// OnMethodInvocation returns the gas used when invoking a method. -func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) gas.Unit { - ret := pl.sendBase - if value != abi.NewTokenAmount(0) { - ret += pl.sendTransferFunds - } - if methodNum != builtin.MethodSend { - ret += pl.sendInvokeMethod - } - return ret -} - -// OnIpldGet returns the gas used for storing an object -func (pl *pricelistV0) OnIpldGet(dataSize int) gas.Unit { - return pl.ipldGetBase + gas.Unit(dataSize)*pl.ipldGetPerByte -} - -// OnIpldPut returns the gas used for storing an object -func (pl *pricelistV0) OnIpldPut(dataSize int) gas.Unit { - return pl.ipldPutBase + gas.Unit(dataSize)*pl.ipldPutPerByte -} - -// OnCreateActor returns the gas used for creating an actor -func (pl *pricelistV0) OnCreateActor() gas.Unit { - return pl.createActorBase + pl.createActorExtra -} - -// OnDeleteActor returns the gas used for deleting an actor -func (pl *pricelistV0) OnDeleteActor() gas.Unit { - return pl.deleteActor -} - -// OnVerifySignature -func (pl *pricelistV0) OnVerifySignature(sigType crypto.SigType, planTextSize int) (gas.Unit, error) { - costFn, ok := pl.verifySignature[sigType] 
- if !ok { - return 0, fmt.Errorf("cost function for signature type %d not supported", sigType) - } - return costFn(gas.Unit(planTextSize)), nil -} - -// OnHashing -func (pl *pricelistV0) OnHashing(dataSize int) gas.Unit { - return pl.hashingBase + gas.Unit(dataSize)*pl.hashingPerByte -} - -// OnComputeUnsealedSectorCid -func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredProof, pieces *[]abi.PieceInfo) gas.Unit { - // TODO: this needs more cost tunning, check with @lotus - return pl.computeUnsealedSectorCidBase -} - -// OnVerifySeal -func (pl *pricelistV0) OnVerifySeal(info abi.SealVerifyInfo) gas.Unit { - // TODO: this needs more cost tunning, check with @lotus - return pl.verifySealBase -} - -// OnVerifyWinningPoSt -func (pl *pricelistV0) OnVerifyWinningPoSt(info abi.WinningPoStVerifyInfo) gas.Unit { - // TODO: this needs more cost tunning, check with @lotus - return pl.verifyPostBase -} - -// OnVerifyPoSt -func (pl *pricelistV0) OnVerifyPoSt(info abi.WindowPoStVerifyInfo) gas.Unit { - // TODO: this needs more cost tunning, check with @lotus - return pl.verifyPostBase -} - -// OnVerifyConsensusFault -func (pl *pricelistV0) OnVerifyConsensusFault() gas.Unit { - return pl.verifyConsensusFault -} diff --git a/internal/pkg/vm/internal/interpreter/interpreter.go b/internal/pkg/vm/internal/interpreter/interpreter.go deleted file mode 100644 index a8040588ab..0000000000 --- a/internal/pkg/vm/internal/interpreter/interpreter.go +++ /dev/null @@ -1,26 +0,0 @@ -package interpreter - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/message" -) - -// VMInterpreter orchestrates the execution of messages from a tipset on that tipset’s 
parent state. -type VMInterpreter interface { - // ApplyTipSetMessages applies all the messages in a tipset. - // - // Note: any message processing error will be present as an `ExitCode` in the `MessageReceipt`. - ApplyTipSetMessages(blocks []BlockMessagesInfo, head block.TipSetKey, epoch abi.ChainEpoch, rnd crypto.RandomnessSource) ([]message.Receipt, error) -} - -// BlockMessagesInfo contains messages for one block in a tipset. -type BlockMessagesInfo struct { - BLSMessages []*types.UnsignedMessage - SECPMessages []*types.SignedMessage - Miner address.Address -} diff --git a/internal/pkg/vm/internal/message/result.go b/internal/pkg/vm/internal/message/result.go deleted file mode 100644 index d8e830b961..0000000000 --- a/internal/pkg/vm/internal/message/result.go +++ /dev/null @@ -1,60 +0,0 @@ -package message - -import ( - "encoding/json" - "fmt" - - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// Receipt is what is returned by executing a message on the vm. -type Receipt struct { - // control field for encoding struct as an array - _ struct{} `cbor:",toarray"` - ExitCode exitcode.ExitCode `json:"exitCode"` - ReturnValue []byte `json:"return"` - GasUsed gas.Unit `json:"gasUsed"` -} - -// Value returns a successful code with the value encoded. -// -// Callers do NOT need to encode the value before calling this method. -func Value(obj interface{}, gasUsed gas.Unit) Receipt { - code := exitcode.Ok - var aux []byte - if obj != nil { - var err error - aux, err = encoding.Encode(obj) - if err != nil { - code = exitcode.SysErrSerialization - } - } - - return Receipt{ - ExitCode: code, - ReturnValue: aux, - GasUsed: gasUsed, - } -} - -// Failure returns with a non-zero exit code. 
-func Failure(exitCode exitcode.ExitCode, gasAmount gas.Unit) Receipt { - return Receipt{ - ExitCode: exitCode, - ReturnValue: []byte{}, - GasUsed: gasAmount, - } -} - -func (r *Receipt) String() string { - errStr := "(error encoding MessageReceipt)" - - js, err := json.MarshalIndent(r, "", " ") - if err != nil { - return errStr - } - return fmt.Sprintf("MessageReceipt: %s", string(js)) -} diff --git a/internal/pkg/vm/internal/storage/storage.go b/internal/pkg/vm/internal/storage/storage.go deleted file mode 100644 index 4d94e71aee..0000000000 --- a/internal/pkg/vm/internal/storage/storage.go +++ /dev/null @@ -1,202 +0,0 @@ -package storage - -import ( - "context" - "errors" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - format "github.com/ipfs/go-ipld-format" - ipld "github.com/ipfs/go-ipld-format" - - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" -) - -// TODO: limit memory footprint -// TODO: implement ipld.Store - -// VMStorage implements a content-addressable store for the VM. -type VMStorage struct { - blockstore blockstore.Blockstore - writeBuffer map[cid.Cid]ipld.Node - readCache map[cid.Cid]blocks.Block - readCacheEnabled bool -} - -// ErrNotFound is returned by storage when no object matches a requested Cid. -var ErrNotFound = errors.New("object not found") - -// SerializationError is returned by storage when de/serialization of the object fails. -type SerializationError struct { - error -} - -// NewStorage creates a new VMStorage. -func NewStorage(bs blockstore.Blockstore) VMStorage { - return VMStorage{ - blockstore: bs, - writeBuffer: map[cid.Cid]ipld.Node{}, - readCache: map[cid.Cid]blocks.Block{}, - readCacheEnabled: false, - } -} - -// SetReadCache enable/disables the read chache. 
-func (s *VMStorage) SetReadCache(enabled bool) { - s.readCacheEnabled = enabled -} - -// Put stores object and returns it's content-addressable ID. -func (s *VMStorage) Put(ctx context.Context, obj interface{}) (cid.Cid, int, error) { - nd, err := s.toNode(obj) - if err != nil { - return cid.Undef, 0, SerializationError{err} - } - - // append the object to the buffer - cid := nd.Cid() - s.writeBuffer[cid] = nd - - return cid, len(nd.RawData()), nil -} - -// CidOf returns the Cid of the object without storing it. -func (s *VMStorage) CidOf(obj interface{}) (cid.Cid, error) { - nd, err := s.toNode(obj) - if err != nil { - return cid.Undef, err - } - return nd.Cid(), nil -} - -// Get loads the object based on its content-addressable ID. -func (s *VMStorage) Get(ctx context.Context, cid cid.Cid, obj interface{}) (int, error) { - raw, err := s.GetRaw(ctx, cid) - if err != nil { - return 0, err - } - err = encoding.Decode(raw, obj) - if err != nil { - return 0, SerializationError{err} - } - return len(raw), nil -} - -// GetRaw retrieves the raw bytes stored, returns true if it exists. -func (s *VMStorage) GetRaw(ctx context.Context, cid cid.Cid) ([]byte, error) { - // attempt to read from write buffer first - n, ok := s.writeBuffer[cid] - if ok { - // decode the object - return n.RawData(), nil - } - - if s.readCacheEnabled { - // attempt to read from the read cache - n, ok := s.readCache[cid] - if ok { - // decode the object - return n.RawData(), nil - } - } - - // read from store - blk, err := s.blockstore.Get(cid) - if err != nil { - if err == blockstore.ErrNotFound { - return nil, ErrNotFound - } - return nil, err - } - - if s.readCacheEnabled { - // add object to read cache - s.readCache[cid] = blk - } - - return blk.RawData(), nil -} - -// Flush writes all the in-memory held objects down to the store. -// -// This will automatically clear the write buffer when returning without error. 
-// -// If the read cache is enabled, the flushed objects will be read from cache. -func (s *VMStorage) Flush() error { - // extract list of blocks for the underlying store from our internal map - blks := make([]blocks.Block, 0, len(s.writeBuffer)) - for _, nd := range s.writeBuffer { - blks = append(blks, nd) - } - - // From https://github.com/dgraph-io/badger/issues/441: "a txn should not exceed the size of a single memtable" - // Default badger.DefaultOptions.MaxTableSize is 64Mib - // Pushing this hard would require measuring the size of each block and also accounting for badger object overheads. - // 1024 would give us very generous room for 64Kib per object. - maxBatchSize := 4 * 1024 - - // Write at most maxBatchSize objects to store at a time - remaining := blks - for len(remaining) > 0 { - last := min(len(remaining), maxBatchSize) - if err := s.blockstore.PutMany(remaining[:last]); err != nil { - return err - } - remaining = remaining[last:] - } - - if s.readCacheEnabled { - // move objects to read cache - for cid, nd := range s.writeBuffer { - s.readCache[cid] = nd - } - } - // clear write buffer - s.ClearWriteBuffer() - return nil -} - -// ClearWriteBuffer drops all the pending writes. -func (s *VMStorage) ClearWriteBuffer() { - s.writeBuffer = map[cid.Cid]ipld.Node{} -} - -// Clear will clear all buffers and caches. -// -// WARNING: thil will NOT flush the pending writes to the store. -func (s *VMStorage) Clear() { - s.writeBuffer = map[cid.Cid]ipld.Node{} - s.readCache = map[cid.Cid]blocks.Block{} -} - -// Put adds a node to temporary storage by id. 
-func (s *VMStorage) toNode(v interface{}) (ipld.Node, error) { - var nd format.Node - var err error - if blk, ok := v.(blocks.Block); ok { - // optimize putting blocks - nd, err = cbor.DecodeBlock(blk) - } else { - var raw []byte - raw, err = encoding.Encode(v) - if err != nil { - return nil, err - } - - nd, err = cbor.Decode(raw, constants.DefaultHashFunction, -1) - } - if err != nil { - return nil, err - } - return nd, nil -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/internal/pkg/vm/internal/storage/storage_test.go b/internal/pkg/vm/internal/storage/storage_test.go deleted file mode 100644 index 9d6102bc0b..0000000000 --- a/internal/pkg/vm/internal/storage/storage_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package storage_test - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "os" - "testing" - - badger "github.com/ipfs/go-ds-badger2" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/storage" -) - -func TestBatchSize(t *testing.T) { - tf.IntegrationTest(t) - ctx := context.Background() - dir, err := ioutil.TempDir("", "storagetest") - require.NoError(t, err) - defer func() { - _ = os.RemoveAll(dir) - }() - ds, err := badger.NewDatastore(dir, &badger.DefaultOptions) - require.NoError(t, err) - bs := blockstore.NewBlockstore(ds) - store := storage.NewStorage(bs) - - // This iteration count was picked experimentally based on a badger default maxtablesize of 64 << 20. - // If the batching is disabled inside the store, this test should fail. 
- require.Equal(t, int64(64<<20), badger.DefaultOptions.MaxTableSize) - iterCount := int64(2) << 16 - - data := bytes.Repeat([]byte("badger"), 100) - for i := int64(0); i < iterCount; i++ { - _, _, err = store.Put(ctx, fmt.Sprintf("%s%d", data, i)) - require.NoError(t, err) - } - err = store.Flush() - require.NoError(t, err) -} diff --git a/internal/pkg/vm/internal/vmcontext/actor_state_handle.go b/internal/pkg/vm/internal/vmcontext/actor_state_handle.go deleted file mode 100644 index 98235af4b1..0000000000 --- a/internal/pkg/vm/internal/vmcontext/actor_state_handle.go +++ /dev/null @@ -1,98 +0,0 @@ -package vmcontext - -import ( - specsruntime "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime" -) - -type actorStateHandle struct { - ctx actorStateHandleContext - // validations is a list of validations that the vm will execute after the actor code finishes. - // - // Any validation failure will result in the execution getting aborted. - validations []validateFn - // used_objs holds the pointers to objs that have been used with this handle and their expected state cid. - usedObjs map[interface{}]cid.Cid -} - -// validateFn returns True if it's valid. -type validateFn = func() bool - -type actorStateHandleContext interface { - AllowSideEffects(bool) - Create(obj specsruntime.CBORMarshaler) cid.Cid - Load(obj specsruntime.CBORUnmarshaler) cid.Cid - Replace(expected cid.Cid, obj specsruntime.CBORMarshaler) cid.Cid -} - -// NewActorStateHandle returns a new `ActorStateHandle` -// -// Note: just visible for testing. 
-func NewActorStateHandle(ctx actorStateHandleContext) specsruntime.StateHandle { - aux := newActorStateHandle(ctx) - return &aux -} - -func newActorStateHandle(ctx actorStateHandleContext) actorStateHandle { - return actorStateHandle{ - ctx: ctx, - validations: []validateFn{}, - usedObjs: map[interface{}]cid.Cid{}, - } -} - -var _ specsruntime.StateHandle = (*actorStateHandle)(nil) - -func (h *actorStateHandle) Create(obj specsruntime.CBORMarshaler) { - // Store the new state. - c := h.ctx.Create(obj) - // Store the expected CID of obj. - h.usedObjs[obj] = c -} - -// Readonly is the implementation of the ActorStateHandle interface. -func (h *actorStateHandle) Readonly(obj specsruntime.CBORUnmarshaler) { - // Load state to obj. - c := h.ctx.Load(obj) - // Track the state and expected CID used by the caller. - h.usedObjs[obj] = c -} - -// Transaction is the implementation of the ActorStateHandle interface. -func (h *actorStateHandle) Transaction(obj specsruntime.CBORer, f func() interface{}) interface{} { - if obj == nil { - runtime.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()") - } - - // Load state to obj. - prior := h.ctx.Load(obj) - - // Call user code allowing mutation but not side-effects - h.ctx.AllowSideEffects(false) - out := f() - h.ctx.AllowSideEffects(true) - - // Store the new state - newCid := h.ctx.Replace(prior, obj) - - // Record the expected state of obj - h.usedObjs[obj] = newCid - return out -} - -// Validate validates that the state was mutated properly. -// -// This method is not part of the public API, -// it is expected to be called by the runtime after each actor method. 
-func (h *actorStateHandle) Validate(cidFn func(interface{}) cid.Cid) { - for obj, head := range h.usedObjs { - // verify the obj has not changed - usedCid := cidFn(obj) - if usedCid != head { - runtime.Abortf(exitcode.SysErrorIllegalActor, "State mutated outside of Transaction() scope") - } - } -} diff --git a/internal/pkg/vm/internal/vmcontext/actor_state_handle_test.go b/internal/pkg/vm/internal/vmcontext/actor_state_handle_test.go deleted file mode 100644 index 8b8b52df29..0000000000 --- a/internal/pkg/vm/internal/vmcontext/actor_state_handle_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package vmcontext_test - -import ( - "fmt" - "io" - "testing" - - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/vmcontext" -) - -func init() { - encoding.RegisterIpldCborType(testActorStateHandleState{}) -} - -type testActorStateHandleState struct { - FieldA string -} - -func (t *testActorStateHandleState) MarshalCBOR(w io.Writer) error { - aux, err := encoding.Encode(t.FieldA) - if err != nil { - return err - } - if _, err := w.Write(aux); err != nil { - return err - } - return nil -} - -func (t *testActorStateHandleState) UnmarshalCBOR(r io.Reader) error { - bs := make([]byte, 1024) - n, err := r.Read(bs) - if err != nil { - return err - } - if err := encoding.Decode(bs[:n], &t.FieldA); err != nil { - return err - } - return nil -} - -func setup() testSetup { - initialstate := testActorStateHandleState{FieldA: "fakestate"} - - store := vm.NewTestStorage(&initialstate) - initialhead := store.CidOf(&initialstate) - ctx := fakeActorStateHandleContext{ - head: initialhead, - store: store, - allowSideEffects: true, - } - h := 
vmcontext.NewActorStateHandle(&ctx) - - cleanup := func() { - // the vmcontext is supposed to call validate after each actor method - implH := h.(extendedStateHandle) - implH.Validate(func(obj interface{}) cid.Cid { return store.CidOf(obj) }) - } - - return testSetup{ - initialstate: initialstate, - h: h, - cleanup: cleanup, - } -} - -func TestActorStateHandle(t *testing.T) { - tf.UnitTest(t) - - // this test case verifies that the `Validate` works when nothing was done with the state - t.Run("noop", func(t *testing.T) { - ts := setup() - defer ts.cleanup() - }) - - t.Run("readonly", func(t *testing.T) { - ts := setup() - defer ts.cleanup() - - var out testActorStateHandleState - ts.h.Readonly(&out) - - assert.Equal(t, out, ts.initialstate) - }) - - t.Run("abort on mutating a readonly", func(t *testing.T) { - defer mustPanic(t) - - ts := setup() - defer ts.cleanup() - - var out testActorStateHandleState - ts.h.Readonly(&out) - - out.FieldA = "changed!" - }) - - t.Run("readonly multiple times", func(t *testing.T) { - ts := setup() - defer ts.cleanup() - - var out testActorStateHandleState - ts.h.Readonly(&out) - ts.h.Readonly(&out) - - assert.Equal(t, out, ts.initialstate) - }) - - t.Run("readonly promotion", func(t *testing.T) { - ts := setup() - defer ts.cleanup() - - var out testActorStateHandleState - ts.h.Readonly(&out) - - ts.h.Transaction(&out, func() interface{} { - out.FieldA = "changed!" 
- return nil - }) - }) - - t.Run("transaction", func(t *testing.T) { - ts := setup() - defer ts.cleanup() - - var out testActorStateHandleState - expected := "new state" - - ts.h.Transaction(&out, func() interface{} { - // check state is not what we are going to use - assert.NotEqual(t, out.FieldA, expected) - out.FieldA = expected - - return nil - }) - // check that it changed - assert.Equal(t, out.FieldA, expected) - - ts.h.Readonly(&out) - // really check by loading it again - assert.Equal(t, out.FieldA, expected) - }) - - t.Run("transaction but no mutation", func(t *testing.T) { - ts := setup() - defer ts.cleanup() - - var out testActorStateHandleState - - // should work, mutating is not compulsory - ts.h.Transaction(&out, func() interface{} { - return nil - }) - - assert.Equal(t, out, ts.initialstate) - }) - - t.Run("transaction returning value", func(t *testing.T) { - ts := setup() - defer ts.cleanup() - - var out testActorStateHandleState - - v := ts.h.Transaction(&out, func() interface{} { - return out.FieldA - }) - - assert.Equal(t, v, ts.initialstate.FieldA) - }) - - t.Run("mutated after the transaction", func(t *testing.T) { - defer mustPanic(t) - - ts := setup() - defer ts.cleanup() - - var out testActorStateHandleState - - ts.h.Transaction(&out, func() interface{} { - out.FieldA = "changed!" - return nil - }) - - out.FieldA = "changed again!" - }) - - t.Run("transaction double whammy", func(t *testing.T) { - ts := setup() - defer ts.cleanup() - - var out testActorStateHandleState - - ts.h.Transaction(&out, func() interface{} { - out.FieldA = "changed!" - return nil - }) - - v := ts.h.Transaction(&out, func() interface{} { - out.FieldA = "again!" 
- return out.FieldA - }) - - ts.h.Readonly(&out) - // really check by loading it again - assert.Equal(t, out.FieldA, v) - }) -} - -func TestActorStateHandleNilState(t *testing.T) { - tf.UnitTest(t) - - setup := func() (runtime.StateHandle, func()) { - store := vm.NewTestStorage(nil) - ctx := fakeActorStateHandleContext{ - store: store, - allowSideEffects: true, - } - - h := vmcontext.NewActorStateHandle(&ctx) - - cleanup := func() { - // the vmcontext is supposed to call validate after each actor method - implH := h.(extendedStateHandle) - implH.Validate(func(obj interface{}) cid.Cid { return store.CidOf(obj) }) - } - - return h, cleanup - } - - t.Run("readonly on nil state is not allowed", func(t *testing.T) { - defer mustPanic(t) - - h, cleanup := setup() - defer cleanup() - - var out testActorStateHandleState - h.Readonly(&out) - }) - - t.Run("transaction on nil state", func(t *testing.T) { - h, cleanup := setup() - defer cleanup() - - var out testActorStateHandleState - h.Transaction(&out, func() interface{} { - return nil - }) - }) - - t.Run("state initialized after transaction", func(t *testing.T) { - h, cleanup := setup() - defer cleanup() - - var out testActorStateHandleState - h.Transaction(&out, func() interface{} { - return nil - }) - - h.Readonly(&out) // should not fail - }) - - t.Run("readonly nil pointer to state", func(t *testing.T) { - defer mustPanic(t) - - h, cleanup := setup() - defer cleanup() - - h.Readonly(nil) - }) - - t.Run("transaction nil pointer to state", func(t *testing.T) { - defer mustPanic(t) - - h, cleanup := setup() - defer cleanup() - - h.Transaction(nil, func() interface{} { - return nil - }) - }) -} - -type extendedStateHandle interface { - Validate(func(interface{}) cid.Cid) -} - -type fakeActorStateHandleContext struct { - store runtime.Store - head cid.Cid - allowSideEffects bool -} - -func (ctx *fakeActorStateHandleContext) AllowSideEffects(allow bool) { - ctx.allowSideEffects = allow -} - -func (ctx 
*fakeActorStateHandleContext) Create(obj runtime.CBORMarshaler) cid.Cid { - ctx.head = ctx.store.Put(obj) - return ctx.head -} - -func (ctx *fakeActorStateHandleContext) Load(obj runtime.CBORUnmarshaler) cid.Cid { - found := ctx.store.Get(ctx.head, obj) - if !found { - panic("inconsistent state") - } - return ctx.head -} - -func (ctx *fakeActorStateHandleContext) Replace(expected cid.Cid, obj runtime.CBORMarshaler) cid.Cid { - if !ctx.head.Equals(expected) { - panic(fmt.Errorf("unexpected prior state %s expected %s", ctx.head, expected)) - } - ctx.head = ctx.store.Put(obj) - return ctx.head -} - -type testSetup struct { - initialstate testActorStateHandleState - h runtime.StateHandle - cleanup func() -} - -func mustPanic(t *testing.T) { - if r := recover(); r == nil { - t.Fail() - } -} diff --git a/internal/pkg/vm/internal/vmcontext/actor_store.go b/internal/pkg/vm/internal/vmcontext/actor_store.go deleted file mode 100644 index 5a73c490a7..0000000000 --- a/internal/pkg/vm/internal/vmcontext/actor_store.go +++ /dev/null @@ -1,81 +0,0 @@ -package vmcontext - -import ( - "context" - "fmt" - "reflect" - - specsruntime "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/gascost" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/storage" -) - -type vmStorage interface { - Get(ctx context.Context, cid cid.Cid, obj interface{}) (int, error) - Put(ctx context.Context, obj interface{}) (cid.Cid, int, error) -} - -// ActorStorage hides the storage methods from the actors and turns the errors into runtime panics. 
-type ActorStorage struct { - context context.Context - inner vmStorage - pricelist gascost.Pricelist - gasTank *GasTracker -} - -func NewActorStorage(ctx context.Context, inner vmStorage, gasTank *GasTracker, pricelist gascost.Pricelist) *ActorStorage { - return &ActorStorage{ - context: ctx, - inner: inner, - pricelist: pricelist, - gasTank: gasTank, - } -} - -// -// implement runtime.Store for ActorStorage -// - -var _ specsruntime.Store = (*ActorStorage)(nil) - -// Serialization technically belongs in the actor code, rather than inside the VM. -// The true VM storage interface is in terms of raw bytes and, when we have user-defined, -// serialization code will be directly in those contracts. -// Our present runtime interface is at a slightly higher level for convenience, but the exit code here is the -// actor, rather than system-level, error code. -const serializationErr = exitcode.ErrSerialization - -func (s *ActorStorage) Put(obj specsruntime.CBORMarshaler) cid.Cid { - cid, ln, err := s.inner.Put(s.context, obj) - if err != nil { - msg := fmt.Sprintf("failed to put object %s in store: %s", reflect.TypeOf(obj), err) - if _, ok := err.(storage.SerializationError); ok { - runtime.Abortf(serializationErr, msg) - } else { - panic(msg) - } - } - s.gasTank.Charge(s.pricelist.OnIpldPut(ln), "storage put %s %d bytes into %v", cid, ln, obj) - return cid -} - -func (s *ActorStorage) Get(cid cid.Cid, obj specsruntime.CBORUnmarshaler) bool { - ln, err := s.inner.Get(s.context, cid, obj) - if err == storage.ErrNotFound { - return false - } - if err != nil { - msg := fmt.Sprintf("failed to get object %s %s from store: %s", reflect.TypeOf(obj), cid, err) - if _, ok := err.(storage.SerializationError); ok { - runtime.Abortf(serializationErr, msg) - } else { - panic(msg) - } - } - s.gasTank.Charge(s.pricelist.OnIpldGet(ln), "storage get %s %d bytes into %v", cid, ln, obj) - return true -} diff --git a/internal/pkg/vm/internal/vmcontext/actor_store_test.go 
b/internal/pkg/vm/internal/vmcontext/actor_store_test.go deleted file mode 100644 index cff76f5b2f..0000000000 --- a/internal/pkg/vm/internal/vmcontext/actor_store_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package vmcontext_test - -import ( - "context" - "fmt" - "io" - "testing" - - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - typegen "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/gascost" - vmr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/vmcontext" -) - -func TestActorStore(t *testing.T) { - ctx := context.Background() - raw := vm.NewStorage(blockstore.NewBlockstore(datastore.NewMapDatastore())) - gasTank := vmcontext.NewGasTracker(1e6) - - t.Run("abort on put serialization failure", func(t *testing.T) { - store := vmcontext.NewActorStorage(ctx, &raw, &gasTank, gascost.PricelistByEpoch(0)) - _, thrown := tryPut(store, cannotCBOR{}) - abort, ok := thrown.(vmr.ExecutionPanic) - assert.NotNil(t, thrown) - assert.True(t, ok, "expected abort") - assert.Equal(t, exitcode.ErrSerialization, abort.Code()) - }) - - t.Run("abort on get serialization failure", func(t *testing.T) { - store := vmcontext.NewActorStorage(ctx, &raw, &gasTank, gascost.PricelistByEpoch(0)) - v := typegen.CborInt(0) - - c, thrown := tryPut(store, &v) - assert.True(t, c.Defined()) - require.Nil(t, thrown) - - var v2 typegen.CborCid - thrown = tryGet(store, c, &v2) // Attempt decode into wrong type - abort, ok := thrown.(vmr.ExecutionPanic) - assert.NotNil(t, thrown) - assert.True(t, ok, "expected abort") - 
assert.Equal(t, exitcode.ErrSerialization, abort.Code()) - }) - - t.Run("panic on put storage failure", func(t *testing.T) { - store := vmcontext.NewActorStorage(ctx, &brokenStorage{}, &gasTank, gascost.PricelistByEpoch(0)) - v := typegen.CborInt(0) - _, thrown := tryPut(store, &v) - _, ok := thrown.(vmr.ExecutionPanic) - assert.NotNil(t, thrown) - assert.False(t, ok, "expected non-abort panic") - }) - - t.Run("panic on get storage failure", func(t *testing.T) { - store := vmcontext.NewActorStorage(ctx, &brokenStorage{}, &gasTank, gascost.PricelistByEpoch(0)) - var v typegen.CborInt - thrown := tryGet(store, cid.Undef, &v) - _, ok := thrown.(vmr.ExecutionPanic) - assert.NotNil(t, thrown) - assert.False(t, ok, "expected non-abort panic") - }) -} - -func tryPut(s *vmcontext.ActorStorage, v runtime.CBORMarshaler) (c cid.Cid, thrown interface{}) { - defer func() { - thrown = recover() - }() - c = s.Put(v) - return -} - -func tryGet(s *vmcontext.ActorStorage, c cid.Cid, v runtime.CBORUnmarshaler) (thrown interface{}) { - defer func() { - thrown = recover() - }() - s.Get(c, v) - return -} - -type cannotCBOR struct { -} - -func (c cannotCBOR) MarshalCBOR(w io.Writer) error { - return fmt.Errorf("no") -} - -type brokenStorage struct{} - -func (brokenStorage) Get(_ context.Context, _ cid.Cid, _ interface{}) (int, error) { - return 0, fmt.Errorf("no") -} - -func (brokenStorage) Put(_ context.Context, _ interface{}) (cid.Cid, int, error) { - return cid.Undef, 0, fmt.Errorf("no") -} diff --git a/internal/pkg/vm/internal/vmcontext/context_store.go b/internal/pkg/vm/internal/vmcontext/context_store.go deleted file mode 100644 index c2d01f037e..0000000000 --- a/internal/pkg/vm/internal/vmcontext/context_store.go +++ /dev/null @@ -1,36 +0,0 @@ -package vmcontext - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/storage" -) - -// Dragons: see if we 
can reuse the `adt.AsStore` method to construct this instead of re-writing it -type contextStore struct { - context context.Context - store *storage.VMStorage -} - -// implement adt.Store - -var _ adt.Store = (*contextStore)(nil) - -func (a *contextStore) Context() context.Context { - return a.context -} - -// (implement cbor.IpldStore, part of adt.Store) - -func (a *contextStore) Get(ctx context.Context, id cid.Cid, obj interface{}) error { - _, err := a.store.Get(ctx, id, obj) - return err -} - -func (a *contextStore) Put(ctx context.Context, obj interface{}) (cid.Cid, error) { - id, _, err := a.store.Put(ctx, obj) - return id, err -} diff --git a/internal/pkg/vm/internal/vmcontext/gas_tracker.go b/internal/pkg/vm/internal/vmcontext/gas_tracker.go deleted file mode 100644 index 8237e6afec..0000000000 --- a/internal/pkg/vm/internal/vmcontext/gas_tracker.go +++ /dev/null @@ -1,59 +0,0 @@ -package vmcontext - -import ( - "fmt" - - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime" -) - -// GasTracker maintains the state of gas usage throughout the execution of a message. -type GasTracker struct { - gasLimit gas.Unit - gasConsumed gas.Unit -} - -// NewGasTracker initializes a new empty gas tracker -func NewGasTracker(limit gas.Unit) GasTracker { - return GasTracker{ - gasLimit: limit, - gasConsumed: gas.Zero, - } -} - -// Charge will add the gas charge to the current method gas context. -// -// WARNING: this method will panic if there is no sufficient gas left. -func (t *GasTracker) Charge(amount gas.Unit, msg string, args ...interface{}) { - if ok := t.TryCharge(amount); !ok { - fmsg := fmt.Sprintf(msg, args...) - runtime.Abortf(exitcode.SysErrOutOfGas, "gas limit %d exceeded with charge of %d: %s", t.gasLimit, amount, fmsg) - } -} - -// TryCharge charges `amount` or `RemainingGas()``, whichever is smaller. 
-// -// Returns `True` if the there was enough gas to pay for `amount`. -func (t *GasTracker) TryCharge(amount gas.Unit) bool { - // check for limit - aux := t.gasConsumed + amount - if aux > t.gasLimit { - t.gasConsumed = t.gasLimit - return false - } - - t.gasConsumed = aux - return true -} - -// GasConsumed returns the gas consumed. -func (t *GasTracker) GasConsumed() gas.Unit { - return t.gasConsumed -} - -// RemainingGas returns the gas remaining. -func (t *GasTracker) RemainingGas() gas.Unit { - return t.gasLimit - t.gasConsumed -} diff --git a/internal/pkg/vm/internal/vmcontext/invocation_context.go b/internal/pkg/vm/internal/vmcontext/invocation_context.go deleted file mode 100644 index 281b345ceb..0000000000 --- a/internal/pkg/vm/internal/vmcontext/invocation_context.go +++ /dev/null @@ -1,610 +0,0 @@ -package vmcontext - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime/debug" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - specsruntime "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime" -) - -// Context for a top-level invocation sequence. 
-type topLevelContext struct { - originatorStableAddress address.Address // Stable (public key) address of the top-level message sender. - originatorCallSeq uint64 // Call sequence number of the top-level message. - newActorAddressCount uint64 // Count of calls to NewActorAddress (mutable). -} - -// Context for an individual message invocation, including inter-actor sends. -type invocationContext struct { - rt *VM - topLevel *topLevelContext - msg internalMessage // The message being processed - fromActor *actor.Actor // The immediate calling actor - gasTank *GasTracker - randSource crypto.RandomnessSource - isCallerValidated bool - allowSideEffects bool - toActor *actor.Actor // The receiving actor - stateHandle internalActorStateHandle -} - -type internalActorStateHandle interface { - specsruntime.StateHandle - Validate(func(interface{}) cid.Cid) -} - -func newInvocationContext(rt *VM, topLevel *topLevelContext, msg internalMessage, fromActor *actor.Actor, gasTank *GasTracker, randSource crypto.RandomnessSource) invocationContext { - // Note: the toActor and stateHandle are loaded during the `invoke()` - return invocationContext{ - rt: rt, - topLevel: topLevel, - msg: msg, - fromActor: fromActor, - gasTank: gasTank, - randSource: randSource, - isCallerValidated: false, - allowSideEffects: true, - toActor: nil, - stateHandle: nil, - } -} - -type stateHandleContext invocationContext - -func (shc *stateHandleContext) AllowSideEffects(allow bool) { - shc.allowSideEffects = allow -} - -func (shc *stateHandleContext) Create(obj specsruntime.CBORMarshaler) cid.Cid { - actr := shc.loadActor() - if actr.Head.Cid.Defined() { - runtime.Abortf(exitcode.SysErrorIllegalActor, "failed to construct actor state: already initialized") - } - c := shc.store().Put(obj) - actr.Head = e.NewCid(c) - shc.storeActor(actr) - return c -} - -func (shc *stateHandleContext) Load(obj specsruntime.CBORUnmarshaler) cid.Cid { - // The actor must be loaded from store every time since the state may 
have changed via a different state handle - // (e.g. in a recursive call). - actr := shc.loadActor() - c := actr.Head.Cid - if !c.Defined() { - runtime.Abortf(exitcode.SysErrorIllegalActor, "failed to load undefined state, must construct first") - } - found := shc.store().Get(c, obj) - if !found { - panic(fmt.Errorf("failed to load state for actor %s, CID %s", shc.msg.to, c)) - } - return c -} - -func (shc *stateHandleContext) Replace(expected cid.Cid, obj specsruntime.CBORMarshaler) cid.Cid { - actr := shc.loadActor() - if !actr.Head.Cid.Equals(expected) { - panic(fmt.Errorf("unexpected prior state %s for actor %s, expected %s", actr.Head, shc.msg.to, expected)) - } - c := shc.store().Put(obj) - actr.Head = e.NewCid(c) - shc.storeActor(actr) - return c -} - -func (shc *stateHandleContext) store() specsruntime.Store { - return ((*invocationContext)(shc)).Store() -} - -func (shc *stateHandleContext) loadActor() *actor.Actor { - actr, found, err := shc.rt.state.GetActor(shc.rt.context, shc.msg.to) - if err != nil { - panic(err) - } - if !found { - panic(fmt.Errorf("failed to find actor %s for state", shc.msg.to)) - } - return actr -} - -func (shc *stateHandleContext) storeActor(actr *actor.Actor) { - err := shc.rt.state.SetActor(shc.rt.context, shc.msg.to, actr) - if err != nil { - panic(err) - } -} - -// runtime aborts are trapped by invoke, it will always return an exit code. -func (ctx *invocationContext) invoke() (ret returnWrapper, errcode exitcode.ExitCode) { - // Checkpoint state, for restoration on rollback - // Note that changes prior to invocation (sequence number bump and gas prepayment) persist even if invocation fails. - priorRoot, err := ctx.rt.checkpoint() - if err != nil { - panic(err) - } - - // Install handler for abort, which rolls back all state changes from this and any nested invocations. - // This is the only path by which a non-OK exit code may be returned. 
- defer func() { - if r := recover(); r != nil { - if err := ctx.rt.rollback(priorRoot); err != nil { - panic(err) - } - switch r.(type) { - case runtime.ExecutionPanic: - p := r.(runtime.ExecutionPanic) - - vmlog.Warnw("Abort during actor execution.", - "errorMessage", p, - "exitCode", p.Code(), - "sender", ctx.msg.from, - "receiver", ctx.msg.to, - "methodNum", ctx.msg.method, - "value", ctx.msg.value, - "gasLimit", ctx.gasTank.gasLimit) - ret = returnWrapper{adt.Empty} // The Empty here should never be used, but slightly safer than zero value. - errcode = p.Code() - return - default: - // do not trap unknown panics - debug.PrintStack() - panic(r) - } - } - }() - - // pre-dispatch - // 1. charge gas for message invocation - // 2. load target actor - // 3. transfer optional funds - // 4. short-circuit _Send_ method - // 5. load target actor code - // 6. create target state handle - // assert from address is an ID address. - if ctx.msg.from.Protocol() != address.ID { - panic("bad code: sender address MUST be an ID address at invocation time") - } - - // 1. charge gas for msg - ctx.gasTank.Charge(ctx.rt.pricelist.OnMethodInvocation(ctx.msg.value, ctx.msg.method), "method invocation") - - // 2. load target actor - // Note: we replace the "to" address with the normalized version - ctx.toActor, ctx.msg.to = ctx.resolveTarget(ctx.msg.to) - - // 3. transfer funds carried by the msg - if !ctx.msg.value.Nil() && !ctx.msg.value.IsZero() { - if ctx.msg.value.LessThan(big.Zero()) { - runtime.Abortf(exitcode.SysErrForbidden, "attempt to transfer negative value %s from %s to %s", - ctx.msg.value, ctx.msg.from, ctx.msg.to) - } - if ctx.fromActor.Balance.LessThan(ctx.msg.value) { - runtime.Abortf(exitcode.SysErrInsufficientFunds, "sender %s insufficient balance %s to transfer %s to %s", - ctx.msg.from, ctx.fromActor.Balance, ctx.msg.value, ctx.msg.to) - } - ctx.toActor, ctx.fromActor = ctx.rt.transfer(ctx.msg.from, ctx.msg.to, ctx.msg.value) - } - - // 4. 
if we are just sending funds, there is nothing else to do. - if ctx.msg.method == builtin.MethodSend { - return returnWrapper{adt.Empty}, exitcode.Ok - } - - // 5. load target actor code - actorImpl := ctx.rt.getActorImpl(ctx.toActor.Code.Cid) - // 6. create target state handle - stateHandle := newActorStateHandle((*stateHandleContext)(ctx)) - ctx.stateHandle = &stateHandle - - // dispatch - adapter := runtimeAdapter{ctx: ctx} - out, err := actorImpl.Dispatch(ctx.msg.method, &adapter, ctx.msg.params) - if err != nil { - // Dragons: this could be a params deserialization error too - runtime.Abort(exitcode.SysErrInvalidMethod) - } - - // assert output implements expected interface - var marsh specsruntime.CBORMarshaler = adt.Empty - if out != nil { - var ok bool - marsh, ok = out.(specsruntime.CBORMarshaler) - if !ok { - runtime.Abortf(exitcode.SysErrorIllegalActor, "Returned value is not a CBORMarshaler") - } - } - ret = returnWrapper{inner: marsh} - - // post-dispatch - // 1. check caller was validated - // 2. check state manipulation was valid - // 4. success! - - // 1. check caller was validated - if !ctx.isCallerValidated { - runtime.Abortf(exitcode.SysErrorIllegalActor, "Caller MUST be validated during method execution") - } - - // 2. validate state access - ctx.stateHandle.Validate(func(obj interface{}) cid.Cid { - id, err := ctx.rt.store.CidOf(obj) - if err != nil { - panic(err) - } - return id - }) - - // Reset to pre-invocation state - ctx.toActor = nil - ctx.stateHandle = nil - - // 3. success! - return ret, exitcode.Ok -} - -// resolveTarget loads and actor and returns its ActorID address. -// -// If the target actor does not exist, and the target address is a pub-key address, -// a new account actor will be created. -// Otherwise, this method will abort execution. -func (ctx *invocationContext) resolveTarget(target address.Address) (*actor.Actor, address.Address) { - // resolve the target address via the InitActor, and attempt to load state. 
- initActorEntry, found, err := ctx.rt.state.GetActor(ctx.rt.context, builtin.InitActorAddr) - if err != nil { - panic(err) - } - if !found { - runtime.Abort(exitcode.SysErrSenderInvalid) - } - - if target == builtin.InitActorAddr { - return initActorEntry, target - } - - // get a view into the actor state - var state init_.State - if _, err := ctx.rt.store.Get(ctx.rt.context, initActorEntry.Head.Cid, &state); err != nil { - panic(err) - } - - // lookup the ActorID based on the address - targetIDAddr, err := state.ResolveAddress(ctx.rt.ContextStore(), target) - created := false - if err == init_.ErrAddressNotFound { - // actor does not exist, create an account actor - // - precond: address must be a pub-key - // - sent init actor a msg to create the new account - - if target.Protocol() != address.SECP256K1 && target.Protocol() != address.BLS { - // Don't implicitly create an account actor for an address without an associated key. - runtime.Abort(exitcode.SysErrInvalidReceiver) - } - - targetIDAddr, err = state.MapAddressToNewID(ctx.rt.ContextStore(), target) - if err != nil { - panic(err) - } - // store new state - initHead, _, err := ctx.rt.store.Put(ctx.rt.context, &state) - if err != nil { - panic(err) - } - // update init actor - initActorEntry.Head = e.NewCid(initHead) - if err := ctx.rt.state.SetActor(ctx.rt.context, builtin.InitActorAddr, initActorEntry); err != nil { - panic(err) - } - - ctx.CreateActor(builtin.AccountActorCodeID, targetIDAddr) - - // call constructor on account - newMsg := internalMessage{ - from: builtin.SystemActorAddr, - to: targetIDAddr, - value: big.Zero(), - method: builtin.MethodsAccount.Constructor, - // use original address as constructor params - // Note: constructor takes a pointer - params: &target, - } - - newCtx := newInvocationContext(ctx.rt, ctx.topLevel, newMsg, nil, ctx.gasTank, ctx.randSource) - _, code := newCtx.invoke() - if code.IsError() { - // we failed to construct an account actor.. 
- runtime.Abort(code) - } - - created = true - } else if err != nil { - panic(err) - } - - // load actor - targetActor, found, err := ctx.rt.state.GetActor(ctx.rt.context, targetIDAddr) - if err != nil { - panic(err) - } - if !found && created { - panic(fmt.Errorf("unreachable: actor is supposed to exist but it does not. addr: %s, idAddr: %s", target, targetIDAddr)) - } - if !found { - runtime.Abort(exitcode.SysErrInvalidReceiver) - } - - return targetActor, targetIDAddr -} - -// -// implement runtime.InvocationContext for invocationContext -// - -var _ runtime.InvocationContext = (*invocationContext)(nil) - -// Runtime implements runtime.InvocationContext. -func (ctx *invocationContext) Runtime() runtime.Runtime { - return ctx.rt -} - -// Store implements runtime.Runtime. -func (ctx *invocationContext) Store() specsruntime.Store { - return NewActorStorage(ctx.rt.context, ctx.rt.store, ctx.gasTank, ctx.rt.pricelist) -} - -// Message implements runtime.InvocationContext. -func (ctx *invocationContext) Message() specsruntime.Message { - return ctx.msg -} - -// ValidateCaller implements runtime.InvocationContext. -func (ctx *invocationContext) ValidateCaller(pattern runtime.CallerPattern) { - if ctx.isCallerValidated { - runtime.Abortf(exitcode.SysErrorIllegalActor, "Method must validate caller identity exactly once") - } - if !pattern.IsMatch((*patternContext2)(ctx)) { - runtime.Abortf(exitcode.SysErrForbidden, "Method invoked by incorrect caller") - } - ctx.isCallerValidated = true -} - -// State implements runtime.InvocationContext. 
-func (ctx *invocationContext) State() specsruntime.StateHandle { - return ctx.stateHandle -} - -type returnWrapper struct { - inner specsruntime.CBORMarshaler -} - -func (r returnWrapper) ToCbor() (out []byte, err error) { - if r.inner == nil { - return nil, fmt.Errorf("failed to unmarshal nil return (did you mean adt.Empty?)") - } - b := bytes.Buffer{} - if err = r.inner.MarshalCBOR(&b); err != nil { - return - } - out = b.Bytes() - if out == nil { - // A buffer with zero bytes written returns nil rather than an empty array, - // but the distinction matters for CBOR. - out = []byte{} - } - return -} - -func (r returnWrapper) Into(o specsruntime.CBORUnmarshaler) error { - // TODO: if inner is also a specsruntime.CBORUnmarshaler, overwrite o with inner. - if r.inner == nil { - return fmt.Errorf("failed to unmarshal nil return (did you mean adt.Empty?)") - } - b := bytes.Buffer{} - if err := r.inner.MarshalCBOR(&b); err != nil { - return err - } - return o.UnmarshalCBOR(&b) -} - -// Send implements runtime.InvocationContext. -func (ctx *invocationContext) Send(toAddr address.Address, methodNum abi.MethodNum, params specsruntime.CBORMarshaler, value abi.TokenAmount) (ret specsruntime.SendReturn, errcode exitcode.ExitCode) { - // check if side-effects are allowed - if !ctx.allowSideEffects { - runtime.Abortf(exitcode.SysErrorIllegalActor, "Calling Send() is not allowed during side-effect lock") - } - // prepare - // 1. alias fromActor - // 2. build internal message - - // 1. fromActor = executing toActor - from := ctx.msg.to - fromActor := ctx.toActor - - // 2. build internal message - newMsg := internalMessage{ - from: from, - to: toAddr, - value: value, - method: methodNum, - params: params, - } - - // invoke - // 1. build new context - // 2. invoke message - - // 1. build new context - newCtx := newInvocationContext(ctx.rt, ctx.topLevel, newMsg, fromActor, ctx.gasTank, ctx.randSource) - - // 2. 
invoke - return newCtx.invoke() -} - -/// Balance implements runtime.InvocationContext. -func (ctx *invocationContext) Balance() abi.TokenAmount { - return ctx.toActor.Balance -} - -// -// implement runtime.InvocationContext for invocationContext -// - -var _ runtime.ExtendedInvocationContext = (*invocationContext)(nil) - -func (ctx *invocationContext) NewActorAddress() address.Address { - var buf bytes.Buffer - - b1, err := encoding.Encode(ctx.topLevel.originatorStableAddress) - if err != nil { - panic(err) - } - _, err = buf.Write(b1) - if err != nil { - panic(err) - } - - err = binary.Write(&buf, binary.BigEndian, ctx.topLevel.originatorCallSeq) - if err != nil { - panic(err) - } - - err = binary.Write(&buf, binary.BigEndian, ctx.topLevel.newActorAddressCount) - if err != nil { - panic(err) - } - - actorAddress, err := address.NewActorAddress(buf.Bytes()) - if err != nil { - panic(err) - } - return actorAddress -} - -// CreateActor implements runtime.ExtendedInvocationContext. -func (ctx *invocationContext) CreateActor(codeID cid.Cid, addr address.Address) { - if !builtin.IsBuiltinActor(codeID) { - runtime.Abortf(exitcode.SysErrorIllegalArgument, "Can only create built-in actors.") - } - - if builtin.IsSingletonActor(codeID) { - runtime.Abortf(exitcode.SysErrorIllegalArgument, "Can only have one instance of singleton actors.") - } - - vmlog.Debugf("creating actor, friendly-name: %s, code: %s, addr: %s\n", builtin.ActorNameByCode(codeID), codeID, addr) - - // Check existing address. If nothing there, create empty actor. 
- // - // Note: we are storing the actors by ActorID *address* - _, found, err := ctx.rt.state.GetActor(ctx.rt.context, addr) - if err != nil { - panic(err) - } - if found { - runtime.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists") - } - - // Charge gas now that easy checks are done - ctx.gasTank.Charge(ctx.rt.pricelist.OnCreateActor(), "CreateActor code %s, address %s", codeID, addr) - - newActor := &actor.Actor{ - // make this the right 'type' of actor - Code: e.NewCid(codeID), - Balance: abi.NewTokenAmount(0), - } - if err := ctx.rt.state.SetActor(ctx.rt.context, addr, newActor); err != nil { - panic(err) - } -} - -// DeleteActor implements runtime.ExtendedInvocationContext. -func (ctx *invocationContext) DeleteActor(beneficiary address.Address) { - receiver := ctx.msg.to - receiverActor, found, err := ctx.rt.state.GetActor(ctx.rt.context, receiver) - if err != nil { - panic(err) - } - if !found { - runtime.Abortf(exitcode.SysErrorIllegalActor, "delete non-existent actor %s", receiverActor) - } - ctx.gasTank.Charge(ctx.rt.pricelist.OnDeleteActor(), "DeleteActor %s", receiver) - - // Transfer any remaining balance to the beneficiary. - // This looks like it could cause a problem with gas refund going to a non-existent actor, but the gas payer - // is always an account actor, which cannot be the receiver of this message. 
- if receiverActor.Balance.GreaterThan(big.Zero()) { - ctx.rt.transfer(receiver, beneficiary, receiverActor.Balance) - } - - if err := ctx.rt.state.DeleteActor(ctx.rt.context, receiver); err != nil { - panic(err) - } -} - -func (ctx *invocationContext) TotalFilCircSupply() abi.TokenAmount { - rewardActor, found, err := ctx.rt.state.GetActor(ctx.rt.context, builtin.RewardActorAddr) - if !found || err != nil { - panic(fmt.Sprintf("failed to get rewardActor actor for computing total supply: %s", err)) - } - - burntActor, found, err := ctx.rt.state.GetActor(ctx.rt.context, builtin.BurntFundsActorAddr) - if !found || err != nil { - panic(fmt.Sprintf("failed to get burntActor funds actor for computing total supply: %s", err)) - } - - marketActor, found, err := ctx.rt.state.GetActor(ctx.rt.context, builtin.StorageMarketActorAddr) - if !found || err != nil { - panic(fmt.Sprintf("failed to get storage marketActor actor for computing total supply: %s", err)) - } - - // TODO: remove this, https://github.com/filecoin-project/go-filecoin/issues/4017 - powerActor, found, err := ctx.rt.state.GetActor(ctx.rt.context, builtin.StoragePowerActorAddr) - if !found || err != nil { - panic(fmt.Sprintf("failed to get storage powerActor actor for computing total supply: %s", err)) - } - - // TODO: this 2 billion is coded for temporary compatibility with the Lotus implementation of this function, - // but including it here is brittle. Instead, this should inspect the reward actor's state which records - // exactly how much has actually been distributed in block rewards to this point, robust to various - // network initial conditions. 
- // https://github.com/filecoin-project/go-filecoin/issues/4017 - total := big.NewInt(2e9) - total = big.Sub(total, rewardActor.Balance) - total = big.Sub(total, burntActor.Balance) - total = big.Sub(total, marketActor.Balance) - - var st power.State - if found = ctx.Store().Get(powerActor.Head.Cid, &st); !found { - panic("failed to get storage powerActor state") - } - - return big.Sub(total, st.TotalPledgeCollateral) -} - -// patternContext implements the PatternContext -type patternContext2 invocationContext - -var _ runtime.PatternContext = (*patternContext2)(nil) - -func (ctx *patternContext2) CallerCode() cid.Cid { - return ctx.fromActor.Code.Cid -} - -func (ctx *patternContext2) CallerAddr() address.Address { - return ctx.msg.from -} diff --git a/internal/pkg/vm/internal/vmcontext/runtime_adapter.go b/internal/pkg/vm/internal/vmcontext/runtime_adapter.go deleted file mode 100644 index 96eed731e0..0000000000 --- a/internal/pkg/vm/internal/vmcontext/runtime_adapter.go +++ /dev/null @@ -1,149 +0,0 @@ -package vmcontext - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - specsruntime "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/pattern" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime" -) - -type runtimeAdapter struct { - ctx *invocationContext -} - -var _ specsruntime.Runtime = (*runtimeAdapter)(nil) - -// Message implements Runtime. -func (a *runtimeAdapter) Message() specsruntime.Message { - return a.ctx.Message() -} - -// CurrEpoch implements Runtime. -func (a *runtimeAdapter) CurrEpoch() abi.ChainEpoch { - return a.ctx.Runtime().CurrentEpoch() -} - -// ImmediateCaller implements Runtime. 
-func (a *runtimeAdapter) ImmediateCaller() address.Address { - return a.ctx.Message().Caller() -} - -// ValidateImmediateCallerAcceptAny implements Runtime. -func (a *runtimeAdapter) ValidateImmediateCallerAcceptAny() { - a.ctx.ValidateCaller(pattern.Any{}) -} - -// ValidateImmediateCallerIs implements Runtime. -func (a *runtimeAdapter) ValidateImmediateCallerIs(addrs ...address.Address) { - a.ctx.ValidateCaller(pattern.AddressIn{Addresses: addrs}) -} - -// ValidateImmediateCallerType implements Runtime. -func (a *runtimeAdapter) ValidateImmediateCallerType(codes ...cid.Cid) { - a.ctx.ValidateCaller(pattern.CodeIn{Codes: codes}) -} - -// CurrentBalance implements Runtime. -func (a *runtimeAdapter) CurrentBalance() abi.TokenAmount { - return a.ctx.Balance() -} - -// ResolveAddress implements Runtime. -func (a *runtimeAdapter) ResolveAddress(addr address.Address) (address.Address, bool) { - return a.ctx.rt.normalizeAddress(addr) -} - -// GetActorCodeCID implements Runtime. -func (a *runtimeAdapter) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) { - entry, found, err := a.ctx.rt.state.GetActor(context.Background(), addr) - if !found { - return cid.Undef, false - } - if err != nil { - panic(err) - } - return entry.Code.Cid, true -} - -// GetRandomness implements Runtime. -func (a *runtimeAdapter) GetRandomness(tag crypto.DomainSeparationTag, epoch abi.ChainEpoch, entropy []byte) abi.Randomness { - randomness, err := a.ctx.randSource.Randomness(a.Context(), tag, epoch, entropy) - if err != nil { - panic(err) - } - return randomness -} - -// State implements Runtime. -func (a *runtimeAdapter) State() specsruntime.StateHandle { - return a.ctx.State() -} - -// Store implements Runtime. -func (a *runtimeAdapter) Store() specsruntime.Store { - return a.ctx.Store() -} - -// Send implements Runtime. 
-func (a *runtimeAdapter) Send(toAddr address.Address, methodNum abi.MethodNum, params specsruntime.CBORMarshaler, value abi.TokenAmount) (ret specsruntime.SendReturn, errcode exitcode.ExitCode) { - return a.ctx.Send(toAddr, methodNum, params, value) -} - -// Abortf implements Runtime. -func (a *runtimeAdapter) Abortf(errExitCode exitcode.ExitCode, msg string, args ...interface{}) { - runtime.Abortf(errExitCode, msg, args...) -} - -// NewActorAddress implements Runtime. -func (a *runtimeAdapter) NewActorAddress() address.Address { - return a.ctx.NewActorAddress() -} - -// CreateActor implements Runtime. -func (a *runtimeAdapter) CreateActor(codeID cid.Cid, addr address.Address) { - a.ctx.CreateActor(codeID, addr) -} - -// DeleteActor implements Runtime. -func (a *runtimeAdapter) DeleteActor(beneficiary address.Address) { - a.ctx.DeleteActor(beneficiary) -} - -// SyscallsImpl implements Runtime. -func (a *runtimeAdapter) Syscalls() specsruntime.Syscalls { - return &syscalls{ - impl: a.ctx.rt.syscalls, - ctx: a.ctx.rt.context, - gasTank: a.ctx.gasTank, - pricelist: a.ctx.rt.pricelist, - head: a.ctx.rt.currentHead, - state: a.ctx.rt.stateView(), - } -} - -func (a *runtimeAdapter) TotalFilCircSupply() abi.TokenAmount { - return a.ctx.TotalFilCircSupply() -} - -// Context implements Runtime. -// Dragons: this can disappear once we have the storage abstraction -func (a *runtimeAdapter) Context() context.Context { - return a.ctx.rt.context -} - -type nullTraceSpan struct{} - -func (*nullTraceSpan) End() {} - -// StartSpan implements Runtime. 
-func (a *runtimeAdapter) StartSpan(name string) specsruntime.TraceSpan { - // Dragons: leeave empty for now, add TODO to add this into gfc - return &nullTraceSpan{} -} diff --git a/internal/pkg/vm/internal/vmcontext/syscalls.go b/internal/pkg/vm/internal/vmcontext/syscalls.go deleted file mode 100644 index c0e3b4645c..0000000000 --- a/internal/pkg/vm/internal/vmcontext/syscalls.go +++ /dev/null @@ -1,76 +0,0 @@ -package vmcontext - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - specsruntime "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/gascost" -) - -type SyscallsStateView interface { - state.AccountStateView - MinerControlAddresses(ctx context.Context, maddr address.Address) (owner, worker address.Address, err error) -} - -// Syscall implementation interface. -// These methods take the chain epoch and other context that is implicit in the runtime as explicit parameters. 
-type SyscallsImpl interface { - VerifySignature(ctx context.Context, view SyscallsStateView, signature crypto.Signature, signer address.Address, plaintext []byte) error - HashBlake2b(data []byte) [32]byte - ComputeUnsealedSectorCID(ctx context.Context, proof abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) - VerifySeal(ctx context.Context, info abi.SealVerifyInfo) error - VerifyPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) error - VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, head block.TipSetKey, view SyscallsStateView) (*specsruntime.ConsensusFault, error) -} - -type syscalls struct { - impl SyscallsImpl - ctx context.Context - gasTank *GasTracker - pricelist gascost.Pricelist - head block.TipSetKey - state SyscallsStateView -} - -var _ specsruntime.Syscalls = (*syscalls)(nil) - -func (sys syscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { - charge, err := sys.pricelist.OnVerifySignature(signature.Type, len(plaintext)) - if err != nil { - return err - } - sys.gasTank.Charge(charge, "VerifySignature") - return sys.impl.VerifySignature(sys.ctx, sys.state, signature, signer, plaintext) -} - -func (sys syscalls) HashBlake2b(data []byte) [32]byte { - sys.gasTank.Charge(sys.pricelist.OnHashing(len(data)), "HashBlake2b") - return sys.impl.HashBlake2b(data) -} - -func (sys syscalls) ComputeUnsealedSectorCID(proof abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { - sys.gasTank.Charge(sys.pricelist.OnComputeUnsealedSectorCid(proof, &pieces), "ComputeUnsealedSectorCID") - return sys.impl.ComputeUnsealedSectorCID(sys.ctx, proof, pieces) -} - -func (sys syscalls) VerifySeal(info abi.SealVerifyInfo) error { - sys.gasTank.Charge(sys.pricelist.OnVerifySeal(info), "VerifySeal") - return sys.impl.VerifySeal(sys.ctx, info) -} - -func (sys syscalls) VerifyPoSt(info abi.WindowPoStVerifyInfo) error { - sys.gasTank.Charge(sys.pricelist.OnVerifyPoSt(info), "VerifyWindowPoSt") - 
return sys.impl.VerifyPoSt(sys.ctx, info) -} - -func (sys syscalls) VerifyConsensusFault(h1, h2, extra []byte) (*specsruntime.ConsensusFault, error) { - sys.gasTank.Charge(sys.pricelist.OnVerifyConsensusFault(), "VerifyConsensusFault") - return sys.impl.VerifyConsensusFault(sys.ctx, h1, h2, extra, sys.head, sys.state) -} diff --git a/internal/pkg/vm/internal/vmcontext/testing.go b/internal/pkg/vm/internal/vmcontext/testing.go deleted file mode 100644 index 2f5db94693..0000000000 --- a/internal/pkg/vm/internal/vmcontext/testing.go +++ /dev/null @@ -1,566 +0,0 @@ -package vmcontext - -import ( - "context" - "fmt" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/dispatch" - "github.com/filecoin-project/specs-actors/actors/builtin/account" - "github.com/filecoin-project/specs-actors/actors/builtin/cron" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/reward" - "github.com/filecoin-project/specs-actors/actors/builtin/system" - "github.com/filecoin-project/specs-actors/actors/puppet" - "math/rand" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - - vtypes "github.com/filecoin-project/chain-validation/chain/types" - vdriver "github.com/filecoin-project/chain-validation/drivers" - vstate "github.com/filecoin-project/chain-validation/state" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-crypto" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - init_ 
"github.com/filecoin-project/specs-actors/actors/builtin/init" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - gfcrypto "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/gascost" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/interpreter" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/storage" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -var _ vstate.Factories = &Factories{} -var _ vstate.VMWrapper = (*ValidationVMWrapper)(nil) -var _ vstate.Applier = (*ValidationApplier)(nil) -var _ vstate.KeyManager = (*KeyManager)(nil) - -var ChainvalActors = dispatch.NewBuilder(). - Add(builtin.InitActorCodeID, &init_.Actor{}). - Add(builtin.AccountActorCodeID, &account.Actor{}). - Add(builtin.MultisigActorCodeID, &multisig.Actor{}). - Add(builtin.PaymentChannelActorCodeID, &paych.Actor{}). - Add(builtin.StoragePowerActorCodeID, &power.Actor{}). - Add(builtin.StorageMarketActorCodeID, &market.Actor{}). - Add(builtin.StorageMinerActorCodeID, &miner.Actor{}). - Add(builtin.SystemActorCodeID, &system.Actor{}). - Add(builtin.RewardActorCodeID, &reward.Actor{}). - Add(builtin.CronActorCodeID, &cron.Actor{}). - // add the puppet actor - Add(puppet.PuppetActorCodeID, &puppet.Actor{}). 
- Build() - -type Factories struct { - config *ValidationConfig -} - -func NewFactories(config *ValidationConfig) *Factories { - factory := &Factories{config} - return factory -} - -func (f *Factories) NewStateAndApplier() (vstate.VMWrapper, vstate.Applier) { - st := NewState() - return st, &ValidationApplier{state: st} -} - -func (f *Factories) NewKeyManager() vstate.KeyManager { - return newKeyManager() -} - -type fakeRandSrc struct { -} - -func (r fakeRandSrc) Randomness(_ context.Context, _ acrypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) (abi.Randomness, error) { - return abi.Randomness("sausages"), nil -} - -func (f *Factories) NewRandomnessSource() vstate.RandomnessSource { - return &fakeRandSrc{} -} - -func (f *Factories) NewValidationConfig() vstate.ValidationConfig { - return f.config -} - -// -// ValidationConfig -// - -type ValidationConfig struct { - trackGas bool - checkExitCode bool - checkReturnValue bool - checkStateRoot bool -} - -func (v ValidationConfig) ValidateGas() bool { - return v.trackGas -} - -func (v ValidationConfig) ValidateExitCode() bool { - return v.checkExitCode -} - -func (v ValidationConfig) ValidateReturnValue() bool { - return v.checkReturnValue -} - -func (v ValidationConfig) ValidateStateRoot() bool { - return v.checkStateRoot -} - -// -// VMWrapper -// - -type specialSyscallWrapper struct { - internal *vdriver.ChainValidationSyscalls -} - -func (s specialSyscallWrapper) VerifySignature(_ context.Context, _ SyscallsStateView, signature gfcrypto.Signature, signer address.Address, plaintext []byte) error { - return s.internal.VerifySignature(signature, signer, plaintext) -} - -func (s specialSyscallWrapper) HashBlake2b(data []byte) [32]byte { - return s.internal.HashBlake2b(data) -} - -func (s specialSyscallWrapper) ComputeUnsealedSectorCID(_ context.Context, proof abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { - return s.internal.ComputeUnsealedSectorCID(proof, pieces) -} - -func (s 
specialSyscallWrapper) VerifySeal(_ context.Context, info abi.SealVerifyInfo) error { - return s.internal.VerifySeal(info) -} - -func (s specialSyscallWrapper) VerifyPoSt(_ context.Context, info abi.WindowPoStVerifyInfo) error { - return s.internal.VerifyPoSt(info) -} - -func (s specialSyscallWrapper) VerifyConsensusFault(_ context.Context, h1, h2, extra []byte, _ block.TipSetKey, _ SyscallsStateView) (*runtime.ConsensusFault, error) { - return s.internal.VerifyConsensusFault(h1, h2, extra) -} - -func NewState() *ValidationVMWrapper { - bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - cst := cborutil.NewIpldStore(bs) - vmstrg := storage.NewStorage(bs) - vm := NewVM(ChainvalActors, &vmstrg, state.NewState(cst), specialSyscallWrapper{vdriver.NewChainValidationSyscalls()}) - return &ValidationVMWrapper{ - vm: &vm, - } -} - -type ValidationVMWrapper struct { - vm *VM -} - -func (w *ValidationVMWrapper) NewVM() { - return -} - -// Root implements ValidationVMWrapper. -func (w *ValidationVMWrapper) Root() cid.Cid { - root, dirty := w.vm.state.Root() - if dirty { - panic("vm state is dirty") - } - return root -} - -// Get the value at key from vm store -func (w *ValidationVMWrapper) StoreGet(key cid.Cid, out runtime.CBORUnmarshaler) error { - return w.vm.ContextStore().Get(context.Background(), key, out) -} - -// Put `value` into vm store -func (w *ValidationVMWrapper) StorePut(value runtime.CBORMarshaler) (cid.Cid, error) { - return w.vm.ContextStore().Put(context.Background(), value) -} - -// Store implements ValidationVMWrapper. -func (w *ValidationVMWrapper) Store() adt.Store { - return w.vm.ContextStore() -} - -// Actor implements ValidationVMWrapper. 
-func (w *ValidationVMWrapper) Actor(addr address.Address) (vstate.Actor, error) { - idAddr, found := w.vm.normalizeAddress(addr) - if !found { - return nil, fmt.Errorf("failed to normalize address: %s", addr) - } - - a, found, err := w.vm.state.GetActor(w.vm.context, idAddr) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("actor not found") - } - return &actorWrapper{a}, nil -} - -// CreateActor implements ValidationVMWrapper. -func (w *ValidationVMWrapper) CreateActor(code cid.Cid, addr address.Address, balance abi.TokenAmount, newState runtime.CBORMarshaler) (vstate.Actor, address.Address, error) { - idAddr := addr - if addr.Protocol() != address.ID { - // go through init to register - initActorEntry, found, err := w.vm.state.GetActor(w.vm.context, builtin.InitActorAddr) - if err != nil { - return nil, address.Undef, err - } - if !found { - return nil, address.Undef, fmt.Errorf("actor not found") - } - - // get a view into the actor state - var initState init_.State - if _, err := w.vm.store.Get(w.vm.context, initActorEntry.Head.Cid, &initState); err != nil { - return nil, address.Undef, err - } - - // add addr to inits map - idAddr, err = initState.MapAddressToNewID(w.vm.ContextStore(), addr) - if err != nil { - return nil, address.Undef, err - } - - // persist the init actor state - initHead, _, err := w.vm.store.Put(w.vm.context, &initState) - if err != nil { - return nil, address.Undef, err - } - initActorEntry.Head = enccid.NewCid(initHead) - if err := w.vm.state.SetActor(w.vm.context, builtin.InitActorAddr, initActorEntry); err != nil { - return nil, address.Undef, err - } - // persist state below - } - - // create actor on state stree - - // store newState - head, _, err := w.vm.store.Put(w.vm.context, newState) - if err != nil { - return nil, address.Undef, err - } - - // create and store actor object - a := &actor.Actor{ - Code: enccid.NewCid(code), - Head: enccid.NewCid(head), - Balance: balance, - } - if err := 
w.vm.state.SetActor(w.vm.context, idAddr, a); err != nil { - return nil, address.Undef, err - } - - if err := w.PersistChanges(); err != nil { - return nil, address.Undef, err - } - - return &actorWrapper{a}, idAddr, nil -} - -// SetActorState implements ValidationVMWrapper. -func (w *ValidationVMWrapper) SetActorState(addr address.Address, balance big.Int, state runtime.CBORMarshaler) (vstate.Actor, error) { - idAddr, ok := w.vm.normalizeAddress(addr) - if !ok { - return nil, fmt.Errorf("actor not found") - } - - a, found, err := w.vm.state.GetActor(w.vm.context, idAddr) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("actor not found") - } - // store state - head, _, err := w.vm.store.Put(w.vm.context, state) - if err != nil { - return nil, err - } - // update fields - a.Head = enccid.NewCid(head) - a.Balance = balance - - if err := w.vm.state.SetActor(w.vm.context, idAddr, a); err != nil { - return nil, err - } - - if err := w.PersistChanges(); err != nil { - return nil, err - } - - return &actorWrapper{a}, nil -} - -func (w *ValidationVMWrapper) PersistChanges() error { - if _, err := w.vm.commit(); err != nil { - return err - } - return nil -} - -// -// Applier -// - -type ValidationApplier struct { - state *ValidationVMWrapper -} - -func (a *ValidationApplier) ApplyMessage(epoch abi.ChainEpoch, msg *vtypes.Message) (vtypes.ApplyMessageResult, error) { - // Prepare message and VM. - ourMsg := a.preApplyMessage(epoch, msg) - - // Invoke. - ourreceipt, penalty, reward := a.state.vm.applyMessage(ourMsg, ourMsg.OnChainLen(), &fakeRandSrc{}) - - // Persist changes. - receipt, err := a.postApplyMessage(ourreceipt) - return vtypes.ApplyMessageResult{ - Receipt: receipt, - Penalty: penalty, - Reward: reward, - Root: a.state.Root().String(), - }, err -} - -func (a *ValidationApplier) ApplySignedMessage(epoch abi.ChainEpoch, msg *vtypes.SignedMessage) (vtypes.ApplyMessageResult, error) { - - // Prepare message and VM. 
- ourMsg := a.preApplyMessage(epoch, &msg.Message) - ourSigned := &types.SignedMessage{ - Message: *ourMsg, - Signature: msg.Signature, - } - - // Invoke. - ourreceipt, penalty, reward := a.state.vm.applyMessage(ourMsg, ourSigned.OnChainLen(), &fakeRandSrc{}) - - // Persist changes. - receipt, err := a.postApplyMessage(ourreceipt) - return vtypes.ApplyMessageResult{ - Receipt: receipt, - Penalty: penalty, - Reward: reward, - Root: a.state.Root().String(), - }, err -} - -func (a *ValidationApplier) preApplyMessage(epoch abi.ChainEpoch, msg *vtypes.Message) *types.UnsignedMessage { - // set epoch - // Note: this would have normally happened during `ApplyTipset()` - a.state.vm.currentEpoch = epoch - a.state.vm.pricelist = gascost.PricelistByEpoch(epoch) - - // map message - return toOurMessage(msg) -} - -func (a *ValidationApplier) postApplyMessage(ourreceipt message.Receipt) (vtypes.MessageReceipt, error) { - // commit and persist changes - // Note: this is not done on production for each msg - if err := a.state.PersistChanges(); err != nil { - return vtypes.MessageReceipt{}, err - } - - // map receipt - return vtypes.MessageReceipt{ - ExitCode: ourreceipt.ExitCode, - ReturnValue: ourreceipt.ReturnValue, - GasUsed: vtypes.GasUnits(ourreceipt.GasUsed), - }, nil -} - -func toOurMessage(theirs *vtypes.Message) *types.UnsignedMessage { - return &types.UnsignedMessage{ - To: theirs.To, - From: theirs.From, - CallSeqNum: theirs.CallSeqNum, - Value: theirs.Value, - Method: theirs.Method, - Params: theirs.Params, - GasPrice: theirs.GasPrice, - GasLimit: gas.Unit(theirs.GasLimit), - } - -} - -func toOurBlockMessageInfoType(theirs []vtypes.BlockMessagesInfo) []interpreter.BlockMessagesInfo { - ours := make([]interpreter.BlockMessagesInfo, len(theirs)) - for i, bm := range theirs { - ours[i].Miner = bm.Miner - for _, blsMsg := range bm.BLSMessages { - ourbls := &types.UnsignedMessage{ - To: blsMsg.To, - From: blsMsg.From, - CallSeqNum: blsMsg.CallSeqNum, - Value: blsMsg.Value, 
- Method: blsMsg.Method, - Params: blsMsg.Params, - GasPrice: blsMsg.GasPrice, - GasLimit: gas.Unit(blsMsg.GasLimit), - } - ours[i].BLSMessages = append(ours[i].BLSMessages, ourbls) - } - for _, secpMsg := range bm.SECPMessages { - oursecp := &types.SignedMessage{ - Message: types.UnsignedMessage{ - To: secpMsg.Message.To, - From: secpMsg.Message.From, - CallSeqNum: secpMsg.Message.CallSeqNum, - Value: secpMsg.Message.Value, - Method: secpMsg.Message.Method, - Params: secpMsg.Message.Params, - GasPrice: secpMsg.Message.GasPrice, - GasLimit: gas.Unit(secpMsg.Message.GasLimit), - }, - Signature: secpMsg.Signature, - } - ours[i].SECPMessages = append(ours[i].SECPMessages, oursecp) - } - } - return ours -} - -func (a *ValidationApplier) ApplyTipSetMessages(epoch abi.ChainEpoch, blocks []vtypes.BlockMessagesInfo, rnd vstate.RandomnessSource) (vtypes.ApplyTipSetResult, error) { - - ourBlkMsgs := toOurBlockMessageInfoType(blocks) - // TODO: pass through parameter when chain validation type signature is updated to propagate it - head := block.NewTipSetKey() - receipts, err := a.state.vm.ApplyTipSetMessages(ourBlkMsgs, head, epoch, rnd) - if err != nil { - return vtypes.ApplyTipSetResult{}, err - } - - theirReceipts := make([]vtypes.MessageReceipt, len(receipts)) - for i, r := range receipts { - theirReceipts[i] = vtypes.MessageReceipt{ - ExitCode: r.ExitCode, - ReturnValue: r.ReturnValue, - GasUsed: vtypes.GasUnits(r.GasUsed), - } - } - - return vtypes.ApplyTipSetResult{ - Receipts: theirReceipts, - Root: a.state.Root().String(), - }, nil -} - -// -// KeyManager -// - -type KeyManager struct { - // Private keys by address - keys map[address.Address]*gfcrypto.KeyInfo - - // Seed for deterministic secp key generation. - secpSeed int64 - // Seed for deterministic bls key generation. 
- blsSeed int64 // nolint: structcheck -} - -func newKeyManager() *KeyManager { - return &KeyManager{ - keys: make(map[address.Address]*gfcrypto.KeyInfo), - secpSeed: 0, - } -} - -func (k *KeyManager) NewSECP256k1AccountAddress() address.Address { - secpKey := k.newSecp256k1Key() - addr, err := secpKey.Address() - if err != nil { - panic(err) - } - k.keys[addr] = secpKey - return addr -} - -func (k *KeyManager) NewBLSAccountAddress() address.Address { - blsKey := k.newBLSKey() - addr, err := blsKey.Address() - if err != nil { - panic(err) - } - k.keys[addr] = blsKey - return addr -} - -func (k *KeyManager) Sign(addr address.Address, data []byte) (acrypto.Signature, error) { - ki, ok := k.keys[addr] - if !ok { - return acrypto.Signature{}, fmt.Errorf("unknown address %v", addr) - } - return gfcrypto.Sign(data, ki.PrivateKey, ki.SigType) -} - -func (k *KeyManager) newSecp256k1Key() *gfcrypto.KeyInfo { - randSrc := rand.New(rand.NewSource(k.secpSeed)) - prv, err := crypto.GenerateKeyFromSeed(randSrc) - if err != nil { - panic(err) - } - k.secpSeed++ - return &gfcrypto.KeyInfo{ - SigType: acrypto.SigTypeSecp256k1, - PrivateKey: prv, - } -} - -func (k *KeyManager) newBLSKey() *gfcrypto.KeyInfo { - // FIXME: bls needs deterministic key generation - //sk := ffi.PrivateKeyGenerate(s.blsSeed) - // s.blsSeed++ - sk := [32]byte{} - sk[0] = uint8(k.blsSeed) // hack to keep gas values and state roots determinist - k.blsSeed++ - return &gfcrypto.KeyInfo{ - SigType: acrypto.SigTypeBLS, - PrivateKey: sk[:], - } -} - -// -// Actor -// - -type actorWrapper struct { - *actor.Actor -} - -func (a *actorWrapper) Code() cid.Cid { - return a.Actor.Code.Cid -} -func (a *actorWrapper) Head() cid.Cid { - return a.Actor.Head.Cid -} -func (a *actorWrapper) CallSeqNum() uint64 { - return a.Actor.CallSeqNum -} -func (a *actorWrapper) Balance() abi.TokenAmount { - return a.Actor.Balance -} diff --git a/internal/pkg/vm/internal/vmcontext/testing_syscalls.go 
b/internal/pkg/vm/internal/vmcontext/testing_syscalls.go deleted file mode 100644 index 800446833b..0000000000 --- a/internal/pkg/vm/internal/vmcontext/testing_syscalls.go +++ /dev/null @@ -1,46 +0,0 @@ -package vmcontext - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/ipfs/go-cid" - "github.com/minio/blake2b-simd" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" -) - -type FakeSyscalls struct { -} - -func (f FakeSyscalls) VerifySignature(ctx context.Context, view SyscallsStateView, signature crypto.Signature, signer address.Address, plaintext []byte) error { - // The signer is assumed to be already resolved to a pubkey address. - return crypto.ValidateSignature(plaintext, signer, signature) -} - -func (f FakeSyscalls) HashBlake2b(data []byte) [32]byte { - return blake2b.Sum256(data) -} - -func (f FakeSyscalls) ComputeUnsealedSectorCID(ctx context.Context, proof abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { - panic("implement me") -} - -func (f FakeSyscalls) VerifySeal(ctx context.Context, info abi.SealVerifyInfo) error { - panic("implement me") -} - -func (f FakeSyscalls) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) error { - panic("implement me") -} - -func (f FakeSyscalls) VerifyPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) error { - panic("implement me") -} - -func (f FakeSyscalls) VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, head block.TipSetKey, view SyscallsStateView) (*runtime.ConsensusFault, error) { - panic("implement me") -} diff --git a/internal/pkg/vm/internal/vmcontext/validation_test.go b/internal/pkg/vm/internal/vmcontext/validation_test.go deleted file mode 100644 index 7f3e52b63e..0000000000 --- 
a/internal/pkg/vm/internal/vmcontext/validation_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package vmcontext - -import ( - "fmt" - "reflect" - "runtime" - "strings" - "testing" - - "github.com/filecoin-project/chain-validation/suites" -) - -// TestSkipper contains a list of test cases skipped by the implementation. -type TestSkipper struct { - testSkips []suites.TestCase -} - -// Skip return true if the sutire.TestCase should be skipped. -func (ts *TestSkipper) Skip(test suites.TestCase) bool { - for _, skip := range ts.testSkips { - if reflect.ValueOf(skip).Pointer() == reflect.ValueOf(test).Pointer() { - fmt.Printf("=== SKIP %v\n", runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()) - return true - } - } - return false -} - -// TestSuiteSkips contains tests we wish to skip. -var TestSuiteSkipper TestSkipper - -func init() { - // initialize the test skipper with tests being skipped - TestSuiteSkipper = TestSkipper{testSkips: []suites.TestCase{ - // None! - }} -} - -func TestChainValidationMessageSuite(t *testing.T) { - f := NewFactories(&ValidationConfig{ - trackGas: true, - checkExitCode: true, - checkReturnValue: true, - checkStateRoot: true, - }) - for _, testCase := range suites.MessageTestCases() { - if TestSuiteSkipper.Skip(testCase) { - continue - } - t.Run(caseName(testCase), func(t *testing.T) { - testCase(t, f) - }) - } -} - -func TestChainValidationTipSetSuite(t *testing.T) { - f := NewFactories(&ValidationConfig{ - trackGas: true, - checkExitCode: true, - checkReturnValue: true, - checkStateRoot: true, - }) - for _, testCase := range suites.TipSetTestCases() { - if TestSuiteSkipper.Skip(testCase) { - continue - } - t.Run(caseName(testCase), func(t *testing.T) { - testCase(t, f) - }) - } -} - -func caseName(testCase suites.TestCase) string { - fqName := runtime.FuncForPC(reflect.ValueOf(testCase).Pointer()).Name() - toks := strings.Split(fqName, ".") - return toks[len(toks)-1] -} diff --git a/internal/pkg/vm/internal/vmcontext/vmcontext.go 
b/internal/pkg/vm/internal/vmcontext/vmcontext.go deleted file mode 100644 index f519006e3e..0000000000 --- a/internal/pkg/vm/internal/vmcontext/vmcontext.go +++ /dev/null @@ -1,702 +0,0 @@ -package vmcontext - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/account" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/reward" - specsruntime "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/dispatch" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/gascost" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/interpreter" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/storage" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -var vmlog = logging.Logger("vm.context") - 
-// VM holds the state and executes messages over the state. -type VM struct { - context context.Context - actorImpls ActorImplLookup - store *storage.VMStorage - state state.Tree - syscalls SyscallsImpl - currentHead block.TipSetKey - currentEpoch abi.ChainEpoch - pricelist gascost.Pricelist -} - -// ActorImplLookup provides access to upgradeable actor code. -type ActorImplLookup interface { - GetActorImpl(code cid.Cid) (dispatch.Dispatcher, error) -} - -type minerPenaltyFIL = abi.TokenAmount - -type gasRewardFIL = abi.TokenAmount - -type internalMessage struct { - from address.Address - to address.Address - value abi.TokenAmount - method abi.MethodNum - params interface{} -} - -// NewVM creates a new runtime for executing messages. -// Dragons: change to take a root and the store, build the tree internally -func NewVM(actorImpls ActorImplLookup, store *storage.VMStorage, st state.Tree, syscalls SyscallsImpl) VM { - return VM{ - context: context.Background(), - actorImpls: actorImpls, - store: store, - state: st, - syscalls: syscalls, - // loaded during execution - // currentEpoch: .., - } -} - -// ApplyGenesisMessage forces the execution of a message in the vm actor. -// -// This method is intended to be used in the generation of the genesis block only. 
-func (vm *VM) ApplyGenesisMessage(from address.Address, to address.Address, method abi.MethodNum, value abi.TokenAmount, params interface{}, rnd crypto.RandomnessSource) (interface{}, error) { - vm.pricelist = gascost.PricelistByEpoch(vm.currentEpoch) - - // normalize from addr - var ok bool - if from, ok = vm.normalizeAddress(from); !ok { - runtime.Abort(exitcode.SysErrSenderInvalid) - } - - // build internal message - imsg := internalMessage{ - from: from, - to: to, - value: value, - method: method, - params: params, - } - - ret, err := vm.applyImplicitMessage(imsg, rnd) - if err != nil { - return ret, err - } - - // commit - if _, err := vm.commit(); err != nil { - return nil, err - } - - return ret, nil -} - -func (vm *VM) rollback(root state.Root) error { - return vm.state.Rollback(vm.context, root) -} - -func (vm *VM) checkpoint() (state.Root, error) { - root, err := vm.state.Commit(vm.context) - if err != nil { - return cid.Undef, err - } - return root, nil -} - -func (vm *VM) commit() (state.Root, error) { - // Note: the following assumes the state commits into the store, - // unless the store is flushed, the state is not persisted. - - // commit the vm state - root, err := vm.state.Commit(vm.context) - if err != nil { - return cid.Undef, err - } - // flush all blocks out of the store - if err := vm.store.Flush(); err != nil { - return cid.Undef, err - } - - return root, nil -} - -// ContextStore provides access to specs-actors adt library. -// -// This type of store is used to access some internal actor state. -func (vm *VM) ContextStore() adt.Store { - return &contextStore{context: vm.context, store: vm.store} -} - -func (vm *VM) normalizeAddress(addr address.Address) (address.Address, bool) { - // short-circuit if the address is already an ID address - if addr.Protocol() == address.ID { - return addr, true - } - - // resolve the target address via the InitActor, and attempt to load state. 
- initActorEntry, found, err := vm.state.GetActor(vm.context, builtin.InitActorAddr) - if err != nil { - panic(errors.Wrapf(err, "failed to load init actor")) - } - if !found { - panic(errors.Wrapf(err, "no init actor")) - } - - // get a view into the actor state - var state init_.State - if _, err := vm.store.Get(vm.context, initActorEntry.Head.Cid, &state); err != nil { - panic(err) - } - - idAddr, err := state.ResolveAddress(vm.ContextStore(), addr) - if err == init_.ErrAddressNotFound { - return address.Undef, false - } else if err != nil { - panic(err) - } - return idAddr, true -} - -func (vm *VM) stateView() SyscallsStateView { - // The state tree's root is not committed until the end of a tipset, so we can't use the external state view - // type for this implementation. - // Maybe we could re-work it to use a root HAMT node rather than root CID. - return &syscallsStateView{vm} -} - -// implement VMInterpreter for VM - -var _ interpreter.VMInterpreter = (*VM)(nil) - -// ApplyTipSetMessages implements interpreter.VMInterpreter -func (vm *VM) ApplyTipSetMessages(blocks []interpreter.BlockMessagesInfo, head block.TipSetKey, epoch abi.ChainEpoch, rnd crypto.RandomnessSource) ([]message.Receipt, error) { - receipts := []message.Receipt{} - - // update current tipset - vm.currentHead = head - vm.currentEpoch = epoch - vm.pricelist = gascost.PricelistByEpoch(epoch) - - // create message tracker - // Note: the same message could have been included by more than one miner - seenMsgs := make(map[cid.Cid]struct{}) - - // process messages on each block - for _, blk := range blocks { - if blk.Miner.Protocol() != address.ID { - panic("precond failure: block miner address must be an IDAddress") - } - - // initial miner penalty and gas rewards - // Note: certain msg execution failures can cause the miner to pay for the gas - minerPenaltyTotal := big.Zero() - minerGasRewardTotal := big.Zero() - - // Process BLS messages from the block - for _, m := range blk.BLSMessages { - // 
do not recompute already seen messages - mcid := msgCID(m) - if _, found := seenMsgs[mcid]; found { - continue - } - - // apply message - receipt, minerPenaltyCurr, minerGasRewardCurr := vm.applyMessage(m, m.OnChainLen(), rnd) - - // accumulate result - minerPenaltyTotal = big.Add(minerPenaltyTotal, minerPenaltyCurr) - minerGasRewardTotal = big.Add(minerGasRewardTotal, minerGasRewardCurr) - receipts = append(receipts, receipt) - - // flag msg as seen - seenMsgs[mcid] = struct{}{} - } - - // Process SECP messages from the block - for _, sm := range blk.SECPMessages { - // extract unsigned message part - m := sm.Message - - // do not recompute already seen messages - mcid := msgCID(&m) - if _, found := seenMsgs[mcid]; found { - continue - } - - // apply message - // Note: the on-chain size for SECP messages is different - receipt, minerPenaltyCurr, minerGasRewardCurr := vm.applyMessage(&m, sm.OnChainLen(), rnd) - - // accumulate result - minerPenaltyTotal = big.Add(minerPenaltyTotal, minerPenaltyCurr) - minerGasRewardTotal = big.Add(minerGasRewardTotal, minerGasRewardCurr) - receipts = append(receipts, receipt) - - // flag msg as seen - seenMsgs[mcid] = struct{}{} - } - - // Pay block reward. - // Dragons: missing final protocol design on if/how to determine the nominal power - rewardMessage := makeBlockRewardMessage(blk.Miner, minerPenaltyTotal, minerGasRewardTotal, 1) - if _, err := vm.applyImplicitMessage(rewardMessage, rnd); err != nil { - return nil, err - } - } - - // cron tick - cronMessage := makeCronTickMessage() - if _, err := vm.applyImplicitMessage(cronMessage, rnd); err != nil { - return nil, err - } - - // commit state - if _, err := vm.commit(); err != nil { - return nil, err - } - - return receipts, nil -} - -// applyImplicitMessage applies messages automatically generated by the vm itself. -// -// This messages do not consume client gas and must not fail. 
-func (vm *VM) applyImplicitMessage(imsg internalMessage, rnd crypto.RandomnessSource) (specsruntime.CBORMarshaler, error) { - // implicit messages gas is tracked separatly and not paid by the miner - gasTank := NewGasTracker(gas.SystemGasLimit) - - // the execution of the implicit messages is simpler than full external/actor-actor messages - // execution: - // 1. load from actor - // 2. increment seqnumber (only for accounts) - // 3. build new context - // 4. invoke message - - // 1. load from actor - fromActor, found, err := vm.state.GetActor(vm.context, imsg.from) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("implicit message `from` field actor not found, addr: %s", imsg.from) - } - originatorIsAccount := fromActor.Code.Equals(builtin.AccountActorCodeID) - - // Compute the originator address. Unlike real messages, implicit ones can be originated by - // singleton non-account actors. Singleton addresses are reorg-proof so ok to use here. - var originator address.Address - if originatorIsAccount { - // Load sender account state to obtain stable pubkey address. - var senderState account.State - _, err = vm.store.Get(vm.context, fromActor.Head.Cid, &senderState) - if err != nil { - panic(err) - } - originator = senderState.Address - } else if builtin.IsBuiltinActor(fromActor.Code.Cid) { - originator = imsg.from // Cannot resolve non-account actor to pubkey addresses. - } else { - panic(fmt.Sprintf("implicit message from non-account or -singleton actor code %s", fromActor.Code.Cid)) - } - - // 2. increment seq number (only for account actors). - // The account actor distinction only makes a difference for genesis state construction via messages, where - // some messages are sent from non-account actors (e.g. fund transfers from the reward actor). - if originatorIsAccount { - fromActor.IncrementSeqNum() - if err := vm.state.SetActor(vm.context, imsg.from, fromActor); err != nil { - return nil, err - } - } - - // 3. 
build context - topLevel := topLevelContext{ - originatorStableAddress: originator, - originatorCallSeq: fromActor.CallSeqNum, // Implied CallSeqNum is that of the actor before incrementing. - newActorAddressCount: 0, - } - - ctx := newInvocationContext(vm, &topLevel, imsg, fromActor, &gasTank, rnd) - - // 4. invoke message - ret, code := ctx.invoke() - if code.IsError() { - return nil, fmt.Errorf("invalid exit code %d during implicit message execution: from %s, to %s, method %d, value %s, params %v", - code, imsg.from, imsg.to, imsg.method, imsg.value, imsg.params) - } - return ret.inner, nil -} - -// applyMessage applies the message to the current state. -func (vm *VM) applyMessage(msg *types.UnsignedMessage, onChainMsgSize int, rnd crypto.RandomnessSource) (message.Receipt, minerPenaltyFIL, gasRewardFIL) { - // This method does not actually execute the message itself, - // but rather deals with the pre/post processing of a message. - // (see: `invocationContext.invoke()` for the dispatch and execution) - - // initiate gas tracking - gasTank := NewGasTracker(msg.GasLimit) - - // pre-send - // 1. charge for message existence - // 2. load sender actor - // 3. check message seq number - // 4. check if _sender_ has enough funds - // 5. increment message seq number - // 6. withheld maximum gas from _sender_ - // 7. checkpoint state - - // 1. charge for bytes used in chain - msgGasCost := vm.pricelist.OnChainMessage(onChainMsgSize) - ok := gasTank.TryCharge(msgGasCost) - if !ok { - // Invalid message; insufficient gas limit to pay for the on-chain message size. - // Note: the miner needs to pay the full msg cost, not what might have been partially consumed - return message.Failure(exitcode.SysErrOutOfGas, gas.Zero), msgGasCost.ToTokens(msg.GasPrice), big.Zero() - } - - // 2. 
load actor from global state - if msg.From, ok = vm.normalizeAddress(msg.From); !ok { - return message.Failure(exitcode.SysErrSenderInvalid, gas.Zero), gasTank.GasConsumed().ToTokens(msg.GasPrice), big.Zero() - } - - fromActor, found, err := vm.state.GetActor(vm.context, msg.From) - if err != nil { - panic(err) - } - if !found { - // Execution error; sender does not exist at time of message execution. - return message.Failure(exitcode.SysErrSenderInvalid, gas.Zero), gasTank.GasConsumed().ToTokens(msg.GasPrice), big.Zero() - } - - if !fromActor.Code.Equals(builtin.AccountActorCodeID) { - // Execution error; sender is not an account. - return message.Failure(exitcode.SysErrSenderInvalid, gas.Zero), gasTank.gasConsumed.ToTokens(msg.GasPrice), big.Zero() - } - - // 3. make sure this is the right message order for fromActor - if msg.CallSeqNum != fromActor.CallSeqNum { - // Execution error; invalid seq number. - return message.Failure(exitcode.SysErrSenderStateInvalid, gas.Zero), gasTank.GasConsumed().ToTokens(msg.GasPrice), big.Zero() - } - - // 4. Check sender balance (gas + value being sent) - gasLimitCost := msg.GasLimit.ToTokens(msg.GasPrice) - totalCost := big.Add(msg.Value, gasLimitCost) - if fromActor.Balance.LessThan(totalCost) { - // Execution error; sender does not have sufficient funds to pay for the gas limit. - return message.Failure(exitcode.SysErrSenderStateInvalid, gas.Zero), gasTank.GasConsumed().ToTokens(msg.GasPrice), big.Zero() - } - - // 5. Increment sender CallSeqNum - fromActor.IncrementSeqNum() - // update actor - if err := vm.state.SetActor(vm.context, msg.From, fromActor); err != nil { - panic(err) - } - - // 6. 
Deduct gas limit funds from sender first - // Note: this should always succeed, due to the sender balance check above - // Note: after this point, we need to return this funds back before exiting - if !gasLimitCost.Nil() && !gasLimitCost.IsZero() { - vm.transfer(msg.From, builtin.RewardActorAddr, gasLimitCost) - } - - // reload from actor - // Note: balance might have changed - fromActor, found, err = vm.state.GetActor(vm.context, msg.From) - if err != nil { - panic(err) - } - if !found { - panic("unreachable: actor cannot possibly not exist") - } - - // Load sender account state to obtain stable pubkey address. - var senderState account.State - _, err = vm.store.Get(vm.context, fromActor.Head.Cid, &senderState) - if err != nil { - panic(err) - } - - // 7. checkpoint state - // Even if the message fails, the following accumulated changes will be applied: - // - CallSeqNumber increment - // - sender balance withheld - priorRoot, err := vm.checkpoint() - if err != nil { - panic(err) - } - - // send - // 1. build internal message - // 2. build invocation context - // 3. process the msg - - topLevel := topLevelContext{ - originatorStableAddress: senderState.Address, - originatorCallSeq: msg.CallSeqNum, - newActorAddressCount: 0, - } - - // 1. build internal msg - imsg := internalMessage{ - from: msg.From, - to: msg.To, - value: msg.Value, - method: msg.Method, - params: msg.Params, - } - - // 2. build invocation context - ctx := newInvocationContext(vm, &topLevel, imsg, fromActor, &gasTank, rnd) - - // 3. invoke - ret, code := ctx.invoke() - - // build receipt - receipt := message.Receipt{ - ExitCode: code, - } - // encode value - receipt.ReturnValue, err = ret.ToCbor() - if err != nil { - // failed to encode object returned by actor - receipt.ReturnValue = []byte{} - receipt.ExitCode = exitcode.SysErrorIllegalActor - } - - // post-send - // 1. charge gas for putting the return value on the chain - // 2. settle gas money around (unused_gas -> sender) - // 3. success! 
- - // 1. charge for the space used by the return value - // Note: the GasUsed in the message receipt does not - ok = gasTank.TryCharge(vm.pricelist.OnChainReturnValue(&receipt)) - if !ok { - // Insufficient gas remaining to cover the on-chain return value; proceed as in the case - // of method execution failure. - receipt.ExitCode = exitcode.SysErrOutOfGas - receipt.ReturnValue = []byte{} - } - - // Roll back all state if the receipt's exit code is not ok. - // This is required in addition to rollback within the invocation context since top level messages can fail for - // more reasons than internal ones. Invocation context still needs its own rollback so actors can recover and - // proceed from a nested call failure. - if receipt.ExitCode != exitcode.Ok { - if err := vm.rollback(priorRoot); err != nil { - panic(err) - } - } - - // 2. settle gas money around (unused_gas -> sender) - receipt.GasUsed = gasTank.GasConsumed() - refundGas := msg.GasLimit - receipt.GasUsed - amount := refundGas.ToTokens(msg.GasPrice) - if !amount.Nil() && !amount.IsZero() { - vm.transfer(builtin.RewardActorAddr, msg.From, refundGas.ToTokens(msg.GasPrice)) - } - - // 3. Success! - return receipt, big.Zero(), gasTank.GasConsumed().ToTokens(msg.GasPrice) -} - -// transfer debits money from one account and credits it to another. -// avoid calling this method with a zero amount else it will perform unnecessary actor loading. -// -// WARNING: this method will panic if the the amount is negative, accounts dont exist, or have inssuficient funds. -// -// Note: this is not idiomatic, it follows the Spec expectations for this method. 
-func (vm *VM) transfer(debitFrom address.Address, creditTo address.Address, amount abi.TokenAmount) (*actor.Actor, *actor.Actor) { - // allow only for positive amounts - if amount.LessThan(abi.NewTokenAmount(0)) { - panic("unreachable: negative funds transfer not allowed") - } - - ctx := context.Background() - - // retrieve debit account - fromActor, found, err := vm.state.GetActor(ctx, debitFrom) - if err != nil { - panic(err) - } - if !found { - panic(fmt.Errorf("unreachable: debit account not found. %s", err)) - } - - // check that account has enough balance for transfer - if fromActor.Balance.LessThan(amount) { - panic("unreachable: insufficient balance on debit account") - } - - // debit funds - fromActor.Balance = big.Sub(fromActor.Balance, amount) - if err := vm.state.SetActor(ctx, debitFrom, fromActor); err != nil { - panic(err) - } - - // retrieve credit account - toActor, found, err := vm.state.GetActor(ctx, creditTo) - if err != nil { - panic(err) - } - if !found { - panic(fmt.Errorf("unreachable: credit account not found. %s", err)) - } - - // credit funds - toActor.Balance = big.Add(toActor.Balance, amount) - if err := vm.state.SetActor(ctx, creditTo, toActor); err != nil { - panic(err) - } - return toActor, fromActor -} - -func (vm *VM) getActorImpl(code cid.Cid) dispatch.Dispatcher { - actorImpl, err := vm.actorImpls.GetActorImpl(code) - if err != nil { - runtime.Abort(exitcode.SysErrInvalidReceiver) - } - return actorImpl -} - -// -// implement runtime.Runtime for VM -// - -var _ runtime.Runtime = (*VM)(nil) - -// CurrentEpoch implements runtime.Runtime. -func (vm *VM) CurrentEpoch() abi.ChainEpoch { - return vm.currentEpoch -} - -// -// implement runtime.MessageInfo for internalMessage -// - -var _ specsruntime.Message = (*internalMessage)(nil) - -// ValueReceived implements runtime.MessageInfo. -func (msg internalMessage) ValueReceived() abi.TokenAmount { - return msg.value -} - -// Caller implements runtime.MessageInfo. 
-func (msg internalMessage) Caller() address.Address { - return msg.from -} - -// Receiver implements runtime.MessageInfo. -func (msg internalMessage) Receiver() address.Address { - return msg.to -} - -// -// implement syscalls state view -// - -type syscallsStateView struct { - *VM -} - -func (vm *syscallsStateView) AccountSignerAddress(ctx context.Context, accountAddr address.Address) (address.Address, error) { - // Short-circuit when given a pubkey address. - if accountAddr.Protocol() == address.SECP256K1 || accountAddr.Protocol() == address.BLS { - return accountAddr, nil - } - actor, found, err := vm.state.GetActor(vm.context, accountAddr) - if err != nil { - return address.Undef, errors.Wrapf(err, "signer resolution failed to find actor %s", accountAddr) - } - if !found { - return address.Undef, fmt.Errorf("signer resolution found no such actor %s", accountAddr) - } - var state account.State - if _, err := vm.store.Get(vm.context, actor.Head.Cid, &state); err != nil { - // This error is internal, shouldn't propagate as on-chain failure - panic(fmt.Errorf("signer resolution failed to lost state for %s ", accountAddr)) - } - return state.Address, nil -} - -func (vm *syscallsStateView) MinerControlAddresses(ctx context.Context, maddr address.Address) (owner, worker address.Address, err error) { - actor, found, err := vm.state.GetActor(vm.context, maddr) - if err != nil { - return address.Undef, address.Undef, errors.Wrapf(err, "miner resolution failed to find actor %s", maddr) - } - if !found { - return address.Undef, address.Undef, fmt.Errorf("miner resolution found no such actor %s", maddr) - } - var state miner.State - if _, err := vm.store.Get(vm.context, actor.Head.Cid, &state); err != nil { - // This error is internal, shouldn't propagate as on-chain failure - panic(fmt.Errorf("signer resolution failed to lost state for %s ", maddr)) - } - return state.Info.Owner, state.Info.Worker, nil -} - -// -// utils -// - -func msgCID(msg *types.UnsignedMessage) 
cid.Cid { - cid, err := msg.Cid() - if err != nil { - panic(fmt.Sprintf("failed to compute message CID: %v; %+v", err, msg)) - } - return cid -} - -func makeBlockRewardMessage(blockMiner address.Address, penalty abi.TokenAmount, gasReward abi.TokenAmount, ticketCount int64) internalMessage { - params := &reward.AwardBlockRewardParams{ - Miner: blockMiner, - Penalty: penalty, - GasReward: gasReward, - TicketCount: ticketCount, - } - encoded, err := encoding.Encode(params) - if err != nil { - panic(fmt.Errorf("failed to encode built-in block reward. %s", err)) - } - return internalMessage{ - from: builtin.SystemActorAddr, - to: builtin.RewardActorAddr, - value: big.Zero(), - method: builtin.MethodsReward.AwardBlockReward, - params: encoded, - } -} - -func makeCronTickMessage() internalMessage { - return internalMessage{ - from: builtin.SystemActorAddr, - to: builtin.CronActorAddr, - value: big.Zero(), - method: builtin.MethodsCron.EpochTick, - params: []byte{}, - } -} diff --git a/internal/pkg/vm/state/state.go b/internal/pkg/vm/state/state.go deleted file mode 100644 index 45548b6254..0000000000 --- a/internal/pkg/vm/state/state.go +++ /dev/null @@ -1,221 +0,0 @@ -package state - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-hamt-ipld" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" -) - -// Review: can we get rid of this? 
-type Tree interface { - Root() (Root, bool) - - SetActor(ctx context.Context, key actorKey, a *actor.Actor) error - GetActor(ctx context.Context, key actorKey) (*actor.Actor, bool, error) - DeleteActor(ctx context.Context, key actorKey) error - - Rollback(ctx context.Context, root Root) error - Commit(ctx context.Context) (Root, error) - - GetAllActors(ctx context.Context) <-chan GetAllActorsResult -} - -// TreeBitWidth is the bit width of the HAMT used to store a state tree -const TreeBitWidth = 5 - -// Root is the root type of a state. -// -// Any and all state can be identified by this. -// -// Note: it might not be possible to locally reconstruct the entire state if the some parts are missing. -type Root = cid.Cid - -type actorKey = address.Address - -// State is the VM state manager. -type State struct { - store cbor.IpldStore - dirty bool - root Root // The last committed root. - rootNode *hamt.Node // The current (not necessarily committed) root node. -} - -// NewState creates a new VM state. -func NewState(store cbor.IpldStore) *State { - st := newState(store, cid.Undef, hamt.NewNode(store, hamt.UseTreeBitWidth(TreeBitWidth))) - st.dirty = true - return st -} - -// LoadState creates a new VMStorage. -func LoadState(ctx context.Context, store cbor.IpldStore, root Root) (*State, error) { - rootNode, err := hamt.LoadNode(ctx, store, root, hamt.UseTreeBitWidth(TreeBitWidth)) - - if err != nil { - return nil, errors.Wrapf(err, "failed to load node for %s", root) - } - - return newState(store, root, rootNode), nil -} - -func newState(store cbor.IpldStore, root Root, rootNode *hamt.Node) *State { - return &State{ - store: store, - dirty: false, - root: root, - rootNode: rootNode, - } -} - -// GetActor retrieves an actor by their key. -// If the actor is not found it will return false and no error. -// If there are any IO or decoding errors, it will return false and the error. 
-func (st *State) GetActor(ctx context.Context, key actorKey) (*actor.Actor, bool, error) { - actorBytes, err := st.rootNode.FindRaw(ctx, string(key.Bytes())) - if err == hamt.ErrNotFound { - return nil, false, nil - } - if err != nil { - return nil, false, err - } - - var act actor.Actor - err = encoding.Decode(actorBytes, &act) - if err != nil { - return nil, false, err - } - - return &act, true, nil -} - -// SetActor sets the the actor to the given value whether it previously existed or not. -// -// This method will not check if the actor previuously existed, it will blindly overwrite it. -func (st *State) SetActor(ctx context.Context, key actorKey, a *actor.Actor) error { - actBytes, err := encoding.Encode(a) - if err != nil { - return err - } - if err := st.rootNode.SetRaw(ctx, string(key.Bytes()), actBytes); err != nil { - return errors.Wrap(err, "setting actor in state tree failed") - } - st.dirty = true - return nil -} - -// DeleteActor remove the actor from the storage. -// -// This method will NOT return an error if the actor was not found. -// This behaviour is based on a principle that some store implementations might not be able to determine -// whether something exists before deleting it. -func (st *State) DeleteActor(ctx context.Context, key actorKey) error { - err := st.rootNode.Delete(ctx, string(key.Bytes())) - st.dirty = true - if err == hamt.ErrNotFound { - return nil - } - return err -} - -// Commit will flush the state tree into the backing store. -// The new root is returned. -func (st *State) Commit(ctx context.Context) (Root, error) { - if err := st.rootNode.Flush(ctx); err != nil { - return cid.Undef, err - } - - root, err := st.store.Put(ctx, st.rootNode) - if err != nil { - return cid.Undef, err - } - - st.root = root - st.dirty = false - return root, nil -} - -// Rollback resets the root to a provided value. 
-func (st *State) Rollback(ctx context.Context, root Root) error { - // load the original root node again - rootNode, err := hamt.LoadNode(ctx, st.store, root, hamt.UseTreeBitWidth(TreeBitWidth)) - if err != nil { - return errors.Wrapf(err, "failed to load node for %s", root) - } - - // reset the root node - st.rootNode = rootNode - st.dirty = false - return nil -} - -// Root returns the last committed root of the tree and whether any writes have since occurred. -func (st *State) Root() (Root, bool) { - return st.root, st.dirty -} - -// GetAllActorsResult is the struct returned via a channel by the GetAllActors -// method. This struct contains only an address string and the actor itself. -type GetAllActorsResult struct { - Key actorKey - Actor *actor.Actor - Error error -} - -// GetAllActors returns a channel which provides all actors in the StateTree. -func (st *State) GetAllActors(ctx context.Context) <-chan GetAllActorsResult { - out := make(chan GetAllActorsResult) - go func() { - defer close(out) - st.getActorsFromPointers(ctx, out, st.rootNode.Pointers) - }() - return out -} - -// NOTE: This extracts actors from pointers recursively. Maybe we shouldn't recurse here. 
-func (st *State) getActorsFromPointers(ctx context.Context, out chan<- GetAllActorsResult, ps []*hamt.Pointer) { - for _, p := range ps { - for _, kv := range p.KVs { - var a actor.Actor - - if err := encoding.Decode(kv.Value.Raw, &a); err != nil { - fmt.Printf("bad raw bytes: %x\n", kv.Value.Raw) - panic(err) - } - - select { - case <-ctx.Done(): - out <- GetAllActorsResult{ - Error: ctx.Err(), - } - return - default: - addr, err := address.NewFromBytes(kv.Key) - if err != nil { - fmt.Printf("bad address key bytes: %x\n", kv.Value.Raw) - panic(err) - } - out <- GetAllActorsResult{ - Key: addr, - Actor: &a, - } - } - } - if p.Link.Defined() { - n, err := hamt.LoadNode(ctx, st.store, p.Link, hamt.UseTreeBitWidth(TreeBitWidth)) - // Even if we hit an error and can't follow this link, we should - // keep traversing its siblings. - if err != nil { - continue - } - st.getActorsFromPointers(ctx, out, n.Pointers) - } - } -} diff --git a/internal/pkg/vm/state/state_test.go b/internal/pkg/vm/state/state_test.go deleted file mode 100644 index 661c0d9a5c..0000000000 --- a/internal/pkg/vm/state/state_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package state - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/ipfs/go-cid" - bstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" -) 
- -func TestStatePutGet(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - - bs := bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()) - cst := cborutil.NewIpldStore(bs) - tree := NewState(cst) - - act1 := actor.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(0), cid.Undef) - act1.IncrementSeqNum() - act2 := actor.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(0), cid.Undef) - act2.IncrementSeqNum() - act2.IncrementSeqNum() - - addrGetter := vmaddr.NewForTestGetter() - addr1 := addrGetter() - addr2 := addrGetter() - - assert.NoError(t, tree.SetActor(ctx, addr1, act1)) - assert.NoError(t, tree.SetActor(ctx, addr2, act2)) - - act1out, found, err := tree.GetActor(ctx, addr1) - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, act1, act1out) - act2out, found, err := tree.GetActor(ctx, addr2) - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, act2, act2out) - - // now test it persists across recreation of tree - tcid, err := tree.Commit(ctx) - assert.NoError(t, err) - - tree2, err := LoadState(ctx, cst, tcid) - assert.NoError(t, err) - - act1out2, found, err := tree2.GetActor(ctx, addr1) - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, act1, act1out2) - act2out2, found, err := tree2.GetActor(ctx, addr2) - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, act2, act2out2) -} - -func TestStateErrors(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - bs := bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()) - cst := cborutil.NewIpldStore(bs) - tree := NewState(cst) - - a, found, err := tree.GetActor(ctx, vmaddr.NewForTestGetter()()) - assert.Nil(t, a) - assert.False(t, found) - assert.NoError(t, err) - - c, err := constants.DefaultCidBuilder.Sum([]byte("cats")) - assert.NoError(t, err) - - tr2, err := LoadState(ctx, cst, c) - assert.Error(t, err) - assert.Nil(t, tr2) -} -func TestGetAllActors(t *testing.T) { - tf.UnitTest(t) - - ctx := 
context.Background() - bs := bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()) - cst := cborutil.NewIpldStore(bs) - tree := NewState(cst) - addr := vmaddr.NewForTestGetter()() - - actor := actor.Actor{Code: e.NewCid(builtin.AccountActorCodeID), CallSeqNum: 1234, Balance: abi.NewTokenAmount(123)} - err := tree.SetActor(ctx, addr, &actor) - assert.NoError(t, err) - _, err = tree.Commit(ctx) - require.NoError(t, err) - - results := tree.GetAllActors(ctx) - - for result := range results { - assert.Equal(t, addr, result.Key) - assert.Equal(t, actor.Code, result.Actor.Code) - assert.Equal(t, actor.CallSeqNum, result.Actor.CallSeqNum) - assert.Equal(t, actor.Balance, result.Actor.Balance) - } -} - -func TestStateTreeConsistency(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - bs := bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()) - cst := cborutil.NewIpldStore(bs) - tree := NewState(cst) - - var addrs []address.Address - for i := 100; i < 150; i++ { - a, err := address.NewIDAddress(uint64(i)) - if err != nil { - t.Fatal(err) - } - - addrs = append(addrs, a) - } - - randomCid, err := cid.Decode("bafy2bzacecu7n7wbtogznrtuuvf73dsz7wasgyneqasksdblxupnyovmtwxxu") - if err != nil { - t.Fatal(err) - } - - for i, a := range addrs { - if err := tree.SetActor(ctx, a, &actor.Actor{ - Code: e.NewCid(randomCid), - Head: e.NewCid(randomCid), - Balance: abi.NewTokenAmount(int64(10000 + i)), - CallSeqNum: uint64(1000 - i), - }); err != nil { - t.Fatal(err) - } - } - - root, err := tree.Commit(ctx) - if err != nil { - t.Fatal(err) - } - if root.String() != "bafy2bzaceadyjnrv3sbjvowfl3jr4pdn5p2bf3exjjie2f3shg4oy5sub7h34" { - t.Fatalf("State Tree Mismatch. 
Expected: bafy2bzaceadyjnrv3sbjvowfl3jr4pdn5p2bf3exjjie2f3shg4oy5sub7h34 Actual: %s", root.String()) - } - -} diff --git a/internal/pkg/vm/state/testing.go b/internal/pkg/vm/state/testing.go deleted file mode 100644 index f3d9171f13..0000000000 --- a/internal/pkg/vm/state/testing.go +++ /dev/null @@ -1,54 +0,0 @@ -package state - -import ( - "context" - "fmt" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" -) - -// NewFromString sets a state tree based on an int. -// -// TODO: we could avoid this if write a test cborStore that can map test cids to test states. -func NewFromString(t *testing.T, s string, store cbor.IpldStore) *State { - tree := NewState(store) - strAddr, err := address.NewSecp256k1Address([]byte(s)) - fmt.Printf("strAddr: %s\n", strAddr) - require.NoError(t, err) - err = tree.SetActor(context.Background(), strAddr, &actor.Actor{}) - require.NoError(t, err) - return tree -} - -// MustCommit flushes the StateTree or panics if it can't. -func MustCommit(st State) cid.Cid { - cid, err := st.Commit(context.Background()) - if err != nil { - panic(err) - } - return cid -} - -// MustGetActor gets the actor or panics if it can't. -func MustGetActor(st State, a address.Address) (*actor.Actor, bool) { - actor, found, err := st.GetActor(context.Background(), a) - if err != nil { - panic(err) - } - return actor, found -} - -// MustSetActor sets the actor or panics if it can't. 
-func MustSetActor(st State, address address.Address, actor *actor.Actor) cid.Cid { - err := st.SetActor(context.Background(), address, actor) - if err != nil { - panic(err) - } - return MustCommit(st) -} diff --git a/internal/pkg/vm/testing.go b/internal/pkg/vm/testing.go deleted file mode 100644 index 664694be3c..0000000000 --- a/internal/pkg/vm/testing.go +++ /dev/null @@ -1,74 +0,0 @@ -package vm - -import ( - "bytes" - - specsruntime "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/vmcontext" -) - -type FakeSyscalls = vmcontext.FakeSyscalls - -// TestStorage is a fake storage used for testing. -type TestStorage struct { - state interface{} -} - -// NewTestStorage returns a new "TestStorage" -func NewTestStorage(state interface{}) *TestStorage { - return &TestStorage{ - state: state, - } -} - -var _ specsruntime.Store = (*TestStorage)(nil) - -// Put implements runtime.Store. -func (ts *TestStorage) Put(v specsruntime.CBORMarshaler) cid.Cid { - ts.state = v - if cm, ok := v.(cbg.CBORMarshaler); ok { - buf := new(bytes.Buffer) - err := cm.MarshalCBOR(buf) - if err == nil { - return cid.NewCidV1(cid.Raw, buf.Bytes()) - } - } - raw, err := encoding.Encode(v) - if err != nil { - panic("failed to encode") - } - return cid.NewCidV1(cid.Raw, raw) -} - -// Get implements runtime.Store. -func (ts *TestStorage) Get(cid cid.Cid, obj specsruntime.CBORUnmarshaler) bool { - node, err := cbor.WrapObject(ts.state, constants.DefaultHashFunction, -1) - if err != nil { - return false - } - - err = encoding.Decode(node.RawData(), obj) - if err != nil { - return false - } - - return true -} - -// CidOf returns the cid of the object. 
-func (ts *TestStorage) CidOf(obj interface{}) cid.Cid { - if obj == nil { - return cid.Undef - } - raw, err := encoding.Encode(obj) - if err != nil { - panic("failed to encode") - } - return cid.NewCidV1(cid.Raw, raw) -} diff --git a/internal/pkg/vm/testing_messages.go b/internal/pkg/vm/testing_messages.go deleted file mode 100644 index 906d608c8b..0000000000 --- a/internal/pkg/vm/testing_messages.go +++ /dev/null @@ -1,101 +0,0 @@ -package vm - -import ( - "context" - "fmt" - "testing" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas" -) - -// MessageMaker creates unique, signed messages for use in tests. -type MessageMaker struct { - DefaultGasPrice types.AttoFIL - DefaultGasUnits gas.Unit - - signer *types.MockSigner - seq uint - t *testing.T -} - -// NewMessageMaker creates a new message maker with a set of signing keys. -func NewMessageMaker(t *testing.T, keys []crypto.KeyInfo) *MessageMaker { - addresses := make([]address.Address, len(keys)) - signer := types.NewMockSigner(keys) - - for i, key := range keys { - addr, _ := key.Address() - addresses[i] = addr - } - - return &MessageMaker{types.ZeroAttoFIL, gas.Unit(0), &signer, 0, t} -} - -// Addresses returns the addresses for which this maker can sign messages. -func (mm *MessageMaker) Addresses() []address.Address { - return mm.signer.Addresses -} - -// Signer returns the signer with which this maker signs messages. -func (mm *MessageMaker) Signer() *types.MockSigner { - return mm.signer -} - -// NewUnsignedMessage creates a new message. 
-func (mm *MessageMaker) NewUnsignedMessage(from address.Address, nonce uint64) *types.UnsignedMessage { - seq := mm.seq - mm.seq++ - to, err := address.NewSecp256k1Address([]byte("destination")) - require.NoError(mm.t, err) - return types.NewMeteredMessage( - from, - to, - nonce, - types.ZeroAttoFIL, - abi.MethodNum(9000+seq), - []byte("params"), - mm.DefaultGasPrice, - mm.DefaultGasUnits) -} - -// NewSignedMessage creates a new signed message. -func (mm *MessageMaker) NewSignedMessage(from address.Address, nonce uint64) *types.SignedMessage { - msg := mm.NewUnsignedMessage(from, nonce) - signed, err := types.NewSignedMessage(context.TODO(), *msg, mm.signer) - require.NoError(mm.t, err) - return signed -} - -// EmptyReceipts returns a slice of n empty receipts. -func EmptyReceipts(n int) []*MessageReceipt { - out := make([]*MessageReceipt, n) - for i := 0; i < n; i++ { - out[i] = &MessageReceipt{} - } - return out -} - -// ReceiptMaker generates unique receipts -type ReceiptMaker struct { - seq uint -} - -// NewReceiptMaker creates a new receipt maker -func NewReceiptMaker() *ReceiptMaker { - return &ReceiptMaker{0} -} - -// NewReceipt creates a new distinct receipt. 
-func (rm *ReceiptMaker) NewReceipt() MessageReceipt { - seq := rm.seq - rm.seq++ - return MessageReceipt{ - ReturnValue: []byte(fmt.Sprintf("%d", seq)), - } -} diff --git a/internal/pkg/vm/vm.go b/internal/pkg/vm/vm.go deleted file mode 100644 index 282c4b50be..0000000000 --- a/internal/pkg/vm/vm.go +++ /dev/null @@ -1,50 +0,0 @@ -package vm - -import ( - blockstore "github.com/ipfs/go-ipfs-blockstore" - - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor/builtin" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/dispatch" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/interpreter" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/message" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/storage" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/vmcontext" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" -) - -// Re-exports - -// Interpreter is the VM. -type Interpreter = interpreter.VMInterpreter - -// Storage is the raw storage for the VM. -type Storage = storage.VMStorage - -type SyscallsImpl = vmcontext.SyscallsImpl -type SyscallsStateView = vmcontext.SyscallsStateView - -// BlockMessagesInfo contains messages for one block in a tipset. -type BlockMessagesInfo = interpreter.BlockMessagesInfo - -// MessageReceipt is what is returned by executing a message on the vm. -type MessageReceipt = message.Receipt - -// NewVM creates a new VM interpreter. -func NewVM(st state.Tree, store *storage.VMStorage, syscalls SyscallsImpl) Interpreter { - vm := vmcontext.NewVM(builtin.DefaultActors, store, st, syscalls) - return &vm -} - -// NewStorage creates a new Storage for the VM. -func NewStorage(bs blockstore.Blockstore) Storage { - return storage.NewStorage(bs) -} - -// DefaultActors is a code loader with the built-in actors that come with the system. 
-var DefaultActors = builtin.DefaultActors - -// ActorCodeLoader allows yo to load an actor's code based on its id an epoch. -type ActorCodeLoader = dispatch.CodeLoader - -// ActorMethodSignature wraps a specific method and allows you to encode/decodes input/output bytes into concrete types. -type ActorMethodSignature = dispatch.MethodSignature diff --git a/internal/pkg/vmsupport/syscalls.go b/internal/pkg/vmsupport/syscalls.go deleted file mode 100644 index d1580e97af..0000000000 --- a/internal/pkg/vmsupport/syscalls.go +++ /dev/null @@ -1,80 +0,0 @@ -package vmsupport - -import ( - "context" - "errors" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/ipfs/go-cid" - "github.com/minio/blake2b-simd" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/slashing" - "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -type faultChecker interface { - VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, head block.TipSetKey, view slashing.FaultStateView) (*runtime.ConsensusFault, error) -} - -// Syscalls contains the concrete implementation of VM system calls, including connection to -// proof verification and blockchain inspection. -// Errors returned by these methods are intended to be returned to the actor code to respond to: they must be -// entirely deterministic and repeatable by other implementations. -// Any non-deterministic error will instead trigger a panic. -// TODO: determine a more robust mechanism for distinguishing transient runtime failures from deterministic errors -// in VM and supporting code. 
https://github.com/filecoin-project/go-filecoin/issues/3844 -type Syscalls struct { - faultChecker faultChecker - verifier ffiwrapper.Verifier -} - -func NewSyscalls(faultChecker faultChecker, verifier ffiwrapper.Verifier) *Syscalls { - return &Syscalls{ - faultChecker: faultChecker, - verifier: verifier, - } -} - -func (s *Syscalls) VerifySignature(ctx context.Context, view vm.SyscallsStateView, signature crypto.Signature, signer address.Address, plaintext []byte) error { - return state.NewSignatureValidator(view).ValidateSignature(ctx, plaintext, signer, signature) -} - -func (s *Syscalls) HashBlake2b(data []byte) [32]byte { - return blake2b.Sum256(data) -} - -func (s *Syscalls) ComputeUnsealedSectorCID(_ context.Context, proof abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { - return ffiwrapper.GenerateUnsealedCID(proof, pieces) -} - -func (s *Syscalls) VerifySeal(_ context.Context, info abi.SealVerifyInfo) error { - ok, err := s.verifier.VerifySeal(info) - if err != nil { - return err - } else if !ok { - return fmt.Errorf("seal invalid") - } - return nil -} - -func (s *Syscalls) VerifyPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) error { - ok, err := s.verifier.VerifyWindowPoSt(ctx, info) - if err != nil { - return err - } - if !ok { - return errors.New("window PoSt verification failed") - } - return nil -} - -func (s *Syscalls) VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, head block.TipSetKey, view vm.SyscallsStateView) (*runtime.ConsensusFault, error) { - return s.faultChecker.VerifyConsensusFault(ctx, h1, h2, extra, head, view) -} diff --git a/internal/pkg/vmsupport/util.go b/internal/pkg/vmsupport/util.go deleted file mode 100644 index e5276adcb1..0000000000 --- a/internal/pkg/vmsupport/util.go +++ /dev/null @@ -1,18 +0,0 @@ -package vmsupport - -import ( - "context" - "fmt" - - "github.com/filecoin-project/specs-actors/actors/runtime" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/slashing" -) - -type NilFaultChecker struct { -} - -func (n *NilFaultChecker) VerifyConsensusFault(_ context.Context, _, _, _ []byte, _ block.TipSetKey, _ slashing.FaultStateView) (*runtime.ConsensusFault, error) { - return nil, fmt.Errorf("empty chain cannot have consensus fault") -} diff --git a/internal/pkg/wallet/backend.go b/internal/pkg/wallet/backend.go deleted file mode 100644 index 58928f48f5..0000000000 --- a/internal/pkg/wallet/backend.go +++ /dev/null @@ -1,33 +0,0 @@ -package wallet - -import ( - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" -) - -// Backend is the interface to represent different storage backends -// that can contain many addresses. -type Backend interface { - // Addresses returns a list of all accounts currently stored in this backend. - Addresses() []address.Address - - // Contains returns true if this backend stores the passed in address. - HasAddress(addr address.Address) bool - - // Sign cryptographically signs data with the private key associated with an address. - SignBytes(data []byte, addr address.Address) (crypto.Signature, error) - - // GetKeyInfo will return the keyinfo associated with address `addr` - // iff backend contains the addr. - GetKeyInfo(addr address.Address) (*crypto.KeyInfo, error) -} - -// Importer is a specialization of a wallet backend that can import -// new keys into its permanent storage. Disk backed wallets can do this, -// hardware wallets generally cannot. 
-type Importer interface { - // ImportKey imports the key described by the given keyinfo - // into the backend - ImportKey(ki *crypto.KeyInfo) error -} diff --git a/internal/pkg/wallet/dsbackend.go b/internal/pkg/wallet/dsbackend.go deleted file mode 100644 index e9565fa309..0000000000 --- a/internal/pkg/wallet/dsbackend.go +++ /dev/null @@ -1,177 +0,0 @@ -package wallet - -import ( - "crypto/rand" - "reflect" - "strings" - "sync" - - "github.com/filecoin-project/go-address" - ds "github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-datastore/query" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" -) - -// DSBackendType is the reflect type of the DSBackend. -var DSBackendType = reflect.TypeOf(&DSBackend{}) - -// DSBackend is a wallet backend implementation for storing addresses in a datastore. -type DSBackend struct { - lk sync.RWMutex - - // TODO: use a better interface that supports time locks, encryption, etc. - ds repo.Datastore - - // TODO: proper cache - cache map[address.Address]struct{} -} - -var _ Backend = (*DSBackend)(nil) - -// NewDSBackend constructs a new backend using the passed in datastore. 
-func NewDSBackend(ds repo.Datastore) (*DSBackend, error) { - result, err := ds.Query(dsq.Query{ - KeysOnly: true, - }) - if err != nil { - return nil, errors.Wrap(err, "failed to query datastore") - } - - list, err := result.Rest() - if err != nil { - return nil, errors.Wrap(err, "failed to read query results") - } - - cache := make(map[address.Address]struct{}) - for _, el := range list { - parsedAddr, err := address.NewFromString(strings.Trim(el.Key, "/")) - if err != nil { - return nil, errors.Wrapf(err, "trying to restore invalid address: %s", el.Key) - } - cache[parsedAddr] = struct{}{} - } - - return &DSBackend{ - ds: ds, - cache: cache, - }, nil -} - -// ImportKey loads the address in `ai` and KeyInfo `ki` into the backend -func (backend *DSBackend) ImportKey(ki *crypto.KeyInfo) error { - return backend.putKeyInfo(ki) -} - -// Addresses returns a list of all addresses that are stored in this backend. -func (backend *DSBackend) Addresses() []address.Address { - backend.lk.RLock() - defer backend.lk.RUnlock() - - var cpy []address.Address - for addr := range backend.cache { - cpy = append(cpy, addr) - } - return cpy -} - -// HasAddress checks if the passed in address is stored in this backend. -// Safe for concurrent access. -func (backend *DSBackend) HasAddress(addr address.Address) bool { - backend.lk.RLock() - defer backend.lk.RUnlock() - - _, ok := backend.cache[addr] - return ok -} - -// NewAddress creates a new address and stores it. -// Safe for concurrent access. 
-func (backend *DSBackend) NewAddress(protocol address.Protocol) (address.Address, error) { - switch protocol { - case address.BLS: - return backend.newBLSAddress() - case address.SECP256K1: - return backend.newSecpAddress() - default: - return address.Undef, errors.Errorf("Unknown address protocol %d", protocol) - } -} - -func (backend *DSBackend) newSecpAddress() (address.Address, error) { - ki, err := crypto.NewSecpKeyFromSeed(rand.Reader) - if err != nil { - return address.Undef, err - } - - if err := backend.putKeyInfo(&ki); err != nil { - return address.Undef, err - } - return ki.Address() -} - -func (backend *DSBackend) newBLSAddress() (address.Address, error) { - ki, err := crypto.NewBLSKeyFromSeed(rand.Reader) - if err != nil { - return address.Undef, err - } - - if err := backend.putKeyInfo(&ki); err != nil { - return address.Undef, err - } - return ki.Address() -} - -func (backend *DSBackend) putKeyInfo(ki *crypto.KeyInfo) error { - a, err := ki.Address() - if err != nil { - return err - } - - backend.lk.Lock() - defer backend.lk.Unlock() - - kib, err := ki.Marshal() - if err != nil { - return err - } - - if err := backend.ds.Put(ds.NewKey(a.String()), kib); err != nil { - return errors.Wrap(err, "failed to store new address") - } - - backend.cache[a] = struct{}{} - return nil -} - -// SignBytes cryptographically signs `data` using the private key `priv`. -func (backend *DSBackend) SignBytes(data []byte, addr address.Address) (crypto.Signature, error) { - ki, err := backend.GetKeyInfo(addr) - if err != nil { - return crypto.Signature{}, err - } - return crypto.Sign(data, ki.PrivateKey, ki.SigType) -} - -// GetKeyInfo will return the private & public keys associated with address `addr` -// iff backend contains the addr. 
-func (backend *DSBackend) GetKeyInfo(addr address.Address) (*crypto.KeyInfo, error) { - if !backend.HasAddress(addr) { - return nil, errors.New("backend does not contain address") - } - - // kib is a cbor of types.KeyInfo - kib, err := backend.ds.Get(ds.NewKey(addr.String())) - if err != nil { - return nil, errors.Wrap(err, "failed to fetch private key from backend") - } - - ki := &crypto.KeyInfo{} - if err := ki.Unmarshal(kib); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal keyinfo from backend") - } - - return ki, nil -} diff --git a/internal/pkg/wallet/dsbackend_test.go b/internal/pkg/wallet/dsbackend_test.go deleted file mode 100644 index 488ddb7084..0000000000 --- a/internal/pkg/wallet/dsbackend_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package wallet - -import ( - "sync" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-datastore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -func TestDSBackendSimple(t *testing.T) { - tf.UnitTest(t) - - ds := datastore.NewMapDatastore() - defer func() { - require.NoError(t, ds.Close()) - }() - - fs, err := NewDSBackend(ds) - assert.NoError(t, err) - - t.Log("empty address list on empty datastore") - assert.Len(t, fs.Addresses(), 0) - - t.Log("can create new address") - addr, err := fs.NewAddress(address.SECP256K1) - assert.NoError(t, err) - - t.Log("address is stored") - assert.True(t, fs.HasAddress(addr)) - - t.Log("address is stored in repo, and back when loading fresh in a new backend") - fs2, err := NewDSBackend(ds) - assert.NoError(t, err) - - assert.True(t, fs2.HasAddress(addr)) -} - -func TestDSBackendKeyPairMatchAddress(t *testing.T) { - tf.UnitTest(t) - - ds := datastore.NewMapDatastore() - defer func() { - require.NoError(t, ds.Close()) - }() - - fs, err := NewDSBackend(ds) - assert.NoError(t, err) - - t.Log("can create new address") - addr, err := 
fs.NewAddress(address.SECP256K1) - assert.NoError(t, err) - - t.Log("address is stored") - assert.True(t, fs.HasAddress(addr)) - - t.Log("address references to a secret key") - ki, err := fs.GetKeyInfo(addr) - assert.NoError(t, err) - - dAddr, err := ki.Address() - assert.NoError(t, err) - - t.Log("generated address and stored address should match") - assert.Equal(t, addr, dAddr) -} - -func TestDSBackendErrorsForUnknownAddress(t *testing.T) { - tf.UnitTest(t) - - // create 2 backends - ds1 := datastore.NewMapDatastore() - defer func() { - require.NoError(t, ds1.Close()) - }() - fs1, err := NewDSBackend(ds1) - assert.NoError(t, err) - - ds2 := datastore.NewMapDatastore() - defer func() { - require.NoError(t, ds2.Close()) - }() - fs2, err := NewDSBackend(ds2) - assert.NoError(t, err) - - t.Log("can create new address in fs1") - addr, err := fs1.NewAddress(address.SECP256K1) - assert.NoError(t, err) - - t.Log("address is stored fs1") - assert.True(t, fs1.HasAddress(addr)) - - t.Log("address is not stored fs2") - assert.False(t, fs2.HasAddress(addr)) - - t.Log("address references to a secret key in fs1") - _, err = fs1.GetKeyInfo(addr) - assert.NoError(t, err) - - t.Log("address does not references to a secret key in fs2") - _, err = fs2.GetKeyInfo(addr) - assert.Error(t, err) - assert.Contains(t, "backend does not contain address", err.Error()) - -} - -func TestDSBackendParallel(t *testing.T) { - tf.UnitTest(t) - - ds := datastore.NewMapDatastore() - defer func() { - require.NoError(t, ds.Close()) - }() - - fs, err := NewDSBackend(ds) - assert.NoError(t, err) - - var wg sync.WaitGroup - count := 10 - wg.Add(count) - for i := 0; i < count; i++ { - go func() { - _, err := fs.NewAddress(address.SECP256K1) - assert.NoError(t, err) - wg.Done() - }() - } - - wg.Wait() - assert.Len(t, fs.Addresses(), 10) -} diff --git a/internal/pkg/wallet/signature_test.go b/internal/pkg/wallet/signature_test.go deleted file mode 100644 index 05dd78145a..0000000000 --- 
a/internal/pkg/wallet/signature_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// These tests check that the signature validation in go-filecoin/types -// works as expected. They are kept in the wallet package because -// these tests need to generate signatures and the wallet package owns this -// function. They cannot be kept in types because wallet imports "types" -// for the Signature and KeyInfo types. TODO: organize packages in a way -// that makes more sense, e.g. so that signature tests can be in same package -// as signature code. - -package wallet - -import ( - "testing" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-datastore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -/* Test types.ValidateSignature */ - -func requireSignerAddr(t *testing.T) (*DSBackend, address.Address) { - ds := datastore.NewMapDatastore() - fs, err := NewDSBackend(ds) - require.NoError(t, err) - - addr, err := fs.NewAddress(address.SECP256K1) - require.NoError(t, err) - return fs, addr -} - -// Signature is over the data being verified and was signed by the verifying -// address. Everything should work out ok. -func TestSignatureOk(t *testing.T) { - tf.UnitTest(t) - - fs, addr := requireSignerAddr(t) - - data := []byte("THESE BYTES WILL BE SIGNED") - sig, err := fs.SignBytes(data, addr) - require.NoError(t, err) - - assert.NoError(t, crypto.ValidateSignature(data, addr, sig)) -} - -// Signature is nil. -func TestNilSignature(t *testing.T) { - tf.UnitTest(t) - - _, addr := requireSignerAddr(t) - - data := []byte("THESE BYTES NEED A SIGNATURE") - assert.Error(t, crypto.ValidateSignature(data, addr, crypto.Signature{})) -} - -// Signature is over different data. 
-func TestDataCorrupted(t *testing.T) { - tf.UnitTest(t) - - fs, addr := requireSignerAddr(t) - - data := []byte("THESE BYTES ARE SIGNED") - sig, err := fs.SignBytes(data, addr) - require.NoError(t, err) - - corruptData := []byte("THESE BYTEZ ARE SIGNED") - - assert.Error(t, crypto.ValidateSignature(corruptData, addr, sig)) -} - -// Signature is valid for data but was signed by a different address. -func TestInvalidAddress(t *testing.T) { - tf.UnitTest(t) - - fs, addr := requireSignerAddr(t) - - data := []byte("THESE BYTES ARE SIGNED") - sig, err := fs.SignBytes(data, addr) - require.NoError(t, err) - - badAddr, err := fs.NewAddress(address.SECP256K1) - require.NoError(t, err) - - assert.Error(t, crypto.ValidateSignature(data, badAddr, sig)) -} - -// Signature is corrupted. -func TestSignatureCorrupted(t *testing.T) { - tf.UnitTest(t) - - fs, addr := requireSignerAddr(t) - - data := []byte("THESE BYTES ARE SIGNED") - sig, err := fs.SignBytes(data, addr) - require.NoError(t, err) - sig.Data[0] = sig.Data[0] ^ 0xFF // This operation ensures sig is modified - - assert.Error(t, crypto.ValidateSignature(data, addr, sig)) -} diff --git a/internal/pkg/wallet/wallet.go b/internal/pkg/wallet/wallet.go deleted file mode 100644 index f2c9e701fb..0000000000 --- a/internal/pkg/wallet/wallet.go +++ /dev/null @@ -1,193 +0,0 @@ -package wallet - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "sync" - - "github.com/filecoin-project/go-address" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" -) - -// Wallet manages the locally stored addresses. -type Wallet struct { - lk sync.Mutex - - backends map[reflect.Type][]Backend -} - -// New constructs a new wallet, that manages addresses in all the -// passed in backends. 
-func New(backends ...Backend) *Wallet { - backendsMap := make(map[reflect.Type][]Backend) - - for _, backend := range backends { - kind := reflect.TypeOf(backend) - backendsMap[kind] = append(backendsMap[kind], backend) - } - - return &Wallet{ - backends: backendsMap, - } -} - -// HasAddress checks if the given address is stored. -// Safe for concurrent access. -func (w *Wallet) HasAddress(a address.Address) bool { - _, err := w.Find(a) - return err == nil -} - -// Find searches through all backends and returns the one storing the passed -// in address. -// Safe for concurrent access. -func (w *Wallet) Find(addr address.Address) (Backend, error) { - w.lk.Lock() - defer w.lk.Unlock() - - for _, backends := range w.backends { - for _, backend := range backends { - if backend.HasAddress(addr) { - return backend, nil - } - } - } - - return nil, fmt.Errorf("wallet has no address %s", addr) -} - -// Addresses retrieves all stored addresses. -// Safe for concurrent access. -// Always sorted in the same order. -func (w *Wallet) Addresses() []address.Address { - w.lk.Lock() - defer w.lk.Unlock() - - var out []address.Address - for _, backends := range w.backends { - for _, backend := range backends { - out = append(out, backend.Addresses()...) - } - } - sort.Slice(out, func(i, j int) bool { - return bytes.Compare(out[i].Bytes(), out[j].Bytes()) < 0 - }) - - return out -} - -// Backends returns backends by their kind. -func (w *Wallet) Backends(kind reflect.Type) []Backend { - w.lk.Lock() - defer w.lk.Unlock() - - cpy := make([]Backend, len(w.backends[kind])) - copy(cpy, w.backends[kind]) - return cpy -} - -// SignBytes cryptographically signs `data` using the private key corresponding to -// address `addr` -func (w *Wallet) SignBytes(data []byte, addr address.Address) (crypto.Signature, error) { - // Check that we are storing the address to sign for. 
- backend, err := w.Find(addr) - if err != nil { - return crypto.Signature{}, errors.Wrapf(err, "could not find address: %s", addr) - } - return backend.SignBytes(data, addr) -} - -// NewAddress creates a new account address on the default wallet backend. -func NewAddress(w *Wallet, p address.Protocol) (address.Address, error) { - backends := w.Backends(DSBackendType) - if len(backends) == 0 { - return address.Undef, fmt.Errorf("missing default ds backend") - } - - backend := (backends[0]).(*DSBackend) - return backend.NewAddress(p) -} - -// GetPubKeyForAddress returns the public key in the keystore associated with -// the given address. -func (w *Wallet) GetPubKeyForAddress(addr address.Address) ([]byte, error) { - info, err := w.keyInfoForAddr(addr) - if err != nil { - return nil, err - } - - return info.PublicKey(), nil -} - -// NewKeyInfo creates a new KeyInfo struct in the wallet backend and returns it -func (w *Wallet) NewKeyInfo() (*crypto.KeyInfo, error) { - newAddr, err := NewAddress(w, address.SECP256K1) - if err != nil { - return &crypto.KeyInfo{}, err - } - - return w.keyInfoForAddr(newAddr) -} - -func (w *Wallet) keyInfoForAddr(addr address.Address) (*crypto.KeyInfo, error) { - backend, err := w.Find(addr) - if err != nil { - return &crypto.KeyInfo{}, err - } - - info, err := backend.GetKeyInfo(addr) - if err != nil { - return &crypto.KeyInfo{}, err - } - return info, nil -} - -// Import adds the given keyinfos to the wallet -func (w *Wallet) Import(kinfos ...*crypto.KeyInfo) ([]address.Address, error) { - dsb := w.Backends(DSBackendType) - if len(dsb) != 1 { - return nil, fmt.Errorf("expected exactly one datastore wallet backend") - } - - imp, ok := dsb[0].(Importer) - if !ok { - return nil, fmt.Errorf("datastore backend wallets should implement importer") - } - - var out []address.Address - for _, ki := range kinfos { - if err := imp.ImportKey(ki); err != nil { - return nil, err - } - - a, err := ki.Address() - if err != nil { - return nil, err - } - 
out = append(out, a) - } - return out, nil -} - -// Export returns the KeyInfos for the given wallet addresses -func (w *Wallet) Export(addrs []address.Address) ([]*crypto.KeyInfo, error) { - out := make([]*crypto.KeyInfo, len(addrs)) - for i, addr := range addrs { - bck, err := w.Find(addr) - if err != nil { - return nil, err - } - - ki, err := bck.GetKeyInfo(addr) - if err != nil { - return nil, err - } - out[i] = ki - } - - return out, nil -} diff --git a/internal/pkg/wallet/wallet_test.go b/internal/pkg/wallet/wallet_test.go deleted file mode 100644 index e8d67be55f..0000000000 --- a/internal/pkg/wallet/wallet_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package wallet_test - -import ( - "bytes" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-datastore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address" - "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" -) - -func TestWalletSimple(t *testing.T) { - tf.UnitTest(t) - - t.Log("create a backend") - ds := datastore.NewMapDatastore() - fs, err := wallet.NewDSBackend(ds) - assert.NoError(t, err) - - t.Log("create a wallet with a single backend") - w := wallet.New(fs) - - t.Log("check backends") - assert.Len(t, w.Backends(wallet.DSBackendType), 1) - - t.Log("create a new address in the backend") - addr, err := fs.NewAddress(address.SECP256K1) - assert.NoError(t, err) - - t.Log("test HasAddress") - assert.True(t, w.HasAddress(addr)) - - t.Log("find backend") - backend, err := w.Find(addr) - assert.NoError(t, err) - assert.Equal(t, fs, backend) - - t.Log("find unknown address") - randomAddr := vmaddr.NewForTestGetter()() - - assert.False(t, w.HasAddress(randomAddr)) - - t.Log("list all 
addresses") - list := w.Addresses() - assert.Len(t, list, 1) - assert.Equal(t, list[0], addr) - - t.Log("addresses are sorted") - addr2, err := fs.NewAddress(address.SECP256K1) - assert.NoError(t, err) - - if bytes.Compare(addr2.Bytes(), addr.Bytes()) < 0 { - addr, addr2 = addr2, addr - } - for i := 0; i < 16; i++ { - list := w.Addresses() - assert.Len(t, list, 2) - assert.Equal(t, list[0], addr) - assert.Equal(t, list[1], addr2) - } -} - -func TestWalletBLSKeys(t *testing.T) { - tf.UnitTest(t) - - ds := datastore.NewMapDatastore() - wb, err := wallet.NewDSBackend(ds) - require.NoError(t, err) - w := wallet.New(wb) - - addr, err := wallet.NewAddress(w, address.BLS) - require.NoError(t, err) - - data := []byte("data to be signed") - sig, err := w.SignBytes(data, addr) - require.NoError(t, err) - - t.Run("address is BLS protocol", func(t *testing.T) { - assert.Equal(t, address.BLS, addr.Protocol()) - }) - - t.Run("key uses BLS cryptography", func(t *testing.T) { - ki, err := wb.GetKeyInfo(addr) - require.NoError(t, err) - assert.Equal(t, crypto.SigTypeBLS, ki.SigType) - }) - - t.Run("valid signatures verify", func(t *testing.T) { - err := crypto.ValidateSignature(data, addr, sig) - assert.NoError(t, err) - }) - - t.Run("invalid signatures do not verify", func(t *testing.T) { - notTheData := []byte("not the data") - err := crypto.ValidateSignature(notTheData, addr, sig) - assert.Error(t, err) - - notTheSig := crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: make([]byte, bls.SignatureBytes), - } - copy(notTheSig.Data[:], "not the sig") - err = crypto.ValidateSignature(data, addr, notTheSig) - assert.Error(t, err) - }) -} - -func TestSimpleSignAndVerify(t *testing.T) { - tf.UnitTest(t) - - t.Log("create a backend") - ds := datastore.NewMapDatastore() - fs, err := wallet.NewDSBackend(ds) - assert.NoError(t, err) - - t.Log("create a wallet with a single backend") - w := wallet.New(fs) - - t.Log("check backends") - assert.Len(t, w.Backends(wallet.DSBackendType), 1) - - 
t.Log("create a new address in the backend") - addr, err := fs.NewAddress(address.SECP256K1) - assert.NoError(t, err) - - t.Log("test HasAddress") - assert.True(t, w.HasAddress(addr)) - - t.Log("find backend") - backend, err := w.Find(addr) - assert.NoError(t, err) - assert.Equal(t, fs, backend) - - // data to sign - dataA := []byte("THIS IS A SIGNED SLICE OF DATA") - t.Log("sign content") - sig, err := w.SignBytes(dataA, addr) - assert.NoError(t, err) - - t.Log("verify signed content") - err = crypto.ValidateSignature(dataA, addr, sig) - assert.NoError(t, err) - - // data that is unsigned - dataB := []byte("I AM UNSIGNED DATA!") - t.Log("verify fails for unsigned content") - err = crypto.ValidateSignature(dataB, addr, sig) - assert.Error(t, err) -} - -func TestSignErrorCases(t *testing.T) { - tf.UnitTest(t) - - t.Log("create 2 backends") - ds1 := datastore.NewMapDatastore() - fs1, err := wallet.NewDSBackend(ds1) - assert.NoError(t, err) - - ds2 := datastore.NewMapDatastore() - fs2, err := wallet.NewDSBackend(ds2) - assert.NoError(t, err) - - t.Log("create 2 wallets each with a backend") - w1 := wallet.New(fs1) - w2 := wallet.New(fs2) - - t.Log("check backends") - assert.Len(t, w1.Backends(wallet.DSBackendType), 1) - assert.Len(t, w2.Backends(wallet.DSBackendType), 1) - - t.Log("create a new address each backend") - addr1, err := fs1.NewAddress(address.SECP256K1) - assert.NoError(t, err) - addr2, err := fs2.NewAddress(address.SECP256K1) - assert.NoError(t, err) - - t.Log("test HasAddress") - assert.True(t, w1.HasAddress(addr1)) - assert.False(t, w1.HasAddress(addr2)) - - t.Log("find backends") - backend1, err := w1.Find(addr1) - assert.NoError(t, err) - assert.Equal(t, fs1, backend1) - - t.Log("find backend fails for unknown address") - _, err = w1.Find(addr2) - assert.Error(t, err) - - // data to sign - dataA := []byte("Set tab width to '1' and make everyone happy") - t.Log("sign content") - _, err = w1.SignBytes(dataA, addr2) - assert.Error(t, err) - 
assert.Contains(t, err.Error(), "could not find address:") -} diff --git a/main.go b/main.go index f1f853efd2..e549e29fde 100644 --- a/main.go +++ b/main.go @@ -2,42 +2,48 @@ package main import ( "context" + _ "net/http/pprof" "os" logging "github.com/ipfs/go-log/v2" - "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" + "github.com/filecoin-project/venus/cmd" ) func main() { - // set default log level if no flags given - var level logging.LogLevel - var err error lvl := os.Getenv("GO_FILECOIN_LOG_LEVEL") if lvl == "" { - level = logging.LevelInfo + logging.SetAllLoggers(logging.LevelInfo) + _ = logging.SetLogLevel("beacon", "error") + _ = logging.SetLogLevel("peer-tracker", "error") + _ = logging.SetLogLevel("dht", "error") + _ = logging.SetLogLevel("bitswap", "error") + _ = logging.SetLogLevel("graphsync", "info") + _ = logging.SetLogLevel("heartbeat", "error") + _ = logging.SetLogLevel("dagservice", "error") + _ = logging.SetLogLevel("peerqueue", "error") + _ = logging.SetLogLevel("swarm", "error") + _ = logging.SetLogLevel("swarm2", "error") + _ = logging.SetLogLevel("basichost", "error") + _ = logging.SetLogLevel("dht_net", "error") + _ = logging.SetLogLevel("pubsub", "error") + _ = logging.SetLogLevel("relay", "error") + _ = logging.SetLogLevel("dht/RtRefreshManager", "error") } else { - level, err = logging.LevelFromString(lvl) + level, err := logging.LevelFromString(lvl) if err != nil { level = logging.LevelInfo } + logging.SetAllLoggers(level) } - logging.SetAllLoggers(level) - logging.SetLogLevel("dht", "error") // nolint: errcheck - logging.SetLogLevel("bitswap", "error") // nolint: errcheck - logging.SetLogLevel("graphsync", "info") // nolint: errcheck - logging.SetLogLevel("heartbeat", "error") // nolint: errcheck - logging.SetLogLevel("blockservice", "error") // nolint: errcheck - logging.SetLogLevel("peerqueue", "error") // nolint: errcheck - logging.SetLogLevel("swarm", "error") // nolint: errcheck - logging.SetLogLevel("swarm2", "error") // 
nolint: errcheck - logging.SetLogLevel("basichost", "error") // nolint: errcheck - logging.SetLogLevel("dht_net", "error") // nolint: errcheck - logging.SetLogLevel("pubsub", "error") // nolint: errcheck - logging.SetLogLevel("relay", "error") // nolint: errcheck + if len(os.Args) > 1 { + if os.Args[1] == "-v" { + os.Args[1] = "version" + } + } - code, _ := commands.Run(context.Background(), os.Args, os.Stdin, os.Stdout, os.Stderr) + code, _ := cmd.Run(context.Background(), os.Args, os.Stdin, os.Stdout, os.Stderr) os.Exit(code) } diff --git a/networking.md b/networking.md index 52def6c192..fa0452e774 100644 --- a/networking.md +++ b/networking.md @@ -45,7 +45,7 @@ a. we find the peer; highly unlikely scenario, as the chance is 1/2^256 with our b. we yield because a timer runs out; the most probable scenario. This is a healthy timeout to timebox discovery iterations. Throughout this process, the libp2p stack generates `CONNECTED` events for every new connection we establish to a peer, -for which `go-filecoin` registers a callback that triggers the Filecoin `HELLO` protocol negotiation. +for which `venus` registers a callback that triggers the Filecoin `HELLO` protocol negotiation. If the other party responds positively, chain sync with that peer begins. A few relevant aspects to note: @@ -111,7 +111,7 @@ The last step is crucial, as it enables peers to learn our updated addresses, an ## Pubsub via Gossipsub Filecoin relies on [GossipSub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) for pubsub gossip among peers propagating blockchain blocks and unmined messages. -Please refer to the linked spec for more infomation. +Please refer to the linked spec for more information. 
## Contribute to libp2p diff --git a/parameters.json b/parameters.json deleted file mode 100644 index a47c9227e4..0000000000 --- a/parameters.json +++ /dev/null @@ -1,152 +0,0 @@ -{ - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { - "cid": "QmYkygifkXnrnsN4MJsjBFHTQJHx294CyikDgDK8nYxdGh", - "digest": "df3f30442a6d6b4192f5071fb17e820c", - "sector_size": 2048 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { - "cid": "QmdXyqbmy2bkJA9Kyhh6z25GrTCq48LwX6c1mxPsm54wi7", - "digest": "0bea3951abf9557a3569f68e52a30c6c", - "sector_size": 2048 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { - "cid": "Qmf5XZZtP5VcYTf65MbKjLVabcS6cYMbr2rFShmfJzh5e5", - "digest": "655e6277638edc8c658094f6f0b33d54", - "sector_size": 536870912 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { - "cid": "QmPuhdWnAXBks43emnkqi9FQzyU1gASKyz23zrD27BPGs8", - "digest": "57690e3a6a94c3f704802a674b34f36b", - "sector_size": 536870912 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { - "cid": "QmPNVgTN7N5vDtD5u7ERMTLcvUtrKRBfYVUDr6uW3pKhX7", - "digest": "3d390654f58e603b896ac70c653f5676", - "sector_size": 2048 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { - "cid": "Qmbj61Zez7v5xA7nSCnmWbyLYznWJDWeusz7Yg8EcgVdoN", - "digest": "8c170a164743c39576a7f47a1b51e6f3", - "sector_size": 2048 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { - 
"cid": "QmRApb8RZoBK3cqicT7V3ydXg8yVvqPFMPrQNXP33aBihp", - "digest": "b1b58ff9a297b82885e8a7dfb035f83c", - "sector_size": 8388608 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { - "cid": "QmcytF1dTdqMFoyXi931j1RgmGtLfR9LLLaBznRt1tPQyD", - "digest": "1a09e00c641f192f55af3433a028f050", - "sector_size": 8388608 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { - "cid": "QmPvr54tWaVeP4WnekivzUAJitTqsQfvikBvAHNEaDNQSw", - "digest": "9380e41368ed4083dbc922b290d3b786", - "sector_size": 8388608 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { - "cid": "QmXyVLVDRCcxA9SjT7PeK8HFtyxZ2ZH3SHa8KoGLw8VGJt", - "digest": "f0731a7e20f90704bd38fc5d27882f6d", - "sector_size": 8388608 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { - "cid": "Qmf5f6ko3dqj7qauzXpZqxM9B2x2sL977K6gE2ppNwuJPv", - "digest": "273ebb8c896326b7c292bee8b775fd38", - "sector_size": 536870912 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { - "cid": "QmfP3MQe8koW63n5MkDENENVHxib78MJYYyZvbneCsuze8", - "digest": "3dd94da9da64e51b3445bc528d84e76d", - "sector_size": 536870912 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { - "cid": "QmYEeeCE8uT2bsVkxcqqUYeMmMEbe6rfmo8wQCv7jFHqqm", - "digest": "c947f2021304ed43b7216f7a8436e294", - "sector_size": 34359738368 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { - "cid": 
"QmXB63ExriFjB4ywWnXTnFwCcLFfCeEP3h15qtL5i7F4aX", - "digest": "ab20d7b253e7e9a0d2ccdf7599ec8ec3", - "sector_size": 34359738368 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { - "cid": "QmW5Yxg3L1NSzuQVcRMHMbG3uvVoi4dTLzVaDpnEUPQpnA", - "digest": "079ba19645828ae42b22b0e3f4866e8d", - "sector_size": 34359738368 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { - "cid": "QmQzZ5dJ11tcSBees38WX41tZLXS9BqpEti253m5QcnTNs", - "digest": "c76125a50a7de315165de359b5174ae4", - "sector_size": 34359738368 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { - "cid": "QmNk3wga1tS53FUu1QnkK8ehWA2cqpCnSEAPv3KLxdJxNa", - "digest": "421e4790c0b80e0107a7ff67acf14084", - "sector_size": 68719476736 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { - "cid": "QmVQCHGsrUtbn9RjHs1e6GXfeXDW5m9w4ge48PSX3Z2as2", - "digest": "8b60e9cc1470a6729c687d6cf0a1f79c", - "sector_size": 68719476736 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { - "cid": "QmTL3VvydaMFWKvE5VzxjgKsJYgL9JMM4JVYNtQxdj9JK1", - "digest": "2685f31124b22ea6b2857e5a5e87ffa3", - "sector_size": 68719476736 - }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { - "cid": "QmSVWbLqQYbUbbJyfsRMzEib2rfSqMtnPks1Nw22omcBQm", - "digest": "efe703cd2839597c7ca5c2a906b74296", - "sector_size": 68719476736 - }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { - "cid": 
"QmU9dH31nZZUJnsogR4Ld4ySUcH6wm2RgmGiujwnqtbU6k", - "digest": "fcef8e87ae2afd7a28aae44347b804cf", - "sector_size": 2048 - }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { - "cid": "QmdJ15DMGPooye5NaPcRfXUdHUDibcN7hKjbmTGuu1K4AQ", - "digest": "2ee2b3518229680db15161d4f582af37", - "sector_size": 2048 - }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { - "cid": "QmZgtxcY3tMXXQxZTA7ZTUDXLVUnfxNcerXgeW4gG2NnfP", - "digest": "3273c7135cb75684248b475781b738ee", - "sector_size": 536870912 - }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { - "cid": "QmSS6ZkAV2aGZcgKgdPpEEgihXF1ryZX8PSAZDWSoeL1d4", - "digest": "1519b5f61d9044a59f2bdc57537c094b", - "sector_size": 536870912 - }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { - "cid": "QmQBGXeiNn6hVwbR6qFarQqiNGDdKk4h9ucfyvcXyfYz2N", - "digest": "7d5f896f435c38e93bcda6dd168d860b", - "sector_size": 8388608 - }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { - "cid": "QmPrZgBVGMckEAeu5eSJnLmiAwcPQjKjZe5ir6VaQ5AxKs", - "digest": "fe6d2de44580a0db5a4934688899b92f", - "sector_size": 8388608 - }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { - "cid": "QmZL2cq45XJn5BFzagAZwgFmLrcM1W6CXoiEF9C5j5tjEF", - "digest": "acdfed9f0512bc85a01a9fb871d475d5", - "sector_size": 34359738368 - }, - 
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { - "cid": "QmQ4zB7nNa1tDYNifBkExRnZtwtxZw775iaqvVsZyRi6Q2", - "digest": "524a2f3e9d6826593caebc41bb545c40", - "sector_size": 34359738368 - }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { - "cid": "QmY7DitNKXFeLQt9QoVQkfjM1EvRnprqUVxjmkTXkHDNka", - "digest": "f27271c0537ba65ade2ec045f8fbd069", - "sector_size": 68719476736 - }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { - "cid": "QmUJsvoCuQ4LszPmeRVAkMYb5qY95ctz3UXKhu8xLzyFKo", - "digest": "576b292938c6c9d0a0e721bd867a543b", - "sector_size": 68719476736 - } -} diff --git a/pkg/beacon/beacon.go b/pkg/beacon/beacon.go new file mode 100644 index 0000000000..193caefa59 --- /dev/null +++ b/pkg/beacon/beacon.go @@ -0,0 +1,150 @@ +package beacon + +import ( + "context" + "fmt" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/venus-shared/types" + logging "github.com/ipfs/go-log" +) + +var log = logging.Logger("beacon") + +type Response struct { + Entry types.BeaconEntry + Err error +} + +type BeaconPoint struct { //nolint + Start abi.ChainEpoch + Beacon RandomBeacon +} + +// RandomBeacon represents a system that provides randomness to Lotus. +// Other components interrogate the RandomBeacon to acquire randomness that's +// valid for a specific chain epoch. Also to verify beacon entries that have +// been posted on chain. 
+type RandomBeacon interface { + Entry(context.Context, uint64) <-chan Response + VerifyEntry(types.BeaconEntry, types.BeaconEntry) error + MaxBeaconRoundForEpoch(network.Version, abi.ChainEpoch) uint64 +} + +// ValidateBlockValues Verify that the beacon in the block header is correct, first get beacon server at block epoch and parent block epoch in schedule. +// if paraent beacon is the same beacon server. value beacon normally but if not equal, means that the pre entry in another beacon chain, so just validate +// beacon value in current block header. the first values is parent beacon the the second value is current beacon. +func ValidateBlockValues(bSchedule Schedule, nv network.Version, h *types.BlockHeader, parentEpoch abi.ChainEpoch, prevEntry *types.BeaconEntry) error { + { + parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) + currBeacon := bSchedule.BeaconForEpoch(h.Height) + if parentBeacon != currBeacon { + if len(h.BeaconEntries) != 2 { + return fmt.Errorf("expected two beacon entries at beacon fork, got %d", len(h.BeaconEntries)) + } + err := currBeacon.VerifyEntry(h.BeaconEntries[1], h.BeaconEntries[0]) + if err != nil { + return fmt.Errorf("beacon at fork point invalid: (%v, %v): %w", + h.BeaconEntries[1], h.BeaconEntries[0], err) + } + return nil + } + } + + // TODO: fork logic + b := bSchedule.BeaconForEpoch(h.Height) + maxRound := b.MaxBeaconRoundForEpoch(nv, h.Height) + if maxRound == prevEntry.Round { + if len(h.BeaconEntries) != 0 { + return fmt.Errorf("expected not to have any beacon entries in this block, got %d", len(h.BeaconEntries)) + } + return nil + } + + if len(h.BeaconEntries) == 0 { + return fmt.Errorf("expected to have beacon entries in this block, but didn't find any") + } + + last := h.BeaconEntries[len(h.BeaconEntries)-1] + if last.Round != maxRound { + return fmt.Errorf("expected final beacon entry in block to be at round %d, got %d", maxRound, last.Round) + } + + for i, e := range h.BeaconEntries { + if err := 
b.VerifyEntry(e, *prevEntry); err != nil { + return fmt.Errorf("beacon entry %d (%d - %x (%d)) was invalid: %w", i, e.Round, e.Data, len(e.Data), err) + } + prevEntry = &h.BeaconEntries[i] + + } + + return nil +} + +func BeaconEntriesForBlock(ctx context.Context, bSchedule Schedule, nv network.Version, epoch abi.ChainEpoch, parentEpoch abi.ChainEpoch, prev types.BeaconEntry) ([]types.BeaconEntry, error) { //nolint + { + parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) + currBeacon := bSchedule.BeaconForEpoch(epoch) + if parentBeacon != currBeacon { + // Fork logic + round := currBeacon.MaxBeaconRoundForEpoch(nv, epoch) + out := make([]types.BeaconEntry, 2) + rch := currBeacon.Entry(ctx, round-1) + res := <-rch + if res.Err != nil { + return nil, fmt.Errorf("getting entry %d returned error: %w", round-1, res.Err) + } + out[0] = res.Entry + rch = currBeacon.Entry(ctx, round) + res = <-rch + if res.Err != nil { + return nil, fmt.Errorf("getting entry %d returned error: %w", round, res.Err) + } + out[1] = res.Entry + return out, nil + } + } + + beacon := bSchedule.BeaconForEpoch(epoch) + + start := time.Now() + + maxRound := beacon.MaxBeaconRoundForEpoch(nv, epoch) + if maxRound == prev.Round { + return nil, nil + } + + // TODO: this is a sketchy way to handle the genesis block not having a beacon entry + if prev.Round == 0 { + prev.Round = maxRound - 1 + } + + cur := maxRound + var out []types.BeaconEntry + for cur > prev.Round { + rch := beacon.Entry(ctx, cur) + select { + case resp := <-rch: + if resp.Err != nil { + return nil, fmt.Errorf("beacon entry request returned error: %w", resp.Err) + } + + out = append(out, resp.Entry) + cur = resp.Entry.Round - 1 + case <-ctx.Done(): + return nil, fmt.Errorf("context timed out waiting on beacon entry to come back for epoch %d: %w", epoch, ctx.Err()) + } + } + + log.Debugw("fetching beacon entries", "took", time.Since(start), "numEntries", len(out)) + reverse(out) + return out, nil +} + +func reverse(arr 
[]types.BeaconEntry) { + for i := 0; i < len(arr)/2; i++ { + arr[i], arr[len(arr)-(1+i)] = arr[len(arr)-(1+i)], arr[i] + } +} diff --git a/pkg/beacon/drand.go b/pkg/beacon/drand.go new file mode 100644 index 0000000000..5ff4902ab2 --- /dev/null +++ b/pkg/beacon/drand.go @@ -0,0 +1,204 @@ +package beacon + +import ( + "bytes" + "context" + "errors" + "fmt" + "time" + + dchain "github.com/drand/drand/chain" + dclient "github.com/drand/drand/client" + hclient "github.com/drand/drand/client/http" + dlog "github.com/drand/drand/log" + "github.com/drand/kyber" + kzap "github.com/go-kit/kit/log/zap" + lru "github.com/hashicorp/golang-lru" + "go.uber.org/zap/zapcore" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + cfg "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// DrandBeacon connects Lotus with a drand network in order to provide +// randomness to the system in a way that's aligned with Filecoin rounds/epochs. +// +// We connect to drand peers via their public HTTP endpoints. The peers are +// enumerated in the drandServers variable. +// +// The root trust for the Drand chain is configured from build.DrandChain. 
+type DrandBeacon struct { + client dclient.Client + + pubkey kyber.Point + + // seconds + interval time.Duration + + drandGenTime uint64 + filGenTime uint64 + filRoundTime uint64 + + localCache *lru.Cache +} + +// DrandHTTPClient interface overrides the user agent used by drand +type DrandHTTPClient interface { + SetUserAgent(string) +} + +// NewDrandBeacon create new beacon client from config, genesis block time and block delay +func NewDrandBeacon(genTimeStamp, interval uint64, config cfg.DrandConf) (*DrandBeacon, error) { + drandChain, err := dchain.InfoFromJSON(bytes.NewReader([]byte(config.ChainInfoJSON))) + if err != nil { + return nil, fmt.Errorf("unable to unmarshal drand chain info: %w", err) + } + + dlogger := dlog.NewKitLoggerFrom(kzap.NewZapSugarLogger( + log.SugaredLogger.Desugar(), zapcore.InfoLevel)) + + var clients []dclient.Client + for _, url := range config.Servers { + hc, err := hclient.NewWithInfo(url, drandChain, nil) + if err != nil { + return nil, fmt.Errorf("could not create http drand client: %w", err) + } + hc.(DrandHTTPClient).SetUserAgent("drand-client-lotus/" + constants.BuildVersion) + clients = append(clients, hc) + + } + + opts := []dclient.Option{ + dclient.WithChainInfo(drandChain), + dclient.WithCacheSize(1024), + dclient.WithLogger(dlogger), + } + + log.Info("drand beacon without pubsub") + + client, err := dclient.Wrap(clients, opts...) 
+ if err != nil { + return nil, fmt.Errorf("creating drand client: %v", err) + } + + lc, err := lru.New(1024) + if err != nil { + return nil, err + } + + db := &DrandBeacon{ + client: client, + localCache: lc, + } + + db.pubkey = drandChain.PublicKey + db.interval = drandChain.Period + db.drandGenTime = uint64(drandChain.GenesisTime) + db.filRoundTime = interval + db.filGenTime = genTimeStamp + + return db, nil +} + +// Entry get a beacon value of specify block height, +func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan Response { + out := make(chan Response, 1) + if round != 0 { + be := db.getCachedValue(round) + if be != nil { + out <- Response{Entry: *be} + close(out) + return out + } + } + + go func() { + start := time.Now() + log.Infow("start fetching randomness", "round", round) + resp, err := db.client.Get(ctx, round) + + var br Response + if err != nil { + br.Err = fmt.Errorf("drand failed Get request: %w", err) + } else { + br.Entry.Round = resp.Round() + br.Entry.Data = resp.Signature() + } + log.Infow("done fetching randomness", "round", round, "took", time.Since(start)) + out <- br + close(out) + }() + + return out +} + +func (db *DrandBeacon) cacheValue(e types.BeaconEntry) { + db.localCache.Add(e.Round, e) +} + +func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry { + v, ok := db.localCache.Get(round) + if !ok { + return nil + } + e, _ := v.(types.BeaconEntry) + return &e +} + +func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error { + if prev.Round == 0 { + // TODO handle genesis better + return nil + } + if be := db.getCachedValue(curr.Round); be != nil { + if !bytes.Equal(curr.Data, be.Data) { + return errors.New("invalid beacon value, does not match cached good value") + } + // return no error if the value is in the cache already + return nil + } + b := &dchain.Beacon{ + PreviousSig: prev.Data, + Round: curr.Round, + Signature: curr.Data, + } + err := 
dchain.VerifyBeacon(db.pubkey, b) + if err == nil { + db.cacheValue(curr) + } + return err +} + +// MaxBeaconRoundForEpoch get the turn of beacon chain corresponding to chain height +func (db *DrandBeacon) MaxBeaconRoundForEpoch(nv network.Version, filEpoch abi.ChainEpoch) uint64 { + // TODO: sometimes the genesis time for filecoin is zero and this goes negative + latestTS := ((uint64(filEpoch) * db.filRoundTime) + db.filGenTime) - db.filRoundTime + + if nv <= network.Version15 { + return db.maxBeaconRoundV1(latestTS) + } + + return db.maxBeaconRoundV2(latestTS) +} + +func (db *DrandBeacon) maxBeaconRoundV1(latestTS uint64) uint64 { + dround := (latestTS - db.drandGenTime) / uint64(db.interval.Seconds()) + return dround +} + +func (db *DrandBeacon) maxBeaconRoundV2(latestTS uint64) uint64 { + if latestTS < db.drandGenTime { + return 1 + } + + fromGenesis := latestTS - db.drandGenTime + // we take the time from genesis divided by the periods in seconds, that + // gives us the number of periods since genesis. We also add +1 because + // round 1 starts at genesis time. 
+ return fromGenesis/uint64(db.interval.Seconds()) + 1 +} + +var _ RandomBeacon = (*DrandBeacon)(nil) diff --git a/pkg/beacon/drand_test.go b/pkg/beacon/drand_test.go new file mode 100644 index 0000000000..61230f4402 --- /dev/null +++ b/pkg/beacon/drand_test.go @@ -0,0 +1,41 @@ +// stm: ignore +// Only tests external library behavior, therefore it should not be annotated +package beacon + +import ( + "os" + "testing" + + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/config" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + dchain "github.com/drand/drand/chain" + hclient "github.com/drand/drand/client/http" + "github.com/stretchr/testify/assert" +) + +func TestPrintGroupInfo(t *testing.T) { + tf.UnitTest(t) + server := config.DrandConfigs[config.DrandDevnet].Servers[0] + c, err := hclient.New(server, nil, nil) + assert.NoError(t, err) + cg := c.(interface { + FetchChainInfo(groupHash []byte) (*dchain.Info, error) + }) + chain, err := cg.FetchChainInfo(nil) + assert.NoError(t, err) + err = chain.ToJSON(os.Stdout) + assert.NoError(t, err) +} + +func TestMaxBeaconRoundForEpoch(t *testing.T) { + tf.UnitTest(t) + todayTS := uint64(1652222222) + drandCfg := config.DrandConfigs[config.DrandDevnet] + db, err := NewDrandBeacon(todayTS, config.NewDefaultConfig().NetworkParams.BlockDelay, drandCfg) + assert.NoError(t, err) + mbr15 := db.MaxBeaconRoundForEpoch(network.Version15, 100) + mbr16 := db.MaxBeaconRoundForEpoch(network.Version16, 100) + assert.Equal(t, mbr15+1, mbr16) +} diff --git a/pkg/beacon/mock.go b/pkg/beacon/mock.go new file mode 100644 index 0000000000..7c5556063f --- /dev/null +++ b/pkg/beacon/mock.go @@ -0,0 +1,69 @@ +package beacon + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/venus-shared/types" + 
"github.com/minio/blake2b-simd" +) + +// Mock beacon assumes that filecoin rounds are 1:1 mapped with the beacon rounds +type mockBeacon struct { + interval time.Duration +} + +func NewMockBeacon(interval time.Duration) RandomBeacon { + mb := &mockBeacon{interval: interval} + + return mb +} + +func NewMockSchedule(interval time.Duration) Schedule { + return []BeaconPoint{{ + Start: abi.ChainEpoch(0), + Beacon: NewMockBeacon(interval), + }} +} + +func (mb *mockBeacon) RoundTime() time.Duration { + return mb.interval +} + +func (mb *mockBeacon) entryForIndex(index uint64) types.BeaconEntry { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, index) + rval := blake2b.Sum256(buf) + return types.BeaconEntry{ + Round: index, + Data: rval[:], + } +} + +func (mb *mockBeacon) Entry(ctx context.Context, index uint64) <-chan Response { + e := mb.entryForIndex(index) + out := make(chan Response, 1) + out <- Response{Entry: e} + return out +} + +func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, to types.BeaconEntry) error { + // TODO: cache this, especially for bls + oe := mb.entryForIndex(from.Round) + if !bytes.Equal(from.Data, oe.Data) { + return fmt.Errorf("mock beacon entry was invalid") + } + return nil +} + +func (mb *mockBeacon) MaxBeaconRoundForEpoch(nv network.Version, epoch abi.ChainEpoch) uint64 { + // offset for better testing + return uint64(epoch + 100) +} + +var _ RandomBeacon = (*mockBeacon)(nil) diff --git a/pkg/beacon/schedule.go b/pkg/beacon/schedule.go new file mode 100644 index 0000000000..e063878d67 --- /dev/null +++ b/pkg/beacon/schedule.go @@ -0,0 +1,43 @@ +package beacon + +import ( + "fmt" + "sort" + + "github.com/filecoin-project/go-state-types/abi" + + cfg "github.com/filecoin-project/venus/pkg/config" +) + +type Schedule []BeaconPoint + +// BeaconForEpoch select beacon at specify epoch +func (bs Schedule) BeaconForEpoch(e abi.ChainEpoch) RandomBeacon { + for i := len(bs) - 1; i >= 0; i-- { + bp := bs[i] + if e >= bp.Start { + return 
bp.Beacon + } + } + return bs[0].Beacon +} + +// DrandConfigSchedule create new beacon schedule , used to select beacon server at specify chain height +func DrandConfigSchedule(genTimeStamp uint64, blockDelay uint64, drandSchedule map[abi.ChainEpoch]cfg.DrandEnum) (Schedule, error) { + shd := Schedule{} + + for start, config := range drandSchedule { + bc, err := NewDrandBeacon(genTimeStamp, blockDelay, cfg.DrandConfigs[config]) + if err != nil { + return nil, fmt.Errorf("creating drand beacon: %v", err) + } + shd = append(shd, BeaconPoint{Start: start, Beacon: bc}) + } + + sort.Slice(shd, func(i, j int) bool { + return shd[i].Start < shd[j].Start + }) + + log.Infof("Schedule: %v", shd) + return shd, nil +} diff --git a/pkg/chain/cbor_gen.go b/pkg/chain/cbor_gen.go new file mode 100644 index 0000000000..0f7bbd096c --- /dev/null +++ b/pkg/chain/cbor_gen.go @@ -0,0 +1,98 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package chain + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufTSState = []byte{130} + +func (t *TSState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufTSState); err != nil { + return err + } + + // t.StateRoot (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.StateRoot); err != nil { + return xerrors.Errorf("failed to write cid field t.StateRoot: %w", err) + } + + // t.Receipts (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Receipts); err != nil { + return xerrors.Errorf("failed to write cid field t.Receipts: %w", err) + } + + return nil +} + +func (t *TSState) UnmarshalCBOR(r io.Reader) (err error) { + *t = TSState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil 
{ + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.StateRoot (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.StateRoot: %w", err) + } + + t.StateRoot = c + + } + // t.Receipts (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Receipts: %w", err) + } + + t.Receipts = c + + } + return nil +} diff --git a/pkg/chain/chain_index.go b/pkg/chain/chain_index.go new file mode 100644 index 0000000000..c6d0c1cc70 --- /dev/null +++ b/pkg/chain/chain_index.go @@ -0,0 +1,173 @@ +package chain + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/types" + lru "github.com/hashicorp/golang-lru" +) + +var DefaultChainIndexCacheSize = 32 << 10 + +// ChainIndex tipset height index, used to getting tipset by height quickly +type ChainIndex struct { //nolint + skipCache *lru.ARCCache + + loadTipSet loadTipSetFunc + + skipLength abi.ChainEpoch +} + +// NewChainIndex return a new chain index with arc cache +func NewChainIndex(lts loadTipSetFunc) *ChainIndex { + sc, _ := lru.NewARC(DefaultChainIndexCacheSize) + return &ChainIndex{ + skipCache: sc, + loadTipSet: lts, + skipLength: 20, + } +} + +type lbEntry struct { + ts *types.TipSet + parentHeight abi.ChainEpoch + targetHeight abi.ChainEpoch + target types.TipSetKey +} + +// GetTipSetByHeight get tipset at specify height from specify tipset +// the tipset within the skiplength is directly obtained by reading the database. +// if the height difference exceeds the skiplength, the tipset is read from caching. 
+// if the caching fails, the tipset is obtained by reading the database and updating the cache +func (ci *ChainIndex) GetTipSetByHeight(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + if from.Height()-to <= ci.skipLength { + return ci.walkBack(ctx, from, to) + } + + rounded, err := ci.roundDown(ctx, from) + if err != nil { + return nil, err + } + + cur := rounded.Key() + // cur := from.Key() + for { + cval, ok := ci.skipCache.Get(cur) + if !ok { + fc, err := ci.fillCache(ctx, cur) + if err != nil { + return nil, err + } + cval = fc + } + + lbe := cval.(*lbEntry) + if lbe.ts.Height() == to || lbe.parentHeight < to { + return lbe.ts, nil + } else if to > lbe.targetHeight { + return ci.walkBack(ctx, lbe.ts, to) + } + + cur = lbe.target + } +} + +// GetTipsetByHeightWithoutCache get the tipset of specific height by reading the database directly +func (ci *ChainIndex) GetTipsetByHeightWithoutCache(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + return ci.walkBack(ctx, from, to) +} + +func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEntry, error) { + ts, err := ci.loadTipSet(ctx, tsk) + if err != nil { + return nil, err + } + + if ts.Height() == 0 { + return &lbEntry{ + ts: ts, + parentHeight: 0, + }, nil + } + + // will either be equal to ts.Height, or at least > ts.Parent.Height() + rheight := ci.roundHeight(ts.Height()) + + parent, err := ci.loadTipSet(ctx, ts.Parents()) + if err != nil { + return nil, err + } + + rheight -= ci.skipLength + if rheight < 0 { + rheight = 0 + } + + var skipTarget *types.TipSet + if parent.Height() < rheight { + skipTarget = parent + } else { + skipTarget, err = ci.walkBack(ctx, parent, rheight) + if err != nil { + return nil, fmt.Errorf("fillCache walkback: %s", err) + } + } + + lbe := &lbEntry{ + ts: ts, + parentHeight: parent.Height(), + targetHeight: skipTarget.Height(), + target: skipTarget.Key(), + } + ci.skipCache.Add(tsk, 
lbe) + + return lbe, nil +} + +// floors to nearest skipLength multiple +func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch { + return (h / ci.skipLength) * ci.skipLength +} + +func (ci *ChainIndex) roundDown(ctx context.Context, ts *types.TipSet) (*types.TipSet, error) { + target := ci.roundHeight(ts.Height()) + + rounded, err := ci.walkBack(ctx, ts, target) + if err != nil { + return nil, err + } + + return rounded, nil +} + +func (ci *ChainIndex) walkBack(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + if to > from.Height() { + return nil, fmt.Errorf("looking for tipset with height greater than start point") + } + + if to == from.Height() { + return from, nil + } + + ts := from + + for { + pts, err := ci.loadTipSet(ctx, ts.Parents()) + if err != nil { + return nil, err + } + + if to > pts.Height() { + // in case pts is lower than the epoch we're looking for (null blocks) + // return a tipset above that height + return ts, nil + } + if to == pts.Height() { + return pts, nil + } + + ts = pts + } +} diff --git a/pkg/chain/chain_index_test.go b/pkg/chain/chain_index_test.go new file mode 100644 index 0000000000..c41f64bd82 --- /dev/null +++ b/pkg/chain/chain_index_test.go @@ -0,0 +1,57 @@ +// stm: #unit +package chain + +import ( + "context" + "fmt" + "math/rand" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/stretchr/testify/require" +) + +func TestChainIndex(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + + builder := NewBuilder(t, address.Undef) + genTS := builder.Genesis() + + linksCount := 20 + links := make([]*types.TipSet, linksCount) + links[0] = genTS + + for i := 1; i < linksCount; i++ { + links[i] = builder.AppendOn(ctx, links[i-1], rand.Intn(2)+1) + } + + head := links[linksCount-1] + + 
DefaultChainIndexCacheSize = 10 + chainIndex := NewChainIndex(builder.GetTipSet) + chainIndex.skipLength = 10 + + // stm: @CHAIN_INDEX_GET_TIPSET_BY_HEIGHT_001, @CHAIN_INDEX_GET_TIPSET_BY_HEIGHT_002, @CHAIN_INDEX_GET_TIPSET_BY_HEIGHT_004 + for i := 0; i < linksCount; i++ { + _, err := chainIndex.GetTipSetByHeight(ctx, head, abi.ChainEpoch(i)) + require.NoError(t, err) + } + + // stm: @CHAIN_INDEX_GET_TIPSET_BY_HEIGHT_NO_CACHE_001 + _, err := chainIndex.GetTipsetByHeightWithoutCache(ctx, head, head.Height()/2) + require.NoError(t, err) + + chainIndex.loadTipSet = func(_ context.Context, _ types.TipSetKey) (*types.TipSet, error) { + return nil, fmt.Errorf("error round down") + } + + // If error occurs after calling roundDown function + // stm: @CHAIN_INDEX_GET_TIPSET_BY_HEIGHT_003 + _, err = chainIndex.GetTipSetByHeight(ctx, head, head.Height()/2) + require.Error(t, err) +} diff --git a/pkg/chain/circulating_supply.go b/pkg/chain/circulating_supply.go new file mode 100644 index 0000000000..07c95add7f --- /dev/null +++ b/pkg/chain/circulating_supply.go @@ -0,0 +1,423 @@ +package chain + +import ( + "context" + "fmt" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + cbornode "github.com/ipfs/go-ipld-cbor" + + // Used for genesis. 
+ msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/power" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/reward" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" +) + +type ICirculatingSupplyCalcualtor interface { + GetCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st tree.Tree) (types.CirculatingSupply, error) + GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) +} + +// CirculatingSupplyCalculator used to calculate the funds at a specific block height +type CirculatingSupplyCalculator struct { + bstore blockstoreutil.Blockstore + genesisRoot cid.Cid + + // info about the Accounts in the genesis state + preIgnitionVesting []msig0.State + postIgnitionVesting []msig0.State + postCalicoVesting []msig0.State + + genesisPledge abi.TokenAmount + genesisMarketFunds abi.TokenAmount + + genesisMsigLk sync.Mutex + upgradeConfig *config.ForkUpgradeConfig +} + +// NewCirculatingSupplyCalculator create new circulating supply calculator +func NewCirculatingSupplyCalculator(bstore blockstoreutil.Blockstore, genesisRoot cid.Cid, upgradeConfig *config.ForkUpgradeConfig) *CirculatingSupplyCalculator { + return &CirculatingSupplyCalculator{bstore: bstore, genesisRoot: genesisRoot, upgradeConfig: upgradeConfig} +} + +// GetCirculatingSupplyDetailed query contract and calculate circulation status at specific height and tree state +func (caculator *CirculatingSupplyCalculator) GetCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st tree.Tree) (types.CirculatingSupply, error) { + 
filVested, err := caculator.GetFilVested(ctx, height) + if err != nil { + return types.CirculatingSupply{}, fmt.Errorf("failed to calculate filVested: %v", err) + } + + filReserveDisbursed := big.Zero() + if height > caculator.upgradeConfig.UpgradeAssemblyHeight { + filReserveDisbursed, err = caculator.GetFilReserveDisbursed(ctx, st) + if err != nil { + return types.CirculatingSupply{}, fmt.Errorf("failed to calculate filReserveDisbursed: %v", err) + } + } + + filMined, err := GetFilMined(ctx, st) + if err != nil { + return types.CirculatingSupply{}, fmt.Errorf("failed to calculate filMined: %v", err) + } + filBurnt, err := GetFilBurnt(ctx, st) + if err != nil { + return types.CirculatingSupply{}, fmt.Errorf("failed to calculate filBurnt: %v", err) + } + filLocked, err := caculator.GetFilLocked(ctx, st) + if err != nil { + return types.CirculatingSupply{}, fmt.Errorf("failed to calculate filLocked: %v", err) + } + ret := big.Add(filVested, filMined) + ret = big.Add(ret, filReserveDisbursed) + ret = big.Sub(ret, filBurnt) + ret = big.Sub(ret, filLocked) + + if ret.LessThan(big.Zero()) { + ret = big.Zero() + } + + return types.CirculatingSupply{ + FilVested: filVested, + FilMined: filMined, + FilBurnt: filBurnt, + FilLocked: filLocked, + FilCirculating: ret, + FilReserveDisbursed: filReserveDisbursed, + }, nil +} + +/*func (c *Expected) processBlock(ctx context.Context, ts *block.TipSet) (cid.Cid, []types.MessageReceipt, error) { + var secpMessages [][]*types.SignedMessage + var blsMessages [][]*types.Message + for i := 0; i < ts.Len(); i++ { + blk := ts.At(i) + secpMsgs, blsMsgs, err := c.messageStore.LoadMetaMessages(ctx, blk.Messages.Cid) + if err != nil { + return cid.Undef, []types.MessageReceipt{}, xerrors.Wrapf(err, "syncing tip %s failed loading message list %s for block %s", ts.Key(), blk.Messages, blk.Cid()) + } + + blsMessages = append(blsMessages, blsMsgs) + secpMessages = append(secpMessages, secpMsgs) + } + + vms := vm.NewStorage(c.bstore) + priorState, 
err := state.LoadState(ctx, vms, ts.At(0).StateRoot.Cid) + if err != nil { + return cid.Undef, []types.MessageReceipt{}, err + } + + var newState state.Tree + newState, receipts, err := c.runMessages(ctx, priorState, vms, ts, blsMessages, secpMessages) + if err != nil { + return cid.Undef, []types.MessageReceipt{}, err + } + err = vms.Flush() + if err != nil { + return cid.Undef, []types.MessageReceipt{}, err + } + + root, err := newState.Flush(ctx) + if err != nil { + return cid.Undef, []types.MessageReceipt{}, err + } + return root, receipts, err +} +*/ + +// sets up information about the vesting schedule +func (caculator *CirculatingSupplyCalculator) setupGenesisVestingSchedule(ctx context.Context) error { + cst := cbornode.NewCborStore(caculator.bstore) + sTree, err := tree.LoadState(ctx, cst, caculator.genesisRoot) + if err != nil { + return fmt.Errorf("loading state tree: %v", err) + } + + gmf, err := getFilMarketLocked(ctx, sTree) + if err != nil { + return fmt.Errorf("setting up genesis market funds: %v", err) + } + + gp, err := getFilPowerLocked(ctx, sTree) + if err != nil { + return fmt.Errorf("setting up genesis pledge: %v", err) + } + + caculator.genesisMarketFunds = gmf + caculator.genesisPledge = gp + + totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) + + // 6 months + sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay) + totalsByEpoch[sixMonths] = big.NewInt(49_929_341) + totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) + + // 1 year + oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay) + totalsByEpoch[oneYear] = big.NewInt(22_421_712) + + // 2 years + twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay) + totalsByEpoch[twoYears] = big.NewInt(7_223_364) + + // 3 years + threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay) + totalsByEpoch[threeYears] = big.NewInt(87_637_883) + + // 6 years + sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay) + totalsByEpoch[sixYears] = 
big.NewInt(100_000_000) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) + + caculator.preIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch)) + for k, v := range totalsByEpoch { + ns := msig0.State{ + InitialBalance: v, + UnlockDuration: k, + PendingTxns: cid.Undef, + } + caculator.preIgnitionVesting = append(caculator.preIgnitionVesting, ns) + } + + return nil +} + +// sets up information about the vesting schedule post the ignition upgrade +func (caculator *CirculatingSupplyCalculator) setupPostIgnitionVesting(ctx context.Context) error { + totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) + + // 6 months + sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay) + totalsByEpoch[sixMonths] = big.NewInt(49_929_341) + totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) + + // 1 year + oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay) + totalsByEpoch[oneYear] = big.NewInt(22_421_712) + + // 2 years + twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay) + totalsByEpoch[twoYears] = big.NewInt(7_223_364) + + // 3 years + threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay) + totalsByEpoch[threeYears] = big.NewInt(87_637_883) + + // 6 years + sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay) + totalsByEpoch[sixYears] = big.NewInt(100_000_000) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) + + caculator.postIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch)) + for k, v := range totalsByEpoch { + ns := msig0.State{ + // In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error + InitialBalance: big.Mul(v, big.NewInt(int64(constants.FilecoinPrecision))), + UnlockDuration: k, + PendingTxns: cid.Undef, + // In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself. 
+ StartEpoch: caculator.upgradeConfig.UpgradeLiftoffHeight, + } + caculator.postIgnitionVesting = append(caculator.postIgnitionVesting, ns) + } + + return nil +} + +// sets up information about the vesting schedule post the calico upgrade +func (caculator *CirculatingSupplyCalculator) setupPostCalicoVesting(ctx context.Context) error { + totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) + + // 0 days + zeroDays := abi.ChainEpoch(0) + totalsByEpoch[zeroDays] = big.NewInt(10_632_000) + + // 6 months + sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay) + totalsByEpoch[sixMonths] = big.NewInt(19_015_887) + totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) + + // 1 year + oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay) + totalsByEpoch[oneYear] = big.NewInt(22_421_712) + totalsByEpoch[oneYear] = big.Add(totalsByEpoch[oneYear], big.NewInt(9_400_000)) + + // 2 years + twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay) + totalsByEpoch[twoYears] = big.NewInt(7_223_364) + + // 3 years + threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay) + totalsByEpoch[threeYears] = big.NewInt(87_637_883) + totalsByEpoch[threeYears] = big.Add(totalsByEpoch[threeYears], big.NewInt(898_958)) + + // 6 years + sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay) + totalsByEpoch[sixYears] = big.NewInt(100_000_000) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(9_805_053)) + + caculator.postCalicoVesting = make([]msig0.State, 0, len(totalsByEpoch)) + for k, v := range totalsByEpoch { + ns := msig0.State{ + InitialBalance: big.Mul(v, big.NewInt(int64(constants.FilecoinPrecision))), + UnlockDuration: k, + PendingTxns: cid.Undef, + StartEpoch: caculator.upgradeConfig.UpgradeLiftoffHeight, + } + caculator.postCalicoVesting = append(caculator.postCalicoVesting, ns) + } + + return nil +} + +// GetVestedFunds returns all 
funds that have "left" actors that are in the genesis state: +// - For Multisigs, it counts the actual amounts that have vested at the given epoch +// - For Accounts, it counts max(currentBalance - genesisBalance, 0). +func (caculator *CirculatingSupplyCalculator) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) { + vf := big.Zero() + + caculator.genesisMsigLk.Lock() + defer caculator.genesisMsigLk.Unlock() + + // TODO: combine all this? + if caculator.preIgnitionVesting == nil || caculator.genesisPledge.IsZero() || caculator.genesisMarketFunds.IsZero() { + err := caculator.setupGenesisVestingSchedule(ctx) + if err != nil { + return vf, fmt.Errorf("failed to setup pre-ignition vesting schedule: %w", err) + } + } + if caculator.postIgnitionVesting == nil { + err := caculator.setupPostIgnitionVesting(ctx) + if err != nil { + return vf, fmt.Errorf("failed to setup post-ignition vesting schedule: %w", err) + } + } + if caculator.postCalicoVesting == nil { + err := caculator.setupPostCalicoVesting(ctx) + if err != nil { + return vf, fmt.Errorf("failed to setup post-calico vesting schedule: %w", err) + } + } + + if height <= caculator.upgradeConfig.UpgradeIgnitionHeight { + for _, v := range caculator.preIgnitionVesting { + au := big.Sub(v.InitialBalance, v.AmountLocked(height)) + vf = big.Add(vf, au) + } + } else if height <= caculator.upgradeConfig.UpgradeCalicoHeight { + for _, v := range caculator.postIgnitionVesting { + // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0. + // The start epoch changed in the Ignition upgrade. + au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch)) + vf = big.Add(vf, au) + } + } else { + for _, v := range caculator.postCalicoVesting { + // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0. + // The start epoch changed in the Ignition upgrade. 
+ au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch)) + vf = big.Add(vf, au) + } + } + + // After UpgradeAssemblyHeight these funds are accounted for in GetFilReserveDisbursed + if height <= caculator.upgradeConfig.UpgradeAssemblyHeight { + // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch + vf = big.Add(vf, caculator.genesisPledge) + // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch + vf = big.Add(vf, caculator.genesisMarketFunds) + } + + return vf, nil +} + +func (caculator *CirculatingSupplyCalculator) GetFilReserveDisbursed(ctx context.Context, st tree.Tree) (abi.TokenAmount, error) { + ract, found, err := st.GetActor(ctx, builtin.ReserveAddress) + if !found || err != nil { + return big.Zero(), fmt.Errorf("failed to get reserve actor: %v", err) + } + + // If money enters the reserve actor, this could lead to a negative term + return big.Sub(big.NewFromGo(constants.InitialFilReserved), ract.Balance), nil +} + +// GetFilMined query reward contract to get amount of mined fil +func GetFilMined(ctx context.Context, st tree.Tree) (abi.TokenAmount, error) { + ractor, found, err := st.GetActor(ctx, reward.Address) + if !found || err != nil { + return big.Zero(), fmt.Errorf("failed to load reward actor state: %v", err) + } + + rst, err := reward.Load(adt.WrapStore(ctx, st.GetStore()), ractor) + if err != nil { + return big.Zero(), err + } + + return rst.TotalStoragePowerReward() +} + +// GetFilBurnt query burnt contract to get amount of burnt fil +func GetFilBurnt(ctx context.Context, st tree.Tree) (abi.TokenAmount, error) { + burnt, found, err := st.GetActor(ctx, builtin.BurntFundsActorAddr) + if !found || err != nil { + return big.Zero(), fmt.Errorf("failed to load burnt actor: %v", err) + } + + return burnt.Balance, nil +} + +// GetFilLocked query the market contract and power contract to get the amount of locked fils +func (caculator *CirculatingSupplyCalculator) GetFilLocked(ctx 
context.Context, st tree.Tree) (abi.TokenAmount, error) { + filMarketLocked, err := getFilMarketLocked(ctx, st) + if err != nil { + return big.Zero(), fmt.Errorf("failed to get filMarketLocked: %v", err) + } + + filPowerLocked, err := getFilPowerLocked(ctx, st) + if err != nil { + return big.Zero(), fmt.Errorf("failed to get filPowerLocked: %v", err) + } + + return big.Add(filMarketLocked, filPowerLocked), nil +} + +func getFilMarketLocked(ctx context.Context, st tree.Tree) (abi.TokenAmount, error) { + act, found, err := st.GetActor(ctx, market.Address) + if !found || err != nil { + return big.Zero(), fmt.Errorf("failed to load market actor: %v", err) + } + + mst, err := market.Load(adt.WrapStore(ctx, st.GetStore()), act) + if err != nil { + return big.Zero(), fmt.Errorf("failed to load market state: %v", err) + } + + return mst.TotalLocked() +} + +func getFilPowerLocked(ctx context.Context, st tree.Tree) (abi.TokenAmount, error) { + pactor, found, err := st.GetActor(ctx, power.Address) + if !found || err != nil { + return big.Zero(), fmt.Errorf("failed to load power actor: %v", err) + } + + pst, err := power.Load(adt.WrapStore(ctx, st.GetStore()), pactor) + if err != nil { + return big.Zero(), fmt.Errorf("failed to load power state: %v", err) + } + + return pst.TotalLocked() +} diff --git a/pkg/chain/circulating_supply_mock.go b/pkg/chain/circulating_supply_mock.go new file mode 100644 index 0000000000..f5e02288c4 --- /dev/null +++ b/pkg/chain/circulating_supply_mock.go @@ -0,0 +1,32 @@ +package chain + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var _ ICirculatingSupplyCalcualtor = (*MockCirculatingSupplyCalculator)(nil) + +type MockCirculatingSupplyCalculator struct{} + +func NewMockCirculatingSupplyCalculator() ICirculatingSupplyCalcualtor { + return &MockCirculatingSupplyCalculator{} +} + +func (m 
MockCirculatingSupplyCalculator) GetCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st tree.Tree) (types.CirculatingSupply, error) { + return types.CirculatingSupply{ + FilVested: abi.TokenAmount{}, + FilMined: abi.TokenAmount{}, + FilBurnt: abi.TokenAmount{}, + FilLocked: abi.TokenAmount{}, + FilCirculating: abi.TokenAmount{}, + FilReserveDisbursed: abi.TokenAmount{}, + }, nil +} + +func (m MockCirculatingSupplyCalculator) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) { + return abi.TokenAmount{}, nil +} diff --git a/pkg/chain/coalescer.go b/pkg/chain/coalescer.go new file mode 100644 index 0000000000..7869345c97 --- /dev/null +++ b/pkg/chain/coalescer.go @@ -0,0 +1,219 @@ +package chain + +import ( + "context" + "time" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +// WrapHeadChangeCoalescer wraps a ReorgNotifee with a head change coalescer. +// minDelay is the minimum coalesce delay; when a head change is first received, the coalescer will +// +// wait for that long to coalesce more head changes. +// +// maxDelay is the maximum coalesce delay; the coalescer will not delay delivery of a head change +// +// more than that. +// +// mergeInterval is the interval that triggers additional coalesce delay; if the last head change was +// +// within the merge interval when the coalesce timer fires, then the coalesce time is extended +// by min delay and up to max delay total. +func WrapHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) ReorgNotifee { + c := NewHeadChangeCoalescer(fn, minDelay, maxDelay, mergeInterval) + return c.HeadChange +} + +// HeadChangeCoalescer is a stateful reorg notifee which coalesces incoming head changes +// with pending head changes to reduce state computations from head change notifications. 
+type HeadChangeCoalescer struct { + notify ReorgNotifee + + ctx context.Context + cancel func() + + eventq chan headChange + + revert []*types.TipSet + apply []*types.TipSet +} + +type headChange struct { + revert, apply []*types.TipSet +} + +// NewHeadChangeCoalescer creates a HeadChangeCoalescer. +func NewHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) *HeadChangeCoalescer { + ctx, cancel := context.WithCancel(context.Background()) + c := &HeadChangeCoalescer{ + notify: fn, + ctx: ctx, + cancel: cancel, + eventq: make(chan headChange), + } + + go c.background(minDelay, maxDelay, mergeInterval) + + return c +} + +// HeadChange is the ReorgNotifee callback for the stateful coalescer; it receives an incoming +// head change and schedules dispatch of a coalesced head change in the background. +func (c *HeadChangeCoalescer) HeadChange(revert, apply []*types.TipSet) error { + select { + case c.eventq <- headChange{revert: revert, apply: apply}: + return nil + case <-c.ctx.Done(): + return c.ctx.Err() + } +} + +// Close closes the coalescer and cancels the background dispatch goroutine. +// Any further notification will result in an error. 
+func (c *HeadChangeCoalescer) Close() error { + select { + case <-c.ctx.Done(): + default: + c.cancel() + } + + return nil +} + +// Implementation details + +func (c *HeadChangeCoalescer) background(minDelay, maxDelay, mergeInterval time.Duration) { + var timerC <-chan time.Time + var first, last time.Time + + for { + select { + case evt := <-c.eventq: + c.coalesce(evt.revert, evt.apply) + + now := time.Now() + last = now + if first.IsZero() { + first = now + } + + if timerC == nil { + timerC = time.After(minDelay) + } + + case now := <-timerC: + sinceFirst := now.Sub(first) + sinceLast := now.Sub(last) + + if sinceLast < mergeInterval && sinceFirst < maxDelay { + // coalesce some more + maxWait := maxDelay - sinceFirst + wait := minDelay + if maxWait < wait { + wait = maxWait + } + + timerC = time.After(wait) + } else { + // dispatch + c.dispatch() + + first = time.Time{} + last = time.Time{} + timerC = nil + } + + case <-c.ctx.Done(): + if c.revert != nil || c.apply != nil { + c.dispatch() + } + return + } + } +} + +func (c *HeadChangeCoalescer) coalesce(revert, apply []*types.TipSet) { + // newly reverted tipsets cancel out with pending applys. + // similarly, newly applied tipsets cancel out with pending reverts. 
+ + // pending tipsets + pendRevert := make(map[string]struct{}, len(c.revert)) + for _, ts := range c.revert { + pendRevert[ts.Key().String()] = struct{}{} + } + + pendApply := make(map[string]struct{}, len(c.apply)) + for _, ts := range c.apply { + pendApply[ts.Key().String()] = struct{}{} + } + + // incoming tipsets + reverting := make(map[string]struct{}, len(revert)) + for _, ts := range revert { + reverting[ts.Key().String()] = struct{}{} + } + + applying := make(map[string]struct{}, len(apply)) + for _, ts := range apply { + applying[ts.Key().String()] = struct{}{} + } + + // coalesced revert set + // - pending reverts are cancelled by incoming applys + // - incoming reverts are cancelled by pending applys + newRevert := make([]*types.TipSet, 0, len(c.revert)+len(revert)) + for _, ts := range c.revert { + _, cancel := applying[ts.Key().String()] + if cancel { + continue + } + + newRevert = append(newRevert, ts) + } + + for _, ts := range revert { + _, cancel := pendApply[ts.Key().String()] + if cancel { + continue + } + + newRevert = append(newRevert, ts) + } + + // coalesced apply set + // - pending applys are cancelled by incoming reverts + // - incoming applys are cancelled by pending reverts + newApply := make([]*types.TipSet, 0, len(c.apply)+len(apply)) + for _, ts := range c.apply { + _, cancel := reverting[ts.Key().String()] + if cancel { + continue + } + + newApply = append(newApply, ts) + } + + for _, ts := range apply { + _, cancel := pendRevert[ts.Key().String()] + if cancel { + continue + } + + newApply = append(newApply, ts) + } + + // commit the coalesced sets + c.revert = newRevert + c.apply = newApply +} + +func (c *HeadChangeCoalescer) dispatch() { + err := c.notify(c.revert, c.apply) + if err != nil { + log.Errorf("error dispatching coalesced head change notification: %s", err) + } + + c.revert = nil + c.apply = nil +} diff --git a/pkg/chain/coalescer_test.go b/pkg/chain/coalescer_test.go new file mode 100644 index 0000000000..92179db6ce 
--- /dev/null +++ b/pkg/chain/coalescer_test.go @@ -0,0 +1,143 @@ +package chain + +import ( + "fmt" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + tbig "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/crypto" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func mkAddress(i uint64) address.Address { + a, err := address.NewIDAddress(i) + if err != nil { + panic(err) + } + return a +} + +func mkBlock(parents *types.TipSet, weightInc int64, ticketNonce uint64) *types.BlockHeader { + addr := mkAddress(123561) + + c, err := cid.Decode("bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i") + if err != nil { + panic(err) + } + + pstateRoot := c + if parents != nil { + pstateRoot = parents.Blocks()[0].ParentStateRoot + } + + var height abi.ChainEpoch + var tsKey types.TipSetKey + weight := tbig.NewInt(weightInc) + var timestamp uint64 + if parents != nil { + height = parents.Height() + height = height + 1 + timestamp = parents.MinTimestamp() + constants.MainNetBlockDelaySecs + weight = tbig.Add(parents.Blocks()[0].ParentWeight, weight) + tsKey = parents.Key() + } + + return &types.BlockHeader{ + Miner: addr, + ElectionProof: &types.ElectionProof{ + VRFProof: []byte(fmt.Sprintf("====%d=====", ticketNonce)), + }, + Ticket: &types.Ticket{ + VRFProof: []byte(fmt.Sprintf("====%d=====", ticketNonce)), + }, + Parents: tsKey.Cids(), + ParentMessageReceipts: c, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, + ParentWeight: weight, + Messages: c, + Height: height, + Timestamp: timestamp, + ParentStateRoot: pstateRoot, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! 
im a signature")}, + ParentBaseFee: tbig.NewInt(int64(constants.MinimumBaseFee)), + } +} + +func mkTipSet(blks ...*types.BlockHeader) *types.TipSet { + ts, err := types.NewTipSet(blks) + if err != nil { + panic(err) + } + return ts +} + +func TestHeadChangeCoalescer(t *testing.T) { + tf.UnitTest(t) + + notif := make(chan headChange, 1) + c := NewHeadChangeCoalescer(func(revert, apply []*types.TipSet) error { + notif <- headChange{apply: apply, revert: revert} + return nil + }, + 100*time.Millisecond, + 200*time.Millisecond, + 10*time.Millisecond, + ) + defer c.Close() //nolint + + b0 := mkBlock(nil, 0, 0) + root := mkTipSet(b0) + bA := mkBlock(root, 1, 1) + tA := mkTipSet(bA) + bB := mkBlock(root, 1, 2) + tB := mkTipSet(bB) + tAB := mkTipSet(bA, bB) + bC := mkBlock(root, 1, 3) + tABC := mkTipSet(bA, bB, bC) + bD := mkBlock(root, 1, 4) + tABCD := mkTipSet(bA, bB, bC, bD) + bE := mkBlock(root, 1, 5) + tABCDE := mkTipSet(bA, bB, bC, bD, bE) + + c.HeadChange(nil, []*types.TipSet{tA}) //nolint + c.HeadChange(nil, []*types.TipSet{tB}) //nolint + c.HeadChange([]*types.TipSet{tA, tB}, []*types.TipSet{tAB}) //nolint + c.HeadChange([]*types.TipSet{tAB}, []*types.TipSet{tABC}) //nolint + + change := <-notif + + if len(change.revert) != 0 { + t.Fatalf("expected empty revert set but got %d elements", len(change.revert)) + } + if len(change.apply) != 1 { + t.Fatalf("expected single element apply set but got %d elements", len(change.apply)) + } + if change.apply[0] != tABC { + t.Fatalf("expected to apply tABC") + } + + c.HeadChange([]*types.TipSet{tABC}, []*types.TipSet{tABCD}) //nolint + c.HeadChange([]*types.TipSet{tABCD}, []*types.TipSet{tABCDE}) //nolint + + change = <-notif + + if len(change.revert) != 1 { + t.Fatalf("expected single element revert set but got %d elements", len(change.revert)) + } + if change.revert[0] != tABC { + t.Fatalf("expected to revert tABC") + } + if len(change.apply) != 1 { + t.Fatalf("expected single element apply set but got %d elements", 
len(change.apply)) + } + if change.apply[0] != tABCDE { + t.Fatalf("expected to revert tABC") + } +} diff --git a/pkg/chain/message_store.go b/pkg/chain/message_store.go new file mode 100644 index 0000000000..e2ce296b27 --- /dev/null +++ b/pkg/chain/message_store.go @@ -0,0 +1,655 @@ +package chain + +import ( + "bytes" + "context" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + cbor2 "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/specs-actors/actors/util/adt" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + blockstore "github.com/ipfs/go-ipfs-blockstore" + cbor "github.com/ipfs/go-ipld-cbor" + ipld "github.com/ipfs/go-ipld-format" + + "github.com/pkg/errors" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/state/tree" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// MessageProvider is an interface exposing the load methods of the +// MessageStore. 
+type MessageProvider interface { + LoadTipSetMessage(ctx context.Context, ts *types.TipSet) ([]types.BlockMessagesInfo, error) + LoadMetaMessages(context.Context, cid.Cid) ([]*types.SignedMessage, []*types.Message, error) + ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) + LoadUnsignedMessagesFromCids(ctx context.Context, blsCids []cid.Cid) ([]*types.Message, error) + LoadSignedMessagesFromCids(ctx context.Context, secpCids []cid.Cid) ([]*types.SignedMessage, error) + LoadReceipts(context.Context, cid.Cid) ([]types.MessageReceipt, error) + LoadTxMeta(context.Context, cid.Cid) (types.MessageRoot, error) +} + +// MessageWriter is an interface exposing the write methods of the +// MessageStore. +type MessageWriter interface { + StoreMessages(ctx context.Context, secpMessages []*types.SignedMessage, blsMessages []*types.Message) (cid.Cid, error) + StoreReceipts(context.Context, []types.MessageReceipt) (cid.Cid, error) + StoreTxMeta(context.Context, types.MessageRoot) (cid.Cid, error) +} + +// MessageStore stores and loads collections of signed messages and receipts. +type MessageStore struct { + bs blockstoreutil.Blockstore + fkCfg *config.ForkUpgradeConfig +} + +// NewMessageStore creates and returns a new store +func NewMessageStore(bs blockstoreutil.Blockstore, fkCfg *config.ForkUpgradeConfig) *MessageStore { + return &MessageStore{bs: bs, fkCfg: fkCfg} +} + +// LoadMetaMessages loads the signed messages in the collection with cid c from ipld +// storage. 
+func (ms *MessageStore) LoadMetaMessages(ctx context.Context, metaCid cid.Cid) ([]*types.SignedMessage, []*types.Message, error) { + // load txmeta + meta, err := ms.LoadTxMeta(ctx, metaCid) + if err != nil { + return nil, nil, err + } + + secpCids, err := ms.loadAMTCids(ctx, meta.SecpkRoot) + if err != nil { + return nil, nil, err + } + + // load secp messages from cids + secpMsgs, err := ms.LoadSignedMessagesFromCids(ctx, secpCids) + if err != nil { + return nil, nil, err + } + + blsCids, err := ms.loadAMTCids(ctx, meta.BlsRoot) + if err != nil { + return nil, nil, err + } + + // load bls messages from cids + blsMsgs, err := ms.LoadUnsignedMessagesFromCids(ctx, blsCids) + if err != nil { + return nil, nil, err + } + + return secpMsgs, blsMsgs, nil +} + +// ReadMsgMetaCids load messager from message meta cid +func (ms *MessageStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { + meta, err := ms.LoadTxMeta(ctx, mmc) + if err != nil { + return nil, nil, err + } + + secpCids, err := ms.loadAMTCids(ctx, meta.SecpkRoot) + if err != nil { + return nil, nil, err + } + blsCids, err := ms.loadAMTCids(ctx, meta.BlsRoot) + if err != nil { + return nil, nil, err + } + return blsCids, secpCids, nil +} + +// LoadMessage load message of specify message cid +// First get the unsigned message. If it is not found, then get the signed message. 
If still not found, an error will be returned +func (ms *MessageStore) LoadMessage(ctx context.Context, mid cid.Cid) (types.ChainMsg, error) { + m, err := ms.LoadUnsignedMessage(ctx, mid) + if err == nil { + return m, nil + } + + if !ipld.IsNotFound(err) { + log.Warnf("GetCMessage: unexpected error getting unsigned message: %s", err) + } + + return ms.LoadSignedMessage(ctx, mid) +} + +// LoadUnsignedMessage load unsigned messages in tipset +func (ms *MessageStore) LoadUnsignedMessage(ctx context.Context, mid cid.Cid) (*types.Message, error) { + messageBlock, err := ms.bs.Get(ctx, mid) + if err != nil { + return nil, errors.Wrapf(err, "failed to get bls message %s", mid) + } + message := &types.Message{} + if err := message.UnmarshalCBOR(bytes.NewReader(messageBlock.RawData())); err != nil { + return nil, errors.Wrapf(err, "could not decode bls message %s", mid) + } + return message, nil +} + +// LoadUnsignedMessagesFromCids load unsigned messages of cid array +func (ms *MessageStore) LoadSignedMessage(ctx context.Context, mid cid.Cid) (*types.SignedMessage, error) { + messageBlock, err := ms.bs.Get(ctx, mid) + if err != nil { + return nil, errors.Wrapf(err, "failed to get bls message %s", mid) + } + + message := &types.SignedMessage{} + if err := message.UnmarshalCBOR(bytes.NewReader(messageBlock.RawData())); err != nil { + return nil, errors.Wrapf(err, "could not decode secp message %s", mid) + } + + return message, nil +} + +// LoadUnsignedMessagesFromCids load unsigned messages of cid array +func (ms *MessageStore) LoadUnsignedMessagesFromCids(ctx context.Context, blsCids []cid.Cid) ([]*types.Message, error) { + blsMsgs := make([]*types.Message, len(blsCids)) + for i, c := range blsCids { + message, err := ms.LoadUnsignedMessage(ctx, c) + if err != nil { + return nil, err + } + blsMsgs[i] = message + } + return blsMsgs, nil +} + +// LoadSignedMessagesFromCids load signed messages of cid array +func (ms *MessageStore) LoadSignedMessagesFromCids(ctx 
context.Context, secpCids []cid.Cid) ([]*types.SignedMessage, error) { + secpMsgs := make([]*types.SignedMessage, len(secpCids)) + for i, c := range secpCids { + message, err := ms.LoadSignedMessage(ctx, c) + if err != nil { + return nil, err + } + secpMsgs[i] = message + } + return secpMsgs, nil +} + +// StoreMessages puts the input signed messages to a collection and then writes +// this collection to ipld storage. The cid of the collection is returned. +func (ms *MessageStore) StoreMessages(ctx context.Context, secpMessages []*types.SignedMessage, blsMessages []*types.Message) (cid.Cid, error) { + var ret types.MessageRoot + var err error + + // store secp messages + as := cbor.NewCborStore(ms.bs) + secpMsgArr := adt.MakeEmptyArray(adt.WrapStore(ctx, as)) + for i, msg := range secpMessages { + secpCid, err := ms.StoreMessage(msg) + if err != nil { + return cid.Undef, errors.Wrap(err, "could not store secp messages") + } + err = secpMsgArr.Set(uint64(i), (*cbg.CborCid)(&secpCid)) + if err != nil { + return cid.Undef, errors.Wrap(err, "could not store secp messages cid") + } + } + + secpRaw, err := secpMsgArr.Root() + if err != nil { + return cid.Undef, errors.Wrap(err, "could not store secp cids as AMT") + } + ret.SecpkRoot = secpRaw + + // store bls messages + blsMsgArr := adt.MakeEmptyArray(adt.WrapStore(ctx, as)) + for i, msg := range blsMessages { + blsCid, err := ms.StoreMessage(msg) + if err != nil { + return cid.Undef, errors.Wrap(err, "could not store bls messages") + } + err = blsMsgArr.Set(uint64(i), (*cbg.CborCid)(&blsCid)) + if err != nil { + return cid.Undef, errors.Wrap(err, "could not store secp messages cid") + } + } + + blsRaw, err := blsMsgArr.Root() + if err != nil { + return cid.Undef, errors.Wrap(err, "could not store bls cids as AMT") + } + ret.BlsRoot = blsRaw + + return ms.StoreTxMeta(ctx, ret) +} + +// load message from tipset NOTICE skip message with the same nonce +func (ms *MessageStore) LoadTipSetMesssages(ctx context.Context, ts 
*types.TipSet) ([][]*types.SignedMessage, [][]*types.Message, error) { + var secpMessages [][]*types.SignedMessage + var blsMessages [][]*types.Message + + applied := make(map[address.Address]uint64) + + vms := cbor.NewCborStore(ms.bs) + st, err := tree.LoadState(ctx, vms, ts.Blocks()[0].ParentStateRoot) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to load state tree %s", ts.Blocks()[0].ParentStateRoot.String()) + } + + selectMsg := func(m *types.Message) (bool, error) { + var sender address.Address + if ts.Height() >= ms.fkCfg.UpgradeHyperdriveHeight { + sender, err = st.LookupID(m.From) + if err != nil { + return false, err + } + } else { + sender = m.From + } + + // The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise + if _, ok := applied[sender]; !ok { + applied[sender] = m.Nonce + } + + if applied[sender] != m.Nonce { + return false, nil + } + + applied[sender]++ + + return true, nil + } + + for i := 0; i < ts.Len(); i++ { + blk := ts.At(i) + secpMsgs, blsMsgs, err := ms.LoadMetaMessages(ctx, blk.Messages) + if err != nil { + return nil, nil, errors.Wrapf(err, "syncing tip %s failed loading message list %s for block %s", ts.Key(), blk.Messages, blk.Cid()) + } + + var blksecpMessages []*types.SignedMessage + var blkblsMessages []*types.Message + + for _, msg := range blsMsgs { + b, err := selectMsg(msg) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to decide whether to select message for block") + } + if b { + blkblsMessages = append(blkblsMessages, msg) + } + } + + for _, msg := range secpMsgs { + b, err := selectMsg(&msg.Message) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to decide whether to select message for block") + } + if b { + blksecpMessages = append(blksecpMessages, msg) + } + } + + blsMessages = append(blsMessages, blkblsMessages) + secpMessages = append(secpMessages, blksecpMessages) + } + + return secpMessages, blsMessages, nil +} + +// 
LoadReceipts loads the message receipts in the collection with cid c from ipld
AMT", i) + } else if !found { + return nil, errors.Errorf("failed to find receipt %d", i) + } + + cids[i] = cid.Cid(oc) + } + + return cids, nil +} + +// LoadTxMeta loads the secproot, blsroot data from the message store +func (ms *MessageStore) LoadTxMeta(ctx context.Context, c cid.Cid) (types.MessageRoot, error) { + metaBlock, err := ms.bs.Get(ctx, c) + if err != nil { + return types.MessageRoot{}, errors.Wrapf(err, "failed to get tx meta %s", c) + } + + var meta types.MessageRoot + if err := meta.UnmarshalCBOR(bytes.NewReader(metaBlock.RawData())); err != nil { + return types.MessageRoot{}, errors.Wrapf(err, "could not decode tx meta %s", c) + } + return meta, nil +} + +// LoadTipSetMessage message from tipset NOTICE skip message with the same nonce +func (ms *MessageStore) LoadTipSetMessage(ctx context.Context, ts *types.TipSet) ([]types.BlockMessagesInfo, error) { + // gather message + applied := make(map[address.Address]uint64) + + vms := cbor.NewCborStore(ms.bs) + st, err := tree.LoadState(ctx, vms, ts.Blocks()[0].ParentStateRoot) + if err != nil { + return nil, errors.Errorf("failed to load state tree") + } + + selectMsg := func(m *types.Message) (bool, error) { + var sender address.Address + if ts.Height() >= ms.fkCfg.UpgradeHyperdriveHeight { + sender, err = st.LookupID(m.From) + if err != nil { + return false, err + } + } else { + sender = m.From + } + + // The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise + if _, ok := applied[sender]; !ok { + applied[sender] = m.Nonce + } + + if applied[sender] != m.Nonce { + return false, nil + } + + applied[sender]++ + + return true, nil + } + + var blockMsg []types.BlockMessagesInfo + for i := 0; i < ts.Len(); i++ { + blk := ts.At(i) + secpMsgs, blsMsgs, err := ms.LoadMetaMessages(ctx, blk.Messages) // Corresponding to MessagesForBlock of lotus + if err != nil { + return nil, errors.Wrapf(err, "syncing tip %s failed loading message list %s for block %s", ts.Key(), 
blk.Messages, blk.Cid()) + } + + sBlsMsg := make([]types.ChainMsg, 0, len(blsMsgs)) + sSecpMsg := make([]types.ChainMsg, 0, len(secpMsgs)) + for _, msg := range blsMsgs { + b, err := selectMsg(msg) + if err != nil { + return nil, errors.Wrap(err, "failed to decide whether to select message for block") + } + if b { + sBlsMsg = append(sBlsMsg, msg) + } + } + for _, msg := range secpMsgs { + b, err := selectMsg(&msg.Message) + if err != nil { + return nil, errors.Wrap(err, "failed to decide whether to select message for block") + } + if b { + sSecpMsg = append(sSecpMsg, msg) + } + } + + blockMsg = append(blockMsg, types.BlockMessagesInfo{ + BlsMessages: sBlsMsg, + SecpkMessages: sSecpMsg, + Block: blk, + }) + } + + return blockMsg, nil +} + +// MessagesForTipset return of message ( bls message + secp message) of tipset +func (ms *MessageStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { + bmsgs, err := ms.LoadTipSetMessage(context.TODO(), ts) + if err != nil { + return nil, err + } + + var out []types.ChainMsg + for _, bm := range bmsgs { + out = append(out, bm.BlsMessages...) + out = append(out, bm.SecpkMessages...) 
+ } + + return out, nil +} + +// StoreMessage put message(include signed message and unsigned message) to database +func (ms *MessageStore) StoreMessage(message types.ChainMsg) (cid.Cid, error) { + return cbor.NewCborStore(ms.bs).Put(context.TODO(), message) +} + +// StoreTxMeta writes the secproot, blsroot block to the message store +func (ms *MessageStore) StoreTxMeta(ctx context.Context, meta types.MessageRoot) (cid.Cid, error) { + return cbor.NewCborStore(ms.bs).Put(ctx, &meta) +} + +func MakeBlock(obj cbor2.Marshaler) (blocks.Block, error) { + buf := new(bytes.Buffer) + err := obj.MarshalCBOR(buf) + if err != nil { + return nil, err + } + data := buf.Bytes() + c, err := constants.DefaultCidBuilder.Sum(data) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, c) +} + +// todo move to a more suitable position +func ComputeNextBaseFee(baseFee abi.TokenAmount, gasLimitUsed int64, noOfBlocks int, epoch abi.ChainEpoch, upgrade *config.ForkUpgradeConfig) abi.TokenAmount { + // deta := gasLimitUsed/noOfBlocks - constants.BlockGasTarget + // change := baseFee * deta / BlockGasTarget + // nextBaseFee = baseFee + change + // nextBaseFee = max(nextBaseFee, constants.MinimumBaseFee) + + var delta int64 + if epoch > upgrade.UpgradeSmokeHeight { + delta = gasLimitUsed / int64(noOfBlocks) + delta -= constants.BlockGasTarget + } else { + delta = constants.PackingEfficiencyDenom * gasLimitUsed / (int64(noOfBlocks) * constants.PackingEfficiencyNum) + delta -= constants.BlockGasTarget + } + + // cap change at 12.5% (BaseFeeMaxChangeDenom) by capping delta + if delta > constants.BlockGasTarget { + delta = constants.BlockGasTarget + } + if delta < -constants.BlockGasTarget { + delta = -constants.BlockGasTarget + } + + change := big.Mul(baseFee, big.NewInt(delta)) + change = big.Div(change, big.NewInt(constants.BlockGasTarget)) + change = big.Div(change, big.NewInt(constants.BaseFeeMaxChangeDenom)) + + nextBaseFee := big.Add(baseFee, change) + if 
big.Cmp(nextBaseFee, big.NewInt(constants.MinimumBaseFee)) < 0 { + nextBaseFee = big.NewInt(constants.MinimumBaseFee) + } + return nextBaseFee +} + +// todo move to a more suitable position +func (ms *MessageStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet, upgrade *config.ForkUpgradeConfig) (abi.TokenAmount, error) { + zero := abi.NewTokenAmount(0) + baseHeight := ts.Height() + + if upgrade.UpgradeBreezeHeight >= 0 && baseHeight > upgrade.UpgradeBreezeHeight && baseHeight < upgrade.UpgradeBreezeHeight+upgrade.BreezeGasTampingDuration { + return abi.NewTokenAmount(100), nil + } + + // totalLimit is sum of GasLimits of unique messages in a tipset + totalLimit := int64(0) + + seen := make(map[cid.Cid]struct{}) + + for _, b := range ts.Blocks() { + secpMsgs, blsMsgs, err := ms.LoadMetaMessages(ctx, b.Messages) + if err != nil { + return zero, errors.Wrapf(err, "error getting messages for: %s", b.Cid()) + } + + for _, m := range blsMsgs { + c := m.Cid() + if _, ok := seen[c]; !ok { + totalLimit += m.GasLimit + seen[c] = struct{}{} + } + } + for _, m := range secpMsgs { + c := m.Cid() + if _, ok := seen[c]; !ok { + totalLimit += m.Message.GasLimit + seen[c] = struct{}{} + } + } + } + + parentBaseFee := ts.Blocks()[0].ParentBaseFee + + return ComputeNextBaseFee(parentBaseFee, totalLimit, len(ts.Blocks()), baseHeight, upgrade), nil +} + +func GetReceiptRoot(receipts []types.MessageReceipt) (cid.Cid, error) { + bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) + as := cbor.NewCborStore(bs) + rectarr := adt.MakeEmptyArray(adt.WrapStore(context.TODO(), as)) + for i, receipt := range receipts { + if err := rectarr.Set(uint64(i), &receipt); err != nil { + return cid.Undef, errors.Wrapf(err, "failed to build receipts amt") + } + } + return rectarr.Root() +} + +func GetChainMsgRoot(ctx context.Context, messages []types.ChainMsg) (cid.Cid, error) { + tmpbs := blockstoreutil.NewTemporary() + tmpstore := adt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) + + arr := 
adt.MakeEmptyArray(tmpstore) + + for i, m := range messages { + b, err := m.ToStorageBlock() + if err != nil { + return cid.Undef, err + } + k := cbg.CborCid(b.Cid()) + if err := arr.Set(uint64(i), &k); err != nil { + return cid.Undef, errors.Wrap(err, "failed to put message") + } + } + + return arr.Root() +} + +// computeMsgMeta computes the root CID of the combined arrays of message CIDs +// of both types (BLS and Secpk). +func ComputeMsgMeta(bs blockstore.Blockstore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, error) { + // block headers use adt0 + store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) + bmArr := adt.MakeEmptyArray(store) + smArr := adt.MakeEmptyArray(store) + + for i, m := range bmsgCids { + c := cbg.CborCid(m) + if err := bmArr.Set(uint64(i), &c); err != nil { + return cid.Undef, err + } + } + + for i, m := range smsgCids { + c := cbg.CborCid(m) + if err := smArr.Set(uint64(i), &c); err != nil { + return cid.Undef, err + } + } + + bmroot, err := bmArr.Root() + if err != nil { + return cid.Undef, err + } + + smroot, err := smArr.Root() + if err != nil { + return cid.Undef, err + } + + mrcid, err := store.Put(store.Context(), &types.MessageRoot{ + BlsRoot: bmroot, + SecpkRoot: smroot, + }) + if err != nil { + return cid.Undef, errors.Wrap(err, "failed to put msgmeta") + } + + return mrcid, nil +} diff --git a/pkg/chain/message_store_test.go b/pkg/chain/message_store_test.go new file mode 100644 index 0000000000..799e1d2b38 --- /dev/null +++ b/pkg/chain/message_store_test.go @@ -0,0 +1,231 @@ +// stm: #unit +package chain_test + +import ( + "context" + "io" + "testing" + + "github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/testhelpers" + 
"github.com/filecoin-project/venus/pkg/testhelpers/testflags" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-datastore" + blockstore "github.com/ipfs/go-ipfs-blockstore" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type cborString string + +func (cs cborString) MarshalCBOR(w io.Writer) error { + cw := cbg.NewCborWriter(w) + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(cs))); err != nil { + return err + } + _, err := io.WriteString(w, string(cs)) + return err +} + +func (cs *cborString) UnmarshalCBOR(r io.Reader) error { + sval, err := cbg.ReadString(cbg.NewCborReader(r)) + if err != nil { + return err + } + *cs = cborString(sval) + return nil +} + +func TestMessageStoreMessagesHappy(t *testing.T) { + testflags.UnitTest(t) + ctx := context.Background() + keys := testhelpers.MustGenerateKeyInfo(2, 42) + mm := testhelpers.NewMessageMaker(t, keys) + + alice := mm.Addresses()[0] + bob := mm.Addresses()[1] + + signedMsgs := []*types.SignedMessage{ + mm.NewSignedMessage(alice, 0), + mm.NewSignedMessage(alice, 1), + mm.NewSignedMessage(bob, 0), + mm.NewSignedMessage(alice, 2), + mm.NewSignedMessage(alice, 3), + mm.NewSignedMessage(bob, 1), + mm.NewSignedMessage(alice, 4), + mm.NewSignedMessage(bob, 2), + } + + unsignedMsgs := []*types.Message{ + mm.NewUnsignedMessage(alice, 4), + } + + bs := blockstoreutil.Adapt(blockstore.NewBlockstore(datastore.NewMapDatastore())) + ms := chain.NewMessageStore(bs, config.DefaultForkUpgradeParam) + // stm: @CHAIN_MESSAGE_STORE_MESSAGES_001 + msgsCid, err := ms.StoreMessages(ctx, signedMsgs, unsignedMsgs) + assert.NoError(t, err) + + // stm: @CHAIN_MESSAGE_STORE_LOAD_META_001, @CHAIN_MESSAGE_LOAD_SIGNED_FROM_CIDS_001, @CHAIN_MESSAGE_LOAD_UNSIGNED_FROM_CIDS_001 + rtMsgs, _, err := ms.LoadMetaMessages(ctx, msgsCid) + assert.NoError(t, err) 
+ assert.Equal(t, signedMsgs, rtMsgs) + + // stm: @CHAIN_MESSAGE_READ_META_CID_001 + _, _, err = ms.ReadMsgMetaCids(ctx, msgsCid) + assert.NoError(t, err) + + var notFoundCID cid.Cid + testutil.Provide(t, ¬FoundCID) + + { + meta, err := ms.LoadTxMeta(ctx, msgsCid) + require.NoError(t, err) + + as := cbor.NewCborStore(bs) + + var goodCid = signedMsgs[0].Cid() + + secpMsgArr := adt.MakeEmptyArray(adt.WrapStore(ctx, as)) + assert.NoError(t, secpMsgArr.Set(0, (*cbg.CborCid)(&goodCid))) + blsMsgArr := adt.MakeEmptyArray(adt.WrapStore(ctx, as)) + assert.NoError(t, blsMsgArr.Set(0, (*cbg.CborCid)(¬FoundCID))) + meta.SecpkRoot, err = secpMsgArr.Root() + assert.NoError(t, err) + meta.BlsRoot, err = blsMsgArr.Root() + assert.NoError(t, err) + + // store a 'bad' message meta with bls(unsigned) message can't be found. + metaCid, err := ms.StoreTxMeta(ctx, meta) + require.NoError(t, err) + + // error occurs while load unsigned messages of cid array + // stm: @CHAIN_MESSAGE_STORE_LOAD_META_005, @CHAIN_MESSAGE_STORE_LOAD_META_002 + _, _, err = ms.LoadMetaMessages(ctx, metaCid) + require.Error(t, err) + + // store a 'bad' message meta with bad root of 'AMTCIDs' + meta.BlsRoot = notFoundCID + metaCid, err = ms.StoreTxMeta(ctx, meta) + require.NoError(t, err) + + // stm: @CHAIN_MESSAGE_STORE_LOAD_META_003 + _, _, err = ms.LoadMetaMessages(ctx, metaCid) + assert.Error(t, err) + + // stm: @CHAIN_MESSAGE_READ_META_CID_002, @CHAIN_MESSAGE_READ_META_CID_003 + _, _, err = ms.ReadMsgMetaCids(ctx, metaCid) + require.Error(t, err) + + // store a 'bad' message meta with secp(signed) message can't be found. 
+ assert.NoError(t, secpMsgArr.Set(uint64(1), (*cbg.CborCid)(¬FoundCID))) + meta.SecpkRoot, err = secpMsgArr.Root() + require.NoError(t, err) + metaCid, err = ms.StoreTxMeta(ctx, meta) + require.NoError(t, err) + + // error occurs while loading signed messages of cir array + // stm: @CHAIN_MESSAGE_STORE_LOAD_META_004, @CHAIN_MESSAGE_LOAD_SIGNED_003, @CHAIN_MESSAGE_LOAD_SIGNED_FROM_CIDS_002 + _, _, err = ms.LoadMetaMessages(ctx, metaCid) + assert.Error(t, err) + } +} + +func TestMessageStoreReceiptsHappy(t *testing.T) { + ctx := context.Background() + mr := testhelpers.NewReceiptMaker() + + receipts := []types.MessageReceipt{ + mr.NewReceipt(), + mr.NewReceipt(), + mr.NewReceipt(), + } + + bs := blockstoreutil.Adapt(blockstore.NewBlockstore(datastore.NewMapDatastore())) + ms := chain.NewMessageStore(bs, config.DefaultForkUpgradeParam) + receiptCids, err := ms.StoreReceipts(ctx, receipts) + assert.NoError(t, err) + + // stm: @CHAIN_MESSAGE_LOAD_RECEIPTS_001 + rtReceipts, err := ms.LoadReceipts(ctx, receiptCids) + assert.NoError(t, err) + assert.Equal(t, receipts, rtReceipts) + + var badReceiptID cid.Cid + // stm: CHAIN_MESSAGE_LOAD_RECEIPTS_002 + _, err = ms.LoadReceipts(ctx, badReceiptID) + assert.Error(t, err) + + rectArr := adt.MakeEmptyArray(adt.WrapStore(ctx, cbor.NewCborStore(bs))) + assert.NoError(t, rectArr.Set(0, cborString("invalid receipt data"))) + + badReceiptID, err = rectArr.Root() + assert.NoError(t, err) + + // expect unmarshal to receipt failed + // stm: @CHAIN_MESSAGE_LOAD_RECEIPTS_003 + _, err = ms.LoadReceipts(ctx, badReceiptID) + assert.Error(t, err) +} + +func TestMessageStoreLoadMessage(t *testing.T) { + testflags.UnitTest(t) + ctx := context.Background() + keys := testhelpers.MustGenerateKeyInfo(2, 42) + mm := testhelpers.NewMessageMaker(t, keys) + + alice := mm.Addresses()[0] + bob := mm.Addresses()[1] + + signedMsgs := []*types.SignedMessage{ + mm.NewSignedMessage(alice, 0), + mm.NewSignedMessage(bob, 0), + } + + unsignedMsgs := 
[]*types.Message{ + mm.NewUnsignedMessage(alice, 4), + } + + bs := blockstoreutil.Adapt(blockstore.NewBlockstore(datastore.NewMapDatastore())) + ms := chain.NewMessageStore(bs, config.DefaultForkUpgradeParam) + _, err := ms.StoreMessages(ctx, signedMsgs, unsignedMsgs) + assert.NoError(t, err) + + var notFoundCID cid.Cid + testutil.Provide(t, ¬FoundCID) + + // stm @CHAIN_MESSAGE_LOAD_MESSAGE_001, @CHAIN_MESSAGE_LOAD_SIGNED_001 + _, err = ms.LoadMessage(ctx, signedMsgs[0].Cid()) + assert.NoError(t, err) + + // stm: @CHAIN_MESSAGE_LOAD_UNSIGNED_001 + _, err = ms.LoadUnsignedMessage(ctx, unsignedMsgs[0].Cid()) + assert.NoError(t, err) + + // put a message with un-cbor-Unmarshal-able message data. + badMsgCID, err := cbor.NewCborStore(bs).Put(ctx, (*cbg.CborCid)(¬FoundCID)) + assert.NoError(t, err) + + // Unmarshal to signed message failed error. + // stm: @CHAIN_MESSAGE_LOAD_SIGNED_003 + _, err = ms.LoadMessage(ctx, badMsgCID) + assert.Error(t, err) + + // If message is not found, return error; also getting message from blockstore failed in 'LoadUnsignedMessage' + // stm: @CHAIN_MESSAGE_LOAD_MESSAGE_003, @CHAIN_MESSAGE_LOAD_SIGNED_002, @CHAIN_MESSAGE_LOAD_UNSIGNED_002 + _, err = ms.LoadMessage(ctx, notFoundCID) + assert.Error(t, err) + + // Unmarshal to unsigned message failed error. 
+ // stm: @CHAIN_MESSAGE_LOAD_UNSIGNED_003, @CHAIN_MESSAGE_LOAD_UNSIGNED_FROM_CIDS_002 + _, err = ms.LoadUnsignedMessagesFromCids(ctx, []cid.Cid{badMsgCID}) + assert.Error(t, err) +} diff --git a/pkg/chain/randomness.go b/pkg/chain/randomness.go new file mode 100644 index 0000000000..371141cbe3 --- /dev/null +++ b/pkg/chain/randomness.go @@ -0,0 +1,313 @@ +package chain + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "math/rand" + + "github.com/filecoin-project/venus/pkg/beacon" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + "github.com/minio/blake2b-simd" + "github.com/pkg/errors" +) + +type RandomSeed []byte + +var _ RandomnessSource = (*GenesisRandomnessSource)(nil) + +// A sampler for use when computing genesis state (the state that the genesis block points to as parent state). +// There is no chain to sample a seed from. 
+type GenesisRandomnessSource struct { + vrf types.VRFPi +} + +func NewGenesisRandomnessSource(vrf types.VRFPi) *GenesisRandomnessSource { + return &GenesisRandomnessSource{vrf: vrf} +} + +func (g *GenesisRandomnessSource) ChainGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint + return out, nil +} + +func (g *GenesisRandomnessSource) ChainGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint + return out, nil +} + +func (g *GenesisRandomnessSource) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint + return out, nil +} + +func (g *GenesisRandomnessSource) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint + return out, nil +} + +func (g *GenesisRandomnessSource) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint + return out, nil +} + +func (g *GenesisRandomnessSource) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint + return out, nil +} + 
+func (g *GenesisRandomnessSource) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint + return out, nil +} + +// Computes a random seed from raw ticket bytes. +// A randomness seed is the VRF digest of the minimum ticket of the tipset at or before the requested epoch +func MakeRandomSeed(rawVRFProof types.VRFPi) (RandomSeed, error) { + digest := rawVRFProof.Digest() + return digest[:], nil +} + +///// GetRandomnessFromTickets derivation ///// + +// RandomnessSource provides randomness to actors. +type RandomnessSource interface { + GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) +} + +type TipSetByHeight interface { + GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + GetTipSetByHeight(context.Context, *types.TipSet, abi.ChainEpoch, bool) (*types.TipSet, error) +} + +var _ RandomnessSource = (*ChainRandomnessSource)(nil) + +type NetworkVersionGetter func(context.Context, abi.ChainEpoch) network.Version + +// A randomness source that seeds computations with a sample drawn from a chain epoch. 
+type ChainRandomnessSource struct { //nolint + reader TipSetByHeight + head types.TipSetKey + beacon beacon.Schedule + networkVersionGetter NetworkVersionGetter +} + +func NewChainRandomnessSource(reader TipSetByHeight, head types.TipSetKey, beacon beacon.Schedule, networkVersionGetter NetworkVersionGetter) RandomnessSource { + return &ChainRandomnessSource{reader: reader, head: head, beacon: beacon, networkVersionGetter: networkVersionGetter} +} + +func (c *ChainRandomnessSource) GetBeaconRandomnessTipset(ctx context.Context, randEpoch abi.ChainEpoch, lookback bool) (*types.TipSet, error) { + ts, err := c.reader.GetTipSet(ctx, c.head) + if err != nil { + return nil, err + } + + if randEpoch > ts.Height() { + return nil, fmt.Errorf("cannot draw randomness from the future") + } + + searchHeight := randEpoch + if searchHeight < 0 { + searchHeight = 0 + } + + randTS, err := c.reader.GetTipSetByHeight(ctx, ts, searchHeight, lookback) + if err != nil { + return nil, err + } + return randTS, nil +} + +// Draws a ticket from the chain identified by `head` and the highest tipset with height <= `epoch`. +// If `head` is empty (as when processing the pre-genesis state or the genesis block), the seed derived from +// a fixed genesis ticket. +// Note that this may produce the same value for different, neighbouring epochs when the epoch references a round +// in which no blocks were produced (an empty tipset or "null block"). A caller desiring a unique see for each epoch +// should blend in some distinguishing value (such as the epoch itself) into a hash of this ticket. 
+func (c *ChainRandomnessSource) GetChainRandomness(ctx context.Context, epoch abi.ChainEpoch, lookback bool) (types.Ticket, error) { + if !c.head.IsEmpty() { + start, err := c.reader.GetTipSet(ctx, c.head) + if err != nil { + return types.Ticket{}, err + } + + if epoch > start.Height() { + return types.Ticket{}, fmt.Errorf("cannot draw randomness from the future") + } + + searchHeight := epoch + if searchHeight < 0 { + searchHeight = 0 + } + + // Note: it is not an error to have epoch > start.Height(); in the case of a run of null blocks the + // sought-after height may be after the base (last non-empty) tipset. + // It's also not an error for the requested epoch to be negative. + tip, err := c.reader.GetTipSetByHeight(ctx, start, searchHeight, lookback) + if err != nil { + return types.Ticket{}, err + } + return *tip.MinTicket(), nil + } + return types.Ticket{}, fmt.Errorf("cannot get ticket for empty tipset") +} + +// network v0-12 +func (c *ChainRandomnessSource) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + ticket, err := c.GetChainRandomness(ctx, round, true) + if err != nil { + return nil, err + } + // if at (or just past -- for null epochs) appropriate epoch + // or at genesis (works for negative epochs) + return DrawRandomness(ticket.VRFProof, pers, round, entropy) +} + +// network v13 and on +func (c *ChainRandomnessSource) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + ticket, err := c.GetChainRandomness(ctx, round, false) + if err != nil { + return nil, err + } + // if at (or just past -- for null epochs) appropriate epoch + // or at genesis (works for negative epochs) + return DrawRandomness(ticket.VRFProof, pers, round, entropy) +} + +// network v0-12 +func (c *ChainRandomnessSource) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, 
entropy []byte) ([]byte, error) { + randTS, err := c.GetBeaconRandomnessTipset(ctx, round, true) + if err != nil { + return nil, err + } + + be, err := FindLatestDRAND(ctx, randTS, c.reader) + if err != nil { + return nil, err + } + + // if at (or just past -- for null epochs) appropriate epoch + // or at genesis (works for negative epochs) + return DrawRandomness(be.Data, pers, round, entropy) +} + +// network v13 +func (c *ChainRandomnessSource) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + randTS, err := c.GetBeaconRandomnessTipset(ctx, round, false) + if err != nil { + return nil, err + } + + be, err := FindLatestDRAND(ctx, randTS, c.reader) + if err != nil { + return nil, err + } + + // if at (or just past -- for null epochs) appropriate epoch + // or at genesis (works for negative epochs) + return DrawRandomness(be.Data, pers, round, entropy) +} + +// network v14 and on +func (c *ChainRandomnessSource) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + if filecoinEpoch < 0 { + return c.GetBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) + } + + be, err := c.extractBeaconEntryForEpoch(ctx, filecoinEpoch) + if err != nil { + log.Errorf("failed to get beacon entry as expected: %s", err) + return nil, err + } + + return DrawRandomness(be.Data, pers, filecoinEpoch, entropy) +} + +func (c *ChainRandomnessSource) extractBeaconEntryForEpoch(ctx context.Context, filecoinEpoch abi.ChainEpoch) (*types.BeaconEntry, error) { + randTS, err := c.GetBeaconRandomnessTipset(ctx, filecoinEpoch, false) + if err != nil { + return nil, err + } + + nv := c.networkVersionGetter(ctx, filecoinEpoch) + + round := c.beacon.BeaconForEpoch(filecoinEpoch).MaxBeaconRoundForEpoch(nv, filecoinEpoch) + + for i := 0; i < 20; i++ { + cbe := randTS.Blocks()[0].BeaconEntries + for _, v := range cbe { + if v.Round == 
round { + return &v, nil + } + } + + next, err := c.reader.GetTipSet(ctx, randTS.Parents()) + if err != nil { + return nil, fmt.Errorf("failed to load parents when searching back for beacon entry: %w", err) + } + + randTS = next + } + + return nil, fmt.Errorf("didn't find beacon for round %d (epoch %d)", round, filecoinEpoch) +} + +// BlendEntropy get randomness with chain value. sha256(buf(tag, seed, epoch, entropy)) +func BlendEntropy(tag crypto.DomainSeparationTag, seed RandomSeed, epoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + buffer := bytes.Buffer{} + err := binary.Write(&buffer, binary.BigEndian, int64(tag)) + if err != nil { + return nil, errors.Wrap(err, "failed to write tag for randomness") + } + _, err = buffer.Write(seed) + if err != nil { + return nil, errors.Wrap(err, "failed to write seed for randomness") + } + err = binary.Write(&buffer, binary.BigEndian, int64(epoch)) + if err != nil { + return nil, errors.Wrap(err, "failed to write epoch for randomness") + } + _, err = buffer.Write(entropy) + if err != nil { + return nil, errors.Wrap(err, "failed to write entropy for randomness") + } + bufHash := blake2b.Sum256(buffer.Bytes()) + return bufHash[:], nil +} + +func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + h := blake2b.New256() + if err := binary.Write(h, binary.BigEndian, int64(pers)); err != nil { + return nil, fmt.Errorf("deriving randomness: %s", err) + } + VRFDigest := blake2b.Sum256(rbase) + _, err := h.Write(VRFDigest[:]) + if err != nil { + return nil, fmt.Errorf("hashing VRFDigest: %s", err) + } + if err := binary.Write(h, binary.BigEndian, round); err != nil { + return nil, fmt.Errorf("deriving randomness: %s", err) + } + _, err = h.Write(entropy) + if err != nil { + return nil, fmt.Errorf("hashing entropy: %s", err) + } + + return h.Sum(nil), nil +} diff --git a/pkg/chain/reorg.go b/pkg/chain/reorg.go new file mode 100644 index 
0000000000..ec7e7785b4 --- /dev/null +++ b/pkg/chain/reorg.go @@ -0,0 +1,32 @@ +package chain + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/pkg/errors" +) + +// IsReorg determines if choosing the end of the newChain as the new head +// would cause a "reorg" given the current head is at curHead. +// A reorg occurs when the old head is not a member of the new chain AND the +// old head is not a subset of the new head. +func IsReorg(old, new, commonAncestor *types.TipSet) bool { + oldSortedSet := old.Key() + newSortedSet := new.Key() + + return !(&newSortedSet).ContainsAll(oldSortedSet) && !commonAncestor.Equals(old) +} + +// ReorgDiff returns the dropped and added block heights resulting from the +// reorg given the old and new heads and their common ancestor. +func ReorgDiff(old, new, commonAncestor *types.TipSet) (abi.ChainEpoch, abi.ChainEpoch, error) { + hOld := old.Height() + hNew := new.Height() + hCommon := commonAncestor.Height() + + if hCommon > hOld || hCommon > hNew { + return 0, 0, errors.New("invalid common ancestor") + } + + return hOld - hCommon, hNew - hCommon, nil +} diff --git a/pkg/chain/reorg_test.go b/pkg/chain/reorg_test.go new file mode 100644 index 0000000000..65577815f4 --- /dev/null +++ b/pkg/chain/reorg_test.go @@ -0,0 +1,99 @@ +package chain_test + +import ( + "context" + "testing" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/assert" + + "github.com/filecoin-project/venus/pkg/chain" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func TestIsReorgFork(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + + // main chain has 3 blocks past CA, fork has 1 + old, new, common := 
getForkOldNewCommon(ctx, t, builder, 2, 3, 1) + assert.True(t, chain.IsReorg(old, new, common)) +} + +func TestIsReorgPrefix(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + // Old head is a direct ancestor of new head + old, new, common := getForkOldNewCommon(ctx, t, builder, 2, 3, 0) + assert.False(t, chain.IsReorg(old, new, common)) +} + +func TestIsReorgSubset(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + old, new, common := getSubsetOldNewCommon(ctx, t, builder, 2) + assert.False(t, chain.IsReorg(old, new, common)) +} + +func TestReorgDiffFork(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + // main chain has 11 blocks past CA, fork has 10 + old, new, common := getForkOldNewCommon(ctx, t, builder, 10, 11, 10) + + dropped, added, err := chain.ReorgDiff(old, new, common) + assert.NoError(t, err) + assert.Equal(t, abi.ChainEpoch(10), dropped) + assert.Equal(t, abi.ChainEpoch(11), added) +} + +func TestReorgDiffSubset(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + old, new, common := getSubsetOldNewCommon(ctx, t, builder, 10) + + dropped, added, err := chain.ReorgDiff(old, new, common) + assert.NoError(t, err) + assert.Equal(t, abi.ChainEpoch(1), dropped) + assert.Equal(t, abi.ChainEpoch(1), added) +} + +// getForkOldNewCommon is a testing helper function that creates chain with the builder. +// The blockchain forks and the common ancestor block is 'a' (> 0) blocks after the genesis block. +// The main chain has an additional 'b' blocks, the fork has an additional 'c' blocks. +// This function returns the forked head, the main head and the common ancestor. 
+func getForkOldNewCommon(ctx context.Context, t *testing.T, builder *chain.Builder, a, b, c int) (*types.TipSet, *types.TipSet, *types.TipSet) { + // Add "a" tipsets to the head of the chainStore. + commonHead := builder.AppendManyOn(ctx, a, types.UndefTipSet) + oldHead := commonHead + + if c > 0 { + oldHead = builder.AppendManyOn(ctx, c, commonHead) + } + newHead := builder.AppendManyOn(ctx, b, commonHead) + return oldHead, newHead, commonHead +} + +// getSubsetOldNewCommon is a testing helper function that creates and stores +// a blockchain in the chainStore. The blockchain has 'a' blocks after genesis +// and then a fork. The forked head has a single block and the main chain +// consists of this single block and another block together forming a tipset +// that is a superset of the forked head. +func getSubsetOldNewCommon(ctx context.Context, t *testing.T, builder *chain.Builder, a int) (*types.TipSet, *types.TipSet, *types.TipSet) { + commonHead := builder.AppendManyBlocksOnBlocks(ctx, a) + block1 := builder.AppendBlockOnBlocks(ctx, commonHead) + block2 := builder.AppendBlockOnBlocks(ctx, commonHead) + + oldHead := testhelpers.RequireNewTipSet(t, block1) + superset := testhelpers.RequireNewTipSet(t, block1, block2) + return oldHead, superset, testhelpers.RequireNewTipSet(t, commonHead) +} diff --git a/pkg/chain/store.go b/pkg/chain/store.go new file mode 100644 index 0000000000..564b8e94f8 --- /dev/null +++ b/pkg/chain/store.go @@ -0,0 +1,1378 @@ +package chain + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "runtime/debug" + "sync" + + "github.com/filecoin-project/pubsub" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + lru "github.com/hashicorp/golang-lru" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + blockstore "github.com/ipfs/go-ipfs-blockstore" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-car" + carutil 
"github.com/ipld/go-car/util" + carv2 "github.com/ipld/go-car/v2" + mh "github.com/multiformats/go-multihash" + "github.com/pkg/errors" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/trace" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/metrics/tracing" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/util" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + _init "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/multisig" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/power" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/reward" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/policy" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// ErrNoMethod is returned by Get when there is no method signature (eg, transfer). +var ErrNoMethod = errors.New("no method") + +// ErrNoActorImpl is returned by Get when the actor implementation doesn't exist, eg +// the actor address is an empty actor, an address that has received a transfer of FIL +// but hasn't yet been upgraded to an account actor. 
(The actor implementation might +// also genuinely be missing, which is not expected.) +var ErrNoActorImpl = errors.New("no actor implementation") + +// GenesisKey is the key at which the genesis Cid is written in the datastore. +var GenesisKey = datastore.NewKey("/consensus/genesisCid") + +var log = logging.Logger("chain.store") + +// HeadKey is the key at which the head tipset cid's are written in the datastore. +var HeadKey = datastore.NewKey("/chain/heaviestTipSet") + +var ErrNotifeeDone = errors.New("notifee is done and should be removed") + +type loadTipSetFunc func(context.Context, types.TipSetKey) (*types.TipSet, error) + +// ReorgNotifee represents a callback that gets called upon reorgs. +type ReorgNotifee func(rev, app []*types.TipSet) error + +var DefaultTipsetLruCacheSize = 10000 + +type reorg struct { + old []*types.TipSet + new []*types.TipSet +} + +// CheckPoint is the key which the check-point written in the datastore. +var CheckPoint = datastore.NewKey("/chain/checkPoint") + +// TSState export this func is just for gen cbor tool to work +type TSState struct { + StateRoot cid.Cid + Receipts cid.Cid +} + +func ActorStore(ctx context.Context, bs blockstore.Blockstore) adt.Store { + return adt.WrapStore(ctx, cbor.NewCborStore(bs)) +} + +// Store is a generic implementation of the Store interface. +// It works(tm) for now. +type Store struct { + // ipldSource is a wrapper around ipld storage. It is used + // for reading filecoin block and state objects kept by the node. + stateAndBlockSource cbor.IpldStore + + bsstore blockstoreutil.Blockstore + + // ds is the datastore for the chain's private metadata which consists + // of the tipset key to state root cid mapping, and the heaviest tipset + // key. + ds repo.Datastore + + // genesis is the CID of the genesis block. + genesis cid.Cid + // head is the tipset at the head of the best known chain. + head *types.TipSet + + checkPoint types.TipSetKey + // Protects head and genesisCid. 
+ mu sync.RWMutex + + // headEvents is a pubsub channel that publishes an event every time the head changes. + // We operate under the assumption that tipsets published to this channel + // will always be queued and delivered to subscribers in the order discovered. + // Successive published tipsets may be supersets of previously published tipsets. + // TODO: rename to notifications. Also, reconsider ordering assumption depending + // on decisions made around the FC node notification system. + // TODO: replace this with a synchronous event bus + // https://github.com/filecoin-project/venus/issues/2309 + headEvents *pubsub.PubSub + + // Tracks tipsets by height/parentset for use by expected consensus. + tipIndex *TipStateCache + + circulatingSupplyCalculator ICirculatingSupplyCalcualtor + + chainIndex *ChainIndex + + reorgCh chan reorg + reorgNotifeeCh chan ReorgNotifee + + tsCache *lru.ARCCache +} + +// NewStore constructs a new default store. +func NewStore(chainDs repo.Datastore, + bsstore blockstoreutil.Blockstore, + genesisCid cid.Cid, + circulatiingSupplyCalculator ICirculatingSupplyCalcualtor, +) *Store { + tsCache, _ := lru.NewARC(DefaultTipsetLruCacheSize) + store := &Store{ + stateAndBlockSource: cbor.NewCborStore(bsstore), + ds: chainDs, + bsstore: bsstore, + headEvents: pubsub.New(64), + + checkPoint: types.EmptyTSK, + genesis: genesisCid, + reorgNotifeeCh: make(chan ReorgNotifee), + tsCache: tsCache, + } + // todo cycle reference , may think a better idea + store.tipIndex = NewTipStateCache(store) + store.chainIndex = NewChainIndex(store.GetTipSet) + store.circulatingSupplyCalculator = circulatiingSupplyCalculator + + val, err := store.ds.Get(context.TODO(), CheckPoint) + if err != nil { + store.checkPoint = types.NewTipSetKey(genesisCid) + } else { + _ = store.checkPoint.UnmarshalCBOR(bytes.NewReader(val)) //nolint:staticcheck + } + log.Infof("check point value: %v", store.checkPoint) + + store.reorgCh = store.reorgWorker(context.TODO()) + return store 
+} + +// Load rebuilds the Store's caches by traversing backwards from the +// most recent best head as stored in its datastore. Because Load uses a +// content addressed datastore it guarantees that parent blocks are correctly +// resolved from the datastore. Furthermore Load ensures that all tipsets +// references correctly have the same parent height, weight and parent set. +// However, Load DOES NOT validate state transitions, it assumes that the +// tipset were only Put to the Store after checking for valid transitions. +// +// Furthermore Load trusts that the Store's backing datastore correctly +// preserves the cids of the heaviest tipset under the "HeadKey" datastore key. +// If the HeadKey cids are tampered with and invalid blocks added to the datastore +// then Load could be tricked into loading an invalid chain. Load will error if the +// head does not link back to the expected genesis block, or the Store's +// datastore does not store a link in the chain. In case of error the caller +// should not consider the chain useable and propagate the error. +func (store *Store) Load(ctx context.Context) (err error) { + ctx, span := trace.StartSpan(ctx, "Store.Load") + defer tracing.AddErrorEndSpan(ctx, span, &err) + + var headTS *types.TipSet + + if headTS, err = store.loadHead(ctx); err != nil { + return err + } + + if headTS.Height() == 0 { + return store.SetHead(ctx, headTS) + } + + latestHeight := headTS.At(0).Height + loopBack := latestHeight - policy.ChainFinality + log.Infof("start loading chain at tipset: %s, height: %d", headTS.Key(), headTS.Height()) + + // `Metadata` of head may not exist, this is okay, its parent's `Meta` is surely exists. + headParent, err := store.GetTipSet(ctx, headTS.Parents()) + if err != nil { + return err + } + + // Provide tipsets directly from the block store, not from the tipset index which is + // being rebuilt by this traversal. 
+ tipsetProvider := TipSetProviderFromBlocks(ctx, store) + for iterator := IterAncestors(ctx, tipsetProvider, headParent); !iterator.Complete(); err = iterator.Next(ctx) { + if err != nil { + return err + } + ts := iterator.Value() + + tipSetMetadata, err := store.LoadTipsetMetadata(ctx, ts) + if err != nil { + return err + } + + store.tipIndex.Put(tipSetMetadata) + + if ts.Height() <= loopBack { + break + } + } + log.Infof("finished loading %d tipsets from %s", latestHeight, headTS.String()) + + // Set actual head. + return store.SetHead(ctx, headTS) +} + +// loadHead loads the latest known head from disk. +func (store *Store) loadHead(ctx context.Context) (*types.TipSet, error) { + tskBytes, err := store.ds.Get(ctx, HeadKey) + if err != nil { + return nil, errors.Wrap(err, "failed to read HeadKey") + } + + var tsk types.TipSetKey + err = tsk.UnmarshalCBOR(bytes.NewReader(tskBytes)) + if err != nil { + return nil, errors.Wrap(err, "failed to cast headCids") + } + + return store.GetTipSet(ctx, tsk) +} + +// LoadTipsetMetadata load tipset status (state root and reciepts) +func (store *Store) LoadTipsetMetadata(ctx context.Context, ts *types.TipSet) (*TipSetMetadata, error) { + h := ts.Height() + key := datastore.NewKey(makeKey(ts.String(), h)) + + tsStateBytes, err := store.ds.Get(ctx, key) + if err != nil { + return nil, errors.Wrapf(err, "failed to read tipset key %s", ts.String()) + } + + var metadata TSState + err = metadata.UnmarshalCBOR(bytes.NewReader(tsStateBytes)) + if err != nil { + return nil, errors.Wrapf(err, "failed to decode tip set metadata %s", ts.String()) + } + return &TipSetMetadata{ + TipSet: ts, + TipSetStateRoot: metadata.StateRoot, + TipSetReceipts: metadata.Receipts, + }, nil +} + +// PutTipSetMetadata persists the blocks of a tipset and the tipset index. +func (store *Store) PutTipSetMetadata(ctx context.Context, tsm *TipSetMetadata) error { + // Update tipindex. + store.tipIndex.Put(tsm) + + // Persist the state mapping. 
+ return store.writeTipSetMetadata(ctx, tsm) +} + +// Ls returns an iterator over tipsets from head to genesis. +func (store *Store) Ls(ctx context.Context, fromTS *types.TipSet, count int) ([]*types.TipSet, error) { + tipsets := []*types.TipSet{fromTS} + fromKey := fromTS.Parents() + for i := 0; i < count-1; i++ { + ts, err := store.GetTipSet(ctx, fromKey) + if err != nil { + return nil, err + } + tipsets = append(tipsets, ts) + fromKey = ts.Parents() + } + types.ReverseTipSet(tipsets) + return tipsets, nil +} + +// GetBlock returns the block identified by `cid`. +func (store *Store) GetBlock(ctx context.Context, blockID cid.Cid) (*types.BlockHeader, error) { + var block types.BlockHeader + err := store.stateAndBlockSource.Get(ctx, blockID, &block) + if err != nil { + return nil, errors.Wrapf(err, "failed to get block %s", blockID.String()) + } + return &block, nil +} + +// GetBlock returns the block identified by `cid`. +func (store *Store) PutObject(ctx context.Context, obj interface{}) (cid.Cid, error) { + return store.stateAndBlockSource.Put(ctx, obj) +} + +// GetTipSet returns the tipset identified by `key`. +func (store *Store) GetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + if key.IsEmpty() { + return store.GetHead(), nil + } + + val, has := store.tsCache.Get(key) + if has { + return val.(*types.TipSet), nil + } + + cids := key.Cids() + blks := make([]*types.BlockHeader, len(cids)) + for idx, c := range cids { + blk, err := store.GetBlock(ctx, c) + if err != nil { + return nil, err + } + + blks[idx] = blk + } + + ts, err := types.NewTipSet(blks) + if err != nil { + return nil, err + } + store.tsCache.Add(key, ts) + + return ts, nil +} + +// GetTipSetByHeight looks back for a tipset at the specified epoch. +// If there are no blocks at the specified epoch, a tipset at an earlier epoch +// will be returned. 
+func (store *Store) GetTipSetByHeight(ctx context.Context, ts *types.TipSet, h abi.ChainEpoch, prev bool) (*types.TipSet, error) { + if ts == nil { + ts = store.head + } + + if h > ts.Height() { + return nil, fmt.Errorf("looking for tipset with height greater than start point") + } + + if h == ts.Height() { + return ts, nil + } + + lbts, err := store.chainIndex.GetTipSetByHeight(ctx, ts, h) + if err != nil { + return nil, err + } + + if lbts.Height() < h { + log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h) + lbts, err = store.chainIndex.GetTipsetByHeightWithoutCache(ctx, ts, h) + if err != nil { + return nil, err + } + } + + if lbts.Height() == h || !prev { + return lbts, nil + } + + return store.GetTipSet(ctx, lbts.Parents()) +} + +// GetTipSetState returns the aggregate state of the tipset identified by `key`. +func (store *Store) GetTipSetState(ctx context.Context, ts *types.TipSet) (tree.Tree, error) { + if ts == nil { + ts = store.head + } + stateCid, err := store.tipIndex.GetTipSetStateRoot(ctx, ts) + if err != nil { + return nil, err + } + return tree.LoadState(ctx, store.stateAndBlockSource, stateCid) +} + +// GetGenesisBlock returns the genesis block held by the chain store. +func (store *Store) GetGenesisBlock(ctx context.Context) (*types.BlockHeader, error) { + return store.GetBlock(ctx, store.GenesisCid()) +} + +// GetTipSetStateRoot returns the aggregate state root CID of the tipset identified by `key`. +func (store *Store) GetTipSetStateRoot(ctx context.Context, key *types.TipSet) (cid.Cid, error) { + return store.tipIndex.GetTipSetStateRoot(ctx, key) +} + +// GetTipSetReceiptsRoot returns the root CID of the message receipts for the tipset identified by `key`. 
+func (store *Store) GetTipSetReceiptsRoot(ctx context.Context, key *types.TipSet) (cid.Cid, error) { + return store.tipIndex.GetTipSetReceiptsRoot(ctx, key) +} + +func (store *Store) GetTipsetMetadata(ctx context.Context, ts *types.TipSet) (*TipSetMetadata, error) { + tsStat, err := store.tipIndex.Get(ctx, ts) + if err != nil { + return nil, err + } + return &TipSetMetadata{ + TipSetStateRoot: tsStat.StateRoot, + TipSet: ts, + TipSetReceipts: tsStat.Receipts, + }, nil +} + +// HasTipSetAndState returns true iff the default store's tipindex is indexing +// the tipset identified by `key`. +func (store *Store) HasTipSetAndState(ctx context.Context, ts *types.TipSet) bool { + return store.tipIndex.Has(ctx, ts) +} + +// GetLatestBeaconEntry get latest beacon from the height. there're no beacon values in the block, try to +// get beacon in the parents tipset. the max find depth is 20. +func (store *Store) GetLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { + cur := ts + for i := 0; i < 20; i++ { + cbe := cur.At(0).BeaconEntries + if len(cbe) > 0 { + return &cbe[len(cbe)-1], nil + } + + if cur.Height() == 0 { + return nil, fmt.Errorf("made it back to genesis block without finding beacon entry") + } + + next, err := store.GetTipSet(ctx, cur.Parents()) + if err != nil { + return nil, fmt.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) + } + cur = next + } + + if os.Getenv("VENUS_IGNORE_DRAND") == "_yes_" { + return &types.BeaconEntry{ + Data: []byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}, + }, nil + } + + return nil, fmt.Errorf("found NO beacon entries in the 20 blocks prior to given tipset") +} + +// nolint +func (store *Store) walkBack(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + if to > from.Height() { + return nil, fmt.Errorf("looking for tipset with height greater than start point") + } + + if to == from.Height() { + return from, nil + } + + 
ts := from + + for { + pts, err := store.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, err + } + + if to > pts.Height() { + // in case pts is lower than the epoch we're looking for (null blocks) + // return a tipset above that height + return ts, nil + } + if to == pts.Height() { + return pts, nil + } + + ts = pts + } +} + +// SetHead sets the passed in tipset as the new head of this chain. +func (store *Store) SetHead(ctx context.Context, newTS *types.TipSet) error { + log.Infof("SetHead %s %d", newTS.String(), newTS.Height()) + // Add logging to debug sporadic test failure. + if !newTS.Defined() { + log.Errorf("publishing empty tipset") + log.Error(debug.Stack()) + return nil + } + + // reorg tipset + dropped, added, update, err := func() ([]*types.TipSet, []*types.TipSet, bool, error) { + var dropped []*types.TipSet + var added []*types.TipSet + var err error + store.mu.Lock() + defer store.mu.Unlock() + + if store.head != nil { + if store.head.Equals(newTS) { + return nil, nil, false, nil + } + // reorg + oldHead := store.head + dropped, added, err = CollectTipsToCommonAncestor(ctx, store, oldHead, newTS) + if err != nil { + return nil, nil, false, err + } + } else { + added = []*types.TipSet{newTS} + } + + // Ensure consistency by storing this new head on disk. 
+ if errInner := store.writeHead(ctx, newTS.Key()); errInner != nil { + return nil, nil, false, errors.Wrap(errInner, "failed to write new Head to datastore") + } + store.head = newTS + return dropped, added, true, nil + }() + if err != nil { + return err + } + + if !update { + return nil + } + + // todo wrap by go function + Reverse(added) + + // do reorg + store.reorgCh <- reorg{ + old: dropped, + new: added, + } + return nil +} + +func (store *Store) reorgWorker(ctx context.Context) chan reorg { + headChangeNotifee := func(rev, app []*types.TipSet) error { + notif := make([]*types.HeadChange, len(rev)+len(app)) + for i, revert := range rev { + notif[i] = &types.HeadChange{ + Type: types.HCRevert, + Val: revert, + } + } + + for i, apply := range app { + notif[i+len(rev)] = &types.HeadChange{ + Type: types.HCApply, + Val: apply, + } + } + + // Publish an event that we have a new head. + store.headEvents.Pub(notif, types.HeadChangeTopic) + return nil + } + + out := make(chan reorg, 32) + notifees := []ReorgNotifee{headChangeNotifee} + + go func() { + defer log.Warn("reorgWorker quit") + for { + select { + case n := <-store.reorgNotifeeCh: + notifees = append(notifees, n) + + case r := <-out: + var toremove map[int]struct{} + for i, hcf := range notifees { + err := hcf(r.old, r.new) + + switch err { + case nil: + + case ErrNotifeeDone: + if toremove == nil { + toremove = make(map[int]struct{}) + } + toremove[i] = struct{}{} + + default: + log.Error("head change func errored (BAD): ", err) + } + } + + if len(toremove) > 0 { + newNotifees := make([]ReorgNotifee, 0, len(notifees)-len(toremove)) + for i, hcf := range notifees { + _, remove := toremove[i] + if remove { + continue + } + newNotifees = append(newNotifees, hcf) + } + notifees = newNotifees + } + + case <-ctx.Done(): + return + } + } + }() + return out +} + +// SubHeadChanges returns channel with chain head updates. +// First message is guaranteed to be of len == 1, and type == 'current'. 
+// Then event in the message may be HCApply and HCRevert. +func (store *Store) SubHeadChanges(ctx context.Context) chan []*types.HeadChange { + store.mu.RLock() + subCh := store.headEvents.Sub(types.HeadChangeTopic) + head := store.head + store.mu.RUnlock() + + out := make(chan []*types.HeadChange, 16) + out <- []*types.HeadChange{{ + Type: types.HCCurrent, + Val: head, + }} + + go func() { + defer close(out) + var unsubOnce sync.Once + + for { + select { + case val, ok := <-subCh: + if !ok { + log.Warn("chain head sub exit loop") + return + } + + select { + case out <- val.([]*types.HeadChange): + default: + log.Errorf("closing head change subscription due to slow reader") + return + } + if len(out) > 5 { + log.Warnf("head change sub is slow, has %d buffered entries", len(out)) + } + case <-ctx.Done(): + unsubOnce.Do(func() { + go store.headEvents.Unsub(subCh) + }) + } + } + }() + return out +} + +// SubscribeHeadChanges subscribe head change event +func (store *Store) SubscribeHeadChanges(f ReorgNotifee) { + store.reorgNotifeeCh <- f +} + +// ReadOnlyStateStore provides a read-only IPLD store for access to chain state. +func (store *Store) ReadOnlyStateStore() util.ReadOnlyIpldStore { + return util.ReadOnlyIpldStore{IpldStore: store.stateAndBlockSource} +} + +// writeHead writes the given cid set as head to disk. +func (store *Store) writeHead(ctx context.Context, cids types.TipSetKey) error { + log.Debugf("WriteHead %s", cids.String()) + buf := new(bytes.Buffer) + err := cids.MarshalCBOR(buf) + if err != nil { + return err + } + + return store.ds.Put(ctx, HeadKey, buf.Bytes()) +} + +// writeTipSetMetadata writes the tipset key and the state root id to the +// datastore. 
+func (store *Store) writeTipSetMetadata(ctx context.Context, tsm *TipSetMetadata) error { + if tsm.TipSetStateRoot == cid.Undef { + return errors.New("attempting to write state root cid.Undef") + } + + if tsm.TipSetReceipts == cid.Undef { + return errors.New("attempting to write receipts cid.Undef") + } + + metadata := TSState{ + StateRoot: tsm.TipSetStateRoot, + Receipts: tsm.TipSetReceipts, + } + buf := new(bytes.Buffer) + err := metadata.MarshalCBOR(buf) + if err != nil { + return err + } + // datastore keeps key:stateRoot (k,v) pairs. + h := tsm.TipSet.Height() + key := datastore.NewKey(makeKey(tsm.TipSet.String(), h)) + + return store.ds.Put(ctx, key, buf.Bytes()) +} + +// deleteTipSetMetadata delete the state root id from the datastore for the tipset key. +func (store *Store) DeleteTipSetMetadata(ctx context.Context, ts *types.TipSet) error { // nolint + store.tipIndex.Del(ts) + h := ts.Height() + key := datastore.NewKey(makeKey(ts.String(), h)) + return store.ds.Delete(ctx, key) +} + +// GetHead returns the current head tipset cids. +func (store *Store) GetHead() *types.TipSet { + store.mu.RLock() + defer store.mu.RUnlock() + if !store.head.Defined() { + return types.UndefTipSet + } + + return store.head +} + +// GenesisCid returns the genesis cid of the chain tracked by the default store. +func (store *Store) GenesisCid() cid.Cid { + return store.genesis +} + +// GenesisRootCid returns the genesis root cid of the chain tracked by the default store. 
+func (store *Store) GenesisRootCid() cid.Cid { + genesis, _ := store.GetBlock(context.TODO(), store.GenesisCid()) + return genesis.ParentStateRoot +} + +func recurseLinks(ctx context.Context, bs blockstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) { + if root.Prefix().Codec != cid.DagCBOR { + return in, nil + } + + data, err := bs.Get(ctx, root) + if err != nil { + return nil, fmt.Errorf("recurse links get (%s) failed: %w", root, err) + } + + var rerr error + err = cbg.ScanForLinks(bytes.NewReader(data.RawData()), func(c cid.Cid) { + if rerr != nil { + // No error return on ScanForLinks :( + return + } + + // traversed this already... + if !walked.Visit(c) { + return + } + + in = append(in, c) + var err error + in, err = recurseLinks(ctx, bs, walked, c, in) + if err != nil { + rerr = err + } + }) + if err != nil { + return nil, fmt.Errorf("scanning for links failed: %w", err) + } + + return in, rerr +} + +func (store *Store) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error { + h := &car.CarHeader{ + Roots: ts.Cids(), + Version: 1, + } + + if err := car.WriteHeader(h, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) + } + + return store.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error { + blk, err := store.bsstore.Get(ctx, c) + if err != nil { + return fmt.Errorf("writing object to car, bs.Get: %w", err) + } + + if err := carutil.LdWrite(w, c.Bytes(), blk.RawData()); err != nil { + return fmt.Errorf("failed to write block to car output: %w", err) + } + + return nil + }) +} + +func (store *Store) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs, skipMsgReceipts bool, cb func(cid.Cid) error) error { + if ts == nil { + ts = store.GetHead() + } + + seen := cid.NewSet() + walked := cid.NewSet() + + blocksToWalk := ts.Cids() + currentMinHeight := ts.Height() + + 
walkChain := func(blk cid.Cid) error { + if !seen.Visit(blk) { + return nil + } + + if err := cb(blk); err != nil { + return err + } + + data, err := store.bsstore.Get(ctx, blk) + if err != nil { + return fmt.Errorf("getting block: %w", err) + } + + var b types.BlockHeader + if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil { + return fmt.Errorf("unmarshaling block header (cid=%s): %w", blk, err) + } + + if currentMinHeight > b.Height { + currentMinHeight = b.Height + if currentMinHeight%builtin.EpochsInDay == 0 { + log.Infow("export", "height", currentMinHeight) + } + } + + var cids []cid.Cid + if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { + if walked.Visit(b.Messages) { + mcids, err := recurseLinks(ctx, store.bsstore, walked, b.Messages, []cid.Cid{b.Messages}) + if err != nil { + return fmt.Errorf("recursing messages failed: %w", err) + } + cids = mcids + } + } + + if b.Height > 0 { + blocksToWalk = append(blocksToWalk, b.Parents...) + } else { + // include the genesis block + cids = append(cids, b.Parents...) + } + + out := cids + + if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots { + if walked.Visit(b.ParentStateRoot) { + cids, err := recurseLinks(ctx, store.bsstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) + if err != nil { + return fmt.Errorf("recursing genesis state failed: %w", err) + } + + out = append(out, cids...) + } + + if !skipMsgReceipts && walked.Visit(b.ParentMessageReceipts) { + out = append(out, b.ParentMessageReceipts) + } + } + + for _, c := range out { + if seen.Visit(c) { + prefix := c.Prefix() + + // Don't include identity CIDs. + if prefix.MhType == mh.IDENTITY { + continue + } + + // We only include raw and dagcbor, for now. + // Raw for "code" CIDs. 
+ switch prefix.Codec { + case cid.Raw, cid.DagCBOR: + default: + continue + } + + if err := cb(c); err != nil { + return err + } + + } + } + + return nil + } + + log.Infow("export started") + exportStart := constants.Clock.Now() + + for len(blocksToWalk) > 0 { + next := blocksToWalk[0] + blocksToWalk = blocksToWalk[1:] + if err := walkChain(next); err != nil { + return fmt.Errorf("walk chain failed: %w", err) + } + } + + log.Infow("export finished", "duration", constants.Clock.Now().Sub(exportStart).Seconds()) + + return nil +} + +// Import import a car file into local db +func (store *Store) Import(ctx context.Context, r io.Reader) (*types.TipSet, error) { + br, err := carv2.NewBlockReader(r) + if err != nil { + return nil, fmt.Errorf("loadcar failed: %w", err) + } + + parallelPuts := 5 + putThrottle := make(chan error, parallelPuts) + for i := 0; i < parallelPuts; i++ { + putThrottle <- nil + } + + var buf []blocks.Block + for { + blk, err := br.Next() + if err != nil { + if err == io.EOF { + if len(buf) > 0 { + if err := store.bsstore.PutMany(ctx, buf); err != nil { + return nil, err + } + } + + break + } + return nil, err + } + + buf = append(buf, blk) + + if len(buf) > 1000 { + if lastErr := <-putThrottle; lastErr != nil { // consume one error to have the right to add one + return nil, lastErr + } + + go func(buf []blocks.Block) { + putThrottle <- store.bsstore.PutMany(ctx, buf) + }(buf) + buf = nil + } + } + + // check errors + for i := 0; i < parallelPuts; i++ { + if lastErr := <-putThrottle; lastErr != nil { + return nil, lastErr + } + } + + root, err := store.GetTipSet(ctx, types.NewTipSetKey(br.Roots...)) + if err != nil { + return nil, fmt.Errorf("failed to load root tipset from chainfile: %w", err) + } + + // Notice here is different with lotus, because the head tipset in lotus is not computed, + // but in venus the head tipset is computed, so here we will fallback a pre tipset + // and the chain store must has a metadata for each tipset, below code is 
to build the tipset metadata + + var ( + startHeight = root.Height() + curTipset = root + ) + + log.Info("import height: ", root.Height(), " root: ", root.String(), " parents: ", root.At(0).Parents) + for { + if curTipset.Height() <= 0 { + break + } + curTipsetKey := curTipset.Parents() + curParentTipset, err := store.GetTipSet(ctx, curTipsetKey) + if err != nil { + return nil, fmt.Errorf("failed to load root tipset from chainfile: %w", err) + } + + if curParentTipset.Height() == 0 { + break + } + + if _, err := tree.LoadState(ctx, store.stateAndBlockSource, curTipset.At(0).ParentStateRoot); err != nil { + log.Infof("last ts height: %d, cids: %s, total import: %d", curTipset.Height(), curTipset.Key(), startHeight-curTipset.Height()) + break + } + + // save fake root + err = store.PutTipSetMetadata(context.Background(), &TipSetMetadata{ + TipSetStateRoot: curTipset.At(0).ParentStateRoot, + TipSet: curParentTipset, + TipSetReceipts: curTipset.At(0).ParentMessageReceipts, + }) + if err != nil { + return nil, err + } + curTipset = curParentTipset + } + + return root, nil +} + +// SetCheckPoint set current checkpoint +func (store *Store) SetCheckPoint(checkPoint types.TipSetKey) { + store.checkPoint = checkPoint +} + +// WriteCheckPoint writes the given cids to disk. 
+func (store *Store) WriteCheckPoint(ctx context.Context, cids types.TipSetKey) error { + log.Infof("WriteCheckPoint %v", cids) + buf := new(bytes.Buffer) + err := cids.MarshalCBOR(buf) + if err != nil { + return err + } + return store.ds.Put(ctx, CheckPoint, buf.Bytes()) +} + +func (store *Store) GetCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st tree.Tree) (types.CirculatingSupply, error) { + return store.circulatingSupplyCalculator.GetCirculatingSupplyDetailed(ctx, height, st) +} + +func (store *Store) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) { + return store.circulatingSupplyCalculator.GetFilVested(ctx, height) +} + +// StateCirculatingSupply get circulate supply at specify epoch +func (store *Store) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { + ts, err := store.GetTipSet(ctx, tsk) + if err != nil { + return abi.TokenAmount{}, err + } + + root, err := store.GetTipSetStateRoot(ctx, ts) + if err != nil { + return abi.TokenAmount{}, err + } + + sTree, err := tree.LoadState(ctx, store.stateAndBlockSource, root) + if err != nil { + return abi.TokenAmount{}, err + } + + return store.getCirculatingSupply(ctx, ts.Height(), sTree) +} + +func (store *Store) getCirculatingSupply(ctx context.Context, height abi.ChainEpoch, st tree.Tree) (abi.TokenAmount, error) { + adtStore := adt.WrapStore(ctx, store.stateAndBlockSource) + circ := big.Zero() + unCirc := big.Zero() + err := st.ForEach(func(a address.Address, actor *types.Actor) error { + switch { + case actor.Balance.IsZero(): + // Do nothing for zero-balance actors + break + case a == _init.Address || + a == reward.Address || + a == verifreg.Address || + // The power actor itself should never receive funds + a == power.Address || + a == builtin.SystemActorAddr || + a == builtin.CronActorAddr || + a == builtin.BurntFundsActorAddr || + a == builtin.SaftAddress || + a == builtin.ReserveAddress: + + unCirc = 
big.Add(unCirc, actor.Balance) + + case a == market.Address: + mst, err := market.Load(adtStore, actor) + if err != nil { + return err + } + + lb, err := mst.TotalLocked() + if err != nil { + return err + } + + circ = big.Add(circ, big.Sub(actor.Balance, lb)) + unCirc = big.Add(unCirc, lb) + + case builtin.IsAccountActor(actor.Code) || builtin.IsPaymentChannelActor(actor.Code): + circ = big.Add(circ, actor.Balance) + + case builtin.IsStorageMinerActor(actor.Code): + mst, err := miner.Load(adtStore, actor) + if err != nil { + return err + } + + ab, err := mst.AvailableBalance(actor.Balance) + + if err == nil { + circ = big.Add(circ, ab) + unCirc = big.Add(unCirc, big.Sub(actor.Balance, ab)) + } else { + // Assume any error is because the miner state is "broken" (lower actor balance than locked funds) + // In this case, the actor's entire balance is considered "uncirculating" + unCirc = big.Add(unCirc, actor.Balance) + } + + case builtin.IsMultisigActor(actor.Code): + mst, err := multisig.Load(adtStore, actor) + if err != nil { + return err + } + + lb, err := mst.LockedBalance(height) + if err != nil { + return err + } + + ab := big.Sub(actor.Balance, lb) + circ = big.Add(circ, big.Max(ab, big.Zero())) + unCirc = big.Add(unCirc, big.Min(actor.Balance, lb)) + default: + return fmt.Errorf("unexpected actor: %s", a) + } + + return nil + }) + if err != nil { + return abi.TokenAmount{}, err + } + + total := big.Add(circ, unCirc) + if !total.Equals(types.TotalFilecoinInt) { + return abi.TokenAmount{}, fmt.Errorf("total filecoin didn't add to expected amount: %s != %s", total, types.TotalFilecoinInt) + } + + return circ, nil +} + +// GetCheckPoint get the check point from store or disk. +func (store *Store) GetCheckPoint() types.TipSetKey { + return store.checkPoint +} + +// Stop stops all activities and cleans up. +func (store *Store) Stop() { + store.headEvents.Shutdown() +} + +// ReorgOps used to reorganize the blockchain. 
Whenever a new tipset is approved, +// the new tipset compared with the local tipset to obtain which tipset need to be revert and which tipsets are applied +func (store *Store) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { + return ReorgOps(store.GetTipSet, a, b) +} + +// ReorgOps takes two tipsets (which can be at different heights), and walks +// their corresponding chains backwards one step at a time until we find +// a common ancestor. It then returns the respective chain segments that fork +// from the identified ancestor, in reverse order, where the first element of +// each slice is the supplied tipset, and the last element is the common +// ancestor. +// +// If an error happens along the way, we return the error with nil slices. +// todo should move this code into store.ReorgOps. anywhere use this function should invoke store.ReorgOps +func ReorgOps(lts func(context.Context, types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { + left := a + right := b + + var leftChain, rightChain []*types.TipSet + for !left.Equals(right) { + if left.Height() > right.Height() { + leftChain = append(leftChain, left) + par, err := lts(context.TODO(), left.Parents()) + if err != nil { + return nil, nil, err + } + + left = par + } else { + rightChain = append(rightChain, right) + par, err := lts(context.TODO(), right.Parents()) + if err != nil { + log.Infof("failed to fetch right.Parents: %s", err) + return nil, nil, err + } + + right = par + } + } + + return leftChain, rightChain, nil +} + +// PutMessage put message in local db +func (store *Store) PutMessage(ctx context.Context, m storable) (cid.Cid, error) { + return PutMessage(ctx, store.bsstore, m) +} + +// Blockstore return local blockstore +// todo remove this method, and code that need blockstore should get from blockstore submodule +func (store *Store) Blockstore() blockstoreutil.Blockstore { // nolint + return store.bsstore +} + +// 
GetParentReceipt get the receipt of parent tipset at specify message slot +func (store *Store) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) { + ctx := context.TODO() + // block headers use adt0, for now. + a, err := blockadt.AsArray(adt.WrapStore(ctx, store.stateAndBlockSource), b.ParentMessageReceipts) + if err != nil { + return nil, fmt.Errorf("amt load: %w", err) + } + + var r types.MessageReceipt + if found, err := a.Get(uint64(i), &r); err != nil { + return nil, err + } else if !found { + return nil, fmt.Errorf("failed to find receipt %d", i) + } + + return &r, nil +} + +// GetLookbackTipSetForRound get loop back tipset and state root +func (store *Store) GetLookbackTipSetForRound(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch, version network.Version) (*types.TipSet, cid.Cid, error) { + var lbr abi.ChainEpoch + + lb := policy.GetWinningPoStSectorSetLookback(version) + if round > lb { + lbr = round - lb + } + + // more null blocks than our lookback + h := ts.Height() + if lbr >= h { + // This should never happen at this point, but may happen before + // network version 3 (where the lookback was only 10 blocks). + st, err := store.GetTipSetStateRoot(ctx, ts) + if err != nil { + return nil, cid.Undef, err + } + return ts, st, nil + } + + // Get the tipset after the lookback tipset, or the next non-null one. 
+ nextTS, err := store.GetTipSetByHeight(ctx, ts, lbr+1, false) + if err != nil { + return nil, cid.Undef, fmt.Errorf("failed to get lookback tipset+1: %v", err) + } + + nextTh := nextTS.Height() + if lbr > nextTh { + return nil, cid.Undef, fmt.Errorf("failed to find non-null tipset %s (%d) which is known to exist, found %s (%d)", ts.Key(), h, nextTS.Key(), nextTh) + } + + pKey := nextTS.Parents() + lbts, err := store.GetTipSet(ctx, pKey) + if err != nil { + return nil, cid.Undef, fmt.Errorf("failed to resolve lookback tipset: %v", err) + } + + return lbts, nextTS.Blocks()[0].ParentStateRoot, nil +} + +// Actor + +// LsActors returns a channel with actors from the latest state on the chain +func (store *Store) LsActors(ctx context.Context) (map[address.Address]*types.Actor, error) { + st, err := store.GetTipSetState(ctx, store.head) + if err != nil { + return nil, err + } + + result := make(map[address.Address]*types.Actor) + err = st.ForEach(func(key address.Address, a *types.Actor) error { + result[key] = a + return nil + }) + if err != nil { + return nil, err + } + return result, nil +} + +// GetActorAt returns an actor at a specified tipset key. 
+func (store *Store) GetActorAt(ctx context.Context, ts *types.TipSet, addr address.Address) (*types.Actor, error) { + st, err := store.GetTipSetState(ctx, ts) + if err != nil { + return nil, errors.Wrap(err, "failed to load latest state") + } + + idAddr, err := store.LookupID(ctx, ts, addr) + if err != nil { + return nil, err + } + + actr, found, err := st.GetActor(ctx, idAddr) + if err != nil { + return nil, err + } + if !found { + return nil, types.ErrActorNotFound + } + return actr, nil +} + +// LookupID resolves ID address for actor +func (store *Store) LookupID(ctx context.Context, ts *types.TipSet, addr address.Address) (address.Address, error) { + st, err := store.GetTipSetState(ctx, ts) + if err != nil { + return address.Undef, errors.Wrap(err, "failed to load latest state") + } + + return st.LookupID(addr) +} + +// ResolveToKeyAddr get key address of specify address. +// if ths addr is bls/secpk address, return directly, other get the pubkey and generate address +func (store *Store) ResolveToKeyAddr(ctx context.Context, ts *types.TipSet, addr address.Address) (address.Address, error) { + st, err := store.StateView(ctx, ts) + if err != nil { + return address.Undef, errors.Wrap(err, "failed to load latest state") + } + + return st.ResolveToKeyAddr(ctx, addr) +} + +// StateView return state view at ts epoch +func (store *Store) StateView(ctx context.Context, ts *types.TipSet) (*state.View, error) { + if ts == nil { + ts = store.head + } + root, err := store.GetTipSetStateRoot(ctx, ts) + if err != nil { + return nil, errors.Wrapf(err, "failed to get state root for %s", ts.Key().String()) + } + + return state.NewView(store.stateAndBlockSource, root), nil +} + +// AccountView return account view at ts state +func (store *Store) AccountView(ctx context.Context, ts *types.TipSet) (state.AccountView, error) { + if ts == nil { + ts = store.head + } + root, err := store.GetTipSetStateRoot(ctx, ts) + if err != nil { + return nil, errors.Wrapf(err, "failed to get 
state root for %s", ts.Key().String()) + } + + return state.NewView(store.stateAndBlockSource, root), nil +} + +// ParentStateView get parent state view of ts +func (store *Store) ParentStateView(ts *types.TipSet) (*state.View, error) { + return state.NewView(store.stateAndBlockSource, ts.At(0).ParentStateRoot), nil +} + +// Store wrap adt store +func (store *Store) Store(ctx context.Context) adt.Store { + return adt.WrapStore(ctx, cbor.NewCborStore(store.bsstore)) +} diff --git a/pkg/chain/store_test.go b/pkg/chain/store_test.go new file mode 100644 index 0000000000..8ef8821cb5 --- /dev/null +++ b/pkg/chain/store_test.go @@ -0,0 +1,549 @@ +// stm: #unit +package chain_test + +import ( + "context" + "fmt" + "math/rand" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/testhelpers" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/util/test" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type CborBlockStore struct { + *chain.Store + cborStore cbor.IpldStore +} + +func (cbor *CborBlockStore) PutBlocks(ctx context.Context, blocks []*types.BlockHeader) { + for _, blk := range blocks { + _, _ = cbor.cborStore.Put(ctx, blk) + } +} + +// Default Chain diagram below. Note that blocks in the same tipset are in parentheses. +// +// genesis -> (link1blk1, link1blk2) -> (link2blk1, link2blk2, link2blk3) -> link3blk1 -> (null block) -> (null block) -> (link4blk1, link4blk2) + +// newChainStore creates a new chain store for tests. 
+func newChainStore(r repo.Repo, genTS *types.TipSet) *CborBlockStore { + tempBlock := r.Datastore() + cborStore := cbor.NewCborStore(tempBlock) + return &CborBlockStore{ + Store: chain.NewStore(r.ChainDatastore(), tempBlock, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()), + cborStore: cborStore, + } +} + +// requirePutTestChain puts the count tipsets preceding head in the source to +// the input chain store. +func requirePutTestChain(ctx context.Context, t *testing.T, cborStore *CborBlockStore, head types.TipSetKey, source *chain.Builder, count int) { + tss := source.RequireTipSets(ctx, head, count) + for _, ts := range tss { + tsas := &chain.TipSetMetadata{ + TipSet: ts, + TipSetStateRoot: ts.At(0).ParentStateRoot, + TipSetReceipts: testhelpers.EmptyReceiptsCID, + } + requirePutBlocksToCborStore(t, cborStore.cborStore, tsas.TipSet.Blocks()...) + require.NoError(t, cborStore.Store.PutTipSetMetadata(ctx, tsas)) + } +} + +type HeadAndTipsetGetter interface { + GetHead() types.TipSetKey + GetTipSet(types.TipSetKey) (types.TipSet, error) +} + +func requirePutBlocksToCborStore(t *testing.T, cst cbor.IpldStore, blocks ...*types.BlockHeader) { + for _, block := range blocks { + _, err := cst.Put(context.Background(), block) + require.NoError(t, err) + } +} + +/* Putting and getting tipsets and states. */ + +// Adding tipsets to the store doesn't error. +func TestPutTipSet(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + genTS := builder.Genesis() + r := repo.NewInMemoryRepo() + cs := newChainStore(r, genTS) + + genTsas := &chain.TipSetMetadata{ + TipSet: genTS, + TipSetStateRoot: genTS.At(0).ParentStateRoot, + TipSetReceipts: testhelpers.EmptyReceiptsCID, + } + err := cs.Store.PutTipSetMetadata(ctx, genTsas) + assert.NoError(t, err) +} + +// Tipsets can be retrieved by key (all block cids). 
+func TestGetByKey(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + genTS := builder.Genesis() + r := repo.NewInMemoryRepo() + cs := newChainStore(r, genTS) + + // Construct test chain data + link1 := builder.AppendOn(ctx, genTS, 2) + link2 := builder.AppendOn(ctx, link1, 3) + link3 := builder.AppendOn(ctx, link2, 1) + link4 := builder.BuildOn(ctx, link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) + + // Put the test chain to the store + requirePutTestChain(ctx, t, cs, link4.Key(), builder, 5) + + // Check that we can get all tipsets by key + gotGTS := requireGetTipSet(ctx, t, cs, genTS.Key()) + gotGTSSR := requireGetTipSetStateRoot(ctx, t, cs, genTS) + + got1TS := requireGetTipSet(ctx, t, cs, link1.Key()) + got1TSSR := requireGetTipSetStateRoot(ctx, t, cs, link1) + + got2TS := requireGetTipSet(ctx, t, cs, link2.Key()) + got2TSSR := requireGetTipSetStateRoot(ctx, t, cs, link2) + + got3TS := requireGetTipSet(ctx, t, cs, link3.Key()) + got3TSSR := requireGetTipSetStateRoot(ctx, t, cs, link3) + + got4TS := requireGetTipSet(ctx, t, cs, link4.Key()) + got4TSSR := requireGetTipSetStateRoot(ctx, t, cs, link4) + assert.ObjectsAreEqualValues(genTS, gotGTS) + assert.ObjectsAreEqualValues(link1, got1TS) + assert.ObjectsAreEqualValues(link2, got2TS) + assert.ObjectsAreEqualValues(link3, got3TS) + assert.ObjectsAreEqualValues(link4, got4TS) + + assert.Equal(t, genTS.At(0).ParentStateRoot, gotGTSSR) + assert.Equal(t, link1.At(0).ParentStateRoot, got1TSSR) + assert.Equal(t, link2.At(0).ParentStateRoot, got2TSSR) + assert.Equal(t, link3.At(0).ParentStateRoot, got3TSSR) + assert.Equal(t, link4.At(0).ParentStateRoot, got4TSSR) +} + +// Tipsets can be retrieved by key (all block cids). 
+func TestRevertChange(t *testing.T) { + tf.UnitTest(t) + ctx := context.TODO() + builder := chain.NewBuilder(t, address.Undef) + genTS := builder.Genesis() + cs := newChainStore(builder.Repo(), genTS) + genesis := builder.Genesis() + + link1 := builder.AppendOn(ctx, genesis, 1) + link2 := builder.AppendOn(ctx, link1, 1) + link3 := builder.AppendOn(ctx, link2, 1) + + err := cs.Store.SetHead(ctx, link3) + require.NoError(t, err) + + link4 := builder.AppendOn(ctx, genesis, 2) + link5 := builder.AppendOn(ctx, link4, 2) + link6 := builder.AppendOn(ctx, link5, 2) + + ch := cs.Store.SubHeadChanges(ctx) + currentA := <-ch + test.Equal(t, currentA[0].Type, types.HCCurrent) + test.Equal(t, currentA[0].Val, link3) + + err = cs.Store.SetHead(ctx, link6) + require.NoError(t, err) + headChanges := <-ch + + if len(headChanges) == 1 { + // maybe link3, if link3 fetch next + headChanges = <-ch + } + test.Equal(t, headChanges[0].Type, types.HCRevert) + test.Equal(t, headChanges[0].Val, link3) + test.Equal(t, headChanges[1].Type, types.HCRevert) + test.Equal(t, headChanges[1].Val, link2) + test.Equal(t, headChanges[2].Type, types.HCRevert) + test.Equal(t, headChanges[2].Val, link1) + + test.Equal(t, headChanges[3].Type, types.HCApply) + test.Equal(t, headChanges[3].Val, link4) + test.Equal(t, headChanges[4].Type, types.HCApply) + test.Equal(t, headChanges[4].Val, link5) + test.Equal(t, headChanges[5].Type, types.HCApply) + test.Equal(t, headChanges[5].Val, link6) +} + +/* Head and its state is set and notified properly. */ + +// The constructor call sets the genesis cid for the chain store. 
+func TestSetGenesis(t *testing.T) { + tf.UnitTest(t) + + builder := chain.NewBuilder(t, address.Undef) + genTS := builder.Genesis() + r := repo.NewInMemoryRepo() + cs := newChainStore(r, genTS) + + require.Equal(t, genTS.At(0).Cid(), cs.Store.GenesisCid()) +} + +func assertSetHead(t *testing.T, cborStore *CborBlockStore, ts *types.TipSet) { + ctx := context.Background() + err := cborStore.Store.SetHead(ctx, ts) + assert.NoError(t, err) +} + +// Set and Get Head. +func TestHead(t *testing.T) { + tf.UnitTest(t) + + ctx := context.TODO() + builder := chain.NewBuilder(t, address.Undef) + genTS := builder.Genesis() + r := builder.Repo() + bs := builder.BlockStore() + cs := chain.NewStore(r.ChainDatastore(), bs, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()) + cboreStore := &CborBlockStore{ + Store: chain.NewStore(r.ChainDatastore(), bs, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()), + } + // Construct test chain data + link1 := builder.AppendOn(ctx, genTS, 2) + link2 := builder.AppendOn(ctx, link1, 3) + link3 := builder.AppendOn(ctx, link2, 1) + link4 := builder.BuildOn(ctx, link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) + + // Head starts as an empty cid set + assert.Equal(t, types.UndefTipSet, cs.GetHead()) + + // Set Head + assertSetHead(t, cboreStore, genTS) + assert.ObjectsAreEqualValues(genTS.Key(), cs.GetHead()) + + // Move head forward + assertSetHead(t, cboreStore, link4) + assert.ObjectsAreEqualValues(link4.Key(), cs.GetHead()) + + // Move head back + assertSetHead(t, cboreStore, link1) + assert.ObjectsAreEqualValues(link1.Key(), cs.GetHead()) +} + +func assertEmptyCh(t *testing.T, ch <-chan []*types.HeadChange) { + select { + case <-ch: + assert.True(t, false) + default: + } +} + +// Head events are propagated on HeadEvents. 
+func TestHeadEvents(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + genTS := builder.Genesis() + chainStore := newChainStore(builder.Repo(), genTS) + // Construct test chain data + link1 := builder.AppendOn(ctx, genTS, 2) + link2 := builder.AppendOn(ctx, link1, 3) + link3 := builder.AppendOn(ctx, link2, 1) + link4 := builder.BuildOn(ctx, link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) + chA := chainStore.Store.SubHeadChanges(ctx) + chB := chainStore.Store.SubHeadChanges(ctx) + // HCurrent + <-chA + <-chB + + defer ctx.Done() + + headSets := []*types.TipSet{genTS, link1, link2, link3, link4, link3, link2, link1, genTS} + heads := []*types.TipSet{genTS, link1, link2, link3, link4, link4, link3, link2, link1} + types := []types.HeadChangeType{ + types.HCApply, types.HCApply, types.HCApply, types.HCApply, types.HCApply, types.HCRevert, + types.HCRevert, types.HCRevert, types.HCRevert, + } + waitAndCheck := func(index int) { + headA := <-chA + headB := <-chB + assert.Equal(t, headA[0].Type, types[index]) + test.Equal(t, headA, headB) + test.Equal(t, headA[0].Val, heads[index]) + } + + // Heads arrive in the expected order + for i := 0; i < 9; i++ { + assertSetHead(t, chainStore, headSets[i]) + waitAndCheck(i) + } + // No extra notifications + assertEmptyCh(t, chA) + assertEmptyCh(t, chB) +} + +/* Loading */ +// Load does not error and gives the chain store access to all blocks and +// tipset indexes along the heaviest chain. 
+func TestLoadAndReboot(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + genTS := builder.Genesis() + rPriv := repo.NewInMemoryRepo() + bs := rPriv.Datastore() + ds := rPriv.ChainDatastore() + cst := cbor.NewCborStore(bs) + + // Construct test chain data + link1 := builder.AppendOn(ctx, genTS, 2) + link2 := builder.AppendOn(ctx, link1, 3) + link3 := builder.AppendOn(ctx, link2, 1) + link4 := builder.BuildOn(ctx, link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) + + // Add blocks to blockstore + requirePutBlocksToCborStore(t, cst, genTS.ToSlice()...) + requirePutBlocksToCborStore(t, cst, link1.ToSlice()...) + requirePutBlocksToCborStore(t, cst, link2.ToSlice()...) + requirePutBlocksToCborStore(t, cst, link3.ToSlice()...) + requirePutBlocksToCborStore(t, cst, link4.ToSlice()...) + + cborStore := &CborBlockStore{ + Store: chain.NewStore(ds, bs, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()), + cborStore: cst, + } + requirePutTestChain(ctx, t, cborStore, link4.Key(), builder, 5) + assertSetHead(t, cborStore, genTS) // set the genesis block + + assertSetHead(t, cborStore, link4) + cborStore.Store.Stop() + + // rebuild chain with same datastore and cborstore + rebootChain := chain.NewStore(ds, bs, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()) + rebootCbore := &CborBlockStore{ + Store: rebootChain, + } + + // stm: @CHAIN_STORE_LOAD_001 + err := rebootChain.Load(ctx) + assert.NoError(t, err) + + // Check that chain store has index + // Get a tipset and state by key + got2 := requireGetTipSet(ctx, t, rebootCbore, link2.Key()) + assert.ObjectsAreEqualValues(link2, got2) + + // Check the head + test.Equal(t, link4, rebootChain.GetHead()) + + { + assert.NoError(t, rebootChain.Blockstore().DeleteBlock(ctx, link3.Blocks()[0].Cid())) + newStore := chain.NewStore(ds, bs, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()) + // error occurs while getting 
tipset identified by parent's cid block, + // because block[0] has been deleted. + // stm: @CHAIN_STORE_LOAD_003 + assert.Error(t, newStore.Load(ctx)) + } + + { + assert.NoError(t, ds.Put(ctx, chain.HeadKey, []byte("bad chain head data"))) + newStore := chain.NewStore(ds, bs, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()) + // error occurs while getting tipset identified by parent's cid block + // stm: @CHAIN_STORE_LOAD_002 + assert.Error(t, newStore.Load(ctx)) + } +} + +func TestLoadTipsetMeta(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + genTS := builder.Genesis() + rPriv := repo.NewInMemoryRepo() + bs := rPriv.Datastore() + ds := rPriv.ChainDatastore() + cst := cbor.NewCborStore(bs) + + count := 30 + links := make([]*types.TipSet, count) + links[0] = genTS + + for i := 1; i < count-1; i++ { + links[i] = builder.AppendOn(ctx, links[i-1], rand.Intn(2)+1) + } + head := builder.BuildOn(ctx, links[count-2], 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) }) + links[count-1] = head + + // Add blocks to blockstore + for _, ts := range links { + requirePutBlocksToCborStore(t, cst, ts.ToSlice()...) + } + + chain.DefaultChainIndexCacheSize = 2 + chain.DefaultTipsetLruCacheSize = 2 + + cs := chain.NewStore(ds, bs, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()) + cborStore := &CborBlockStore{Store: cs, cborStore: cst} + + requirePutTestChain(ctx, t, cborStore, head.Key(), builder, 5) + assertSetHead(t, cborStore, head) + + // stm: @CHAIN_STORE_LOAD_METADATA_001 + meta, err := cs.LoadTipsetMetadata(ctx, head) + assert.NoError(t, err) + assert.NotNil(t, meta) + + flyingTipset := builder.BuildOrphaTipset(head, 2, nil) + { // Chain store load tipset meta + // should not be found. + // stm: @CHAIN_STORE_LOAD_METADATA_002 + _, err = cs.LoadTipsetMetadata(ctx, flyingTipset) + assert.Error(t, err) + // put invalid data for newTs. 
+ key := datastore.NewKey(fmt.Sprintf("p-%s h-%d", flyingTipset.String(), flyingTipset.Height())) + assert.NoError(t, ds.Put(ctx, key, []byte("invalid tipset data"))) + // error getting object from store providing key, + // stm: @CHAIN_STORE_LOAD_METADATA_002 + _, err = cs.LoadTipsetMetadata(ctx, flyingTipset) + assert.Error(t, err) + assert.NoError(t, ds.Delete(ctx, key)) + } + { // Chain store get blocks + // stm: @CHAIN_STORE_GET_BLOCK_001 + block, err := cs.GetBlock(ctx, head.Key().Cids()[0]) + assert.NoError(t, err) + assert.Equal(t, block.Cid(), head.Blocks()[0].Cid()) + // error getting block from ilpd storage + // stm: @CHAIN_STORE_LOAD_002 + _, err = cs.GetBlock(ctx, flyingTipset.Cids()[0]) + assert.Error(t, err) + } + { // Chain store get tipset + // stm: @CHAIN_STORE_GET_TIPSET_001 + ts, err := cs.GetTipSet(ctx, head.Key()) + assert.NoError(t, err) + assert.Equal(t, ts.Key(), head.Key()) + + // If the key is empty, return current head tipset cids. + // stm: @CHAIN_STORE_GET_TIPSET_002 + ts, err = cs.GetTipSet(ctx, types.EmptyTSK) + assert.NoError(t, err) + assert.Equal(t, ts.Key(), head.Key()) + + // The head is cached now. 
+ // stm: @CHAIN_STORE_GET_TIPSET_003 + ts, err = cs.GetTipSet(ctx, head.Key()) + assert.NoError(t, err) + assert.Equal(t, ts.Key(), head.Key()) + + // error getting blocks + // stm: @CHAIN_STORE_GET_TIPSET_004 + _, err = cs.GetTipSet(ctx, flyingTipset.Key()) + assert.Error(t, err) + } + { // Chain store get tipset by height + targetTS := links[abi.ChainEpoch(count/2)] + // stm: @CHAIN_STORE_GET_TIPSET_BY_HEIGHT_001 + ts, err := cs.GetTipSetByHeight(ctx, head, targetTS.Height(), true) + assert.NoError(t, err) + assert.Equal(t, ts.Key(), targetTS.Key()) + + // The epoch is greater than the tipset's height + // stm: @CHAIN_STORE_GET_TIPSET_BY_HEIGHT_002 + _, err = cs.GetTipSetByHeight(ctx, head, head.Height()+1, true) + assert.Error(t, err) + + // targetTs.Height - 1 would make sure tipset was not cached + targetTS = links[targetTS.Height()-1] + blockCid := targetTS.Cids()[0] + block, err := bs.Get(ctx, blockCid) + assert.NoError(t, err) + assert.NoError(t, bs.DeleteBlock(ctx, targetTS.Cids()[0])) + // error occurs retrieving the tipset from the chain index + // stm: @CHAIN_STORE_GET_TIPSET_BY_HEIGHT_004 + _, err = cs.GetTipSetByHeight(ctx, head, targetTS.Height(), true) + assert.Error(t, err) + + // restore deleted block. + assert.NoError(t, bs.Put(ctx, block)) + } + { // Get tipset state + parentHead := links[len(links)-2] + // stm: @CHAIN_STORE_GET_TIPSET_STATE_ROOT_001 + stateRoot, err := cs.GetTipSetStateRoot(ctx, parentHead) + assert.NoError(t, err) + assert.Equal(t, head.ParentState(), stateRoot) + + // stm: @CHAIN_STORE_GET_TIPSET_STATE_ROOT_001 + _, err = cs.GetTipSetStateRoot(ctx, flyingTipset) + assert.Error(t, err) + + // error occurs while trying to return tipsetStateRoot from tipIndex. 
not exist + // stm: @CHAIN_STORE_GET_TIPSET_STATE_002 + _, err = cs.GetTipSetState(ctx, flyingTipset) + assert.Error(t, err) + } + { // Get genesis block + genesisBlock, err := cs.GetGenesisBlock(ctx) + assert.NoError(t, err) + assert.Equal(t, genesisBlock.Cid(), genTS.Blocks()[0].Cid()) + } + { // Beacon entry + ts := links[6] + // stm: @CHAIN_STORE_GET_LATEST_BEACON_ENTRY_001 + entry, err := cs.GetLatestBeaconEntry(ctx, ts) + assert.NoError(t, err) + assert.Greater(t, len(entry.Data), 0) + + // no beacon entries is found in the 20 block prior to given tipset + // stm: @CHAIN_STORE_GET_LATEST_BEACON_ENTRY_004 + _, err = cs.GetLatestBeaconEntry(ctx, head) + assert.Error(t, err) + + deletedCid := ts.Parents().Cids()[0] + block, err := bs.Get(ctx, deletedCid) + assert.NoError(t, err) + assert.NoError(t, bs.DeleteBlock(ctx, deletedCid)) + // loading parents failed. + // stm: @CHAIN_STORE_GET_LATEST_BEACON_ENTRY_003 + _, err = cs.GetLatestBeaconEntry(ctx, ts) + assert.Error(t, err) + + // recover deleted block + assert.NoError(t, bs.Put(ctx, block)) + } +} + +func requireGetTipSet(ctx context.Context, t *testing.T, chainStore *CborBlockStore, key types.TipSetKey) *types.TipSet { + ts, err := chainStore.Store.GetTipSet(ctx, key) + require.NoError(t, err) + return ts +} + +type tipSetStateRootGetter interface { + GetTipSetStateRoot(context.Context, *types.TipSet) (cid.Cid, error) +} + +func requireGetTipSetStateRoot(ctx context.Context, t *testing.T, chainStore tipSetStateRootGetter, ts *types.TipSet) cid.Cid { + stateCid, err := chainStore.GetTipSetStateRoot(ctx, ts) + require.NoError(t, err) + return stateCid +} diff --git a/pkg/chain/testing.go b/pkg/chain/testing.go new file mode 100644 index 0000000000..1d63e91b35 --- /dev/null +++ b/pkg/chain/testing.go @@ -0,0 +1,950 @@ +package chain + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "io" + "sync" + "testing" + + aexchange "github.com/filecoin-project/venus/pkg/net/exchange" + blockstoreutil 
"github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" + + "github.com/ipld/go-car" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/venus/fixtures/assets" + "github.com/filecoin-project/venus/pkg/clock" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/testhelpers" + "github.com/filecoin-project/venus/pkg/util" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// Builder builds fake chains and acts as a provider and fetcher for the chain thus generated. +// All blocks are unique (even if they share parents) and form valid chains of parents and heights, +// but do not carry valid tickets. Each block contributes a weight of 1. +// state root CIDs are computed by an abstract StateBuilder. The default FakeStateBuilder produces +// state CIDs that are distinct but not CIDs of any real state tree. A more sophisticated +// builder could actually apply the messages to a state tree (not yet implemented). +// The builder is deterministic: two builders receiving the same sequence of calls will produce +// exactly the same chain. +type Builder struct { + t *testing.T + genesis *types.TipSet + store *Store + minerAddress address.Address + stateBuilder StateBuilder + stamper TimeStamper + repo repo.Repo + bs blockstoreutil.Blockstore + cstore cbor.IpldStore + mstore *MessageStore + seq uint64 // For unique tickets + eval *FakeStateEvaluator + + // Cache of the state root CID computed for each tipset key. 
+ tipStateCids map[string]cid.Cid + + stmgr IStmgr + evaLock sync.Mutex +} + +func (f *Builder) IStmgr() IStmgr { + f.evaLock.Lock() + defer f.evaLock.Unlock() + return f.stmgr +} + +func (f *Builder) FakeStateEvaluator() *FakeStateEvaluator { + f.evaLock.Lock() + defer f.evaLock.Unlock() + + if f.eval != nil { + return f.eval + } + f.eval = &FakeStateEvaluator{ + ChainStore: f.store, + MessageStore: f.mstore, + ChsWorkingOn: make(map[types.TipSetKey]chan struct{}, 1), + } + return f.eval +} + +func (f *Builder) LoadTipSetMessage(ctx context.Context, ts *types.TipSet) ([]types.BlockMessagesInfo, error) { + // gather message + applied := make(map[address.Address]uint64) + selectMsg := func(m *types.Message) (bool, error) { + // The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise + if _, ok := applied[m.From]; !ok { + applied[m.From] = m.Nonce + } + + if applied[m.From] != m.Nonce { + return false, nil + } + + applied[m.From]++ + + return true, nil + } + blockMsg := []types.BlockMessagesInfo{} + for i := 0; i < ts.Len(); i++ { + blk := ts.At(i) + secpMsgs, blsMsgs, err := f.LoadMetaMessages(ctx, blk.Messages) + if err != nil { + return nil, errors.Wrapf(err, "syncing tip %s failed loading message list %s for block %s", ts.Key(), blk.Messages, blk.Cid()) + } + + var sBlsMsg []types.ChainMsg + var sSecpMsg []types.ChainMsg + for _, msg := range blsMsgs { + b, err := selectMsg(msg) + if err != nil { + return nil, fmt.Errorf("failed to decide whether to select message for block: %w", err) + } + if b { + sBlsMsg = append(sBlsMsg, msg) + } + } + + for _, msg := range secpMsgs { + b, err := selectMsg(&msg.Message) + if err != nil { + return nil, fmt.Errorf("failed to decide whether to select message for block: %w", err) + } + if b { + sSecpMsg = append(sSecpMsg, msg) + } + } + + blockMsg = append(blockMsg, types.BlockMessagesInfo{ + BlsMessages: sBlsMsg, + SecpkMessages: sSecpMsg, + Block: blk, + }) + } + + return blockMsg, 
nil +} + +func (f *Builder) Cstore() cbor.IpldStore { + return f.cstore +} + +func (f *Builder) Genesis() *types.TipSet { + return f.genesis +} + +func (f *Builder) Mstore() *MessageStore { + return f.mstore +} + +func (f *Builder) BlockStore() blockstoreutil.Blockstore { + return f.bs +} + +func (f *Builder) Repo() repo.Repo { + return f.repo +} + +func (f *Builder) Store() *Store { + return f.store +} + +func (f *Builder) RemovePeer(peer peer.ID) {} + +var ( + _ BlockProvider = (*Builder)(nil) + _ TipSetProvider = (*Builder)(nil) + _ MessageProvider = (*Builder)(nil) +) + +type fakeStmgr struct { + cs *Store + eva *FakeStateEvaluator +} + +func (f *fakeStmgr) GetActorAt(ctx context.Context, a address.Address, set *types.TipSet) (*types.Actor, error) { + return f.cs.GetActorAt(ctx, set, a) +} + +func (f *fakeStmgr) RunStateTransition(ctx context.Context, set *types.TipSet) (root cid.Cid, receipts cid.Cid, err error) { + return f.eva.RunStateTransition(ctx, set) +} + +var _ IStmgr = &fakeStmgr{} + +// NewBuilder builds a new chain faker with default fake state building. +func NewBuilder(t *testing.T, miner address.Address) *Builder { + return NewBuilderWithDeps(t, miner, &FakeStateBuilder{}, &ZeroTimestamper{}) +} + +// NewBuilderWithDeps builds a new chain faker. +// Blocks will have `miner` set as the miner address, or a default if empty. 
+func NewBuilderWithDeps(t *testing.T, miner address.Address, sb StateBuilder, stamper TimeStamper) *Builder { + if miner.Empty() { + var err error + miner, err = address.NewSecp256k1Address([]byte("miner")) + require.NoError(t, err) + } + + repo := repo.NewInMemoryRepo() + bs := repo.Datastore() + ds := repo.ChainDatastore() + cst := cbor.NewCborStore(bs) + + b := &Builder{ + t: t, + minerAddress: miner, + stateBuilder: sb, + stamper: stamper, + repo: repo, + bs: bs, + cstore: cst, + mstore: NewMessageStore(bs, config.DefaultForkUpgradeParam), + tipStateCids: make(map[string]cid.Cid), + } + ctx := context.TODO() + _, err := b.mstore.StoreMessages(ctx, []*types.SignedMessage{}, []*types.Message{}) + require.NoError(t, err) + _, err = b.mstore.StoreReceipts(ctx, []types.MessageReceipt{}) + require.NoError(t, err) + // append genesis + nullState := testhelpers.CidFromString(t, "null") + b.tipStateCids[types.NewTipSetKey().String()] = nullState + + // create a fixed genesis + b.genesis = b.GeneratorGenesis() + b.store = NewStore(ds, bs, b.genesis.At(0).Cid(), NewMockCirculatingSupplyCalculator()) + + for _, block := range b.genesis.Blocks() { + // add block to cstore + _, err := b.cstore.Put(context.TODO(), block) + require.NoError(t, err) + } + + stateRoot, receiptRoot := b.genesis.Blocks()[0].ParentStateRoot, b.genesis.Blocks()[0].ParentMessageReceipts + + b.tipStateCids[b.genesis.Key().String()] = stateRoot + require.NoError(t, err) + tipsetMeta := &TipSetMetadata{ + TipSetStateRoot: stateRoot, + TipSet: b.genesis, + TipSetReceipts: receiptRoot, + } + require.NoError(t, b.store.PutTipSetMetadata(context.TODO(), tipsetMeta)) + err = b.store.SetHead(context.TODO(), b.genesis) + require.NoError(t, err) + + b.stmgr = &fakeStmgr{cs: b.store, eva: b.FakeStateEvaluator()} + + return b +} + +// AppendBlockOnBlocks creates and returns a new block child of `parents`, with no messages. 
+func (f *Builder) AppendBlockOnBlocks(ctx context.Context, parents ...*types.BlockHeader) *types.BlockHeader { + var tip *types.TipSet + if len(parents) > 0 { + tip = testhelpers.RequireNewTipSet(f.t, parents...) + } + return f.AppendBlockOn(ctx, tip) +} + +// AppendBlockOn creates and returns a new block child of `parent`, with no messages. +func (f *Builder) AppendBlockOn(ctx context.Context, parent *types.TipSet) *types.BlockHeader { + return f.Build(ctx, parent, 1, nil).At(0) +} + +// AppendOn creates and returns a new `width`-block tipset child of `parents`, with no messages. +func (f *Builder) AppendOn(ctx context.Context, parent *types.TipSet, width int) *types.TipSet { + return f.Build(ctx, parent, width, nil) +} + +func (f *Builder) FlushHead(ctx context.Context) error { + _, _, e := f.FakeStateEvaluator().RunStateTransition(ctx, f.store.GetHead()) + return e +} + +// AppendManyBlocksOnBlocks appends `height` blocks to the chain. +func (f *Builder) AppendManyBlocksOnBlocks(ctx context.Context, height int, parents ...*types.BlockHeader) *types.BlockHeader { + var tip *types.TipSet + if len(parents) > 0 { + tip = testhelpers.RequireNewTipSet(f.t, parents...) + } + return f.BuildManyOn(ctx, height, tip, nil).At(0) +} + +// AppendManyBlocksOn appends `height` blocks to the chain. +func (f *Builder) AppendManyBlocksOn(ctx context.Context, height int, parent *types.TipSet) *types.BlockHeader { + return f.BuildManyOn(ctx, height, parent, nil).At(0) +} + +// AppendManyOn appends `height` tipsets to the chain. +func (f *Builder) AppendManyOn(ctx context.Context, height int, parent *types.TipSet) *types.TipSet { + return f.BuildManyOn(ctx, height, parent, nil) +} + +// BuildOnBlock creates and returns a new block child of singleton tipset `parent`. See Build. 
+func (f *Builder) BuildOnBlock(ctx context.Context, parent *types.BlockHeader, build func(b *BlockBuilder)) *types.BlockHeader { + var tip *types.TipSet + if parent != nil { + tip = testhelpers.RequireNewTipSet(f.t, parent) + } + return f.BuildOneOn(ctx, tip, build).At(0) +} + +// BuildOneOn creates and returns a new single-block tipset child of `parent`. +func (f *Builder) BuildOneOn(ctx context.Context, parent *types.TipSet, build func(b *BlockBuilder)) *types.TipSet { + return f.Build(ctx, parent, 1, singleBuilder(build)) +} + +// BuildOn creates and returns a new `width` block tipset child of `parent`. +func (f *Builder) BuildOn(ctx context.Context, parent *types.TipSet, width int, build func(b *BlockBuilder, i int)) *types.TipSet { + return f.Build(ctx, parent, width, build) +} + +// BuildManyOn builds a chain by invoking Build `height` times. +func (f *Builder) BuildManyOn(ctx context.Context, height int, parent *types.TipSet, build func(b *BlockBuilder)) *types.TipSet { + require.True(f.t, height > 0, "") + for i := 0; i < height; i++ { + parent = f.Build(ctx, parent, 1, singleBuilder(build)) + } + return parent +} + +// Build creates and returns a new tipset child of `parent`. +// The tipset carries `width` > 0 blocks with the same height and parents, but different tickets. +// Note: the blocks will all have the same miner, which is unrealistic and forbidden by consensus; +// generalise this to random miner addresses when that is rejected by the syncer. +// The `build` function is invoked to modify the block before it is stored. +func (f *Builder) Build(ctx context.Context, parent *types.TipSet, width int, build func(b *BlockBuilder, i int)) *types.TipSet { + tip := f.BuildOrphaTipset(parent, width, build) + + for _, block := range tip.Blocks() { + // add block to cstore + _, err := f.cstore.Put(context.TODO(), block) + require.NoError(f.t, err) + } + + // Compute and remember state for the tipset. 
+ stateRoot, _ := f.ComputeState(ctx, tip) + f.tipStateCids[tip.Key().String()] = stateRoot + return tip +} + +func (f *Builder) BuildOrphaTipset(parent *types.TipSet, width int, build func(b *BlockBuilder, i int)) *types.TipSet { + require.True(f.t, width > 0) + var blocks []*types.BlockHeader + height := abi.ChainEpoch(0) + if parent.Defined() { + var err error + height = parent.At(0).Height + 1 + require.NoError(f.t, err) + } else { + parent = types.UndefTipSet + } + + parentWeight, err := f.stateBuilder.Weigh(context.TODO(), parent) + require.NoError(f.t, err) + + emptyBLSSig := crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: []byte(""), + // Data: (*bls.Aggregate([]bls.Signature{}))[:], + } + + for i := 0; i < width; i++ { + ticket := types.Ticket{} + ticket.VRFProof = make([]byte, binary.Size(f.seq)) + binary.BigEndian.PutUint64(ticket.VRFProof, f.seq) + f.seq++ + + b := &types.BlockHeader{ + Ticket: &ticket, + Miner: f.minerAddress, + BeaconEntries: nil, + ParentWeight: parentWeight, + Parents: parent.Key().Cids(), + Height: height, + Messages: testhelpers.EmptyTxMetaCID, + ParentMessageReceipts: testhelpers.EmptyReceiptsCID, + BLSAggregate: &emptyBLSSig, + // Omitted fields below + // ParentStateRoot: stateRoot, + // EPoStInfo: ePoStInfo, + // ForkSignaling: forkSig, + Timestamp: f.stamper.Stamp(height), + BlockSig: &crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: []byte{}}, + ElectionProof: &types.ElectionProof{VRFProof: []byte{0x0c, 0x0d}, WinCount: int64(10)}, + } + + if build != nil { + build(&BlockBuilder{b, f.t, f.mstore}, i) + } + + // Compute state root for this block. 
+ ctx := context.Background() + prevState := f.StateForKey(ctx, parent.Key()) + smsgs, umsgs, err := f.mstore.LoadMetaMessages(ctx, b.Messages) + require.NoError(f.t, err) + + var sBlsMsg []types.ChainMsg + var sSecpMsg []types.ChainMsg + for _, m := range umsgs { + sBlsMsg = append(sBlsMsg, m) + } + + for _, m := range smsgs { + sSecpMsg = append(sSecpMsg, m) + } + blkMsgInfo := types.BlockMessagesInfo{ + BlsMessages: sBlsMsg, + SecpkMessages: sSecpMsg, + Block: b, + } + stateRootRaw, _, err := f.stateBuilder.ComputeState(prevState, []types.BlockMessagesInfo{blkMsgInfo}) + require.NoError(f.t, err) + b.ParentStateRoot = stateRootRaw + + blocks = append(blocks, b) + } + return testhelpers.RequireNewTipSet(f.t, blocks...) +} + +// StateForKey loads (or computes) the state root for a tipset key. +func (f *Builder) StateForKey(ctx context.Context, key types.TipSetKey) cid.Cid { + state, found := f.tipStateCids[key.String()] + if found { + return state + } + // No state yet computed for this tip (perhaps because the blocks in it have not previously + // been considered together as a tipset). + tip, err := f.GetTipSet(ctx, key) + require.NoError(f.t, err) + state, _ = f.ComputeState(ctx, tip) + return state +} + +// GetBlockstoreValue gets data straight out of the underlying blockstore by cid +func (f *Builder) GetBlockstoreValue(ctx context.Context, c cid.Cid) (blocks.Block, error) { + return f.bs.Get(ctx, c) +} + +// ComputeState computes the state for a tipset from its parent state. +func (f *Builder) ComputeState(ctx context.Context, tip *types.TipSet) (cid.Cid, []types.MessageReceipt) { + parentKey := tip.Parents() + // Load the state of the parent tipset and compute the required state (recursively). + prev := f.StateForKey(ctx, parentKey) + blockMsgInfo := f.tipMessages(tip) + state, receipt, err := f.stateBuilder.ComputeState(prev, blockMsgInfo) + require.NoError(f.t, err) + return state, receipt +} + +// tipMessages returns the messages of a tipset. 
Each block's messages are +// grouped into a slice and a slice of these slices is returned. +func (f *Builder) tipMessages(tip *types.TipSet) []types.BlockMessagesInfo { + ctx := context.Background() + var blockMessageInfos []types.BlockMessagesInfo + for i := 0; i < tip.Len(); i++ { + smsgs, blsMsg, err := f.mstore.LoadMetaMessages(ctx, tip.At(i).Messages) + require.NoError(f.t, err) + + var sBlsMsg []types.ChainMsg + var sSecpMsg []types.ChainMsg + for _, m := range blsMsg { + sBlsMsg = append(sBlsMsg, m) + } + + for _, m := range smsgs { + sSecpMsg = append(sSecpMsg, m) + } + blockMessageInfos = append(blockMessageInfos, types.BlockMessagesInfo{ + BlsMessages: sBlsMsg, + SecpkMessages: sSecpMsg, + Block: tip.At(i), + }) + } + return blockMessageInfos +} + +// Wraps a simple build function in one that also accepts an index, propagating a nil function. +func singleBuilder(build func(b *BlockBuilder)) func(b *BlockBuilder, i int) { + if build == nil { + return nil + } + return func(b *BlockBuilder, i int) { build(b) } +} + +// /// BlockHeader builder ///// + +// BlockBuilder mutates blocks as they are generated. +type BlockBuilder struct { + block *types.BlockHeader + t *testing.T + messages *MessageStore +} + +// SetTicket sets the block's ticket. +func (bb *BlockBuilder) SetTicket(raw []byte) { + bb.block.Ticket = &types.Ticket{VRFProof: raw} +} + +// SetTimestamp sets the block's timestamp. +func (bb *BlockBuilder) SetTimestamp(timestamp uint64) { + bb.block.Timestamp = timestamp +} + +// IncHeight increments the block's height, implying a number of null blocks before this one +// is mined. +func (bb *BlockBuilder) IncHeight(nullBlocks abi.ChainEpoch) { + bb.block.Height += nullBlocks +} + +// SetBlockSig set a new signature +func (bb *BlockBuilder) SetBlockSig(signature crypto.Signature) { + bb.block.BlockSig = &signature +} + +// AddMessages adds a message & receipt collection to the block. 
+func (bb *BlockBuilder) AddMessages(secpmsgs []*types.SignedMessage, blsMsgs []*types.Message) { + ctx := context.Background() + + meta, err := bb.messages.StoreMessages(ctx, secpmsgs, blsMsgs) + require.NoError(bb.t, err) + + bb.block.Messages = meta +} + +// SetStateRoot sets the block's state root. +func (bb *BlockBuilder) SetStateRoot(root cid.Cid) { + bb.block.ParentStateRoot = root +} + +// /// state builder ///// + +// StateBuilder abstracts the computation of state root CIDs from the chain builder. +type StateBuilder interface { + ComputeState(prev cid.Cid, blockmsg []types.BlockMessagesInfo) (cid.Cid, []types.MessageReceipt, error) + Weigh(ctx context.Context, tip *types.TipSet) (big.Int, error) +} + +// FakeStateBuilder computes a fake state CID by hashing the CIDs of a block's parents and messages. +type FakeStateBuilder struct{} + +// ComputeState computes a fake state from a previous state root CID and the messages contained +// in list-of-lists of messages in blocks. Note that if there are no messages, the resulting state +// is the same as the input state. +// This differs from the true state transition function in that messages that are duplicated +// between blocks in the tipset are not ignored. +func (FakeStateBuilder) ComputeState(prev cid.Cid, blockmsg []types.BlockMessagesInfo) (cid.Cid, []types.MessageReceipt, error) { + receipts := []types.MessageReceipt{} + + // Accumulate the cids of the previous state and of all messages in the tipset. + inputs := []cid.Cid{prev} + for _, blockMessages := range blockmsg { + for _, msg := range append(blockMessages.BlsMessages, blockMessages.SecpkMessages...) { + mCId := msg.Cid() + inputs = append(inputs, mCId) + receipts = append(receipts, types.MessageReceipt{ + ExitCode: 0, + Return: mCId.Bytes(), + GasUsed: 3, + }) + } + } + + if len(inputs) == 1 { + // If there are no messages, the state doesn't change! 
+ return prev, receipts, nil + } + + root, err := util.MakeCid(inputs) + if err != nil { + return cid.Undef, []types.MessageReceipt{}, err + } + return root, receipts, nil +} + +// Weigh computes a tipset's weight as its parent weight plus one for each block in the tipset. +func (FakeStateBuilder) Weigh(context context.Context, tip *types.TipSet) (big.Int, error) { + parentWeight := big.Zero() + if tip.Defined() { + parentWeight = tip.ParentWeight() + } + + return big.Add(parentWeight, big.NewInt(int64(tip.Len()))), nil +} + +// /// Timestamper ///// + +// TimeStamper is an object that timestamps blocks +type TimeStamper interface { + Stamp(abi.ChainEpoch) uint64 +} + +// ZeroTimestamper writes a default of 0 to the timestamp +type ZeroTimestamper struct{} + +// Stamp returns a stamp for the current block +func (zt *ZeroTimestamper) Stamp(height abi.ChainEpoch) uint64 { + return uint64(0) +} + +// ClockTimestamper writes timestamps based on a blocktime and genesis time +type ClockTimestamper struct { + c clock.ChainEpochClock +} + +// NewClockTimestamper makes a new stamper for creating production valid timestamps +func NewClockTimestamper(chainClock clock.ChainEpochClock) *ClockTimestamper { + return &ClockTimestamper{ + c: chainClock, + } +} + +// Stamp assigns a valid timestamp given genesis time and block time to +// a block of the provided height. +func (ct *ClockTimestamper) Stamp(height abi.ChainEpoch) uint64 { + startTime := ct.c.StartTimeOfEpoch(height) + + return uint64(startTime.Unix()) +} + +// /// state evaluator ///// + +// FakeStateEvaluator is a syncStateEvaluator that delegates to the FakeStateBuilder. 
+type FakeStateEvaluator struct { + ChainStore *Store + MessageStore *MessageStore + FakeStateBuilder + ChsWorkingOn map[types.TipSetKey]chan struct{} + stLk sync.Mutex +} + +// RunStateTransition delegates to StateBuilder.ComputeState +func (e *FakeStateEvaluator) RunStateTransition(ctx context.Context, ts *types.TipSet) (rootCid cid.Cid, receiptCid cid.Cid, err error) { + key := ts.Key() + e.stLk.Lock() + workingCh, exist := e.ChsWorkingOn[key] + + if exist { + e.stLk.Unlock() + select { + case <-workingCh: + e.stLk.Lock() + case <-ctx.Done(): + return cid.Undef, cid.Undef, ctx.Err() + } + } + if m, _ := e.ChainStore.LoadTipsetMetadata(ctx, ts); m != nil { + e.stLk.Unlock() + return m.TipSetStateRoot, m.TipSetReceipts, nil + } + + workingCh = make(chan struct{}) + e.ChsWorkingOn[key] = workingCh + e.stLk.Unlock() + + defer func() { + e.stLk.Lock() + delete(e.ChsWorkingOn, key) + if err == nil { + _ = e.ChainStore.PutTipSetMetadata(ctx, &TipSetMetadata{ + TipSetStateRoot: rootCid, + TipSet: ts, + TipSetReceipts: receiptCid, + }) + } + e.stLk.Unlock() + close(workingCh) + }() + + // gather message + blockMessageInfo, err := e.MessageStore.LoadTipSetMessage(ctx, ts) + if err != nil { + return cid.Undef, cid.Undef, fmt.Errorf("failed to gather message in tipset %v", err) + } + var receipts []types.MessageReceipt + rootCid, receipts, err = e.ComputeState(ts.At(0).ParentStateRoot, blockMessageInfo) + if err != nil { + return cid.Undef, cid.Undef, errors.Wrap(err, "error compute state") + } + + receiptCid, err = e.MessageStore.StoreReceipts(ctx, receipts) + if err != nil { + return cid.Undef, cid.Undef, fmt.Errorf("failed to save receipt: %v", err) + } + + return rootCid, receiptCid, nil +} + +func (e *FakeStateEvaluator) ValidateFullBlock(ctx context.Context, blk *types.BlockHeader) error { + parent, err := e.ChainStore.GetTipSet(ctx, types.NewTipSetKey(blk.Parents...)) + if err != nil { + return err + } + + root, receipts, err := e.RunStateTransition(ctx, parent) + if 
err != nil { + return err + } + + return e.ChainStore.PutTipSetMetadata(ctx, &TipSetMetadata{ + TipSetStateRoot: root, + TipSet: parent, + TipSetReceipts: receipts, + }) +} + +// /// Chain selector ///// + +// FakeChainSelector is a syncChainSelector that delegates to the FakeStateBuilder +type FakeChainSelector struct { + FakeStateBuilder +} + +// IsHeavier compares chains weighed with StateBuilder.Weigh. +func (e *FakeChainSelector) IsHeavier(ctx context.Context, a, b *types.TipSet) (bool, error) { + aw, err := e.Weigh(ctx, a) + if err != nil { + return false, err + } + bw, err := e.Weigh(ctx, b) + if err != nil { + return false, err + } + return aw.GreaterThan(bw), nil +} + +// Weight delegates to the statebuilder +func (e *FakeChainSelector) Weight(ctx context.Context, ts *types.TipSet) (big.Int, error) { + return e.Weigh(ctx, ts) +} + +// /// Interface and accessor implementations ///// + +// GetBlock returns the block identified by `c`. +func (f *Builder) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHeader, error) { + var block types.BlockHeader + if err := f.cstore.Get(ctx, c, &block); err != nil { + return nil, err + } + return &block, nil +} + +// GetBlocks returns the blocks identified by `cids`. +func (f *Builder) GetBlocksByIds(ctx context.Context, cids []cid.Cid) ([]*types.BlockHeader, error) { + ret := make([]*types.BlockHeader, len(cids)) + for i, c := range cids { + var block types.BlockHeader + if err := f.cstore.Get(ctx, c, &block); err != nil { + return nil, err + } + ret[i] = &block + } + return ret, nil +} + +// GetTipSet returns the tipset identified by `key`. 
+func (f *Builder) GetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + var blocks []*types.BlockHeader + for _, bid := range key.Cids() { + var blk types.BlockHeader + if err := f.cstore.Get(ctx, bid, &blk); err != nil { + return nil, fmt.Errorf("no block %s", bid) + } + blocks = append(blocks, &blk) + } + return types.NewTipSet(blocks) +} + +// FetchTipSets fetchs the tipset at `tsKey` from the fetchers blockStore backed by the Builder. +func (f *Builder) FetchTipSets(ctx context.Context, key types.TipSetKey, from peer.ID, done func(t *types.TipSet) (bool, error)) ([]*types.TipSet, error) { + var tips []*types.TipSet + for { + tip, err := f.GetTipSet(ctx, key) + if err != nil { + return nil, err + } + tips = append(tips, tip) + ok, err := done(tip) + if err != nil { + return nil, err + } + if ok { + break + } + key = tip.Parents() + } + return tips, nil +} + +// FetchTipSetHeaders fetchs the tipset at `tsKey` from the fetchers blockStore backed by the Builder. +func (f *Builder) FetchTipSetHeaders(ctx context.Context, key types.TipSetKey, from peer.ID, done func(t *types.TipSet) (bool, error)) ([]*types.TipSet, error) { + return f.FetchTipSets(ctx, key, from, done) +} + +// GetTipSetStateRoot returns the state root that was computed for a tipset. +func (f *Builder) GetTipSetStateRoot(key types.TipSetKey) (cid.Cid, error) { + found, ok := f.tipStateCids[key.String()] + if !ok { + return cid.Undef, errors.Errorf("no state for %s", key) + } + return found, nil +} + +func (f *Builder) GetTipSetByHeight(ctx context.Context, ts *types.TipSet, h abi.ChainEpoch, prev bool) (*types.TipSet, error) { + if !ts.Defined() { + return ts, nil + } + if epoch := ts.Height(); epoch == h { + return ts, nil + } + + for { + ts = f.RequireTipSet(ctx, ts.Parents()) + height := ts.Height() + if height >= 0 && height == h { + return ts, nil + } else if height < h { + return ts, nil + } + } +} + +// RequireTipSet returns a tipset by key, which must exist. 
+func (f *Builder) RequireTipSet(ctx context.Context, key types.TipSetKey) *types.TipSet { + tip, err := f.GetTipSet(ctx, key) + require.NoError(f.t, err) + return tip +} + +// RequireTipSets returns a chain of tipsets from key, which must exist and be long enough. +func (f *Builder) RequireTipSets(ctx context.Context, head types.TipSetKey, count int) []*types.TipSet { + var tips []*types.TipSet + for i := 0; i < count; i++ { + tip := f.RequireTipSet(ctx, head) + tips = append(tips, tip) + head = tip.Parents() + } + return tips +} + +// LoadReceipts returns the message collections tracked by the builder. +func (f *Builder) LoadReceipts(ctx context.Context, c cid.Cid) ([]types.MessageReceipt, error) { + return f.mstore.LoadReceipts(ctx, c) +} + +// LoadTxMeta returns the tx meta wrapper tracked by the builder. +func (f *Builder) LoadTxMeta(ctx context.Context, metaCid cid.Cid) (types.MessageRoot, error) { + return f.mstore.LoadTxMeta(ctx, metaCid) +} + +// StoreReceipts stores message receipts and returns a commitment. +func (f *Builder) StoreReceipts(ctx context.Context, receipts []types.MessageReceipt) (cid.Cid, error) { + return f.mstore.StoreReceipts(ctx, receipts) +} + +// StoreTxMeta stores a tx meta +func (f *Builder) StoreTxMeta(ctx context.Context, meta types.MessageRoot) (cid.Cid, error) { + return f.mstore.StoreTxMeta(ctx, meta) +} + +func (f *Builder) LoadUnsignedMessagesFromCids(ctx context.Context, blsCids []cid.Cid) ([]*types.Message, error) { + return f.mstore.LoadUnsignedMessagesFromCids(ctx, blsCids) +} + +func (f *Builder) LoadSignedMessagesFromCids(ctx context.Context, secpCids []cid.Cid) ([]*types.SignedMessage, error) { + return f.mstore.LoadSignedMessagesFromCids(ctx, secpCids) +} + +// LoadMessages returns the message collections tracked by the builder. 
+func (f *Builder) LoadMetaMessages(ctx context.Context, metaCid cid.Cid) ([]*types.SignedMessage, []*types.Message, error) { + return f.mstore.LoadMetaMessages(ctx, metaCid) +} + +func (f *Builder) ReadMsgMetaCids(ctx context.Context, metaCid cid.Cid) ([]cid.Cid, []cid.Cid, error) { + return f.mstore.ReadMsgMetaCids(ctx, metaCid) +} + +// /// exchange ///// +func (f *Builder) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) { + ts, err := f.GetTipSet(ctx, tsk) + if err != nil { + return nil, err + } + result := []*types.TipSet{ts} + for i := 1; i < count; i++ { + if ts.Height() == 0 { + break + } + ts, err = f.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, err + } + result = append(result, ts) + } + return result, nil +} + +func (f *Builder) GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*exchange.CompactedMessages, error) { + result := []*exchange.CompactedMessages{} + for _, ts := range tipsets { + bmsgs, bmincl, smsgs, smincl, err := aexchange.GatherMessages(ctx, f, f.mstore, ts) + if err != nil { + return nil, err + } + compactMsg := &exchange.CompactedMessages{} + compactMsg.Bls = bmsgs + compactMsg.BlsIncludes = bmincl + compactMsg.Secpk = smsgs + compactMsg.SecpkIncludes = smincl + result = append(result, compactMsg) + } + return result, nil +} + +func (f *Builder) GetFullTipSet(ctx context.Context, peer []peer.ID, tsk types.TipSetKey) (*types.FullTipSet, error) { + panic("implement me") +} + +func (f *Builder) AddPeer(peer peer.ID) {} + +func (f *Builder) GeneratorGenesis() *types.TipSet { + b, err := assets.GetGenesis(types.NetworkCalibnet) + require.NoError(f.t, err) + source := io.NopCloser(bytes.NewReader(b)) + + ch, err := car.LoadCar(context.Background(), f.bs, source) + require.NoError(f.t, err) + + // need to check if we are being handed a car file with a single genesis block or an entire chain. 
+ bsBlk, err := f.bs.Get(context.Background(), ch.Roots[0]) + require.NoError(f.t, err) + + cur, err := types.DecodeBlock(bsBlk.RawData()) + require.NoError(f.t, err) + + ts, err := types.NewTipSet([]*types.BlockHeader{cur}) + require.NoError(f.t, err) + + return ts +} diff --git a/pkg/chain/tip_index.go b/pkg/chain/tip_index.go new file mode 100644 index 0000000000..dbf44d4130 --- /dev/null +++ b/pkg/chain/tip_index.go @@ -0,0 +1,122 @@ +package chain + +import ( + "context" + "fmt" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + "github.com/pkg/errors" +) + +// ErrNotFound is returned when the key for a "Get" lookup is not in the index. +var ErrNotFound = errors.New("Key not found in tipindex") + +// TipSetMetadata is the type stored at the leaves of the TipStateCache. It contains +// a tipset pointing to blocks, the root cid of the chain's state after +// applying the messages in this tipset to it's parent state, and the cid of the receipts +// for these messages. +type TipSetMetadata struct { + // TipSetStateRoot is the root of aggregate state after applying tipset + TipSetStateRoot cid.Cid + + // TipSet is the set of blocks that forms the tip set + TipSet *types.TipSet + + // TipSetReceipts receipts from all message contained within this tipset + TipSetReceipts cid.Cid +} + +type tipLoader interface { + GetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) + LoadTipsetMetadata(ctx context.Context, ts *types.TipSet) (*TipSetMetadata, error) +} + +// TipStateCache tracks tipsets and their states by tipset block ids. +// All methods are thread safe. +type TipStateCache struct { + // cache allows lookup of recorded TipSet states by TipSet ID. + cache map[string]TSState + + loader tipLoader + + l sync.RWMutex +} + +// NewTipStateCache is the TipStateCache constructor. 
+func NewTipStateCache(loader tipLoader) *TipStateCache { + return &TipStateCache{ + cache: make(map[string]TSState, 2880), // one day height + loader: loader, + } +} + +// Put adds an entry to both of TipStateCache's internal indexes. +// After this call the input TipSetMetadata can be looked up by the ID of the tipset. +func (ti *TipStateCache) Put(tsm *TipSetMetadata) { + ti.l.Lock() + defer ti.l.Unlock() + + ti.cache[tsm.TipSet.String()] = TSState{ + StateRoot: tsm.TipSetStateRoot, + Receipts: tsm.TipSetReceipts, + } +} + +// Get returns the tipset given by the input ID and its state. +func (ti *TipStateCache) Get(ctx context.Context, ts *types.TipSet) (TSState, error) { + ti.l.RLock() + state, ok := ti.cache[ts.String()] + ti.l.RUnlock() + if !ok { + tipSetMetadata, err := ti.loader.LoadTipsetMetadata(ctx, ts) + if err != nil { + return TSState{}, errors.New("state not exit") + } + ti.Put(tipSetMetadata) + + return TSState{ + StateRoot: tipSetMetadata.TipSetStateRoot, + Receipts: tipSetMetadata.TipSetReceipts, + }, nil + } + return state, nil +} + +// GetTipSetStateRoot returns the tipsetStateRoot from func (ti *TipStateCache) Get(tsKey string). +func (ti *TipStateCache) GetTipSetStateRoot(ctx context.Context, ts *types.TipSet) (cid.Cid, error) { + state, err := ti.Get(ctx, ts) + if err != nil { + return cid.Cid{}, err + } + return state.StateRoot, nil +} + +// GetTipSetReceiptsRoot returns the tipsetReceipts from func (ti *TipStateCache) Get(tsKey string). +func (ti *TipStateCache) GetTipSetReceiptsRoot(ctx context.Context, ts *types.TipSet) (cid.Cid, error) { + state, err := ti.Get(ctx, ts) + if err != nil { + return cid.Cid{}, err + } + return state.Receipts, nil +} + +// Has returns true iff the tipset with the input ID is stored in +// the TipStateCache. 
+func (ti *TipStateCache) Has(ctx context.Context, ts *types.TipSet) bool { + _, err := ti.Get(ctx, ts) + return err == nil +} + +func (ti *TipStateCache) Del(ts *types.TipSet) { + ti.l.Lock() + defer ti.l.Unlock() + delete(ti.cache, ts.String()) +} + +// makeKey returns a unique string for every parent set key and height input +func makeKey(pKey string, h abi.ChainEpoch) string { + return fmt.Sprintf("p-%s h-%d", pKey, h) +} diff --git a/pkg/chain/traversal.go b/pkg/chain/traversal.go new file mode 100644 index 0000000000..a434f3b81c --- /dev/null +++ b/pkg/chain/traversal.go @@ -0,0 +1,195 @@ +package chain + +import ( + "context" + "errors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +// TipSetProvider provides tipsets for traversal. +type TipSetProvider interface { + GetTipSet(ctx context.Context, tsKey types.TipSetKey) (*types.TipSet, error) +} + +// IterAncestors returns an iterator over tipset ancestors, yielding first the start tipset and +// then its parent tipsets until (and including) the genesis tipset. +func IterAncestors(ctx context.Context, store TipSetProvider, start *types.TipSet) *TipsetIterator { + return &TipsetIterator{ctx, store, start} +} + +// TipsetIterator is an iterator over tipsets. +type TipsetIterator struct { + ctx context.Context + store TipSetProvider + value *types.TipSet +} + +// Value returns the iterator's current value, if not Complete(). +func (it *TipsetIterator) Value() *types.TipSet { + return it.value +} + +// Complete tests whether the iterator is exhausted. +func (it *TipsetIterator) Complete() bool { + return !it.value.Defined() +} + +// Next advances the iterator to the next value. 
+func (it *TipsetIterator) Next(ctx context.Context) error { + select { + case <-it.ctx.Done(): + return it.ctx.Err() + default: + if it.value.Height() == 0 { + it.value = &types.TipSet{} + } else { + var err error + parentKey := it.value.Parents() + it.value, err = it.store.GetTipSet(ctx, parentKey) + return err + } + return nil + } +} + +// BlockProvider provides blocks. +type BlockProvider interface { + GetBlock(ctx context.Context, cid cid.Cid) (*types.BlockHeader, error) +} + +// LoadTipSetBlocks loads all the blocks for a tipset from the store. +func LoadTipSetBlocks(ctx context.Context, store BlockProvider, key types.TipSetKey) (*types.TipSet, error) { + var blocks []*types.BlockHeader + for _, bid := range key.Cids() { + blk, err := store.GetBlock(ctx, bid) + if err != nil { + return nil, err + } + blocks = append(blocks, blk) + } + return types.NewTipSet(blocks) +} + +type tipsetFromBlockProvider struct { + ctx context.Context // Context to use when loading blocks + blocks BlockProvider // Provides blocks +} + +// TipSetProviderFromBlocks builds a tipset provider backed by a block provider. +// Blocks will be loaded with the provided context, since GetTipSet does not accept a +// context parameter. This can and should be removed when GetTipSet does take a context. +func TipSetProviderFromBlocks(ctx context.Context, blocks BlockProvider) TipSetProvider { + return &tipsetFromBlockProvider{ctx, blocks} +} + +// GetTipSet loads the blocks for a tipset. +func (p *tipsetFromBlockProvider) GetTipSet(ctx context.Context, tsKey types.TipSetKey) (*types.TipSet, error) { + return LoadTipSetBlocks(p.ctx, p.blocks, tsKey) +} + +// CollectTipsToCommonAncestor traverses chains from two tipsets (called old and new) until their common +// ancestor, collecting all tipsets that are in one chain but not the other. +// The resulting lists of tipsets are ordered by decreasing height. 
+func CollectTipsToCommonAncestor(ctx context.Context, store TipSetProvider, oldHead, newHead *types.TipSet) (oldTips, newTips []*types.TipSet, err error) { + oldIter := IterAncestors(ctx, store, oldHead) + newIter := IterAncestors(ctx, store, newHead) + + commonAncestor, err := FindCommonAncestor(ctx, oldIter, newIter) + if err != nil { + return + } + commonHeight := commonAncestor.Height() + + // Refresh iterators modified by FindCommonAncestors + oldIter = IterAncestors(ctx, store, oldHead) + newIter = IterAncestors(ctx, store, newHead) + + // Add 1 to the height argument so that the common ancestor is not + // included in the outputs. + oldTips, err = CollectTipSetsOfHeightAtLeast(ctx, oldIter, commonHeight+1) + if err != nil { + return + } + newTips, err = CollectTipSetsOfHeightAtLeast(ctx, newIter, commonHeight+1) + return +} + +// ErrNoCommonAncestor is returned when two chains assumed to have a common ancestor do not. +var ErrNoCommonAncestor = errors.New("no common ancestor") + +// FindCommonAncestor returns the common ancestor of the two tipsets pointed to +// by the input iterators. If they share no common ancestor ErrNoCommonAncestor +// will be returned. +func FindCommonAncestor(ctx context.Context, leftIter, rightIter *TipsetIterator) (*types.TipSet, error) { + for !rightIter.Complete() && !leftIter.Complete() { + left := leftIter.Value() + right := rightIter.Value() + + leftHeight := left.Height() + rightHeight := right.Height() + + // Found common ancestor. + if left.Equals(right) { + return left, nil + } + + // Update the pointers. Pointers move back one tipset if they + // point to a tipset at the same height or higher than the + // other pointer's tipset. 
+ if rightHeight >= leftHeight { + if err := rightIter.Next(ctx); err != nil { + return nil, err + } + } + + if leftHeight >= rightHeight { + if err := leftIter.Next(ctx); err != nil { + return nil, err + } + } + } + return nil, ErrNoCommonAncestor +} + +// CollectTipSetsOfHeightAtLeast collects all tipsets with a height greater +// than or equal to minHeight from the input tipset. +func CollectTipSetsOfHeightAtLeast(ctx context.Context, iterator *TipsetIterator, minHeight abi.ChainEpoch) ([]*types.TipSet, error) { + var ret []*types.TipSet + var err error + var h abi.ChainEpoch + for ; !iterator.Complete(); err = iterator.Next(ctx) { + if err != nil { + return nil, err + } + h = iterator.Value().Height() + if h < minHeight { + return ret, nil + } + ret = append(ret, iterator.Value()) + } + return ret, nil +} + +// FindLatestDRAND returns the latest DRAND entry in the chain beginning at start +func FindLatestDRAND(ctx context.Context, start *types.TipSet, reader TipSetProvider) (*types.BeaconEntry, error) { + iterator := IterAncestors(ctx, reader, start) + var err error + for ; !iterator.Complete(); err = iterator.Next(ctx) { + if err != nil { + return nil, err + } + ts := iterator.Value() + // DRAND entries must be the same for all blocks on the tipset as + // an invariant of the tipset provider + + entries := ts.At(0).BeaconEntries + if len(entries) > 0 { + return &entries[len(entries)-1], nil + } + // No entries, simply move on to the next ancestor + } + return nil, errors.New("no DRAND entries in chain") +} diff --git a/pkg/chain/traversal_test.go b/pkg/chain/traversal_test.go new file mode 100644 index 0000000000..71d79140fe --- /dev/null +++ b/pkg/chain/traversal_test.go @@ -0,0 +1,231 @@ +package chain_test + +import ( + "context" + "testing" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/chain" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func TestIterAncestors(t *testing.T) { + tf.UnitTest(t) + miner, err := address.NewSecp256k1Address([]byte("address")) + require.NoError(t, err) + + t.Run("iterates", func(t *testing.T) { + ctx := context.Background() + store := chain.NewBuilder(t, miner) + + root := store.AppendBlockOnBlocks(ctx) + b11 := store.AppendBlockOnBlocks(ctx, root) + b12 := store.AppendBlockOnBlocks(ctx, root) + b21 := store.AppendBlockOnBlocks(ctx, b11, b12) + + t0 := testhelpers.RequireNewTipSet(t, root) + t1 := testhelpers.RequireNewTipSet(t, b11, b12) + t2 := testhelpers.RequireNewTipSet(t, b21) + + it := chain.IterAncestors(ctx, store, t2) + assert.False(t, it.Complete()) + assert.True(t, t2.Equals(it.Value())) + + assert.NoError(t, it.Next(ctx)) + assert.False(t, it.Complete()) + assert.True(t, t1.Equals(it.Value())) + + assert.NoError(t, it.Next(ctx)) + assert.False(t, it.Complete()) + assert.True(t, t0.Equals(it.Value())) + + assert.NoError(t, it.Next(ctx)) + assert.True(t, it.Complete()) + }) + + t.Run("respects context", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + store := chain.NewBuilder(t, miner) + + root := store.AppendBlockOnBlocks(ctx) + b11 := store.AppendBlockOnBlocks(ctx, root) + b12 := store.AppendBlockOnBlocks(ctx, root) + b21 := store.AppendBlockOnBlocks(ctx, b11, b12) + + testhelpers.RequireNewTipSet(t, root) + t1 := testhelpers.RequireNewTipSet(t, b11, b12) + t2 := testhelpers.RequireNewTipSet(t, b21) + + it := chain.IterAncestors(ctx, store, t2) + assert.False(t, it.Complete()) + assert.True(t, t2.Equals(it.Value())) + + assert.NoError(t, it.Next(ctx)) + assert.False(t, it.Complete()) + assert.True(t, t1.Equals(it.Value())) + + cancel() + + assert.Error(t, it.Next(ctx)) + }) +} + +// Happy path +func 
TestCollectTipSetsOfHeightAtLeast(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + + chainLen := 15 + head := builder.AppendManyOn(ctx, chainLen, types.UndefTipSet) + + stopHeight := abi.ChainEpoch(4) + iterator := chain.IterAncestors(ctx, builder, head) + tipsets, err := chain.CollectTipSetsOfHeightAtLeast(ctx, iterator, stopHeight) + assert.NoError(t, err) + latestHeight := tipsets[0].Height() + assert.Equal(t, abi.ChainEpoch(14), latestHeight) + earliestHeight := tipsets[len(tipsets)-1].Height() + assert.Equal(t, abi.ChainEpoch(4), earliestHeight) + assert.Equal(t, 11, len(tipsets)) +} + +// Height at least 0. +func TestCollectTipSetsOfHeightAtLeastZero(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + + chainLen := 25 + head := builder.AppendManyOn(ctx, chainLen, types.UndefTipSet) + + stopHeight := abi.ChainEpoch(0) + iterator := chain.IterAncestors(ctx, builder, head) + tipsets, err := chain.CollectTipSetsOfHeightAtLeast(ctx, iterator, stopHeight) + assert.NoError(t, err) + latestHeight := tipsets[0].Height() + assert.Equal(t, abi.ChainEpoch(24), latestHeight) + earliestHeight := tipsets[len(tipsets)-1].Height() + assert.Equal(t, abi.ChainEpoch(0), earliestHeight) + assert.Equal(t, chainLen, len(tipsets)) +} + +// The starting epoch is a null block. +func TestCollectTipSetsOfHeightAtLeastStartingEpochIsNull(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + head := builder.Genesis() + + // Add 30 tipsets to the head of the chainStore. + head = builder.AppendManyOn(ctx, 30, head) + + // Now add 10 null blocks and 1 tipset. + head = builder.BuildOneOn(ctx, head, func(b *chain.BlockBuilder) { + b.IncHeight(10) + }) + + // Now add 19 more tipsets. 
+ head = builder.AppendManyOn(ctx, 19, head) + + stopHeight := abi.ChainEpoch(35) + iterator := chain.IterAncestors(ctx, builder, head) + tipsets, err := chain.CollectTipSetsOfHeightAtLeast(ctx, iterator, stopHeight) + assert.NoError(t, err) + latestHeight := tipsets[0].Height() + assert.Equal(t, abi.ChainEpoch(60), latestHeight) + earliestHeight := tipsets[len(tipsets)-1].Height() + assert.Equal(t, abi.ChainEpoch(41), earliestHeight) + assert.Equal(t, 20, len(tipsets)) +} + +func TestFindCommonAncestorSameChain(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + head := builder.Genesis() + // Add 30 tipsets to the head of the chainStore. + head = builder.AppendManyOn(ctx, 30, head) + headIterOne := chain.IterAncestors(ctx, builder, head) + headIterTwo := chain.IterAncestors(ctx, builder, head) + commonAncestor, err := chain.FindCommonAncestor(ctx, headIterOne, headIterTwo) + assert.NoError(t, err) + assert.Equal(t, head, commonAncestor) +} + +func TestFindCommonAncestorFork(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + head := builder.Genesis() + + // Add 3 tipsets to the head of the chainStore. 
+ commonHeadTip := builder.AppendManyOn(ctx, 3, head) + + // Grow the fork chain + lenFork := 10 + forkHead := builder.AppendManyOn(ctx, lenFork, commonHeadTip) + + // Grow the main chain + lenMainChain := 14 + mainHead := builder.AppendManyOn(ctx, lenMainChain, commonHeadTip) + + forkItr := chain.IterAncestors(ctx, builder, forkHead) + mainItr := chain.IterAncestors(ctx, builder, mainHead) + commonAncestor, err := chain.FindCommonAncestor(ctx, mainItr, forkItr) + assert.NoError(t, err) + assert.ObjectsAreEqualValues(commonHeadTip, commonAncestor) +} + +func TestFindCommonAncestorNoFork(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + head := builder.Genesis() + + // Add 30 tipsets to the head of the chainStore. + head = builder.AppendManyOn(ctx, 30, head) + headIterOne := chain.IterAncestors(ctx, builder, head) + + // Now add 19 more tipsets. + expectedAncestor := head + head = builder.AppendManyOn(ctx, 19, head) + headIterTwo := chain.IterAncestors(ctx, builder, head) + + commonAncestor, err := chain.FindCommonAncestor(ctx, headIterOne, headIterTwo) + assert.NoError(t, err) + assert.True(t, expectedAncestor.Equals(commonAncestor)) +} + +// This test exercises an edge case fork that our previous common ancestor +// utility handled incorrectly. +func TestFindCommonAncestorNullBlockFork(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + builder := chain.NewBuilder(t, address.Undef) + head := builder.Genesis() + + // Add 10 tipsets to the head of the chainStore. + commonHead := builder.AppendManyOn(ctx, 10, head) + + // From the common ancestor, add a block following a null block. + headAfterNull := builder.BuildOneOn(ctx, commonHead, func(b *chain.BlockBuilder) { + b.IncHeight(1) + }) + afterNullItr := chain.IterAncestors(ctx, builder, headAfterNull) + + // Add a block (with no null) on another fork. 
+ headNoNull := builder.AppendOn(ctx, commonHead, 1) + noNullItr := chain.IterAncestors(ctx, builder, headNoNull) + + commonAncestor, err := chain.FindCommonAncestor(ctx, afterNullItr, noNullItr) + assert.NoError(t, err) + assert.ObjectsAreEqualValues(commonHead, commonAncestor) +} diff --git a/pkg/chain/utils.go b/pkg/chain/utils.go new file mode 100644 index 0000000000..5cdc4f938e --- /dev/null +++ b/pkg/chain/utils.go @@ -0,0 +1,37 @@ +package chain + +import ( + "context" + + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + + blockFormat "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +type storable interface { + ToStorageBlock() (blockFormat.Block, error) +} + +func PutMessage(ctx context.Context, bs blockstoreutil.Blockstore, m storable) (cid.Cid, error) { + b, err := m.ToStorageBlock() + if err != nil { + return cid.Undef, err + } + + if err := bs.Put(ctx, b); err != nil { + return cid.Undef, err + } + + return b.Cid(), nil +} + +// Reverse reverses the order of the slice `chain`. 
+func Reverse(chain []*types.TipSet) { + // https://github.com/golang/go/wiki/SliceTricks#reversing + for i := len(chain)/2 - 1; i >= 0; i-- { + opp := len(chain) - 1 - i + chain[i], chain[opp] = chain[opp], chain[i] + } +} diff --git a/pkg/chain/waiter.go b/pkg/chain/waiter.go new file mode 100644 index 0000000000..efaccfbacb --- /dev/null +++ b/pkg/chain/waiter.go @@ -0,0 +1,341 @@ +package chain + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/pkg/errors" +) + +type MsgLookup struct { + Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed + Receipt types.MessageReceipt + ReturnDec interface{} + TipSet types.TipSetKey + Height abi.ChainEpoch +} + +// Abstracts over a store of blockchain state. +type waiterChainReader interface { + GetHead() *types.TipSet + GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + LookupID(context.Context, *types.TipSet, address.Address) (address.Address, error) + GetActorAt(context.Context, *types.TipSet, address.Address) (*types.Actor, error) + GetTipSetReceiptsRoot(context.Context, *types.TipSet) (cid.Cid, error) + SubHeadChanges(context.Context) chan []*types.HeadChange +} + +type IStmgr interface { + GetActorAt(context.Context, address.Address, *types.TipSet) (*types.Actor, error) + RunStateTransition(context.Context, *types.TipSet) (root cid.Cid, receipts cid.Cid, err error) +} + +// Waiter waits for a message to appear on chain. 
+type Waiter struct { + chainReader waiterChainReader + messageProvider MessageProvider + cst cbor.IpldStore + bs bstore.Blockstore + Stmgr IStmgr +} + +// WaitPredicate is a function that identifies a message and returns true when found. +type WaitPredicate func(msg *types.Message, msgCid cid.Cid) bool + +// NewWaiter returns a new Waiter. +func NewWaiter(chainStore waiterChainReader, messages MessageProvider, bs bstore.Blockstore, cst cbor.IpldStore) *Waiter { + return &Waiter{ + chainReader: chainStore, + cst: cst, + bs: bs, + messageProvider: messages, + } +} + +// Find searches the blockchain history (but doesn't wait). +func (w *Waiter) Find(ctx context.Context, msg types.ChainMsg, lookback abi.ChainEpoch, ts *types.TipSet, allowReplaced bool) (*types.ChainMessage, bool, error) { + if ts == nil { + ts = w.chainReader.GetHead() + } + + return w.findMessage(ctx, ts, msg, lookback, allowReplaced) +} + +// WaitPredicate invokes the callback when the passed predicate succeeds. +// See api description. +// +// Note: this method does too much -- the callback should just receive the tipset +// containing the message and the caller should pull the receipt out of the block +// if in fact that's what it wants to do, using something like receiptFromTipset. +// Something like receiptFromTipset is necessary because not every message in +// a block will have a receipt in the tipset: it might be a duplicate message. +// This method will always check for the message in the current head tipset. +// A lookback parameter > 1 will cause this method to check for the message in +// up to that many previous tipsets on the chain of the current head. 
+func (w *Waiter) WaitPredicate(ctx context.Context, msg types.ChainMsg, confidence uint64, lookback abi.ChainEpoch, allowReplaced bool) (*types.ChainMessage, error) { + ch := w.chainReader.SubHeadChanges(ctx) + chainMsg, found, err := w.waitForMessage(ctx, ch, msg, confidence, lookback, allowReplaced) + if err != nil { + return nil, err + } + if found { + return chainMsg, nil + } + return nil, nil +} + +// Wait uses WaitPredicate to invoke the callback when a message with the given cid appears on chain. +func (w *Waiter) Wait(ctx context.Context, msg types.ChainMsg, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.ChainMessage, error) { + mid := msg.VMMessage().Cid() + log.Infof("Calling Waiter.Wait CID: %s", mid.String()) + + return w.WaitPredicate(ctx, msg, confidence, lookbackLimit, allowReplaced) +} + +// findMessage looks for a matching in the chain and returns the message, +// block and receipt, when it is found. Returns the found message/block or nil +// if now block with the given CID exists in the chain. +// The lookback parameter is the number of tipsets in the past this method will check before giving up. +func (w *Waiter) findMessage(ctx context.Context, from *types.TipSet, m types.ChainMsg, lookback abi.ChainEpoch, allowReplaced bool) (*types.ChainMessage, bool, error) { + limitHeight := from.Height() - lookback + noLimit := lookback == constants.LookbackNoLimit + + cur := from + curActor, err := w.Stmgr.GetActorAt(ctx, m.VMMessage().From, cur) + if err != nil { + return nil, false, fmt.Errorf("failed to load from actor") + } + + mNonce := m.VMMessage().Nonce + + for { + // If we've reached the genesis block, or we've reached the limit of + // how far back to look + if cur.Height() == 0 || !noLimit && cur.Height() <= limitHeight { + // it ain't here! 
+ return nil, false, nil + } + + select { + case <-ctx.Done(): + return nil, false, nil + default: + } + + // we either have no messages from the sender, or the latest message we found has a lower nonce than the one being searched for, + // either way, no reason to lookback, it ain't there + if curActor == nil || curActor.Nonce == 0 || curActor.Nonce < mNonce { + return nil, false, nil + } + + pts, err := w.chainReader.GetTipSet(ctx, cur.Parents()) + if err != nil { + return nil, false, fmt.Errorf("failed to load tipset during msg wait searchback: %w", err) + } + + act, err := w.Stmgr.GetActorAt(ctx, m.VMMessage().From, pts) + actorNoExist := errors.Is(err, types.ErrActorNotFound) + if err != nil && !actorNoExist { + return nil, false, fmt.Errorf("failed to load the actor: %w", err) + } + + // check that between cur and parent tipset the nonce fell into range of our message + if actorNoExist || (curActor.Nonce > mNonce && act.Nonce <= mNonce) { + msg, found, err := w.receiptForTipset(ctx, cur, m, allowReplaced) + if err != nil { + log.Errorf("Waiter.Wait: %s", err) + return nil, false, err + } + if found { + return msg, true, nil + } + } + + cur = pts + curActor = act + } +} + +// waitForMessage looks for a matching message in a channel of tipsets and returns +// the message, block and receipt, when it is found. Reads until the channel is +// closed or the context done. Returns the found message/block (or nil if the +// channel closed without finding it), whether it was found, or an error. +// notice matching mesage by message from and nonce. 
the return message may not be +// expected, because there maybe another message have the same from and nonce value +func (w *Waiter) waitForMessage(ctx context.Context, ch <-chan []*types.HeadChange, msg types.ChainMsg, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.ChainMessage, bool, error) { + current, ok := <-ch + if !ok { + return nil, false, fmt.Errorf("SubHeadChanges stream was invalid") + } + // todo message wait + if len(current) != 1 { + return nil, false, fmt.Errorf("SubHeadChanges first entry should have been one item") + } + + if current[0].Type != types.HCCurrent { + return nil, false, fmt.Errorf("expected current head on SHC stream (got %s)", current[0].Type) + } + + currentHead := current[0].Val + chainMsg, found, err := w.receiptForTipset(ctx, currentHead, msg, allowReplaced) + if err != nil { + return nil, false, err + } + if found { + return chainMsg, found, nil + } + + var backRcp *types.ChainMessage + backSearchWait := make(chan struct{}) + go func() { + r, foundMsg, err := w.findMessage(ctx, currentHead, msg, lookbackLimit, allowReplaced) + if err != nil { + log.Warnf("failed to look back through chain for message: %w", err) + return + } + if foundMsg { + backRcp = r + close(backSearchWait) + } + }() + + var candidateTS *types.TipSet + var candidateRcp *types.ChainMessage + heightOfHead := currentHead.Height() + reverts := map[string]bool{} + + for { + select { + case notif, ok := <-ch: + if !ok { + return nil, false, err + } + for _, val := range notif { + switch val.Type { + case types.HCRevert: + if val.Val.Equals(candidateTS) { + candidateTS = nil + candidateRcp = nil + } + if backSearchWait != nil { + reverts[val.Val.Key().String()] = true + } + case types.HCApply: + if candidateTS != nil && val.Val.Height() >= candidateTS.Height()+abi.ChainEpoch(confidence) { + return candidateRcp, true, nil + } + + r, foundMsg, err := w.receiptForTipset(ctx, val.Val, msg, allowReplaced) + if err != nil { + return nil, 
false, err + } + if r != nil { + if confidence == 0 { + return r, foundMsg, err + } + candidateTS = val.Val + candidateRcp = r + } + heightOfHead = val.Val.Height() + } + } + case <-backSearchWait: + // check if we found the message in the chain and that is hasn't been reverted since we started searching + if backRcp != nil && !reverts[backRcp.TS.Key().String()] { + // if head is at or past confidence interval, return immediately + if heightOfHead >= backRcp.TS.Height()+abi.ChainEpoch(confidence) { + return backRcp, true, nil + } + + // wait for confidence interval + candidateTS = backRcp.TS + candidateRcp = backRcp + } + reverts = nil + backSearchWait = nil + case <-ctx.Done(): + return nil, false, err + } + } +} + +func (w *Waiter) receiptForTipset(ctx context.Context, ts *types.TipSet, msg types.ChainMsg, allowReplaced bool) (*types.ChainMessage, bool, error) { + // The genesis block + if ts.Height() == 0 { + return nil, false, nil + } + + pts, err := w.chainReader.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, false, err + } + blockMessageInfos, err := w.messageProvider.LoadTipSetMessage(ctx, pts) + if err != nil { + return nil, false, err + } + expectedMsg := msg.VMMessage() + expectedCid := msg.Cid() + expectedNonce := msg.VMMessage().Nonce + expectedFrom := msg.VMMessage().From + for _, bms := range blockMessageInfos { + for _, msg := range append(bms.BlsMessages, bms.SecpkMessages...) 
{ + msgCid := msg.Cid() + if msg.VMMessage().From == expectedFrom { // cheaper to just check origin first + if msg.VMMessage().Nonce == expectedNonce { + if allowReplaced && msg.VMMessage().EqualCall(expectedMsg) { + if expectedCid != msgCid { + log.Warnw("found message with equal nonce and call params but different CID", + "wanted", expectedCid, "found", msgCid, "nonce", expectedNonce, "from", expectedFrom) + } + recpt, err := w.receiptByIndex(ctx, pts, msgCid, blockMessageInfos) + if err != nil { + return nil, false, errors.Wrap(err, "error retrieving receipt from tipset") + } + return &types.ChainMessage{TS: ts, Message: msg.VMMessage(), Block: bms.Block, Receipt: recpt}, true, nil + } + + // this should be that message + return nil, false, fmt.Errorf("found message with equal nonce as the one we are looking for (F: n %d, TS: %s n%d)", + expectedMsg.Nonce, msg.Cid(), msg.VMMessage().Nonce) + } + } + } + } + + return nil, false, nil +} + +func (w *Waiter) receiptByIndex(ctx context.Context, ts *types.TipSet, targetCid cid.Cid, blockMsgs []types.BlockMessagesInfo) (*types.MessageReceipt, error) { + var receiptCid cid.Cid + var err error + if _, receiptCid, err = w.Stmgr.RunStateTransition(ctx, ts); err != nil { + return nil, fmt.Errorf("RunStateTransition failed:%w", err) + } + + receipts, err := w.messageProvider.LoadReceipts(ctx, receiptCid) + if err != nil { + return nil, err + } + + receiptIndex := 0 + for _, blkInfo := range blockMsgs { + // todo aggrate bls and secp msg to one msg + for _, msg := range append(blkInfo.BlsMessages, blkInfo.SecpkMessages...) 
{ + if msg.Cid().Equals(targetCid) { + if receiptIndex >= len(receipts) { + return nil, errors.Errorf("could not find message receipt at index %d", receiptIndex) + } + return &receipts[receiptIndex], nil + } + receiptIndex++ + } + } + return nil, errors.Errorf("could not find message cid %s in dedupped messages", targetCid.String()) +} diff --git a/pkg/chain/waiter_test.go b/pkg/chain/waiter_test.go new file mode 100644 index 0000000000..16c05cc08d --- /dev/null +++ b/pkg/chain/waiter_test.go @@ -0,0 +1,73 @@ +// stm: #unit +package chain + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + "github.com/filecoin-project/venus/pkg/constants" + + "github.com/filecoin-project/go-address" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ "github.com/filecoin-project/venus/pkg/crypto/secp" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/assert" +) + +var mockSigner, _ = testhelpers.NewMockSignersAndKeyInfo(10) + +var newSignedMessage = testhelpers.NewSignedMessageForTestGetter(mockSigner) + +func setupTest(t *testing.T) (cbor.IpldStore, *Store, *MessageStore, *Waiter) { + builder := NewBuilder(t, address.Undef) + waiter := NewWaiter(builder.store, builder.mstore, builder.bs, builder.cstore) + waiter.Stmgr = builder.IStmgr() + + return builder.cstore, builder.store, builder.mstore, waiter +} + +func TestWaitRespectsContextCancel(t *testing.T) { + tf.UnitTest(t) + + ctx, cancel := context.WithCancel(context.Background()) + _, _, _, waiter := setupTest(t) + + var err error + var chainMessage *types.ChainMessage + doneCh := make(chan error) + go func() { + defer close(doneCh) + // stm: @CHAIN_WAITER_FIND_001 + _, _, err = waiter.Find(ctx, newSignedMessage(0), 100, nil, true) + doneCh <- err + + // stm: @CHAIN_WAITER_WAIT_001 + chainMessage, err = waiter.Wait(ctx, 
newSignedMessage(0), constants.DefaultConfidence, constants.DefaultMessageWaitLookback, true) + doneCh <- err + }() + + cancel() + +LabelFor: + for { + select { + case err := <-doneCh: + if err == nil { + break LabelFor + } + fmt.Println(err) + case <-time.After(2 * time.Second): + assert.Fail(t, "Wait should have returned when context was canceled") + } + } + + assert.Nil(t, chainMessage) + +} diff --git a/pkg/chainsync/chainsync.go b/pkg/chainsync/chainsync.go new file mode 100644 index 0000000000..4c4c17c050 --- /dev/null +++ b/pkg/chainsync/chainsync.go @@ -0,0 +1,69 @@ +package chainsync + +import ( + "context" + + chain2 "github.com/filecoin-project/venus/app/submodule/chain" + "github.com/filecoin-project/venus/pkg/chainsync/types" + "github.com/filecoin-project/venus/pkg/consensus" + "github.com/filecoin-project/venus/pkg/net/exchange" + "github.com/filecoin-project/venus/pkg/statemanger" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + types2 "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/venus/pkg/chainsync/dispatcher" + "github.com/filecoin-project/venus/pkg/chainsync/syncer" + "github.com/filecoin-project/venus/pkg/clock" + "github.com/filecoin-project/venus/pkg/fork" +) + +// BlockProposer allows callers to propose new blocks for inclusion in the chain. +type BlockProposer interface { + SetConcurrent(number int64) + Concurrent() int64 + SyncTracker() *types.TargetTracker + SendHello(ci *types2.ChainInfo) error + SendOwnBlock(ci *types2.ChainInfo) error + SendGossipBlock(ci *types2.ChainInfo) error +} + +var _ = (BlockProposer)((*dispatcher.Dispatcher)(nil)) + +// Manager sync the chain. +type Manager struct { + dispatcher *dispatcher.Dispatcher +} + +// NewManager creates a new chain sync manager. 
+func NewManager( + stmgr *statemanger.Stmgr, + hv *consensus.BlockValidator, + submodule *chain2.ChainSubmodule, + cs syncer.ChainSelector, + bsstore blockstoreutil.Blockstore, + exchangeClient exchange.Client, + c clock.Clock, + fork fork.IFork, +) (Manager, error) { + chainSyncer, err := syncer.NewSyncer(stmgr, hv, cs, submodule.ChainReader, + submodule.MessageStore, bsstore, + exchangeClient, c, fork) + if err != nil { + return Manager{}, err + } + + return Manager{ + dispatcher: dispatcher.NewDispatcher(chainSyncer), + }, nil +} + +// Start starts the chain sync manager. +func (m *Manager) Start(ctx context.Context) error { + m.dispatcher.Start(ctx) + return nil +} + +// BlockProposer returns the block proposer. +func (m *Manager) BlockProposer() BlockProposer { + return m.dispatcher +} diff --git a/pkg/chainsync/dispatcher/dispatcher.go b/pkg/chainsync/dispatcher/dispatcher.go new file mode 100644 index 0000000000..1776f61e92 --- /dev/null +++ b/pkg/chainsync/dispatcher/dispatcher.go @@ -0,0 +1,266 @@ +package dispatcher + +import ( + "container/list" + "context" + "runtime/debug" + "sync" + atmoic2 "sync/atomic" + "time" + + "github.com/filecoin-project/venus/pkg/chainsync/types" + types2 "github.com/filecoin-project/venus/venus-shared/types" + "github.com/streadway/handy/atomic" + + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("chainsync.dispatcher") + +// DefaultInQueueSize is the bucketSize of the channel used for receiving targets from producers. +const DefaultInQueueSize = 5 + +// DefaultWorkQueueSize is the bucketSize of the work queue +const DefaultWorkQueueSize = 15 + +// dispatchSyncer is the interface of the logic syncing incoming chains +type dispatchSyncer interface { + Head() *types2.TipSet + HandleNewTipSet(context.Context, *types.Target) error +} + +// NewDispatcher creates a new syncing dispatcher with default queue sizes. 
+func NewDispatcher(catchupSyncer dispatchSyncer) *Dispatcher { + return NewDispatcherWithSizes(catchupSyncer, DefaultWorkQueueSize, DefaultInQueueSize) +} + +// NewDispatcherWithSizes creates a new syncing dispatcher. +func NewDispatcherWithSizes(syncer dispatchSyncer, workQueueSize, inQueueSize int) *Dispatcher { + return &Dispatcher{ + workTracker: types.NewTargetTracker(workQueueSize), + syncer: syncer, + incoming: make(chan *types.Target, inQueueSize), + control: make(chan interface{}, 1), + registeredCb: func(t *types.Target, err error) {}, + cancelControler: list.New(), + maxCount: 1, + } +} + +// cbMessage registers a user callback to be fired following every successful +// sync. +type cbMessage struct { + cb func(*types.Target, error) +} + +// Dispatcher receives, sorts and dispatches targets to the catchupSyncer to control +// chain syncing. +// +// New targets arrive over the incoming channel. The dispatcher then puts them +// into the workTracker which sorts them by their claimed chain height. The +// dispatcher pops the highest priority target from the queue and then attempts +// to sync the target using its internal catchupSyncer. +// +// The dispatcher has a simple control channel. It reads this for external +// controls. Currently there is only one kind of control message. It registers +// a callback that the dispatcher will call after every non-erroring sync. +type Dispatcher struct { + // workTracker is a priority queue of target chain heads that should be + // synced + workTracker *types.TargetTracker + // incoming is the queue of incoming sync targets to the dispatcher. + incoming chan *types.Target + // syncer is used for dispatching sync targets for chain heads to sync + // local chain state to these targets. + syncer dispatchSyncer + + // registeredCb is a callback registered over the control channel. It + // is called after every successful sync. 
+ registeredCb func(*types.Target, error) + // control is a queue of control messages not yet processed. + control chan interface{} + + cancelControler *list.List + lk sync.Mutex + conCurrent atomic.Int + maxCount int64 +} + +// SyncTracker returns the target tracker of syncing +func (d *Dispatcher) SyncTracker() *types.TargetTracker { + return d.workTracker +} + +// SendHello handles chain information from bootstrap peers. +func (d *Dispatcher) SendHello(ci *types2.ChainInfo) error { + return d.addTracker(ci) +} + +// SendOwnBlock handles chain info from a node's own mining system +func (d *Dispatcher) SendOwnBlock(ci *types2.ChainInfo) error { + return d.addTracker(ci) +} + +// SendGossipBlock handles chain info from new blocks sent on pubsub +func (d *Dispatcher) SendGossipBlock(ci *types2.ChainInfo) error { + return d.addTracker(ci) +} + +func (d *Dispatcher) addTracker(ci *types2.ChainInfo) error { + d.incoming <- &types.Target{ + ChainInfo: *ci, + Base: d.syncer.Head(), + Current: d.syncer.Head(), + Start: time.Now(), + } + return nil +} + +// Start launches the business logic for the syncing subsystem. +func (d *Dispatcher) Start(syncingCtx context.Context) { + go d.processIncoming(syncingCtx) + + go d.syncWorker(syncingCtx) +} + +func (d *Dispatcher) processIncoming(ctx context.Context) { + defer func() { + log.Info("exiting sync dispatcher") + if r := recover(); r != nil { + log.Errorf("panic: %v", r) + debug.PrintStack() + } + }() + + for { + // Handle shutdown + select { + case <-ctx.Done(): + log.Info("context done") + return + case ctrl := <-d.control: + log.Infof("processing control: %v", ctrl) + d.processCtrl(ctrl) + case target := <-d.incoming: + // Sort new targets by putting on work queue. 
+ if d.workTracker.Add(target) { + log.Infow("received new tipset", "height", target.Head.Height(), "blocks", target.Head.Len(), "from", + target.ChainInfo.Sender, "current work len", d.workTracker.Len(), "incoming channel len", len(d.incoming)) + } + } + } +} + +// SetConcurrent set the max goroutine to syncing target +func (d *Dispatcher) SetConcurrent(number int64) { + d.lk.Lock() + defer d.lk.Unlock() + d.maxCount = number + diff := d.conCurrent.Get() - d.maxCount + if diff > 0 { + ele := d.cancelControler.Back() + for ele != nil && diff > 0 { + ele.Value.(context.CancelFunc)() + preEle := ele.Prev() + d.cancelControler.Remove(ele) + ele = preEle + diff-- + } + } +} + +// Concurrent get current max syncing goroutine +func (d *Dispatcher) Concurrent() int64 { + d.lk.Lock() + defer d.lk.Unlock() + return d.maxCount +} + +func (d *Dispatcher) selectTarget(lastTarget *types.Target, ch <-chan struct{}) (*types.Target, bool) { +exitFor: + for { // we are purpose to consume all notifies in channel + select { + case _, isok := <-ch: + if !isok { + return nil, false + } + default: + break exitFor + } + } + return d.workTracker.Select() +} + +func (d *Dispatcher) syncWorker(ctx context.Context) { + defer func() { + log.Infof("dispatcher.syncworker exit.") + }() + + const chKey = "sync-worker" + ch := d.workTracker.SubNewTarget(chKey, 20) + unsolvedNotify := int64(0) + var lastTarget *types.Target + for { + select { + // must make sure, 'ch' is not blocked, or may cause syncing problems + case _, isok := <-ch: + if !isok { + break + } + if syncTarget, popped := d.selectTarget(lastTarget, ch); popped { + lastTarget = syncTarget + if d.conCurrent.Get() < d.maxCount { + atmoic2.StoreInt64(&unsolvedNotify, 0) + syncTarget.State = types.StateInSyncing + ctx, cancel := context.WithCancel(ctx) + d.cancelControler.PushBack(cancel) + d.conCurrent.Add(1) + go func() { + err := d.syncer.HandleNewTipSet(ctx, syncTarget) + if err != nil { + log.Infof("failed sync of %v at %d %s", 
syncTarget.Head.Key(), syncTarget.Head.Height(), err) + } + d.workTracker.Remove(syncTarget) + d.registeredCb(syncTarget, err) + d.conCurrent.Add(-1) + + // new 'target' notify may ignored, because of 'conCurrent' reaching 'maxCount', + // that means there is a new 'target' waiting for solving. + if atmoic2.LoadInt64(&unsolvedNotify) > 0 { + ch <- struct{}{} + } + }() + } else { + atmoic2.StoreInt64(&unsolvedNotify, 1) + } + } + case <-ctx.Done(): + atmoic2.StoreInt64(&unsolvedNotify, 0) + d.workTracker.UnsubNewTarget(chKey) + ch = nil + log.Infof("context.done in dispatcher.syncworker.") + return + } + } +} + +// RegisterCallback registers a callback on the dispatcher that +// will fire after every successful target sync. +func (d *Dispatcher) RegisterCallback(cb func(*types.Target, error)) { + d.control <- cbMessage{cb: cb} +} + +func (d *Dispatcher) processCtrl(ctrlMsg interface{}) { + // processCtrl takes a control message, determines its type, and performs the + // specified action. + // + // Using interfaces is overkill for now but is the way to make this + // extensible. 
(Delete this comment if we add more than one control) + switch typedMsg := ctrlMsg.(type) { + case cbMessage: + d.registeredCb = typedMsg.cb + default: + // We don't know this type, log and ignore + log.Info("dispatcher control can not handle type %T", typedMsg) + } +} diff --git a/pkg/chainsync/dispatcher/dispatcher_test.go b/pkg/chainsync/dispatcher/dispatcher_test.go new file mode 100644 index 0000000000..f852d62650 --- /dev/null +++ b/pkg/chainsync/dispatcher/dispatcher_test.go @@ -0,0 +1,194 @@ +// stm: #unit +package dispatcher_test + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + fbig "github.com/filecoin-project/go-state-types/big" + acrypto "github.com/filecoin-project/go-state-types/crypto" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + syncTypes "github.com/filecoin-project/venus/pkg/chainsync/types" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/pkg/chainsync/dispatcher" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +type mockSyncer struct { + headsCalled []*types.TipSet +} + +func (fs *mockSyncer) Head() *types.TipSet { + return types.UndefTipSet +} + +func (fs *mockSyncer) HandleNewTipSet(_ context.Context, ci *syncTypes.Target) error { + fs.headsCalled = append(fs.headsCalled, ci.Head) + return nil +} + +func TestDispatchStartHappy(t *testing.T) { + tf.UnitTest(t) + s := &mockSyncer{ + headsCalled: make([]*types.TipSet, 0), + } + testDispatch := dispatcher.NewDispatcher(s) + + cis := []*types.ChainInfo{ + // We need to put these in priority order to avoid a race. + // If we send 0 before 42, it is possible the dispatcher will + // pick up 0 and start processing before it sees 42. 
+ chainInfoWithHeightAndWeight(t, 42, 1), + chainInfoWithHeightAndWeight(t, 16, 2), + chainInfoWithHeightAndWeight(t, 3, 3), + chainInfoWithHeightAndWeight(t, 2, 4), + chainInfoWithHeightAndWeight(t, 1, 5), + } + // stm: @CHAINSYNC_DISPATCHER_SET_CONCURRENT_001 + testDispatch.SetConcurrent(2) + + // stm: @CHAINSYNC_DISPATCHER_CONCURRENT_001 + assert.Equal(t, testDispatch.Concurrent(), int64(2)) + + // stm: @CHAINSYNC_DISPATCHER_START_001 + testDispatch.Start(context.Background()) + + t.Logf("waiting for 'syncWorker' input channel standby for 100(ms)") + time.Sleep(time.Millisecond * 100) + + // set up a blocking channel and register to unblock after 5 synced + waitCh := make(chan struct{}) + // stm: @CHAINSYNC_DISPATCHER_REGISTER_CALLBACK_001 + testDispatch.RegisterCallback(func(target *syncTypes.Target, _ error) { + if target.Head.Key().Equals(cis[4].Head.Key()) { + waitCh <- struct{}{} + } + }) + + // receive requests before Start() to test deterministic order + for _, ci := range cis { + go func(info *types.ChainInfo) { + assert.NoError(t, testDispatch.SendHello(info)) + }(ci) + } + + select { + case <-waitCh: + case <-time.After(time.Second * 5): + assert.Failf(t, "", "couldn't waited a correct chain syncing target in 5(s)") + } +} + +func TestQueueHappy(t *testing.T) { + tf.UnitTest(t) + testQ := syncTypes.NewTargetTracker(20) + + // Add syncRequests out of order + sR0 := &syncTypes.Target{ChainInfo: *(chainInfoWithHeightAndWeight(t, 0, 1001))} + sR1 := &syncTypes.Target{ChainInfo: *(chainInfoWithHeightAndWeight(t, 1, 1001))} + sR2 := &syncTypes.Target{ChainInfo: *(chainInfoWithHeightAndWeight(t, 2, 1001))} + sR47 := &syncTypes.Target{ChainInfo: *(chainInfoWithHeightAndWeight(t, 47, 1001))} + + testQ.Add(sR2) + testQ.Add(sR47) + testQ.Add(sR0) + testQ.Add(sR1) + + assert.Equal(t, 1, testQ.Len()) + + // Pop in order + out0 := requirePop(t, testQ) + + weight := out0.ChainInfo.Head.ParentWeight() + assert.Equal(t, int64(1001), weight.Int.Int64()) +} + +func 
TestQueueDuplicates(t *testing.T) { + tf.UnitTest(t) + testQ := syncTypes.NewTargetTracker(20) + + // Add syncRequests with same height + sR0 := &syncTypes.Target{ChainInfo: *(chainInfoWithHeightAndWeight(t, 0, 1001))} + sR0dup := &syncTypes.Target{ChainInfo: *(chainInfoWithHeightAndWeight(t, 0, 1001))} + + testQ.Add(sR0) + testQ.Add(sR0dup) + + // Only one of these makes it onto the queue + assert.Equal(t, 1, testQ.Len()) + + // Pop + first := requirePop(t, testQ) + assert.Equal(t, abi.ChainEpoch(0), first.ChainInfo.Head.Height()) + testQ.Remove(first) +} + +func TestQueueEmptyPopErrors(t *testing.T) { + tf.UnitTest(t) + testQ := syncTypes.NewTargetTracker(20) + sR0 := &syncTypes.Target{ChainInfo: *(chainInfoWithHeightAndWeight(t, 0, 1002))} + sR47 := &syncTypes.Target{ChainInfo: *(chainInfoWithHeightAndWeight(t, 47, 1001))} + + // Add 2 + testQ.Add(sR47) + testQ.Add(sR0) + + // Pop 3 + assert.Equal(t, 1, testQ.Len()) + first := requirePop(t, testQ) + testQ.Remove(first) + assert.Equal(t, 0, testQ.Len()) +} + +// requirePop is a helper requiring that pop does not error +func requirePop(t *testing.T, q *syncTypes.TargetTracker) *syncTypes.Target { + req, popped := q.Select() + require.True(t, popped) + return req +} + +// chainInfoWithHeightAndWeight is a helper that constructs a unique chain info off of +// an int. The tipset key is a faked cid from the string of that integer and +// the height is that integer. 
+func chainInfoWithHeightAndWeight(t *testing.T, h int, weight int64) *types.ChainInfo { + newAddress := testhelpers.NewForTestGetter() + posts := []proof2.PoStProof{{PoStProof: abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, ProofBytes: []byte{0x07}}} + blk := &types.BlockHeader{ + Miner: newAddress(), + Ticket: &types.Ticket{VRFProof: []byte{0x03, 0x01, 0x02}}, + ElectionProof: &types.ElectionProof{VRFProof: []byte{0x0c, 0x0d}}, + BeaconEntries: []types.BeaconEntry{ + { + Round: 44, + Data: []byte{0xc0}, + }, + }, + Height: abi.ChainEpoch(h), + Messages: testhelpers.CidFromString(t, "someothercid"), + ParentMessageReceipts: testhelpers.CidFromString(t, "someothercid"), + Parents: []cid.Cid{testhelpers.CidFromString(t, "someothercid")}, + ParentWeight: fbig.NewInt(weight), + ForkSignaling: 2, + ParentStateRoot: testhelpers.CidFromString(t, "someothercid"), + Timestamp: 4, + ParentBaseFee: abi.NewTokenAmount(20), + WinPoStProof: posts, + BlockSig: &acrypto.Signature{ + Type: acrypto.SigTypeBLS, + Data: []byte{0x4}, + }, + } + b, _ := types.NewTipSet([]*types.BlockHeader{blk}) + return &types.ChainInfo{ + Head: b, + } +} diff --git a/pkg/chainsync/slashfilter/mysqldb.go b/pkg/chainsync/slashfilter/mysqldb.go new file mode 100644 index 0000000000..3360bb6379 --- /dev/null +++ b/pkg/chainsync/slashfilter/mysqldb.go @@ -0,0 +1,155 @@ +package slashfilter + +import ( + "context" + "fmt" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + logging "github.com/ipfs/go-log/v2" + "gorm.io/driver/mysql" + "gorm.io/gorm" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var log = logging.Logger("mysql") + +type MysqlSlashFilter struct { + _db *gorm.DB +} + +// MinedBlock record mined block +type MinedBlock struct { + ParentEpoch int64 `gorm:"column:parent_epoch;type:bigint(20);NOT NULL"` + ParentKey string `gorm:"column:parent_key;type:varchar(2048);NOT NULL"` + + Epoch 
int64 `gorm:"column:epoch;type:bigint(20);NOT NULL"` + Miner string `gorm:"column:miner;type:varchar(256);NOT NULL"` + Cid string `gorm:"column:cid;type:varchar(256);NOT NULL"` +} + +// NewMysqlSlashFilter create a new slash filter base on mysql database +func NewMysqlSlashFilter(cfg config.MySQLConfig) (ISlashFilter, error) { + db, err := gorm.Open(mysql.Open(cfg.ConnectionString)) + if err != nil { + return nil, fmt.Errorf("[db connection failed] Connection : %s %w", cfg.ConnectionString, err) + } + + if cfg.Debug { + db = db.Debug() + } + + if err := db.AutoMigrate(MinedBlock{}); err != nil { + return nil, err + } + + sqlDB, err := db.DB() + if err != nil { + return nil, err + } + + // Set the maximum number of idle connections in the connection pool. + sqlDB.SetMaxIdleConns(cfg.MaxIdleConn) + // Set the maximum number of open database connections. + sqlDB.SetMaxOpenConns(cfg.MaxOpenConn) + // The maximum time that the connection can be reused is set. + sqlDB.SetConnMaxLifetime(time.Second * cfg.ConnMaxLifeTime) + + log.Info("init mysql success for LocalSlashFilter!") + return &MysqlSlashFilter{ + _db: db, + }, nil +} + +// checkSameHeightFault check whether the miner mined multi block on the same height +func (f *MysqlSlashFilter) checkSameHeightFault(bh *types.BlockHeader) error { + var bk MinedBlock + err := f._db.Model(&MinedBlock{}).Take(&bk, "miner=? and epoch=?", bh.Miner.String(), bh.Height).Error + if err == gorm.ErrRecordNotFound { + return nil + } + + other, err := cid.Decode(bk.Cid) + if err != nil { + return err + } + + if other == bh.Cid() { + return nil + } + + return fmt.Errorf("produced block would trigger double-fork mining faults consensus fault; miner: %s; bh: %s, other: %s", bh.Miner, bh.Cid(), other) +} + +// checkSameParentFault check whether the miner mined block on the same parent +func (f *MysqlSlashFilter) checkSameParentFault(bh *types.BlockHeader) error { + var bk MinedBlock + err := f._db.Model(&MinedBlock{}).Take(&bk, "miner=? 
and parent_key=?", bh.Miner.String(), types.NewTipSetKey(bh.Parents...).String()).Error + if err == gorm.ErrRecordNotFound { + return nil + } + + other, err := cid.Decode(bk.Cid) + if err != nil { + return err + } + + if other == bh.Cid() { + return nil + } + + return fmt.Errorf("produced block would trigger time-offset mining faults consensus fault; miner: %s; bh: %s, other: %s", bh.Miner, bh.Cid(), other) +} + +// MinedBlock check whether the block mined is slash +func (f *MysqlSlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { + if err := f.checkSameHeightFault(bh); err != nil { + return err + } + + if err := f.checkSameParentFault(bh); err != nil { + return err + } + + { + // parent-grinding fault (didn't mine on top of our own block) + + // First check if we have mined a block on the parent epoch + var bk MinedBlock + err := f._db.Model(&MinedBlock{}).Take(&bk, "miner=? and parent_epoch=?", bh.Miner.String(), parentEpoch).Error + if err == nil { + // if exit + parent, err := cid.Decode(bk.Cid) + if err != nil { + return err + } + + var found bool + for _, c := range bh.Parents { + if c.Equals(parent) { + found = true + } + } + + if !found { + return fmt.Errorf("produced block would trigger 'parent-grinding fault' consensus fault; miner: %s; bh: %s, expected parent: %s", bh.Miner, bh.Cid(), parent) + } + } else if err != gorm.ErrRecordNotFound { + // other error except not found + return err + } + // if not exit good block + } + + return f._db.Save(&MinedBlock{ + ParentEpoch: int64(parentEpoch), + ParentKey: types.NewTipSetKey(bh.Parents...).String(), + Epoch: int64(bh.Height), + Miner: bh.Miner.String(), + Cid: bh.Cid().String(), + }).Error +} diff --git a/pkg/chainsync/slashfilter/slashfilter.go b/pkg/chainsync/slashfilter/slashfilter.go new file mode 100644 index 0000000000..eb8cb54cdf --- /dev/null +++ b/pkg/chainsync/slashfilter/slashfilter.go @@ -0,0 +1,123 @@ +package slashfilter + +import ( + 
"context" + "fmt" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// ISlashFilter used to detect whether the miner mined a invalidated block , support local db and mysql storage +type ISlashFilter interface { + MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error +} + +// LocalSlashFilter use badger db to save mined block for detect slash consensus block +type LocalSlashFilter struct { + byEpoch ds.Datastore // double-fork mining faults, parent-grinding fault + byParents ds.Datastore // time-offset mining faults +} + +// NewLocalSlashFilter create a slash filter base on badger db +func NewLocalSlashFilter(dstore ds.Batching) ISlashFilter { + return &LocalSlashFilter{ + byEpoch: namespace.Wrap(dstore, ds.NewKey("/slashfilter/epoch")), + byParents: namespace.Wrap(dstore, ds.NewKey("/slashfilter/parents")), + } +} + +// MinedBlock check whether the block mined is slash +func (f *LocalSlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { + epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height)) + { + // double-fork mining (2 blocks at one epoch) + if err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil { + return err + } + } + + parentsKey := ds.NewKey(fmt.Sprintf("/%s/%s", bh.Miner, types.NewTipSetKey(bh.Parents...).String())) + { + // time-offset mining faults (2 blocks with the same parents) + if err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil { + return err + } + } + + { + // parent-grinding fault (didn't mine on top of our own block) + + // First check if we have mined a block on the parent epoch + parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch)) + have, err := f.byEpoch.Has(ctx, 
parentEpochKey) + if err != nil { + return err + } + + if have { + // If we had, make sure it's in our parent tipset + cidb, err := f.byEpoch.Get(ctx, parentEpochKey) + if err != nil { + return fmt.Errorf("getting other block cid: %w", err) + } + + _, parent, err := cid.CidFromBytes(cidb) + if err != nil { + return err + } + + var found bool + for _, c := range bh.Parents { + if c.Equals(parent) { + found = true + } + } + + if !found { + return fmt.Errorf("produced block would trigger 'parent-grinding fault' consensus fault; miner: %s; bh: %s, expected parent: %s", bh.Miner, bh.Cid(), parent) + } + } + } + + if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil { + return fmt.Errorf("putting byParents entry: %w", err) + } + + if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil { + return fmt.Errorf("putting byEpoch entry: %w", err) + } + + return nil +} + +func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error { + fault, err := t.Has(ctx, key) + if err != nil { + return err + } + + if fault { + cidb, err := t.Get(ctx, key) + if err != nil { + return fmt.Errorf("getting other block cid: %w", err) + } + + _, other, err := cid.CidFromBytes(cidb) + if err != nil { + return err + } + + if other == bh.Cid() { + return nil + } + + return fmt.Errorf("produced block would trigger '%s' consensus fault; miner: %s; bh: %s, other: %s", faultType, bh.Miner, bh.Cid(), other) + } + + return nil +} diff --git a/pkg/chainsync/syncer/syncer.go b/pkg/chainsync/syncer/syncer.go new file mode 100644 index 0000000000..8bde9b5995 --- /dev/null +++ b/pkg/chainsync/syncer/syncer.go @@ -0,0 +1,833 @@ +package syncer + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/filecoin-project/venus/pkg/consensus" + "github.com/filecoin-project/venus/pkg/statemanger" + "github.com/hashicorp/go-multierror" + + "golang.org/x/sync/errgroup" + + syncTypes 
"github.com/filecoin-project/venus/pkg/chainsync/types" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/clock" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/metrics" + "github.com/filecoin-project/venus/pkg/metrics/tracing" + "github.com/filecoin-project/venus/pkg/net/exchange" + "github.com/filecoin-project/venus/venus-shared/actors/policy" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" + logging "github.com/ipfs/go-log/v2" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// Syncer updates its chain.Store according to the methods of its +// consensus.Protocol. It uses a bad tipset cache and a limit on new +// blocks to traverse during chain collection. The Syncer can query the +// network for blocks. The Syncer maintains the following invariant on +// its bsstore: all tipsets that pass the syncer's validity checks are added to the +// chain bsstore along with their state root CID. +// +// Ideally the code that syncs the chain according to consensus rules should +// be independent of any particular implementation of consensus. Currently the +// Syncer is coupled to details of Expected Consensus. This dependence +// exists in the widen function, the fact that widen is called on only one +// tipset in the incoming chain, and assumptions regarding the existence of +// grandparent state in the bsstore. + +var ( + // ErrForkTooLong is return when the syncing chain has fork with local + ErrForkTooLong = fmt.Errorf("fork longer than threshold") + // ErrChainHasBadTipSet is returned when the syncer traverses a chain with a cached bad tipset. 
+ ErrChainHasBadTipSet = errors.New("input chain contains a cached bad tipset") + // ErrNewChainTooLong is returned when processing a fork that split off from the main chain too many blocks ago. + ErrNewChainTooLong = errors.New("input chain forked from best chain past finality limit") + // ErrUnexpectedStoreState indicates that the syncer's chain bsstore is violating expected invariants. + ErrUnexpectedStoreState = errors.New("the chain bsstore is in an unexpected state") + + logSyncer = logging.Logger("chainsync.syncer") + syncOneTimer *metrics.Float64Timer + reorgCnt *metrics.Int64Counter // nolint +) + +func init() { + syncOneTimer = metrics.NewTimerMs("syncer/sync_one", "Duration of single tipset validation in milliseconds") + reorgCnt = metrics.NewInt64Counter("chain/reorg_count", "The number of reorgs that have occurred.") +} + +// StateProcessor does semantic validation on fullblocks. +type StateProcessor interface { + // RunStateTransition returns the state root CID resulting from applying the input ts to the + // prior `stateRoot`. It returns an error if the transition is invalid. + RunStateTransition(ctx context.Context, ts *types.TipSet) (root cid.Cid, receipt cid.Cid, err error) +} + +// BlockValidator used to validate full block +type BlockValidator interface { + ValidateFullBlock(ctx context.Context, blk *types.BlockHeader) error +} + +// ChainReaderWriter reads and writes the chain bsstore. 
+type ChainReaderWriter interface { + GetHead() *types.TipSet + GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + GetTipSetStateRoot(context.Context, *types.TipSet) (cid.Cid, error) + GetTipSetReceiptsRoot(context.Context, *types.TipSet) (cid.Cid, error) + HasTipSetAndState(context.Context, *types.TipSet) bool + GetTipsetMetadata(context.Context, *types.TipSet) (*chain.TipSetMetadata, error) + PutTipSetMetadata(context.Context, *chain.TipSetMetadata) error + SetHead(context.Context, *types.TipSet) error + GetLatestBeaconEntry(context.Context, *types.TipSet) (*types.BeaconEntry, error) + GetGenesisBlock(context.Context) (*types.BlockHeader, error) +} + +// messageStore used to save and load message from db +type messageStore interface { + LoadTipSetMessage(ctx context.Context, ts *types.TipSet) ([]types.BlockMessagesInfo, error) + LoadMetaMessages(context.Context, cid.Cid) ([]*types.SignedMessage, []*types.Message, error) + LoadReceipts(context.Context, cid.Cid) ([]types.MessageReceipt, error) + StoreReceipts(context.Context, []types.MessageReceipt) (cid.Cid, error) +} + +// ChainSelector chooses the heaviest between chains. +type ChainSelector interface { + // IsHeavier returns true if tipset a is heavier than tipset b and false if + // tipset b is heavier than tipset a. + IsHeavier(ctx context.Context, a, b *types.TipSet) (bool, error) + // Weight returns the weight of a tipset after the upgrade to version 1 + Weight(ctx context.Context, ts *types.TipSet) (big.Int, error) +} + +// Syncer used to synchronize the block from the specified target, including acquiring the relevant block data and message data, +// verifying the block machine messages one by one and calculating them, checking the weight of the target after the calculation, +// and check whether it can become the latest tipset +type Syncer struct { + exchangeClient exchange.Client + // BadTipSetCache is used to filter out collections of invalid blocks. 
+ badTipSets *syncTypes.BadTipSetCache + + // Evaluates tipset messages and stores the resulting states. + stmgr *statemanger.Stmgr + // Validates headers and message structure + blockValidator BlockValidator + // Selects the heaviest of two chains + chainSelector ChainSelector + // Provides and stores validated tipsets and their state roots. + chainStore *chain.Store + // Provides message collections given cids + messageProvider messageStore + + clock clock.Clock + headLock sync.Mutex + + bsstore blockstoreutil.Blockstore + checkPoint types.TipSetKey + + fork fork.IFork + + delayRunTx *delayRunTsTransition +} + +// NewSyncer constructs a Syncer ready for use. The chain reader must have a +// head tipset to initialize the staging field. +func NewSyncer(stmgr *statemanger.Stmgr, + hv BlockValidator, + cs ChainSelector, + s *chain.Store, + m messageStore, + bsstore blockstoreutil.Blockstore, + exchangeClient exchange.Client, + c clock.Clock, + fork fork.IFork, +) (*Syncer, error) { + if constants.InsecurePoStValidation { + logSyncer.Warn("*********************************************************************************************") + logSyncer.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! ") + logSyncer.Warn("*********************************************************************************************") + } + + syncer := &Syncer{ + exchangeClient: exchangeClient, + badTipSets: syncTypes.NewBadTipSetCache(), + blockValidator: hv, + chainSelector: cs, + bsstore: bsstore, + chainStore: s, + messageProvider: m, + clock: c, + fork: fork, + stmgr: stmgr, + } + + defer func() { + syncer.delayRunTx = newDelayRunTsTransition(syncer) + syncer.delayRunTx.run() + }() + + return syncer, nil +} + +// syncOne syncs a single tipset with the chain bsstore. syncOne calculates the +// parent state of the tipset and calls into consensus to run a state transition +// in order to validate the tipset. 
In the case the input tipset is valid, +// syncOne calls into consensus to check its weight, and then updates the head +// of the bsstore if this tipset is the heaviest. +// todo mark bad-block +func (syncer *Syncer) syncOne(ctx context.Context, parent, next *types.TipSet) error { + logSyncer.Infof("syncOne tipset, height:%d, blocks:%s", next.Height(), next.Key().String()) + priorHeadKey := syncer.chainStore.GetHead() + // if tipset is already priorHeadKey, we've been here before. do nothing. + if priorHeadKey.Equals(next) { + return nil + } + + stopwatch := syncOneTimer.Start(ctx) + defer stopwatch.Stop(ctx) + + var err error + + if !parent.Key().Equals(syncer.checkPoint) { + var wg errgroup.Group + for i := 0; i < next.Len(); i++ { + blk := next.At(i) + wg.Go(func() error { + // Fetch the URL. + return syncer.blockValidator.ValidateFullBlock(ctx, blk) + }) + } + err = wg.Wait() + if err != nil { + var rootNotMatch bool // nolint + + if merr, isok := err.(*multierror.Error); isok { + for _, e := range merr.Errors { + if isRootNotMatch(e) { + rootNotMatch = true + break + } + } + } else { + rootNotMatch = isRootNotMatch(err) // nolint + } + + if rootNotMatch { // nolint + // todo: should here rollback, and re-compute? + _ = syncer.stmgr.Rollback(ctx, parent, next) + } + + return fmt.Errorf("validate mining failed %w", err) + } + } + + return nil +} + +func isRootNotMatch(err error) bool { + return errors.Is(err, consensus.ErrStateRootMismatch) || errors.Is(err, consensus.ErrReceiptRootMismatch) +} + +// HandleNewTipSet validates and syncs the chain rooted at the provided tipset +// to a chain bsstore. Iff catchup is false then the syncer will set the head. 
+func (syncer *Syncer) HandleNewTipSet(ctx context.Context, target *syncTypes.Target) (err error) { + ctx, span := trace.StartSpan(ctx, "Syncer.HandleNewTipSet") + span.AddAttributes(trace.StringAttribute("tipset", target.Head.String())) + span.AddAttributes(trace.Int64Attribute("height", int64(target.Head.Height()))) + + now := time.Now() + + defer func() { + if err != nil { + target.Err = err + target.State = syncTypes.StageSyncErrored + } else { + target.State = syncTypes.StageSyncComplete + } + tracing.AddErrorEndSpan(ctx, span, &err) + span.End() + logSyncer.Infof("handle tipset height %d, count %d, took %.4f(s)", target.Head.Height(), target.Head.Len(), time.Since(now).Seconds()) + }() + + logSyncer.Debugf("begin fetch and sync of chain with head %v from %s at height %v", target.Head.Key(), target.Sender.String(), target.Head.Height()) + head := syncer.chainStore.GetHead() + // If the store already has this tipset then the syncer is finished. + if target.Head.At(0).ParentWeight.LessThan(head.At(0).ParentWeight) { + return errors.New("do not sync to a target with less weight") + } + + if syncer.chainStore.HasTipSetAndState(ctx, target.Head) || target.Head.Key().Equals(head.Key()) { + return errors.New("do not sync to a target has synced before") + } + + tipsets, err := syncer.fetchChainBlocks(ctx, head, target.Head) + if err != nil { + return errors.Wrapf(err, "failure fetching or validating headers") + } + logSyncer.Debugf("fetch header success at %v %s ...", tipsets[0].Height(), tipsets[0].Key()) + + if err = syncer.syncSegement(ctx, target, tipsets); err == nil { + syncer.delayRunTx.update(tipsets[len(tipsets)-1]) + } + + return err +} + +func (syncer *Syncer) syncSegement(ctx context.Context, target *syncTypes.Target, tipsets []*types.TipSet) error { + parent, err := syncer.chainStore.GetTipSet(ctx, tipsets[0].Parents()) + if err != nil { + return err + } + + errProcessChan := make(chan error, 1) + errProcessChan <- nil // init + var wg sync.WaitGroup + // 
todo write a pipline segment processor function + if err = rangeProcess(tipsets, func(segTipset []*types.TipSet) error { + // fetch messages + startTip := segTipset[0].Height() + emdTipset := segTipset[len(segTipset)-1].Height() + logSyncer.Debugf("start to fetch message segement %d-%d", startTip, emdTipset) + _, err := syncer.fetchSegMessage(ctx, segTipset) + if err != nil { + return err + } + logSyncer.Debugf("finish to fetch message segement %d-%d", startTip, emdTipset) + err = <-errProcessChan + if err != nil { + return fmt.Errorf("process message failed %v", err) + } + wg.Add(1) + go func() { + defer wg.Done() + logSyncer.Debugf("start to process message segement %d-%d", startTip, emdTipset) + defer logSyncer.Debugf("finish to process message segement %d-%d", startTip, emdTipset) + var processErr error + parent, processErr = syncer.processTipSetSegment(ctx, target, parent, segTipset) + if processErr != nil { + errProcessChan <- processErr + return + } + if !parent.Key().Equals(syncer.checkPoint) { + logSyncer.Debugf("set chain head, height:%d, blocks:%d", parent.Height(), parent.Len()) + if err := syncer.SetHead(ctx, parent); err != nil { + errProcessChan <- err + return + } + } + errProcessChan <- nil + }() + return nil + }); err != nil { + return err + } + + wg.Wait() + select { + case err = <-errProcessChan: + return err + default: + return nil + } +} + +// fetchChainBlocks get the block data, from targettip to knowntip. 
// if the local db has the block, use that block;
// if the local db does not have it, get the block from the network (libp2p).
// If there is a fork, get the common root tipset of knowntip and targettip, and return the block data from the root tipset to targettip.
// local(···->A->B) + incoming(C->D->E) => ···->A->B->C->D->E
func (syncer *Syncer) fetchChainBlocks(ctx context.Context, knownTip *types.TipSet, targetTip *types.TipSet) ([]*types.TipSet, error) {
	chainTipsets := []*types.TipSet{targetTip}
	// flushDB persists a batch of headers via a temporary store so that a
	// partial write never lands in the main blockstore.
	flushDB := func(saveTips []*types.TipSet) error {
		bs := blockstoreutil.NewTemporary()
		cborStore := cbor.NewCborStore(bs)
		for _, tips := range saveTips {
			for _, blk := range tips.Blocks() {
				_, err := cborStore.Put(ctx, blk)
				if err != nil {
					return err
				}
			}
		}
		return blockstoreutil.CopyBlockstore(ctx, bs, syncer.bsstore)
	}

	untilHeight := knownTip.Height()
	count := 0
	// Walk backwards from targetTip towards knownTip's height, preferring
	// locally stored tipsets and fetching at most 500 headers per request.
loop:
	for chainTipsets[len(chainTipsets)-1].Height() > untilHeight {
		tipSet, err := syncer.chainStore.GetTipSet(ctx, targetTip.Parents())
		if err == nil {
			chainTipsets = append(chainTipsets, tipSet)
			targetTip = tipSet
			count++
			if count%500 == 0 {
				logSyncer.Info("load from local db ", "Height: ", tipSet.Height())
			}
			continue
		}

		windows := targetTip.Height() - untilHeight
		if windows > 500 {
			windows = 500
		}

		fetchHeaders, err := syncer.exchangeClient.GetBlocks(ctx, targetTip.Parents(), int(windows))
		if err != nil {
			return nil, err
		}

		if len(fetchHeaders) == 0 {
			break loop
		}

		logSyncer.Infof("fetch blocks %d height from %d-%d", len(fetchHeaders), fetchHeaders[0].Height(), fetchHeaders[len(fetchHeaders)-1].Height())
		if err = flushDB(fetchHeaders); err != nil {
			return nil, err
		}
		for _, b := range fetchHeaders {
			// Stop once the fetched headers dip below the known height.
			if b.Height() < untilHeight {
				break loop
			}
			chainTipsets = append(chainTipsets, b)
			targetTip = b
		}
	}

	base := chainTipsets[len(chainTipsets)-1]
	if base.Equals(knownTip) {
		// Drop the known tipset itself; callers only want the new segment.
		chainTipsets = chainTipsets[:len(chainTipsets)-1]
		base = chainTipsets[len(chainTipsets)-1]
	}

	if base.IsChildOf(knownTip) {
		// common case: receiving blocks that are building on top of our best tipset
		chain.Reverse(chainTipsets)
		return chainTipsets, nil
	}

	knownParent, err := syncer.chainStore.GetTipSet(ctx, knownTip.Parents())
	if err != nil {
		return nil, fmt.Errorf("failed to load next local tipset: %w", err)
	}
	if base.IsChildOf(knownParent) {
		// common case: receiving a block that's potentially part of the same tipset as our best block
		chain.Reverse(chainTipsets)
		return chainTipsets, nil
	}

	logSyncer.Warnf("(fork detected) synced header chain")
	fork, err := syncer.syncFork(ctx, base, knownTip)
	if err != nil {
		if errors.Is(err, ErrForkTooLong) {
			// TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish?
			logSyncer.Warn("adding forked chain to our bad tipset cache")
			/* for _, b := range incoming.Blocks() {
				syncer.bad.Add(b.Cid(), NewBadBlockReason(incoming.Cids(), "fork past finality"))
			}*/
		}
		return nil, fmt.Errorf("failed to sync fork: %w", err)
	}
	err = flushDB(fork)
	if err != nil {
		return nil, err
	}
	chainTipsets = append(chainTipsets, fork...)
	chain.Reverse(chainTipsets)
	return chainTipsets, nil
}

// syncFork tries to obtain the chain fragment that links a fork into a common
// ancestor in our view of the chain.
//
// If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint),
// we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain
// fragment until the fork point to the returned []TipSet.
//
// D->E-F(targetTip)
// A => D->E>F
// B-C(knownTip)
func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) {
	// TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2?
	// Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare?
	tips, err := syncer.exchangeClient.GetBlocks(ctx, incoming.Parents(), int(policy.ChainFinality))
	if err != nil {
		return nil, err
	}

	gensisiBlock, err := syncer.chainStore.GetGenesisBlock(ctx)
	if err != nil {
		return nil, err
	}

	nts, err := syncer.chainStore.GetTipSet(ctx, known.Parents())
	if err != nil {
		return nil, fmt.Errorf("failed to load next local tipset: %w", err)
	}

	// Walk the local chain (nts) and the fetched fragment (tips) backwards in
	// lock-step until they meet, we hit genesis, or the fragment is exhausted.
	for cur := 0; cur < len(tips); {
		if nts.Height() == 0 {
			if !gensisiBlock.Equals(nts.At(0)) {
				return nil, fmt.Errorf("somehow synced chain that linked back to a different genesis (bad genesis: %s)", nts.Key())
			}
			return nil, fmt.Errorf("synced chain forked at genesis, refusing to sync; incoming: %s", incoming.ToSlice())
		}

		if nts.Equals(tips[cur]) {
			// Common ancestor found; return the fragment up to the fork point.
			return tips[:cur], nil
		}

		if nts.Height() < tips[cur].Height() {
			cur++
		} else {
			nts, err = syncer.chainStore.GetTipSet(ctx, nts.Parents())
			if err != nil {
				return nil, fmt.Errorf("loading next local tipset: %w", err)
			}
		}
	}

	return nil, ErrForkTooLong
}

// fetchSegMessage get message in tipset
func (syncer *Syncer) fetchSegMessage(ctx context.Context, segTipset []*types.TipSet) ([]*types.FullTipSet, error) {
	// get message from local bsstore
	if len(segTipset) == 0 {
		return []*types.FullTipSet{}, nil
	}

	// Work in ascending-height order; restore the caller's ordering on return.
	chain.Reverse(segTipset)
	defer chain.Reverse(segTipset)

	fullTipSets := make([]*types.FullTipSet, len(segTipset))
	defer types.ReverseFullBlock(fullTipSets)

	var leftChain []*types.TipSet
	var leftFullChain []*types.FullTipSet
	for index, tip := range segTipset {
		fullTipset, err := syncer.getFullBlock(ctx, tip)
		if err != nil {
			// This tipset's messages are not local; everything from here on
			// must be fetched from the network.
			leftChain = segTipset[index:]
			leftFullChain = fullTipSets[index:]
			break
		}
		fullTipSets[index] = fullTipset
	}

	if len(leftChain) == 0 {
		return fullTipSets, nil
	}
	// fetch message from remote nodes
	bs := blockstoreutil.NewTemporary()
	cborStore := cbor.NewCborStore(bs)

	messages, err := syncer.exchangeClient.GetChainMessages(ctx, leftChain)
	if err != nil {
		return nil, err
	}

	for index, tip := range leftChain {
		fts, err := zipTipSetAndMessages(bs, tip, messages[index].Bls, messages[index].Secpk, messages[index].BlsIncludes, messages[index].SecpkIncludes)
		if err != nil {
			return nil, fmt.Errorf("message processing failed: %w", err)
		}
		leftFullChain[index] = fts

		// save message
		for _, m := range messages[index].Bls {
			if _, err := cborStore.Put(ctx, m); err != nil {
				return nil, fmt.Errorf("BLS message processing failed: %w", err)
			}
		}

		for _, m := range messages[index].Secpk {
			if _, err := cborStore.Put(ctx, m); err != nil {
				return nil, fmt.Errorf("SECP message processing failed: %w", err)
			}
		}
	}

	// Persist fetched messages to the main blockstore in one copy.
	err = blockstoreutil.CopyBlockstore(ctx, bs, syncer.bsstore)
	if err != nil {
		return nil, errors.Wrapf(err, "failure fetching full blocks")
	}
	return fullTipSets, nil
}

// getFullBlock get full block from message store
func (syncer *Syncer) getFullBlock(ctx context.Context, tipset *types.TipSet) (*types.FullTipSet, error) {
	fullBlocks := make([]*types.FullBlock, tipset.Len())
	for index, blk := range tipset.Blocks() {
		secpMsg, blsMsg, err := syncer.messageProvider.LoadMetaMessages(ctx, blk.Messages)
		if err != nil {
			return nil, err
		}
		fullBlocks[index] = &types.FullBlock{
			Header:       blk,
			BLSMessages:  blsMsg,
			SECPMessages: secpMsg,
		}
	}
	return types.NewFullTipSet(fullBlocks), nil
}

// processTipSetSegment process a batch of tipset in turn,
func (syncer *Syncer) processTipSetSegment(ctx context.Context,
	target *syncTypes.Target, parent *types.TipSet, segTipset []*types.TipSet) (*types.TipSet, error) {
	for i, ts := range segTipset {
		err := syncer.syncOne(ctx, parent, ts)
		if err != nil {
			// While `syncOne` can indeed fail for reasons other than consensus,
			// adding to the badTipSets at this point is the simplest, since we
			// have access to the chain. If syncOne fails for non-consensus reasons,
			// there is no assumption that the running node's data is valid at all,
			// so we don't really lose anything with this simplification.
			syncer.badTipSets.AddChain(segTipset[i:])
			return nil, errors.Wrapf(err, "failed to sync tipset %s, number %d of %d in chain", ts.Key().String(), i, len(segTipset))
		}
		parent = ts
		target.Current = ts
	}
	return parent, nil
}

// Head get latest head from chain store
func (syncer *Syncer) Head() *types.TipSet {
	return syncer.chainStore.GetHead()
}

// SetHead tries to update the head after a tipset segment completes syncing:
// the head is replaced only if ts is heavier than the current head, and
// adopting ts would not exceed the allowed fork length from the current head.
func (syncer *Syncer) SetHead(ctx context.Context, ts *types.TipSet) error {
	syncer.headLock.Lock()
	defer syncer.headLock.Unlock()
	head := syncer.chainStore.GetHead()
	heavier, err := syncer.chainSelector.IsHeavier(ctx, ts, head)
	if err != nil {
		return err
	}

	// If it is the heaviest update the chainStore.
	if heavier {
		exceeds, err := syncer.exceedsForkLength(ctx, head, ts)
		if err != nil {
			return err
		}
		if exceeds {
			// Too deep a reorg; silently keep the current head.
			return nil
		}
		return syncer.chainStore.SetHead(ctx, ts)
	}
	return nil
}

// Check if the two tipsets have a fork length above `ForkLengthThreshold`.
// `synced` is the head of the chain we are currently synced to and `external`
// is the incoming tipset potentially belonging to a forked chain. It assumes
// the external chain has already been validated and available in the ChainStore.
// The "fast forward" case is covered in this logic as a valid fork of length 0.
//
// FIXME: We may want to replace some of the logic in `syncFork()` with this.
//
// `syncFork()` counts the length on both sides of the fork at the moment (we
// need to settle on that) but here we just enforce it on the `synced` side.
func (syncer *Syncer) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) {
	if synced == nil || external == nil {
		// FIXME: If `cs.heaviest` is nil we should just bypass the entire
		// `MaybeTakeHeavierTipSet` logic (instead of each of the called
		// functions having to handle the nil case on their own).
		return false, nil
	}

	var err error
	// `forkLength`: number of tipsets we need to walk back from the our `synced`
	// chain to the common ancestor with the new `external` head in order to
	// adopt the fork.
	for forkLength := 0; forkLength < int(constants.ForkLengthThreshold); forkLength++ {
		// First walk back as many tipsets in the external chain to match the
		// `synced` height to compare them. If we go past the `synced` height
		// the subsequent match will fail but it will still be useful to get
		// closer to the `synced` head parent's height in the next loop.
		for external.Height() > synced.Height() {
			if external.Height() == 0 {
				// We reached the genesis of the external chain without a match;
				// this is considered a fork outside the allowed limit (of "infinite"
				// length).
				return true, nil
			}

			external, err = syncer.chainStore.GetTipSet(ctx, external.Parents())
			if err != nil {
				return false, fmt.Errorf("failed to load parent tipset in external chain: %w", err)
			}
		}

		// Now check if we arrived at the common ancestor.
		if synced.Equals(external) {
			return false, nil
		}

		// Now check to see if we've walked back to the checkpoint.
		if synced.Key().Equals(syncer.checkPoint) {
			return true, nil
		}

		// If we didn't, go back *one* tipset on the `synced` side (incrementing
		// the `forkLength`).
		if synced.Height() == 0 {
			// Same check as the `external` side, if we reach the start (genesis)
			// there is no common ancestor.
			return true, nil
		}
		synced, err = syncer.chainStore.GetTipSet(ctx, synced.Parents())
		if err != nil {
			return false, fmt.Errorf("failed to load parent tipset in synced chain: %w", err)
		}
	}

	// We traversed the fork length allowed without finding a common ancestor.
	return true, nil
}

// TODO: this function effectively accepts unchecked input from the network,
// either validate it here, or ensure that its validated elsewhere (maybe make
// sure the blocksync code checks it?)
// maybe this code should actually live in blocksync??
// zipTipSetAndMessages rebuilds a FullTipSet by attaching each block's BLS and
// SECP messages (selected via the bmi/smi include indices) and verifying the
// recomputed message-meta root against the block header.
func zipTipSetAndMessages(bs blockstore.Blockstore, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) (*types.FullTipSet, error) {
	if len(ts.Blocks()) != len(smi) || len(ts.Blocks()) != len(bmi) {
		return nil, fmt.Errorf("msgincl length didnt match tipset size")
	}

	fts := &types.FullTipSet{}
	for bi, b := range ts.Blocks() {
		if msgc := len(bmi[bi]) + len(smi[bi]); msgc > constants.BlockMessageLimit {
			return nil, fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc)
		}

		var smsgs []*types.SignedMessage
		var smsgCids []cid.Cid
		for _, m := range smi[bi] {
			smsgs = append(smsgs, allsmsgs[m])
			mCid := allsmsgs[m].Cid()
			smsgCids = append(smsgCids, mCid)
		}

		var bmsgs []*types.Message
		var bmsgCids []cid.Cid
		for _, m := range bmi[bi] {
			bmsgs = append(bmsgs, allbmsgs[m])
			mCid := allbmsgs[m].Cid()
			bmsgCids = append(bmsgCids, mCid)
		}

		// Recompute the message root and check it matches the header's.
		mrcid, err := chain.ComputeMsgMeta(bs, bmsgCids, smsgCids)
		if err != nil {
			return nil, err
		}

		if b.Messages != mrcid {
			return nil, fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key())
		}

		fb := &types.FullBlock{
			Header:       b,
			BLSMessages:  bmsgs,
			SECPMessages: smsgs,
		}

		fts.Blocks = append(fts.Blocks, fb)
	}

	return fts, nil
}

const maxProcessLen = 8

// rangeProcess invokes cb over ts in consecutive slices of at most
// maxProcessLen tipsets, stopping at the first error.
func rangeProcess(ts []*types.TipSet, cb func(ts []*types.TipSet) error) (err error) {
	for {
		if len(ts) == 0 {
			break
		} else if len(ts) < maxProcessLen {
			// break out if less than process len
			err = cb(ts)
			break
		} else {
			processTS := ts[0:maxProcessLen]
			err = cb(processTS)
			if err != nil {
				break
			}
			ts = ts[maxProcessLen:]
		}
		logSyncer.Infof("Sync Process End,Remaining: %v, err: %v ...", len(ts), err)
	}
	return err
}

// delayRunTsTransition debounces state-transition runs for freshly synced
// tipsets: updates for tipsets sharing the same parents within the ticker
// period are coalesced, and at most maxProcessLen transitions run at once.
type delayRunTsTransition struct { // nolint
	ch           chan *types.TipSet // incoming tipset updates
	toRunTS      *types.TipSet      // pending tipset awaiting the next tick
	syncer       *Syncer
	runningCount int64 // number of in-flight state transitions (atomic)
}

func newDelayRunTsTransition(syncer *Syncer) *delayRunTsTransition {
	return &delayRunTsTransition{
		ch:     make(chan *types.TipSet, 10),
		syncer: syncer,
	}
}

// run starts the background listener goroutine.
func (d *delayRunTsTransition) run() {
	go d.listenUpdate()
}

// stop closes the update channel, terminating the listener goroutine.
func (d *delayRunTsTransition) stop() { // nolint
	close(d.ch)
}

// update queues a tipset for a (possibly delayed) state transition.
func (d *delayRunTsTransition) update(ts *types.TipSet) {
	d.ch <- ts
}

func (d *delayRunTsTransition) listenUpdate() {
	duration := time.Second * 6
	ticker := time.NewTicker(duration)
	for {
		select {
		case t, isok := <-d.ch:
			if !isok {
				return
			}
			// Restart the debounce window unless the incoming tipset is a
			// sibling (same parents) of the one already pending.
			if d.toRunTS == nil || (d.toRunTS != nil && !d.toRunTS.Parents().Equals(t.Parents())) {
				ticker.Reset(duration)
			}
			d.toRunTS = t
		case <-ticker.C:
			if d.toRunTS != nil {
				// Cap concurrent state transitions at maxProcessLen; if the cap
				// is hit the pending tipset is dropped (cleared below).
				if atomic.LoadInt64(&d.runningCount) < maxProcessLen {
					atomic.AddInt64(&d.runningCount, 1)
					go func(ts *types.TipSet) {
						_, _, err := d.syncer.stmgr.RunStateTransition(context.TODO(), ts)
						if err != nil {
							logSyncer.Errorf("stmgr.runStateTransaction failed:%s", err.Error())
						}
						atomic.AddInt64(&d.runningCount, -1)
					}(d.toRunTS)
				}
				d.toRunTS = nil
			}
		}
	}
}
diff --git a/pkg/chainsync/syncer/syncer_integration_test.go b/pkg/chainsync/syncer/syncer_integration_test.go
new file mode 100644
index 0000000000..4d6bbaf2f4
--- /dev/null
+++ b/pkg/chainsync/syncer/syncer_integration_test.go
@@ -0,0 +1,109
@@
// stm: #integration
package syncer_test

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/venus/pkg/statemanger"

	"github.com/filecoin-project/venus/pkg/chainsync/types"
	types2 "github.com/filecoin-project/venus/venus-shared/types"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/venus/pkg/chain"
	"github.com/filecoin-project/venus/pkg/chainsync/syncer"
	"github.com/filecoin-project/venus/pkg/clock"
	"github.com/filecoin-project/venus/pkg/fork"
	tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags"
)

// Syncer is capable of recovering from a fork reorg after the bsstore is loaded.
// This is a regression test to guard against the syncer assuming that the bsstore having all
// blocks from a tipset means the syncer has computed its state.
// Such a case happens when the bsstore has just loaded, but this tipset is not on its heaviest chain).
// See https://github.com/filecoin-project/venus/issues/1148#issuecomment-432008060
func TestLoadFork(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	// Set up in the standard way, but retain references to the repo and cbor stores.
	builder := chain.NewBuilder(t, address.Undef)
	genesis := builder.Genesis()

	// Note: the chain builder is passed as the fetcher, from which blocks may be requested, but
	// *not* as the bsstore, to which the syncer must ensure to put blocks.

	sel := &chain.FakeChainSelector{}

	blockValidator := builder.FakeStateEvaluator()
	stmgr := statemanger.NewStateManger(builder.Store(), blockValidator, nil, nil, nil, nil)

	s, err := syncer.NewSyncer(stmgr, blockValidator, sel, builder.Store(),
		builder.Mstore(), builder.BlockStore(), builder, clock.NewFake(time.Unix(1234567890, 0)), nil)

	require.NoError(t, err)

	// Build a base chain and two competing branches: left (heavier) and right.
	base := builder.AppendManyOn(ctx, 3, genesis)
	left := builder.AppendManyOn(ctx, 4, base)
	right := builder.AppendManyOn(ctx, 3, base)

	leftTarget := &types.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types2.NewChainInfo("", "", left),
	}
	rightTarget := &types.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types2.NewChainInfo("", "", right),
	}
	// Sync the two branches, which stores all blocks in the underlying stores.
	// stm: @CHAINSYNC_SYNCER_HANDLE_NEW_TIP_SET_001, @CHAINSYNC_SYNCER_SET_HEAD_001
	assert.NoError(t, s.HandleNewTipSet(ctx, leftTarget))
	assert.Error(t, s.HandleNewTipSet(ctx, rightTarget))
	verifyHead(t, builder.Store(), left)

	_, _, err = blockValidator.RunStateTransition(ctx, blockValidator.ChainStore.GetHead())
	require.NoError(t, err)

	// The syncer/bsstore assume that the fetcher populates the underlying block bsstore such that
	// tipsets can be reconstructed. The chain builder used for testing doesn't do that, so do
	// it manually here.
	for _, tip := range []*types2.TipSet{left, right} {
		for itr := chain.IterAncestors(ctx, builder, tip); !itr.Complete(); require.NoError(t, itr.Next(ctx)) {
			for _, block := range itr.Value().ToSlice() {
				_, err := builder.Cstore().Put(ctx, block)
				require.NoError(t, err)
			}
		}
	}

	// Load a new chain bsstore on the underlying data. It will only compute state for the
	// left (heavy) branch. It has a fetcher that can't provide blocks.
	newStore := chain.NewStore(builder.Repo().ChainDatastore(), builder.BlockStore(), genesis.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator())
	newStore.SetCheckPoint(genesis.Key())
	require.NoError(t, newStore.Load(ctx))
	_, err = syncer.NewSyncer(stmgr,
		blockValidator,
		sel,
		newStore,
		builder.Mstore(),
		builder.BlockStore(),
		builder,
		clock.NewFake(time.Unix(1234567890, 0)),
		fork.NewMockFork())
	require.NoError(t, err)

	// Only the heavier (left) branch has computed state in the reloaded store.
	assert.True(t, newStore.HasTipSetAndState(ctx, left))
	assert.False(t, newStore.HasTipSetAndState(ctx, right))
}
diff --git a/pkg/chainsync/syncer/syncer_test.go b/pkg/chainsync/syncer/syncer_test.go
new file mode 100644
index 0000000000..c97b3e91f4
--- /dev/null
+++ b/pkg/chainsync/syncer/syncer_test.go
@@ -0,0 +1,630 @@
package syncer_test

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/venus/pkg/testhelpers"

	"github.com/filecoin-project/venus/pkg/statemanger"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/venus/pkg/chain"
	"github.com/filecoin-project/venus/pkg/chainsync/syncer"
	syncTypes "github.com/filecoin-project/venus/pkg/chainsync/types"
	"github.com/filecoin-project/venus/pkg/clock"
	_ "github.com/filecoin-project/venus/pkg/crypto/bls"
	_ "github.com/filecoin-project/venus/pkg/crypto/secp"
	"github.com/filecoin-project/venus/pkg/fork"
	tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags"
	"github.com/filecoin-project/venus/pkg/util/test"
	"github.com/filecoin-project/venus/venus-shared/actors/policy"
	"github.com/filecoin-project/venus/venus-shared/types"
	"github.com/ipfs/go-cid"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestOneBlock syncs a single one-block tipset on top of genesis.
func TestOneBlock(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, syncer := setup(ctx, t)
	t1 := builder.AppendOn(ctx, builder.Genesis(), 1)
	target := &syncTypes.Target{
		Base:    nil,
		Current: nil,
		Start:
		time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", t1),
	}

	assert.NoError(t, syncer.HandleNewTipSet(ctx, target))

	assert.NoError(t, builder.FlushHead(ctx))

	verifyTip(t, builder.Store(), t1, t1.At(0).ParentStateRoot)
	verifyHead(t, builder.Store(), t1)
}

// TestMultiBlockTip syncs a tipset containing more than one block.
func TestMultiBlockTip(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, syncer := setup(ctx, t)
	genesis := builder.Store().GetHead()

	tip := builder.AppendOn(ctx, genesis, 2)
	target := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", tip),
	}

	assert.NoError(t, syncer.HandleNewTipSet(ctx, target))
	assert.NoError(t, builder.FlushHead(ctx))

	verifyTip(t, builder.Store(), tip, builder.StateForKey(ctx, tip.Key()))
	verifyHead(t, builder.Store(), tip)
}

// TestChainIncremental syncs a chain one tipset at a time, verifying the head
// advances after each step.
func TestChainIncremental(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, syncer := setup(ctx, t)
	genesis := builder.Store().GetHead()

	t1 := builder.AppendOn(ctx, genesis, 2)

	t2 := builder.AppendOn(ctx, t1, 3)

	t3 := builder.AppendOn(ctx, t2, 1)

	t4 := builder.AppendOn(ctx, t3, 2)

	target1 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", t1),
	}

	target2 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", t2),
	}

	target3 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", t3),
	}
	target4 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", t4),
	}
	assert.NoError(t, syncer.HandleNewTipSet(ctx, target1))
	assert.NoError(t, builder.FlushHead(ctx))
	verifyTip(t, builder.Store(), t1, builder.StateForKey(ctx, t1.Key()))
	verifyHead(t, builder.Store(), t1)

	assert.NoError(t, syncer.HandleNewTipSet(ctx, target2))
	assert.NoError(t, builder.FlushHead(ctx))
	verifyTip(t, builder.Store(), t2, builder.StateForKey(ctx, t2.Key()))
	verifyHead(t, builder.Store(), t2)

	assert.NoError(t, syncer.HandleNewTipSet(ctx, target3))
	assert.NoError(t, builder.FlushHead(ctx))
	verifyTip(t, builder.Store(), t3, builder.StateForKey(ctx, t3.Key()))
	verifyHead(t, builder.Store(), t3)

	assert.NoError(t, syncer.HandleNewTipSet(ctx, target4))
	assert.NoError(t, builder.FlushHead(ctx))
	verifyTip(t, builder.Store(), t4, builder.StateForKey(ctx, t4.Key()))
	verifyHead(t, builder.Store(), t4)
}

// TestChainJump syncs directly to the end of a four-tipset chain in one step
// and verifies every intermediate tipset was computed.
func TestChainJump(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, syncer := setup(ctx, t)
	genesis := builder.Store().GetHead()

	t1 := builder.AppendOn(ctx, genesis, 2)
	t2 := builder.AppendOn(ctx, t1, 3)
	t3 := builder.AppendOn(ctx, t2, 1)
	t4 := builder.AppendOn(ctx, t3, 2)

	target1 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", t4),
	}
	assert.NoError(t, syncer.HandleNewTipSet(ctx, target1))
	assert.NoError(t, builder.FlushHead(ctx))
	verifyTip(t, builder.Store(), t1, builder.StateForKey(ctx, t1.Key()))
	verifyTip(t, builder.Store(), t2, builder.StateForKey(ctx, t2.Key()))
	verifyTip(t, builder.Store(), t3, builder.StateForKey(ctx, t3.Key()))
	verifyTip(t, builder.Store(), t4, builder.StateForKey(ctx, t4.Key()))
	verifyHead(t, builder.Store(), t4)
}

// TestIgnoreLightFork verifies that a lighter fork is rejected and does not
// change the head.
func TestIgnoreLightFork(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, syncer := setup(ctx, t)
	genesis := builder.Store().GetHead()

	forkbase := builder.AppendOn(ctx, genesis, 1)
	forkHead := builder.AppendOn(ctx, forkbase, 1)

	t1 := builder.AppendOn(ctx, forkbase, 1)
	t2 := builder.AppendOn(ctx, t1, 1)
	t3 := builder.AppendOn(ctx, t2, 1)
	t4 := builder.AppendOn(ctx, t3, 1)

	// Sync heaviest branch first.
	target4 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", t4),
	}
	assert.NoError(t, syncer.HandleNewTipSet(ctx, target4))
	assert.NoError(t, builder.FlushHead(ctx))
	verifyTip(t, builder.Store(), t4, builder.StateForKey(ctx, t4.Key()))
	verifyHead(t, builder.Store(), t4)

	// Lighter fork is processed but not change head.

	forkHeadTarget := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", forkHead),
	}
	assert.Error(t, syncer.HandleNewTipSet(ctx, forkHeadTarget))
	assert.NoError(t, builder.FlushHead(ctx))
	verifyHead(t, builder.Store(), t4)
}

// TestAcceptHeavierFork verifies that a heavier (more blocks) but shorter fork
// replaces the current head.
func TestAcceptHeavierFork(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, syncer := setup(ctx, t)
	genesis := builder.Store().GetHead()

	forkbase := builder.AppendOn(ctx, genesis, 1)

	main1 := builder.AppendOn(ctx, forkbase, 1)
	main2 := builder.AppendOn(ctx, main1, 1)
	main3 := builder.AppendOn(ctx, main2, 1)
	main4 := builder.AppendOn(ctx, main3, 1)

	// Fork is heavier with more blocks, despite shorter (with default fake weighing function
	// from FakeStateEvaluator).
	fork1 := builder.AppendOn(ctx, forkbase, 3)
	fork2 := builder.AppendOn(ctx, fork1, 1)
	fork3 := builder.AppendOn(ctx, fork2, 1)

	main4Target := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", main4),
	}
	assert.NoError(t, syncer.HandleNewTipSet(ctx, main4Target))
	assert.NoError(t, builder.FlushHead(ctx))
	verifyTip(t, builder.Store(), main4, builder.StateForKey(ctx, main4.Key()))
	verifyHead(t, builder.Store(), main4)

	// Heavier fork updates head3
	fork3Target := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", fork3),
	}
	assert.NoError(t, syncer.HandleNewTipSet(ctx, fork3Target))
	assert.NoError(t, builder.FlushHead(ctx))
	verifyTip(t, builder.Store(), fork1, builder.StateForKey(ctx, fork1.Key()))
	verifyTip(t, builder.Store(), fork2, builder.StateForKey(ctx, fork2.Key()))
	verifyTip(t, builder.Store(), fork3, builder.StateForKey(ctx, fork3.Key()))
	verifyHead(t, builder.Store(), fork3)
}

// TestRejectFinalityFork verifies that a fork diverging more than ChainFinality
// epochs behind the head is rejected.
func TestRejectFinalityFork(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, s := setup(ctx, t)
	genesis := builder.Store().GetHead()

	head := builder.AppendManyOn(ctx, int(policy.ChainFinality+2), genesis)
	target := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", head),
	}
	assert.NoError(t, s.HandleNewTipSet(ctx, target))

	// Differentiate fork for a new chain. Fork has FinalityEpochs + 1
	// blocks on top of genesis so forkFinalityBase is more than FinalityEpochs
	// behind head
	forkFinalityBase := builder.BuildOneOn(ctx, genesis, func(bb *chain.BlockBuilder) {
		bb.SetTicket([]byte{0xbe})
	})
	forkFinalityHead := builder.AppendManyOn(ctx, int(policy.ChainFinality), forkFinalityBase)
	forkHeadTarget := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", forkFinalityHead),
	}
	assert.Error(t, s.HandleNewTipSet(ctx, forkHeadTarget))
}

// TestNoUncessesaryFetch verifies that re-syncing an already-synced head fails
// fast without fetching from the network.
func TestNoUncessesaryFetch(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, s := setup(ctx, t)
	genesis := builder.Store().GetHead()

	head := builder.AppendManyOn(ctx, 4, genesis)
	target := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", head),
	}
	assert.NoError(t, s.HandleNewTipSet(ctx, target))

	// A new syncer unable to fetch blocks from the network can handle a tipset that's already
	// in the bsstore and linked to genesis.
	eval := builder.FakeStateEvaluator()
	stmgr := statemanger.NewStateManger(builder.Store(), eval, nil, nil, nil, nil)
	newSyncer, err := syncer.NewSyncer(stmgr,
		eval,
		&chain.FakeChainSelector{},
		builder.Store(),
		builder.Mstore(),
		builder.BlockStore(),
		builder,
		clock.NewFake(time.Unix(1234567890, 0)),
		fork.NewMockFork())
	require.NoError(t, err)

	target2 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", head),
	}
	err = newSyncer.HandleNewTipSet(ctx, target2)
	assert.Contains(t, err.Error(), "do not sync to a target has synced before")
}

// Syncer must track state of subsets of parent tipsets tracked in the bsstore
// when they are the ancestor in a chain. This is in order to maintain the
// invariant that the aggregate state of the parents of the base of a collected chain
// is kept in the bsstore. This invariant allows chains built on subsets of
// tracked tipsets to be handled correctly.
// This test tests that the syncer stores the state of such a base tipset of a collected chain,
// i.e. a subset of an existing tipset in the bsstore.
//
// Ex: {A1, A2} -> {B1, B2, B3} in bsstore to start
// {B1, B2} -> {C1, C2} chain 1 input to syncer
// C1 -> D1 chain 2 input to syncer
//
// The last operation will fail if the state of subset {B1, B2} is not
// kept in the bsstore because syncing C1 requires retrieving parent state.
func TestSubsetParent(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, s := setup(ctx, t)
	genesis := builder.Store().GetHead()

	// Set up chain with {A1, A2} -> {B1, B2, B3}
	tipA1A2 := builder.AppendOn(ctx, genesis, 2)
	tipB1B2B3 := builder.AppendOn(ctx, tipA1A2, 3)
	target1 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", tipB1B2B3),
	}
	require.NoError(t, s.HandleNewTipSet(ctx, target1))

	// Sync one tipset with a parent equal to a subset of an existing
	// tipset in the bsstore: {B1, B2} -> {C1, C2}
	tipB1B2 := testhelpers.RequireNewTipSet(t, tipB1B2B3.At(0), tipB1B2B3.At(1))
	tipC1C2 := builder.AppendOn(ctx, tipB1B2, 2)

	target2 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", tipC1C2),
	}
	assert.NoError(t, s.HandleNewTipSet(ctx, target2))

	// Sync another tipset with a parent equal to a subset of the tipset
	// just synced: C1 -> D1
	tipC1 := testhelpers.RequireNewTipSet(t, tipC1C2.At(0))
	tipD1OnC1 := builder.AppendOn(ctx, tipC1, 1)

	target3 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", tipD1OnC1),
	}
	assert.NoError(t, s.HandleNewTipSet(ctx, target3))

	// A full parent also works fine: {C1, C2} -> D1
	tipD1OnC1C2 := builder.AppendOn(ctx, tipC1C2, 1)
	target4 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", tipD1OnC1C2),
	}
	assert.NoError(t, s.HandleNewTipSet(ctx, target4))
}

// TestBlockNotLinkedRejected verifies a tipset whose blocks cannot be fetched
// is rejected, then accepted once its blocks become available.
func TestBlockNotLinkedRejected(t *testing.T) {
	tf.UnitTest(t)
	ctx := context.Background()
	builder, syncer := setup(ctx, t)
	genesis := builder.Store().GetHead()

	// Set up a parallel builder from which the syncer cannot fetch.
	// The two builders are expected to produce exactly the same blocks from the same sequence
	// of calls.
	shadowBuilder := chain.NewBuilder(t, address.Undef)
	gen2 := shadowBuilder.Genesis()
	require.True(t, genesis.Equals(gen2))

	// The syncer fails to fetch this block so cannot sync it.
	b1 := shadowBuilder.AppendOn(ctx, genesis, 1)
	b2 := shadowBuilder.AppendOn(ctx, b1, 1)
	target1 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", b2),
	}
	assert.Error(t, syncer.HandleNewTipSet(ctx, target1))

	// Make the same block available from the syncer's builder
	builder.AppendBlockOn(ctx, genesis)
	target2 := &syncTypes.Target{
		Base:      nil,
		Current:   nil,
		Start:     time.Time{},
		End:       time.Time{},
		Err:       nil,
		ChainInfo: *types.NewChainInfo("", "", b1),
	}
	assert.NoError(t, syncer.HandleNewTipSet(ctx, target2))
}

// poisonValidator is a state evaluator that fails on configured timestamps:
// fullFailureTS poisons RunStateTransition; headerFailureTS poisons header
// validation.
type poisonValidator struct {
	headerFailureTS uint64
	fullFailureTS   uint64
}

func (pv *poisonValidator) RunStateTransition(ctx context.Context, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
	stamp := ts.At(0).Timestamp
	if pv.fullFailureTS == stamp {
		return testhelpers.EmptyTxMetaCID, testhelpers.EmptyTxMetaCID, errors.New("run state transition fails
on poison timestamp") + } + return testhelpers.EmptyTxMetaCID, testhelpers.EmptyTxMetaCID, nil +} + +func (pv *poisonValidator) ValidateFullBlock(ctx context.Context, blk *types.BlockHeader) error { + if pv.headerFailureTS == blk.Timestamp { + return errors.New("val semantic fails on poison timestamp") + } + return nil +} + +func newPoisonValidator(t *testing.T, headerFailure, fullFailure uint64) *poisonValidator { + return &poisonValidator{headerFailureTS: headerFailure, fullFailureTS: fullFailure} +} + +func (pv *poisonValidator) ValidateHeaderSemantic(_ context.Context, header *types.BlockHeader, _ *types.TipSet) error { + if pv.headerFailureTS == header.Timestamp { + return errors.New("val semantic fails on poison timestamp") + } + return nil +} + +// ValidateHeaderSemantic is a stub that always returns no error +func (pv *poisonValidator) ValidateMessagesSemantic(_ context.Context, _ *types.BlockHeader, _ *types.TipSet) error { + return nil +} + +func TestSemanticallyBadTipSetFails(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + eval := newPoisonValidator(t, 98, 99) + builder := chain.NewBuilder(t, address.Undef) + + stmgr := statemanger.NewStateManger(builder.Store(), eval, nil, nil, nil, nil) + builder, syncer := setupWithValidator(ctx, t, builder, stmgr, eval) + + genesis := builder.Store().GetHead() + + // Build a chain with messages that will fail semantic header validation + kis := testhelpers.MustGenerateKeyInfo(1, 42) + mm := testhelpers.NewMessageMaker(t, kis) + alice := mm.Addresses()[0] + m1 := mm.NewSignedMessage(alice, 0) + m2 := mm.NewSignedMessage(alice, 1) + m3 := mm.NewSignedMessage(alice, 3) + + link1 := builder.BuildOneOn(ctx, genesis, func(bb *chain.BlockBuilder) { + bb.AddMessages( + []*types.SignedMessage{m1, m2, m3}, + []*types.Message{}, + ) + bb.SetTimestamp(98) // poison header val + }) + + // Set up a fresh builder without any of this data + target1 := &syncTypes.Target{ + Base: nil, + Current: nil, + Start: 
time.Time{}, + End: time.Time{}, + Err: nil, + ChainInfo: *types.NewChainInfo("", "", link1), + } + err := syncer.HandleNewTipSet(ctx, target1) + require.Error(t, err) + assert.Contains(t, err.Error(), "val semantic fails") +} + +// TODO: fix test +func TestStoresMessageReceipts(t *testing.T) { + t.SkipNow() + tf.UnitTest(t) + ctx := context.Background() + builder, syncer := setup(ctx, t) + genesis := builder.Store().GetHead() + + keys := testhelpers.MustGenerateKeyInfo(1, 42) + mm := testhelpers.NewMessageMaker(t, keys) + alice := mm.Addresses()[0] + t1 := builder.Build(ctx, genesis, 4, func(b *chain.BlockBuilder, i int) { + b.AddMessages([]*types.SignedMessage{}, []*types.Message{mm.NewUnsignedMessage(alice, uint64(i))}) + }) + + target1 := &syncTypes.Target{ + Base: nil, + Current: nil, + Start: time.Time{}, + End: time.Time{}, + Err: nil, + ChainInfo: *types.NewChainInfo("", "", t1), + } + assert.NoError(t, syncer.HandleNewTipSet(ctx, target1)) + + receiptsCid, err := builder.Store().GetTipSetReceiptsRoot(ctx, t1) + + require.NoError(t, err) + + receipts, err := builder.LoadReceipts(ctx, receiptsCid) + require.NoError(t, err) + + // filter same nonce + assert.Len(t, receipts, 2) +} + +// /// Set-up ///// + +// Initializes a chain builder, bsstore and syncer. +// The chain builder has a single genesis block, which is set as the head of the bsstore. 
+func setup(ctx context.Context, t *testing.T) (*chain.Builder, *syncer.Syncer) { + builder := chain.NewBuilder(t, address.Undef) + eval := builder.FakeStateEvaluator() + + stmgr := statemanger.NewStateManger(builder.Store(), eval, nil, nil, nil, nil) + + return setupWithValidator(ctx, t, builder, stmgr, eval) +} + +func setupWithValidator(ctx context.Context, t *testing.T, builder *chain.Builder, + stmgr *statemanger.Stmgr, headerVal syncer.BlockValidator, +) (*chain.Builder, *syncer.Syncer) { + // Note: the chain builder is passed as the fetcher, from which blocks may be requested, but + // *not* as the bsstore, to which the syncer must ensure to put blocks. + sel := &chain.FakeChainSelector{} + syncer, err := syncer.NewSyncer(stmgr, + headerVal, + sel, + builder.Store(), + builder.Mstore(), + builder.BlockStore(), + builder, + clock.NewFake(time.Unix(1234567890, 0)), + fork.NewMockFork()) + require.NoError(t, err) + + return builder, syncer +} + +// /// Verification helpers ///// + +// Sub-interface of the bsstore used for verification. +type syncStoreReader interface { + GetHead() *types.TipSet + GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + GetTipSetStateRoot(context.Context, *types.TipSet) (cid.Cid, error) +} + +// Verifies that a tipset and associated state root are stored in the chain bsstore. +func verifyTip(t *testing.T, store syncStoreReader, tip *types.TipSet, stateRoot cid.Cid) { + ctx := context.Background() + + foundTip, err := store.GetTipSet(ctx, tip.Key()) + require.NoError(t, err) + test.Equal(t, tip, foundTip) + + foundState, err := store.GetTipSetStateRoot(ctx, tip) + require.NoError(t, err) + test.Equal(t, stateRoot, foundState) +} + +// Verifies that the bsstore's head is as expected. 
+func verifyHead(t *testing.T, store syncStoreReader, head *types.TipSet) { + headTipSet := store.GetHead() + test.Equal(t, head, headTipSet) +} diff --git a/internal/pkg/chainsync/internal/syncer/bad_tipset_cache.go b/pkg/chainsync/types/bad_tipset_cache.go similarity index 83% rename from internal/pkg/chainsync/internal/syncer/bad_tipset_cache.go rename to pkg/chainsync/types/bad_tipset_cache.go index 46aa87a649..00ef54ae3c 100644 --- a/internal/pkg/chainsync/internal/syncer/bad_tipset_cache.go +++ b/pkg/chainsync/types/bad_tipset_cache.go @@ -1,9 +1,9 @@ -package syncer +package types import ( "sync" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" + "github.com/filecoin-project/venus/venus-shared/types" ) // BadTipSetCache keeps track of bad tipsets that the syncer should not try to @@ -17,10 +17,16 @@ type BadTipSetCache struct { bad map[string]struct{} } +func NewBadTipSetCache() *BadTipSetCache { + return &BadTipSetCache{ + bad: make(map[string]struct{}), + } +} + // AddChain adds the chain of tipsets to the BadTipSetCache. For now it just // does the simplest thing and adds all blocks of the chain to the cache. // TODO: might want to cache a random subset once cache size is limited. 
-func (cache *BadTipSetCache) AddChain(chain []block.TipSet) { +func (cache *BadTipSetCache) AddChain(chain []*types.TipSet) { for _, ts := range chain { cache.Add(ts.String()) } diff --git a/pkg/chainsync/types/bad_tipset_cache_test.go b/pkg/chainsync/types/bad_tipset_cache_test.go new file mode 100644 index 0000000000..5e45b6fbcb --- /dev/null +++ b/pkg/chainsync/types/bad_tipset_cache_test.go @@ -0,0 +1,32 @@ +// stm: #unit +package types + +import ( + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/stretchr/testify/assert" +) + +func TestBadTipsetCache(t *testing.T) { + tf.UnitTest(t) + badTSCache := NewBadTipSetCache() + + var ts types.TipSet + testutil.Provide(t, &ts) + + // stm: @CHAINSYNC_TYPES_ADD_CHAIN_001 + badTSCache.AddChain([]*types.TipSet{&ts}) + + var tsKey types.TipSetKey + testutil.Provide(t, &tsKey, testutil.WithSliceLen(3)) + + // stm: @CHAINSYNC_TYPES_ADD_001 + badTSCache.Add(tsKey.String()) + + // stm: @CHAINSYNC_TYPES_HAS_001 + assert.True(t, badTSCache.Has(ts.Key().String())) + assert.True(t, badTSCache.Has(tsKey.String())) +} diff --git a/pkg/chainsync/types/syncstate.go b/pkg/chainsync/types/syncstate.go new file mode 100644 index 0000000000..36cb5d1b29 --- /dev/null +++ b/pkg/chainsync/types/syncstate.go @@ -0,0 +1,28 @@ +package types + +import "fmt" + +// just compatible code lotus +type SyncStateStage int + +const ( + StageIdle = SyncStateStage(iota) + StateInSyncing + StageSyncComplete + StageSyncErrored +) + +func (v SyncStateStage) String() string { + switch v { + case StageIdle: + return "wait" + case StateInSyncing: + return "syncing" + case StageSyncComplete: + return "complete" + case StageSyncErrored: + return "error" + default: + return fmt.Sprintf("", v) + } +} diff --git a/pkg/chainsync/types/target_tracker.go b/pkg/chainsync/types/target_tracker.go new file mode 
100644 index 0000000000..ae9409336e --- /dev/null +++ b/pkg/chainsync/types/target_tracker.go @@ -0,0 +1,448 @@ +package types + +import ( + "container/list" + "sort" + "strconv" + "sync" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/actors/policy" + + fbig "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +var log = logging.Logger("chainsync.target") + +// Target tracks a logical request of the syncing subsystem to run a +// syncing job against given inputs. +type Target struct { + State SyncStateStage + Base *types.TipSet + Current *types.TipSet + Start time.Time + End time.Time + Err error + types.ChainInfo +} + +// IsNeighbor the target t is neighbor or not +// the same height, the same weight, the same parent is neighbor target. the can merge +func (target *Target) IsNeighbor(t *Target) bool { + if target.Head.Height() != t.Head.Height() { + return false + } + + weightIn := t.Head.ParentWeight() + targetWeight := target.Head.ParentWeight() + if !targetWeight.Equals(weightIn) { + return false + } + + if !target.Head.Parents().Equals(t.Head.Parents()) { + return false + } + return true +} + +// HasChild is another is a child target of current. +// if the t' blocks in a subset of current target ,the t is a child of current target +func (target *Target) HasChild(t *Target) bool { + return target.Head.Key().ContainsAll(t.Head.Key()) +} + +// Key return identity of target . 
key=weight+height+parent +func (target *Target) Key() string { + weightIn := target.Head.ParentWeight() + return weightIn.String() + + strconv.FormatInt(int64(target.Head.Height()), 10) + + target.Head.Parents().String() +} + +// TargetTracker orders dispatcher syncRequests by the underlying `TargetBuckets`'s +// prioritization policy. +// +// It also filters the `TargetBuckets` so that it always contains targets with +// unique chain heads. +// +// It wraps the `TargetBuckets` to prevent panics during +// normal operation. +type TargetTracker struct { + bucketSize int + historySize int + q TargetBuckets + history *list.List + targetSet map[string]*Target + lowWeight fbig.Int + lk sync.Mutex + + subs map[string]chan struct{} + subLk sync.Mutex + + tipsetCache map[abi.ChainEpoch][]*types.BlockHeader +} + +// NewTargetTracker returns a new target queue. +func NewTargetTracker(size int) *TargetTracker { + return &TargetTracker{ + bucketSize: size, + historySize: 10, + history: list.New(), + q: make(TargetBuckets, 0), + targetSet: make(map[string]*Target), + lk: sync.Mutex{}, + lowWeight: fbig.NewInt(0), + subs: make(map[string]chan struct{}), + tipsetCache: make(map[abi.ChainEpoch][]*types.BlockHeader), + } +} + +func (tq *TargetTracker) SubNewTarget(key string, cacheSize int) chan struct{} { + tq.subLk.Lock() + defer tq.subLk.Unlock() + ch, isok := tq.subs[key] + if isok { + return ch + } + ch = make(chan struct{}, cacheSize) + tq.subs[key] = ch + return ch +} + +func (tq *TargetTracker) UnsubNewTarget(key string) { + tq.subLk.Lock() + defer tq.subLk.Unlock() + if ch, isok := tq.subs[key]; isok { + delete(tq.subs, key) + close(ch) + } +} + +// todo: we should pub a 'stable' target +func (tq *TargetTracker) pubNewTarget() { + tq.subLk.Lock() + defer tq.subLk.Unlock() + for _, ch := range tq.subs { + ch <- struct{}{} + } +} + +func (tq *TargetTracker) checkBlock(block *types.BlockHeader) bool { + bls, exists := tq.tipsetCache[block.Height] + if !exists { + cache := 
make([]*types.BlockHeader, 0, builtin.ExpectedLeadersPerEpoch) + tq.tipsetCache[block.Height] = append(cache, block) + return true + } + + for _, b := range bls { + if b.Cid() == block.Cid() { + log.Debugf("block(%s) already in tipset: %d", b.Cid().String(), b.Height) + return true + } + if b.Miner == block.Miner { + log.Warnf("miner: %s packed more than none block in single tipset: %d, it's illegal.", b.Miner.String(), b.Height) + return false + } + } + + tq.tipsetCache[block.Height] = append(bls, block) + + return true +} + +func (tq *TargetTracker) checkTipset(target *Target) (*Target, error) { + bls := target.Head.Blocks() + var newBls []*types.BlockHeader + + for _, b := range bls { + if tq.checkBlock(b) { + newBls = append(newBls, b) + } + } + var err error + target.Head, err = types.NewTipSet(newBls) + + delete(tq.tipsetCache, target.Head.Height()-policy.ChainFinality) + + return target, err +} + +// Add adds a sync target to the target queue. +// First, check whether the weight is received or not, and the message will record the minimum weight. +// If the weight is less than the current weight, it will exit automatically. +// Then, check whether the current target and the recorded target can be merged. +// If they can be merged, a new target containing more blocks will be generated. +// Try to replace a sub target in idle state. If it does not exist, the message will be displayed, +// Try to replace the target with the lowest weight and idle. +// If the above two situations do not exist, check whether the task exceeds the maximum number of saved tasks. +// If the number exceeds the maximum number, the current target will be abandoned. +// If there are any vacancies, the current target will be appended to the end. +// After each completion of this process, all targets will be reordered. 
First, they will be sorted according to the weight from small to large, and then they will be sorted according to the number of blocks in the group from small to large, Include as many blocks as possible. +func (tq *TargetTracker) Add(t *Target) bool { + now := time.Now() + + tq.lk.Lock() + defer tq.lk.Unlock() + + // do not sync less weight + if t.Head.At(0).ParentWeight.LessThan(tq.lowWeight) { + return false + } + + var err error + if t, err = tq.checkTipset(t); err != nil { + log.Errorf("targettracker add failed, check tipset failed: %s", err.Error()) + return false + } + + t, ok := tq.widen(t) + if !ok { + return false + } + + // replace last idle task because of less weight + var replaceIndex int + var replaceTarget *Target + // try to replace a idea child target + for i := len(tq.q) - 1; i > -1; i-- { + if t.HasChild(tq.q[i]) && tq.q[i].State == StageIdle { + replaceTarget = tq.q[i] + replaceIndex = i + log.Infof("%s replace a child target at %d", t.Head.String(), i) + break + } + } + + if replaceTarget == nil { + // replace a least weight idle + for i := len(tq.q) - 1; i > -1; i-- { + if tq.q[i].State == StageIdle { + replaceTarget = tq.q[i] + replaceIndex = i + log.Infof("%s replace a idle target at %d", t.Head.String(), i) + break + } + } + } + + if replaceTarget == nil { + if len(tq.q) < tq.bucketSize { + // append to last slot + tq.q = append(tq.q, t) + } else { + // return if target queue is full + return false + } + } else { + delete(tq.targetSet, replaceTarget.ChainInfo.Head.String()) + tq.q[replaceIndex] = t + } + + tq.targetSet[t.ChainInfo.Head.String()] = t + sortTarget(tq.q) + // update lowweight + tq.lowWeight = tq.q[len(tq.q)-1].Head.At(0).ParentWeight + + tq.pubNewTarget() + + log.Debugf("add new target height: %d, count: %d, took: %d 'ms'", t.Head.Height(), t.Head.Len(), time.Since(now).Milliseconds()) + + return true +} + +// sort by weight and than sort by block number in target buckets +func sortTarget(target TargetBuckets) { + // use 
weight as group key + groups := make(map[string][]*Target) + var keys []fbig.Int + for _, t := range target { + weight := t.Head.ParentWeight() + if _, ok := groups[weight.String()]; ok { + groups[weight.String()] = append(groups[weight.String()], t) + } else { + groups[weight.String()] = []*Target{t} + keys = append(keys, weight) + } + } + + // sort group by weight + sort.Slice(keys, func(i, j int) bool { + return keys[i].GreaterThan(keys[j]) + }) + + // sort target in group by block number + for _, key := range keys { + inGroup := groups[key.String()] + sort.Slice(inGroup, func(i, j int) bool { + return inGroup[i].Head.Len() > inGroup[j].Head.Len() + }) + } + + // update target buckets + count := 0 + for _, key := range keys { + for _, t := range groups[key.String()] { + target[count] = t + count++ + } + } +} + +// expand the tipset, traversing the local existing target, +// If there is a incoming tipset non-existent block in the neighbor node, then merge the block. +func (tq *TargetTracker) widen(t *Target) (*Target, bool) { + if len(tq.targetSet) == 0 { + return t, true + } + + var err error + // If already in queue drop quickly + for _, val := range tq.targetSet { + if val.Head.Key().ContainsAll(t.Head.Key()) { + return nil, false + } + } + + miners := make(map[address.Address]interface{}) + + // collect neighbor block in queue include history to get block with same weight and height + sameWeightBlks := make(map[cid.Cid]*types.BlockHeader) + for _, val := range tq.targetSet { + if val.IsNeighbor(t) { + for _, blk := range val.Head.Blocks() { + bid := blk.Cid() + if !t.Head.Key().Has(bid) { + if _, ok := sameWeightBlks[bid]; !ok { + if _, isok := miners[blk.Miner]; isok { + // TODO: a miner mined more than one blocks ? 
+ log.Warnf("miner : %s mined more than one blocks, this is illegal", blk.Miner.String()) + return nil, false + } + sameWeightBlks[bid] = blk + miners[blk.Miner] = nil + } + } + } + } + } + + if len(sameWeightBlks) == 0 { + return t, true + } + + // apply block that t don't have + blks := t.Head.Blocks() + for _, blk := range sameWeightBlks { + blks = append(blks, blk) + } + + newHead, err := types.NewTipSet(blks) + if err != nil { + return nil, false + } + t.Head = newHead + return t, true +} + +// Pop removes and returns the highest priority syncing target. If there is +// nothing in the queue the second argument returns false +func (tq *TargetTracker) Select() (*Target, bool) { + tq.lk.Lock() + defer tq.lk.Unlock() + if tq.q.Len() == 0 { + return nil, false + } + var toSyncTarget *Target + for _, target := range tq.q { + if target.State == StageIdle { + toSyncTarget = target + break + } + } + + if toSyncTarget == nil { + return nil, false + } + return toSyncTarget, true +} + +// Remove remote a target after sync completed +// First remove target from live queue, add the target to history. +func (tq *TargetTracker) Remove(t *Target) { + tq.lk.Lock() + defer tq.lk.Unlock() + for index, target := range tq.q { + if t == target { + tq.q = append(tq.q[:index], tq.q[index+1:]...) + break + } + } + t.End = time.Now() + if tq.history.Len() > tq.historySize { + tq.history.Remove(tq.history.Front()) // remove olddest + popKey := tq.history.Front().Value.(*Target).ChainInfo.Head.String() + delete(tq.targetSet, popKey) + } + tq.history.PushBack(t) +} + +// History return sync history +func (tq *TargetTracker) History() []*Target { + tq.lk.Lock() + defer tq.lk.Unlock() + var targets []*Target + for target := tq.history.Front(); target != nil; target = target.Next() { + targets = append(targets, target.Value.(*Target)) + } + return targets +} + +// Len returns the number of targets in the queue. 
+func (tq *TargetTracker) Len() int { + tq.lk.Lock() + defer tq.lk.Unlock() + return tq.q.Len() +} + +// Buckets returns the number of targets in the queue. +func (tq *TargetTracker) Buckets() TargetBuckets { + return tq.q +} + +// TargetBuckets orders targets by a policy. +// +// The current simple policy is to order syncing requests by claimed chain +// height. +// +// `TargetBuckets` can panic so it shouldn't be used unwrapped +type TargetBuckets []*Target + +// Len heavily inspired by https://golang.org/pkg/container/heap/ +func (rq TargetBuckets) Len() int { return len(rq) } + +func (rq TargetBuckets) Less(i, j int) bool { + // We want Pop to give us the weight priority so we use greater than + weightI := rq[i].Head.ParentWeight() + weightJ := rq[j].Head.ParentWeight() + return weightI.GreaterThan(weightJ) +} + +func (rq TargetBuckets) Swap(i, j int) { + rq[i], rq[j] = rq[j], rq[i] +} + +func (rq *TargetBuckets) Pop() interface{} { + old := *rq + n := len(old) + item := old[n-1] + *rq = old[0 : n-1] + return item +} diff --git a/internal/pkg/clock/chainclock.go b/pkg/clock/chainclock.go similarity index 78% rename from internal/pkg/clock/chainclock.go rename to pkg/clock/chainclock.go index 55c6ceaf46..8f3d7cc88e 100644 --- a/internal/pkg/clock/chainclock.go +++ b/pkg/clock/chainclock.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" ) // DefaultEpochDuration is the default duration of epochs @@ -22,7 +22,6 @@ type ChainEpochClock interface { EpochRangeAtTimestamp(t uint64) (abi.ChainEpoch, abi.ChainEpoch) StartTimeOfEpoch(e abi.ChainEpoch) time.Time WaitForEpoch(ctx context.Context, e abi.ChainEpoch) - WaitForEpochPropDelay(ctx context.Context, e abi.ChainEpoch) WaitNextEpoch(ctx context.Context) abi.ChainEpoch Clock } @@ -33,27 +32,22 
@@ type chainClock struct { genesisTime time.Time // The fixed time length of the epoch window epochDuration time.Duration - // propDelay is the time between the start of the epoch and the start - // of mining for the subsequent epoch. This delay provides time for - // blocks from the previous epoch to arrive. - propDelay time.Duration Clock } // NewChainClock returns a ChainEpochClock wrapping a default clock.Clock -func NewChainClock(genesisTime uint64, blockTime time.Duration, propDelay time.Duration) ChainEpochClock { - return NewChainClockFromClock(genesisTime, blockTime, propDelay, NewSystemClock()) +func NewChainClock(genesisTime uint64, blockTime time.Duration) ChainEpochClock { + return NewChainClockFromClock(genesisTime, blockTime, NewSystemClock()) } // NewChainClockFromClock returns a ChainEpochClock wrapping the provided // clock.Clock -func NewChainClockFromClock(genesisSeconds uint64, blockTime time.Duration, propDelay time.Duration, c Clock) ChainEpochClock { +func NewChainClockFromClock(genesisSeconds uint64, blockTime time.Duration, c Clock) ChainEpochClock { gt := time.Unix(int64(genesisSeconds), 0) return &chainClock{ genesisTime: gt, epochDuration: blockTime, - propDelay: propDelay, Clock: c, } } @@ -102,11 +96,6 @@ func (cc *chainClock) WaitForEpoch(ctx context.Context, e abi.ChainEpoch) { cc.waitForEpochOffset(ctx, e, 0) } -// WaitForEpochPropDelay returns propDelay time after the start of the epoch, or when ctx is done. -func (cc *chainClock) WaitForEpochPropDelay(ctx context.Context, e abi.ChainEpoch) { - cc.waitForEpochOffset(ctx, e, cc.propDelay) -} - // waitNextEpochOffset returns when time is offset past the start of the epoch, or ctx is done. 
func (cc *chainClock) waitForEpochOffset(ctx context.Context, e abi.ChainEpoch, offset time.Duration) { targetTime := cc.StartTimeOfEpoch(e).Add(offset) diff --git a/pkg/clock/chainclock_test.go b/pkg/clock/chainclock_test.go new file mode 100644 index 0000000000..258719bddf --- /dev/null +++ b/pkg/clock/chainclock_test.go @@ -0,0 +1,75 @@ +// stm: #unit +package clock_test + +import ( + "sync" + "testing" + "time" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "golang.org/x/net/context" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/assert" + + "github.com/filecoin-project/venus/pkg/clock" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestChainEpochClock(t *testing.T) { + tf.UnitTest(t) + + // stm: @CLOCK_CHAIN_CLOCK_NEW_001 + now := time.Now().Unix() + bt := clock.DefaultEpochDuration + cec := clock.NewChainClock(uint64(now), bt) + + epoch0Start := time.Unix(now, 0) + epoch1Start := epoch0Start.Add(bt) + + // stm: @CLOCK_CHAIN_CLOCK_EPOCH_AT_TIME_001 + assert.Equal(t, abi.ChainEpoch(0), cec.EpochAtTime(epoch0Start)) + assert.Equal(t, abi.ChainEpoch(1), cec.EpochAtTime(epoch1Start)) + + epoch2Start := epoch1Start.Add(bt) + epoch2Middle := epoch2Start.Add(bt / time.Duration(5)) + assert.Equal(t, abi.ChainEpoch(2), cec.EpochAtTime(epoch2Start)) + assert.Equal(t, abi.ChainEpoch(2), cec.EpochAtTime(epoch2Middle)) + + epoch200Start := epoch0Start.Add(time.Duration(200) * bt) + assert.Equal(t, abi.ChainEpoch(200), cec.EpochAtTime(epoch200Start)) + + expectedStartEpoch := 10 + // stm: @CLOCK_CHAIN_CLOCK_EPOCH_RANGE_AT_TIMESTAMP_001 + first, last := cec.EpochRangeAtTimestamp(uint64(now) + uint64(builtin.EpochDurationSeconds*expectedStartEpoch) - 1) + assert.Equal(t, int(first), expectedStartEpoch-1) + assert.Equal(t, int(last), expectedStartEpoch) + + // stm: @CLOCK_CHAIN_CLOCK_START_TIME_OF_EPOCH_001 + startTime = cec.StartTimeOfEpoch(abi.ChainEpoch(expectedStartEpoch)) + 
assert.Equal(t, startTime.Unix(), now+(builtin.EpochDurationSeconds*int64(expectedStartEpoch))) + + waitEpoch := abi.ChainEpoch(1) + var expectedNextEpoch abi.ChainEpoch + var wg sync.WaitGroup + + ctx := context.Background() + + wg.Add(2) + go func() { + defer wg.Done() + // stm: @CLOCK_CHAIN_CLOCK_WAIT_NEXT_EPOCH_001 + expectedNextEpoch = cec.WaitNextEpoch(ctx) + }() + + go func() { + defer wg.Done() + // stm: @CLOCK_CHAIN_CLOCK_WAIT_FOR_EPOCH_001 + cec.WaitForEpoch(ctx, waitEpoch) + }() + + t.Logf("waitting for next epoch.") + wg.Wait() + + assert.Equal(t, waitEpoch, expectedNextEpoch) +} diff --git a/internal/pkg/clock/clock.go b/pkg/clock/clock.go similarity index 100% rename from internal/pkg/clock/clock.go rename to pkg/clock/clock.go diff --git a/pkg/clock/testing.go b/pkg/clock/testing.go new file mode 100644 index 0000000000..b99bf24f67 --- /dev/null +++ b/pkg/clock/testing.go @@ -0,0 +1,294 @@ +package clock + +import ( + "sync" + "time" +) + +// Creates a new fake clock and chain clock wrapping it. +func NewFakeChain(genesis uint64, epochDuration time.Duration, now int64) (Fake, ChainEpochClock) { + fake := NewFake(time.Unix(now, 0)) + return fake, NewChainClockFromClock(genesis, epochDuration, fake) +} + +// Fake provides an interface for a clock which can be manually advanced. +// Adapted from: https://github.com/jonboulle/clockwork +type Fake interface { + Clock + // Advance advances the Fake to a new point in time, ensuring any existing + // sleepers are notified appropriately before returning + Advance(d time.Duration) + // BlockUntil will block until the Fake has the given number of + // sleepers (callers of Sleep or After) + BlockUntil(n int) +} + +// Returns a Fake initialised at the given time.Time. 
+func NewFake(n time.Time) Fake { + return &fakeClock{ + time: n, + } +} + +type fakeClock struct { + timers []*fakeTimer + blockers []*blocker + time time.Time + + l sync.RWMutex +} + +// fakeTimer represents a waiting fakeTimer from NewTimer, Sleep, After, etc. +type fakeTimer struct { + callback func(interface{}, time.Time) + arg interface{} + + c chan time.Time + lk sync.RWMutex + done bool + until time.Time + + clock *fakeClock // needed for Reset() +} + +// blocker represents a caller of BlockUntil +type blocker struct { + count int + ch chan struct{} +} + +func (s *fakeTimer) awaken(now time.Time) { + s.lk.Lock() + if s.done { + s.lk.Unlock() + return + } + s.done = true + s.lk.Unlock() + s.callback(s.arg, now) +} + +func (s *fakeTimer) Chan() <-chan time.Time { return s.c } + +func (s *fakeTimer) Reset(d time.Duration) bool { + wasActive := s.Stop() + until := s.clock.Now().Add(d) + s.lk.Lock() + s.until = until + s.done = false + s.lk.Unlock() + s.clock.addTimer(s) + return wasActive +} + +func (s *fakeTimer) Stop() bool { + now := s.clock.Now() + s.lk.Lock() + if s.done { + s.lk.Unlock() + return false + } + s.done = true + // Expire the timer and notify blockers + s.until = now + s.lk.Unlock() + s.clock.Advance(0) + return true +} + +func (s *fakeTimer) whenToTrigger() time.Time { + s.lk.RLock() + defer s.lk.RUnlock() + return s.until +} + +func (fc *fakeClock) addTimer(s *fakeTimer) { + fc.l.Lock() + defer fc.l.Unlock() + + now := fc.time + if now.Sub(s.whenToTrigger()) >= 0 { + // special case - trigger immediately + s.awaken(now) + } else { + // otherwise, add to the set of sleepers + fc.timers = append(fc.timers, s) + // and notify any blockers + fc.blockers = notifyBlockers(fc.blockers, len(fc.timers)) + } +} + +// After mimics time.After; it waits for the given duration to elapse on the +// fakeClock, then sends the current time on the returned channel. 
+func (fc *fakeClock) After(d time.Duration) <-chan time.Time { + return fc.NewTimer(d).Chan() +} + +// notifyBlockers notifies all the blockers waiting until the +// given number of sleepers are waiting on the fakeClock. It +// returns an updated slice of blockers (i.e. those still waiting) +func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { + for _, b := range blockers { + if b.count == count { + close(b.ch) + } else { + newBlockers = append(newBlockers, b) + } + } + return +} + +// Sleep blocks until the given duration has passed on the fakeClock +func (fc *fakeClock) Sleep(d time.Duration) { + <-fc.After(d) +} + +// Time returns the current time of the fakeClock +func (fc *fakeClock) Now() time.Time { + fc.l.RLock() + t := fc.time + fc.l.RUnlock() + return t +} + +// Since returns the duration that has passed since the given time on the fakeClock +func (fc *fakeClock) Since(t time.Time) time.Duration { + return fc.Now().Sub(t) +} + +func (fc *fakeClock) NewTicker(d time.Duration) Ticker { + ft := &fakeTicker{ + c: make(chan time.Time, 1), + stop: make(chan bool, 1), + clock: fc, + period: d, + } + go ft.tick() + return ft +} + +// NewTimer creates a new Timer that will send the current time on its channel +// after the given duration elapses on the fake clock. +func (fc *fakeClock) NewTimer(d time.Duration) Timer { + done := make(chan time.Time, 1) + sendTime := func(c interface{}, now time.Time) { + select { + case c.(chan time.Time) <- now: + default: + } + } + + s := &fakeTimer{ + clock: fc, + until: fc.Now().Add(d), + callback: sendTime, + arg: done, + c: done, + } + fc.addTimer(s) + return s +} + +// AfterFunc waits for the duration to elapse on the fake clock and then calls f +// in its own goroutine. +// It returns a Timer that can be used to cancel the call using its Stop method. 
+func (fc *fakeClock) AfterFunc(d time.Duration, f func()) Timer { + goFunc := func(fn interface{}, _ time.Time) { + go fn.(func())() + } + + s := &fakeTimer{ + clock: fc, + until: fc.Now().Add(d), + callback: goFunc, + arg: f, + // zero-valued c, the same as it is in the `time` pkg + } + fc.addTimer(s) + return s +} + +// Advance advances fakeClock to a new point in time, ensuring channels from any +// previous invocations of After are notified appropriately before returning +func (fc *fakeClock) Advance(d time.Duration) { + fc.l.Lock() + defer fc.l.Unlock() + + end := fc.time.Add(d) + var newSleepers []*fakeTimer + for _, s := range fc.timers { + if end.Sub(s.whenToTrigger()) >= 0 { + s.awaken(end) + } else { + newSleepers = append(newSleepers, s) + } + } + fc.timers = newSleepers + fc.blockers = notifyBlockers(fc.blockers, len(fc.timers)) + fc.time = end +} + +// BlockUntil will block until the fakeClock has the given number of sleepers +// (callers of Sleep or After) +func (fc *fakeClock) BlockUntil(n int) { + fc.l.Lock() + // Fast path: current number of sleepers is what we're looking for + if len(fc.timers) == n { + fc.l.Unlock() + return + } + // Otherwise, set up a new blocker + b := &blocker{ + count: n, + ch: make(chan struct{}), + } + fc.blockers = append(fc.blockers, b) + fc.l.Unlock() + <-b.ch +} + +type fakeTicker struct { + c chan time.Time + stop chan bool + clock Fake + period time.Duration +} + +func (ft *fakeTicker) Chan() <-chan time.Time { + return ft.c +} + +func (ft *fakeTicker) Stop() { + ft.stop <- true +} + +// tick sends the tick time to the ticker channel after every period. +// Tick events are discarded if the underlying ticker channel does +// not have enough capacity. +func (ft *fakeTicker) tick() { + tick := ft.clock.Now() + for { + tick = tick.Add(ft.period) + remaining := tick.Sub(ft.clock.Now()) + if remaining <= 0 { + // The tick should have already happened. 
This can happen when + // Advance() is called on the fake clock with a duration larger + // than this ticker's period. + select { + case ft.c <- tick: + default: + } + continue + } + + select { + case <-ft.stop: + return + case <-ft.clock.After(remaining): + select { + case ft.c <- tick: + default: + } + } + } +} diff --git a/pkg/clock/testing_test.go b/pkg/clock/testing_test.go new file mode 100644 index 0000000000..39eb2898e7 --- /dev/null +++ b/pkg/clock/testing_test.go @@ -0,0 +1,356 @@ +// stm: #unit +package clock_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/filecoin-project/venus/pkg/clock" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +var startTime = time.Unix(123456789, 0) + +func TestFakeAfter(t *testing.T) { + tf.UnitTest(t) + fc := clock.NewFake(startTime) + + zero := fc.After(0) + select { + case <-zero: + default: + t.Errorf("zero did not return!") + } + // stm: @CLOCK_TESTING_AFTER_001 + one := fc.After(1) + two := fc.After(2) + six := fc.After(6) + ten := fc.After(10) + fc.Advance(1) + select { + case <-one: + default: + t.Errorf("one did not return!") + } + select { + case <-two: + t.Errorf("two returned prematurely!") + case <-six: + t.Errorf("six returned prematurely!") + case <-ten: + t.Errorf("ten returned prematurely!") + default: + } + fc.Advance(1) + select { + case <-two: + default: + t.Errorf("two did not return!") + } + select { + case <-six: + t.Errorf("six returned prematurely!") + case <-ten: + t.Errorf("ten returned prematurely!") + default: + } + fc.Advance(1) + select { + case <-six: + t.Errorf("six returned prematurely!") + case <-ten: + t.Errorf("ten returned prematurely!") + default: + } + fc.Advance(3) + select { + case <-six: + default: + t.Errorf("six did not return!") + } + select { + case <-ten: + t.Errorf("ten returned prematurely!") + default: + } + fc.Advance(100) + select { + case <-ten: + default: + t.Errorf("ten did not return!") + } +} + 
+func TestNewFakeAt(t *testing.T) { + tf.UnitTest(t) + t1 := time.Date(1999, time.February, 3, 4, 5, 6, 7, time.UTC) + fc := clock.NewFake(t1) + // stm: @CLOCK_TESTING_NOW_001 + now := fc.Now() + assert.Equalf(t, now, t1, "Fake.Now() returned unexpected non-initialised value: want=%#v, got %#v", t1, now) +} + +func TestFakeSince(t *testing.T) { + tf.UnitTest(t) + fc := clock.NewFake(startTime) + now := fc.Now() + elapsedTime := time.Second + fc.Advance(elapsedTime) + // stm: @CLOCK_TESTING_SINCE_001 + assert.Truef(t, fc.Since(now) == elapsedTime, "Fake.Since() returned unexpected duration, got: %d, want: %d", fc.Since(now), elapsedTime) +} + +func TestFakeTimers(t *testing.T) { + tf.UnitTest(t) + fc := clock.NewFake(startTime) + + // stm: @CLOCK_TESTING_NEW_TIMER_001 + zero := fc.NewTimer(0) + + assert.False(t, zero.Stop(), "zero timer could be stopped") + select { + case <-zero.Chan(): + default: + t.Errorf("zero timer didn't emit time") + } + + one := fc.NewTimer(1) + + select { + case <-one.Chan(): + t.Errorf("non-zero timer did emit time") + default: + } + + assert.True(t, one.Stop(), "non-zero timer couldn't be stopped") + + // stm: @CLOCK_TESTING_ADVANCE_001 + fc.Advance(5) + + select { + case <-one.Chan(): + t.Errorf("stopped timer did emit time") + default: + } + + assert.False(t, one.Reset(1), "resetting stopped timer didn't return false") + assert.True(t, one.Reset(1), "resetting active timer didn't return true") + + fc.Advance(1) + + assert.False(t, one.Stop(), "triggered timer could be stopped") + + select { + case <-one.Chan(): + default: + t.Errorf("triggered timer didn't emit time") + } + + fc.Advance(1) + + select { + case <-one.Chan(): + t.Errorf("triggered timer emitted time more than once") + default: + } + + one.Reset(0) + + assert.False(t, one.Stop(), "reset to zero timer could be stopped") + select { + case <-one.Chan(): + default: + t.Errorf("reset to zero timer didn't emit time") + } +} + +type syncFunc func(didAdvance func(), shouldAdvance 
func(string), shouldBlock func(string)) + +func inSync(t *testing.T, func1 syncFunc, func2 syncFunc) { + stepChan1 := make(chan struct{}, 16) + stepChan2 := make(chan struct{}, 16) + go func() { + func1( + func() { + stepChan1 <- struct{}{} + }, + func(point string) { + select { + case <-stepChan2: + case <-time.After(time.Second): + t.Errorf("Did not advance, should have %s", point) + } + }, + func(point string) { + select { + case <-stepChan2: + t.Errorf("Was able to advance, should not have %s", point) + case <-time.After(10 * time.Millisecond): + } + }, + ) + }() + func2(func() { stepChan2 <- struct{}{} }, func(point string) { + select { + case <-stepChan1: + case <-time.After(time.Second): + t.Errorf("Did not advance, should have %s", point) + } + }, + func(point string) { + select { + case <-stepChan1: + t.Errorf("Was able to advance, should not have %s", point) + case <-time.After(10 * time.Millisecond): + } + }) +} + +func TestBlockingOnTimers(t *testing.T) { + tf.UnitTest(t) + fc := clock.NewFake(startTime) + + inSync(t, func(didAdvance func(), shouldAdvance func(string), _ func(string)) { + // stm: @CLOCK_TESTING_BLOCK_UNTIL_001 + fc.BlockUntil(0) + didAdvance() + fc.BlockUntil(1) + didAdvance() + shouldAdvance("timers stopped") + fc.BlockUntil(0) + didAdvance() + fc.BlockUntil(1) + didAdvance() + fc.BlockUntil(2) + didAdvance() + fc.BlockUntil(3) + didAdvance() + shouldAdvance("timers stopped") + fc.BlockUntil(2) + didAdvance() + shouldAdvance("time advanced") + fc.BlockUntil(0) + didAdvance() + }, func(didAdvance func(), shouldAdvance func(string), shouldBlock func(string)) { + shouldAdvance("when only blocking for 0 timers") + shouldBlock("when waiting for 1 timer") + fc.NewTimer(0) + shouldBlock("when immediately expired timer added") + one := fc.NewTimer(1) + shouldAdvance("once a timer exists") + one.Stop() + didAdvance() + shouldAdvance("when only blocking for 0 timers") + shouldBlock("when all timers are stopped and waiting for a timer") + 
one.Reset(1) + shouldAdvance("once timer is restarted") + shouldBlock("when waiting for 2 timers with one active") + _ = fc.NewTimer(2) + shouldAdvance("when second timer added") + shouldBlock("when waiting for 3 timers with 2 active") + _ = fc.NewTimer(3) + shouldAdvance("when third timer added") + one.Stop() + didAdvance() + shouldAdvance("when blocking for 2 timers if a third is stopped") + fc.Advance(3) + didAdvance() + shouldAdvance("waiting for no timers") + }) +} + +func TestAdvancePastAfter(t *testing.T) { + tf.UnitTest(t) + + fc := clock.NewFake(startTime) + + start := fc.Now() + // stm: @CLOCK_TESTING_AFTER_FUNC_001 + one := fc.After(1) + two := fc.After(2) + six := fc.After(6) + + fc.Advance(1) + assert.False(t, start.Add(1).Sub(<-one) > 0, "timestamp is too early") + + fc.Advance(5) + assert.False(t, start.Add(2).Sub(<-two) > 0, "timestamp is too early") + assert.False(t, start.Add(6).Sub(<-six) > 0, "timestamp is too early") +} + +func TestFakeTickerStop(t *testing.T) { + tf.UnitTest(t) + fc := clock.NewFake(startTime) + + ft := fc.NewTicker(1) + ft.Stop() + fc.Advance(1) + select { + case <-ft.Chan(): + t.Errorf("received unexpected tick!") + default: + } +} + +func TestFakeTickerTick(t *testing.T) { + tf.UnitTest(t) + fc := clock.NewFake(startTime) + now := fc.Now() + + // The tick at now.Add(2) should not get through since we advance time by + // two units below and the channel can hold at most one tick until it's + // consumed. + first := now.Add(1) + second := now.Add(3) + + // We wrap the Advance() calls with blockers to make sure that the ticker + // can go to sleep and produce ticks without time passing in parallel. + ft := fc.NewTicker(1) + fc.BlockUntil(1) + fc.Advance(2) + fc.BlockUntil(1) + + select { + case tick := <-ft.Chan(): + assert.Truef(t, tick == first, "wrong tick time, got: %v, want: %v", tick, first) + default: + t.Errorf("expected tick!") + } + + // Advance by one more unit, we should get another tick now. 
+ fc.Advance(1) + fc.BlockUntil(1) + + select { + case tick := <-ft.Chan(): + assert.Truef(t, tick == second, "wrong tick time, got: %v, want: %v", tick, second) + default: + t.Errorf("expected tick!") + } + ft.Stop() +} + +func TestFakeSleep(t *testing.T) { + tf.UnitTest(t) + fc := clock.NewFake(startTime) + var afterSleep = make(chan struct{}) + + go func() { + go func() { + <-time.After(time.Second) + fc.Advance(1) + }() + // stm: @CLOCK_TESTING_SLEEP_001 + fc.Sleep(1) + afterSleep <- struct{}{} + }() + + select { + case <-time.After(time.Second * 5): + t.Fatalf("wakeup sleep timeout.") + case <-afterSleep: + return + } +} diff --git a/internal/pkg/clock/ticker.go b/pkg/clock/ticker.go similarity index 100% rename from internal/pkg/clock/ticker.go rename to pkg/clock/ticker.go diff --git a/internal/pkg/clock/timer.go b/pkg/clock/timer.go similarity index 100% rename from internal/pkg/clock/timer.go rename to pkg/clock/timer.go diff --git a/pkg/config/beacon_config.go b/pkg/config/beacon_config.go new file mode 100644 index 0000000000..677baa65a9 --- /dev/null +++ b/pkg/config/beacon_config.go @@ -0,0 +1,72 @@ +package config + +type DrandEnum int + +const ( + DrandMainnet DrandEnum = iota + 1 + DrandTestnet + DrandDevnet + DrandLocalnet + DrandIncentinet +) + +type DrandConf struct { + Servers []string + Relays []string + ChainInfoJSON string +} + +// DrandConfigs a set of drand config +var DrandConfigs = map[DrandEnum]DrandConf{ + DrandMainnet: { + Servers: []string{ + "https://api.drand.sh", + "https://api2.drand.sh", + "https://api3.drand.sh", + "https://drand.cloudflare.com", + }, + Relays: []string{ + "/dnsaddr/api.drand.sh/", + "/dnsaddr/api2.drand.sh/", + "/dnsaddr/api3.drand.sh/", + }, + ChainInfoJSON: 
`{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, + }, + DrandTestnet: { + Servers: []string{ + "https://pl-eu.testnet.drand.sh", + "https://pl-us.testnet.drand.sh", + "https://pl-sin.testnet.drand.sh", + }, + Relays: []string{ + "/dnsaddr/pl-eu.testnet.drand.sh/", + "/dnsaddr/pl-us.testnet.drand.sh/", + "/dnsaddr/pl-sin.testnet.drand.sh/", + }, + ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, + }, + DrandDevnet: { + Servers: []string{ + "https://dev1.drand.sh", + "https://dev2.drand.sh", + }, + Relays: []string{ + "/dnsaddr/dev1.drand.sh/", + "/dnsaddr/dev2.drand.sh/", + }, + ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, + }, + DrandIncentinet: { + Servers: []string{ + "https://pl-eu.incentinet.drand.sh", + "https://pl-us.incentinet.drand.sh", + "https://pl-sin.incentinet.drand.sh", + }, + Relays: []string{ + "/dnsaddr/pl-eu.incentinet.drand.sh/", + "/dnsaddr/pl-us.incentinet.drand.sh/", + "/dnsaddr/pl-sin.incentinet.drand.sh/", + }, + ChainInfoJSON: 
`{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, + }, +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 0000000000..2496433446 --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,509 @@ +package config + +import ( + "encoding/json" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strings" + "time" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" +) + +const ( + scryptN = 1 << 21 + scryptP = 1 +) + +var DefaultDefaultMaxFee = types.MustParseFIL("10") + +// Config is an in memory representation of the filecoin configuration file +type Config struct { + API *APIConfig `json:"api"` + Bootstrap *BootstrapConfig `json:"bootstrap"` + Datastore *DatastoreConfig `json:"datastore"` + Mpool *MessagePoolConfig `json:"mpool"` + NetworkParams *NetworkParamsConfig `json:"parameters"` + Observability *ObservabilityConfig `json:"observability"` + Swarm *SwarmConfig `json:"swarm"` + Wallet *WalletConfig `json:"walletModule"` + SlashFilterDs *SlashFilterDsConfig `json:"slashFilter"` + RateLimitCfg *RateLimitCfg `json:"rateLimit"` +} + +// APIConfig holds all configuration options related to the api. 
+// nolint +type APIConfig struct { + VenusAuthURL string `json:"venusAuthURL"` + APIAddress string `json:"apiAddress"` + AccessControlAllowOrigin []string `json:"accessControlAllowOrigin"` + AccessControlAllowCredentials bool `json:"accessControlAllowCredentials"` + AccessControlAllowMethods []string `json:"accessControlAllowMethods"` +} + +type RateLimitCfg struct { + Endpoint string `json:"RedisEndpoint"` + User string `json:"user"` + Pwd string `json:"pwd"` + Enable bool `json:"enable"` +} + +func newDefaultAPIConfig() *APIConfig { + return &APIConfig{ + APIAddress: "/ip4/127.0.0.1/tcp/3453", + AccessControlAllowOrigin: []string{ + "http://localhost:8080", + "https://localhost:8080", + "http://127.0.0.1:8080", + "https://127.0.0.1:8080", + }, + AccessControlAllowMethods: []string{"GET", "POST", "PUT"}, + } +} + +// DatastoreConfig holds all the configuration options for the datastore. +// TODO: use the advanced datastore configuration from ipfs +type DatastoreConfig struct { + Type string `json:"type"` + Path string `json:"path"` +} + +// Validators hold the list of validation functions for each configuration +// property. Validators must take a key and json string respectively as +// arguments, and must return either an error or nil depending on whether or not +// the given key and value are valid. Validators will only be run if a property +// being set matches the name given in this map. +var Validators = map[string]func(string, string) error{ + "heartbeat.nickname": validateLettersOnly, +} + +func newDefaultDatastoreConfig() *DatastoreConfig { + return &DatastoreConfig{ + Type: "badgerds", + Path: "badger", + } +} + +// SwarmConfig holds all configuration options related to the swarm. 
+type SwarmConfig struct { + Address string `json:"address"` + PublicRelayAddress string `json:"public_relay_address,omitempty"` +} + +func newDefaultSwarmConfig() *SwarmConfig { + return &SwarmConfig{ + Address: "/ip4/0.0.0.0/tcp/0", + } +} + +// BootstrapConfig holds all configuration options related to bootstrap nodes +type BootstrapConfig struct { + Addresses []string `json:"addresses"` + MinPeerThreshold int `json:"minPeerThreshold"` + Period string `json:"period,omitempty"` +} + +// TODO: provide bootstrap node addresses +func newDefaultBootstrapConfig() *BootstrapConfig { + return &BootstrapConfig{ + Addresses: []string{}, + MinPeerThreshold: 3, // TODO: we don't actually have an bootstrap peers yet. + Period: "1m", + } +} + +// WalletConfig holds all configuration options related to the wallet. +type WalletConfig struct { + DefaultAddress address.Address `json:"defaultAddress,omitempty"` + PassphraseConfig PassphraseConfig `json:"passphraseConfig,omitempty"` + RemoteEnable bool `json:"remoteEnable"` + RemoteBackend string `json:"remoteBackend"` +} + +type PassphraseConfig struct { + ScryptN int `json:"scryptN"` + ScryptP int `json:"scryptP"` +} + +func newDefaultWalletConfig() *WalletConfig { + return &WalletConfig{ + DefaultAddress: address.Undef, + PassphraseConfig: DefaultPassphraseConfig(), + } +} + +func DefaultPassphraseConfig() PassphraseConfig { + return PassphraseConfig{ + ScryptN: scryptN, + ScryptP: scryptP, + } +} + +func TestPassphraseConfig() PassphraseConfig { + return PassphraseConfig{ + ScryptN: 1 << 15, + ScryptP: scryptP, + } +} + +// DrandConfig holds all configuration options related to pulling randomness from Drand servers +type DrandConfig struct { + StartTimeUnix int64 `json:"startTimeUnix"` + RoundSeconds int `json:"roundSeconds"` +} + +// HeartbeatConfig holds all configuration options related to node heartbeat. +type HeartbeatConfig struct { + // BeatTarget represents the address the filecoin node will send heartbeats to. 
+ BeatTarget string `json:"beatTarget"` + // BeatPeriod represents how frequently heartbeats are sent. + // Golang duration units are accepted. + BeatPeriod string `json:"beatPeriod"` + // ReconnectPeriod represents how long the node waits before attempting to reconnect. + // Golang duration units are accepted. + ReconnectPeriod string `json:"reconnectPeriod"` + // Nickname represents the nickname of the filecoin node, + Nickname string `json:"nickname"` +} + +// ObservabilityConfig is a container for configuration related to observables. +type ObservabilityConfig struct { + Metrics *MetricsConfig `json:"metrics"` + Tracing *TraceConfig `json:"tracing"` +} + +func newDefaultObservabilityConfig() *ObservabilityConfig { + return &ObservabilityConfig{ + Metrics: newDefaultMetricsConfig(), + Tracing: newDefaultTraceConfig(), + } +} + +// MetricsConfig holds all configuration options related to node metrics. +type MetricsConfig struct { + // Enabled will enable prometheus metrics when true. + PrometheusEnabled bool `json:"prometheusEnabled"` + // ReportInterval represents how frequently filecoin will update its prometheus metrics. + ReportInterval string `json:"reportInterval"` + // PrometheusEndpoint represents the address filecoin will expose prometheus metrics at. + PrometheusEndpoint string `json:"prometheusEndpoint"` +} + +func newDefaultMetricsConfig() *MetricsConfig { + return &MetricsConfig{ + PrometheusEnabled: false, + ReportInterval: "5s", + PrometheusEndpoint: "/ip4/0.0.0.0/tcp/9400", + } +} + +// TraceConfig holds all configuration options related to enabling and exporting +// filecoin node traces. +type TraceConfig struct { + // JaegerTracingEnabled will enable exporting traces to jaeger when true. + JaegerTracingEnabled bool `json:"jaegerTracingEnabled"` + // ProbabilitySampler will sample fraction of traces, 1.0 will sample all traces. + ProbabilitySampler float64 `json:"probabilitySampler"` + // JaegerEndpoint is the URL traces are collected on. 
+ JaegerEndpoint string `json:"jaegerEndpoint"` + ServerName string `json:"servername"` +} + +func newDefaultTraceConfig() *TraceConfig { + return &TraceConfig{ + JaegerEndpoint: "localhost:6831", + JaegerTracingEnabled: false, + ProbabilitySampler: 1.0, + ServerName: "venus-node", + } +} + +// MessagePoolConfig holds all configuration options related to nodes message pool (mpool). +type MessagePoolConfig struct { + // MaxNonceGap is the maximum nonce of a message past the last received on chain + MaxNonceGap uint64 `json:"maxNonceGap"` + // MaxFee + MaxFee types.FIL `json:"maxFee"` +} + +var DefaultMessagePoolParam = &MessagePoolConfig{ + MaxNonceGap: 100, + MaxFee: DefaultDefaultMaxFee, +} + +func newDefaultMessagePoolConfig() *MessagePoolConfig { + return &MessagePoolConfig{ + MaxNonceGap: 100, + MaxFee: DefaultDefaultMaxFee, + } +} + +// NetworkParamsConfig record netork parameters +type NetworkParamsConfig struct { + DevNet bool `json:"-"` + NetworkType types.NetworkType `json:"networkType"` + AddressNetwork address.Network `json:"-"` + GenesisNetworkVersion network.Version `json:"-"` + ConsensusMinerMinPower uint64 `json:"-"` // uint64 goes up to 18 EiB + MinVerifiedDealSize int64 `json:"-"` + ReplaceProofTypes []abi.RegisteredSealProof `json:"-"` + BlockDelay uint64 `json:"-"` + DrandSchedule map[abi.ChainEpoch]DrandEnum `json:"-"` + ForkUpgradeParam *ForkUpgradeConfig `json:"-"` + PreCommitChallengeDelay abi.ChainEpoch `json:"-"` + PropagationDelaySecs uint64 `json:"-"` +} + +// ForkUpgradeConfig record upgrade parameters +type ForkUpgradeConfig struct { + UpgradeSmokeHeight abi.ChainEpoch `json:"upgradeSmokeHeight"` + UpgradeBreezeHeight abi.ChainEpoch `json:"upgradeBreezeHeight"` + UpgradeIgnitionHeight abi.ChainEpoch `json:"upgradeIgnitionHeight"` + UpgradeLiftoffHeight abi.ChainEpoch `json:"upgradeLiftoffHeight"` + UpgradeAssemblyHeight abi.ChainEpoch `json:"upgradeActorsV2Height"` + UpgradeRefuelHeight abi.ChainEpoch `json:"upgradeRefuelHeight"` + 
UpgradeTapeHeight abi.ChainEpoch `json:"upgradeTapeHeight"` + UpgradeKumquatHeight abi.ChainEpoch `json:"upgradeKumquatHeight"` + UpgradePriceListOopsHeight abi.ChainEpoch `json:"upgradePriceListOopsHeight"` + BreezeGasTampingDuration abi.ChainEpoch `json:"breezeGasTampingDuration"` + UpgradeCalicoHeight abi.ChainEpoch `json:"upgradeCalicoHeight"` + UpgradePersianHeight abi.ChainEpoch `json:"upgradePersianHeight"` + UpgradeOrangeHeight abi.ChainEpoch `json:"upgradeOrangeHeight"` + UpgradeClausHeight abi.ChainEpoch `json:"upgradeClausHeight"` + UpgradeTrustHeight abi.ChainEpoch `json:"upgradeActorsV3Height"` + UpgradeNorwegianHeight abi.ChainEpoch `json:"upgradeNorwegianHeight"` + UpgradeTurboHeight abi.ChainEpoch `json:"upgradeActorsV4Height"` + UpgradeHyperdriveHeight abi.ChainEpoch `json:"upgradeHyperdriveHeight"` + UpgradeChocolateHeight abi.ChainEpoch `json:"upgradeChocolateHeight"` + UpgradeOhSnapHeight abi.ChainEpoch `json:"upgradeOhSnapHeight"` + UpgradeSkyrHeight abi.ChainEpoch `json:"upgradeSkyrHeight"` + UpgradeSharkHeight abi.ChainEpoch `json:"upgradeSharkHeight"` +} + +func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool { + return epoch > upgradeEpoch-constants.Finality && epoch < upgradeEpoch+constants.Finality +} + +var DefaultForkUpgradeParam = &ForkUpgradeConfig{ + UpgradeBreezeHeight: 41280, + BreezeGasTampingDuration: 120, + UpgradeSmokeHeight: 51000, + UpgradeIgnitionHeight: 94000, + UpgradeRefuelHeight: 130800, + UpgradeTapeHeight: 140760, + UpgradeLiftoffHeight: 148888, + UpgradeKumquatHeight: 170000, + UpgradeCalicoHeight: 265200, + UpgradePersianHeight: 265200 + 120*60, + UpgradeAssemblyHeight: 138720, + UpgradeOrangeHeight: 336458, + UpgradeClausHeight: 343200, + UpgradeTrustHeight: 550321, + UpgradeNorwegianHeight: 665280, + UpgradeTurboHeight: 712320, + UpgradeHyperdriveHeight: 892800, + UpgradeChocolateHeight: 1231620, + UpgradeOhSnapHeight: 1594680, + UpgradeSkyrHeight: 1960320, + UpgradeSharkHeight: 2383680, +} + +func 
newDefaultNetworkParamsConfig() *NetworkParamsConfig { + defaultParams := *DefaultForkUpgradeParam + return &NetworkParamsConfig{ + DevNet: true, + ConsensusMinerMinPower: 0, // 0 means don't override the value + ReplaceProofTypes: []abi.RegisteredSealProof{ + abi.RegisteredSealProof_StackedDrg2KiBV1, + abi.RegisteredSealProof_StackedDrg512MiBV1, + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + }, + DrandSchedule: map[abi.ChainEpoch]DrandEnum{0: 5, -1: 1}, + ForkUpgradeParam: &defaultParams, + PropagationDelaySecs: 10, + } +} + +type MySQLConfig struct { + ConnectionString string `json:"connectionString"` + MaxOpenConn int `json:"maxOpenConn"` // 100 + MaxIdleConn int `json:"maxIdleConn"` // 10 + ConnMaxLifeTime time.Duration `json:"connMaxLifeTime"` // minuter: 60 + Debug bool `json:"debug"` +} + +type SlashFilterDsConfig struct { + Type string `json:"type"` + MySQL MySQLConfig `json:"mysql"` +} + +func newDefaultSlashFilterDsConfig() *SlashFilterDsConfig { + return &SlashFilterDsConfig{ + Type: "local", + MySQL: MySQLConfig{}, + } +} + +func newRateLimitConfig() *RateLimitCfg { + return &RateLimitCfg{ + Enable: false, + } +} + +// NewDefaultConfig returns a config object with all the fields filled out to +// their default values +func NewDefaultConfig() *Config { + return &Config{ + API: newDefaultAPIConfig(), + Bootstrap: newDefaultBootstrapConfig(), + Datastore: newDefaultDatastoreConfig(), + Mpool: newDefaultMessagePoolConfig(), + NetworkParams: newDefaultNetworkParamsConfig(), + Observability: newDefaultObservabilityConfig(), + Swarm: newDefaultSwarmConfig(), + Wallet: newDefaultWalletConfig(), + SlashFilterDs: newDefaultSlashFilterDsConfig(), + RateLimitCfg: newRateLimitConfig(), + } +} + +// WriteFile writes the config to the given filepath. 
+func (cfg *Config) WriteFile(file string) error { + f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) + if err != nil { + return err + } + defer f.Close() // nolint: errcheck + + configString, err := json.MarshalIndent(*cfg, "", "\t") + if err != nil { + return err + } + + _, err = fmt.Fprint(f, string(configString)) + return err +} + +// ReadFile reads a config file from disk. +func ReadFile(file string) (*Config, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + + cfg := NewDefaultConfig() + rawConfig, err := io.ReadAll(f) + if err != nil { + return nil, err + } + if len(rawConfig) == 0 { + return cfg, nil + } + + err = json.Unmarshal(rawConfig, &cfg) + if err != nil { + return nil, err + } + + return cfg, nil +} + +// Set sets the config sub-struct referenced by `key`, e.g. 'api.address' +// or 'datastore' to the json key value pair encoded in jsonVal. +func (cfg *Config) Set(dottedKey string, jsonString string) error { + if !json.Valid([]byte(jsonString)) { + jsonBytes, _ := json.Marshal(jsonString) + jsonString = string(jsonBytes) + } + + if err := validate(dottedKey, jsonString); err != nil { + return err + } + + keys := strings.Split(dottedKey, ".") + for i := len(keys) - 1; i >= 0; i-- { + jsonString = fmt.Sprintf(`{ "%s": %s }`, keys[i], jsonString) + } + + decoder := json.NewDecoder(strings.NewReader(jsonString)) + decoder.DisallowUnknownFields() + + return decoder.Decode(&cfg) +} + +// Get gets the config sub-struct referenced by `key`, e.g. 
'api.address' +func (cfg *Config) Get(key string) (interface{}, error) { + v := reflect.Indirect(reflect.ValueOf(cfg)) + keyTags := strings.Split(key, ".") +OUTER: + for j, keyTag := range keyTags { + if v.Type().Kind() == reflect.Struct { + for i := 0; i < v.NumField(); i++ { + jsonTag := strings.Split( + v.Type().Field(i).Tag.Get("json"), + ",")[0] + if jsonTag == keyTag { + v = v.Field(i) + if j == len(keyTags)-1 { + return v.Interface(), nil + } + v = reflect.Indirect(v) // only attempt one dereference + continue OUTER + } + } + } + + return nil, fmt.Errorf("key: %s invalid for config", key) + } + // Cannot get here as len(strings.Split(s, sep)) >= 1 with non-empty sep + return nil, fmt.Errorf("empty key is invalid") +} + +// validate runs validations on a given key and json string. validate uses the +// validators map defined at the top of this file to determine which validations +// to use for each key. +func validate(dottedKey string, jsonString string) error { + var obj interface{} + if err := json.Unmarshal([]byte(jsonString), &obj); err != nil { + return err + } + // recursively validate sub-keys by partially unmarshalling + if reflect.ValueOf(obj).Kind() == reflect.Map { + var obj map[string]json.RawMessage + if err := json.Unmarshal([]byte(jsonString), &obj); err != nil { + return err + } + for key := range obj { + if err := validate(dottedKey+"."+key, string(obj[key])); err != nil { + return err + } + } + return nil + } + + if validationFunc, present := Validators[dottedKey]; present { + return validationFunc(dottedKey, jsonString) + } + + return nil +} + +// validateLettersOnly validates that a given value contains only letters. If it +// does not, an error is returned using the given key for the message. 
+func validateLettersOnly(key string, value string) error { + if match, _ := regexp.MatchString("^\"[a-zA-Z]+\"$", value); !match { + return errors.Errorf(`"%s" must only contain letters`, key) + } + return nil +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go new file mode 100644 index 0000000000..c3fb7fb9be --- /dev/null +++ b/pkg/config/config_test.go @@ -0,0 +1,264 @@ +package config + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestDefaults(t *testing.T) { + tf.UnitTest(t) + + cfg := NewDefaultConfig() + + bs := []string{} + assert.Equal(t, "/ip4/127.0.0.1/tcp/3453", cfg.API.APIAddress) + assert.Equal(t, "/ip4/0.0.0.0/tcp/0", cfg.Swarm.Address) + assert.Equal(t, bs, cfg.Bootstrap.Addresses) +} + +func TestWriteFile(t *testing.T) { + tf.UnitTest(t) + + dir := t.TempDir() + + cfg := NewDefaultConfig() + + cfgJSON, err := json.MarshalIndent(*cfg, "", "\t") + require.NoError(t, err) + expected := string(cfgJSON) + + SanityCheck(t, expected) + + assert.NoError(t, cfg.WriteFile(filepath.Join(dir, "config.json"))) + content, err := os.ReadFile(filepath.Join(dir, "config.json")) + assert.NoError(t, err) + + assert.Equal(t, expected, string(content)) + assert.NoError(t, os.Remove(filepath.Join(dir, "config.json"))) +} + +func TestConfigRoundtrip(t *testing.T) { + tf.UnitTest(t) + + dir := t.TempDir() + + cfg := NewDefaultConfig() + + cfgpath := filepath.Join(dir, "config.json") + assert.NoError(t, cfg.WriteFile(cfgpath)) + + cfgout, err := ReadFile(cfgpath) + assert.NoError(t, err) + + assert.Equal(t, cfg, cfgout) +} + +func TestConfigReadFileDefaults(t *testing.T) { + tf.UnitTest(t) + + t.Run("all sections exist", func(t *testing.T) { + cfgpath, err := createConfigFile(t, ` + { + "api": { + "apiAddress": 
"/ip4/127.0.0.1/tcp/9999", + "keyThatDoesntExit": false + }, + "swarm": { + "keyThatDoesntExit": "hello" + } + }`) + assert.NoError(t, err) + cfg, err := ReadFile(cfgpath) + assert.NoError(t, err) + + assert.Equal(t, cfg.API.APIAddress, "/ip4/127.0.0.1/tcp/9999") + assert.Equal(t, cfg.Swarm.Address, "/ip4/0.0.0.0/tcp/0") + }) + + t.Run("missing one section", func(t *testing.T) { + cfgpath, err := createConfigFile(t, ` + { + "api": { + "apiAddress": "/ip4/127.0.0.1/tcp/9999", + "keyThatDoesntExit'": false + } + }`) + assert.NoError(t, err) + cfg, err := ReadFile(cfgpath) + assert.NoError(t, err) + + assert.Equal(t, cfg.API.APIAddress, "/ip4/127.0.0.1/tcp/9999") + assert.Equal(t, cfg.Swarm.Address, "/ip4/0.0.0.0/tcp/0") + }) + + t.Run("empty file", func(t *testing.T) { + cfgpath, err := createConfigFile(t, "") + assert.NoError(t, err) + cfg, err := ReadFile(cfgpath) + assert.NoError(t, err) + + assert.Equal(t, cfg.API.APIAddress, "/ip4/127.0.0.1/tcp/3453") + assert.Equal(t, cfg.Swarm.Address, "/ip4/0.0.0.0/tcp/0") + }) +} + +func TestConfigGet(t *testing.T) { + tf.UnitTest(t) + + t.Run("valid gets", func(t *testing.T) { + cfg := NewDefaultConfig() + + out, err := cfg.Get("api.apiAddress") + assert.NoError(t, err) + assert.Equal(t, cfg.API.APIAddress, out) + + out, err = cfg.Get("api.accessControlAllowOrigin") + assert.NoError(t, err) + assert.Equal(t, cfg.API.AccessControlAllowOrigin, out) + + out, err = cfg.Get("api") + assert.NoError(t, err) + assert.Equal(t, cfg.API, out) + + out, err = cfg.Get("bootstrap.addresses") + assert.NoError(t, err) + assert.Equal(t, cfg.Bootstrap.Addresses, out) + + out, err = cfg.Get("bootstrap") + assert.NoError(t, err) + assert.Equal(t, cfg.Bootstrap, out) + + out, err = cfg.Get("datastore.path") + assert.NoError(t, err) + assert.Equal(t, cfg.Datastore.Path, out) + }) + + t.Run("invalid gets", func(t *testing.T) { + cfg := NewDefaultConfig() + + _, err := cfg.Get("datastore.") + assert.Error(t, err) + + _, err = cfg.Get(".datastore") 
+ assert.Error(t, err) + + _, err = cfg.Get("invalidfield") + assert.Error(t, err) + + _, err = cfg.Get("bootstrap.addresses.toomuch") + assert.Error(t, err) + + _, err = cfg.Get("api-address") + assert.Error(t, err) + + // TODO: temporary as we don't have any ATM. + _, err = cfg.Get("bootstrap.addresses.0") + assert.Error(t, err) + }) +} + +func TestConfigSet(t *testing.T) { + tf.UnitTest(t) + + t.Run("set leaf values", func(t *testing.T) { + cfg := NewDefaultConfig() + + // set string + err := cfg.Set("api.apiAddress", `"/ip4/127.9.9.9/tcp/0"`) + assert.NoError(t, err) + assert.Equal(t, cfg.API.APIAddress, "/ip4/127.9.9.9/tcp/0") + + // set slice + err = cfg.Set("api.accessControlAllowOrigin", `["http://localroast:7854"]`) + assert.NoError(t, err) + assert.Equal(t, cfg.API.AccessControlAllowOrigin, []string{"http://localroast:7854"}) + }) + + t.Run("set table value", func(t *testing.T) { + cfg := NewDefaultConfig() + + jsonBlob := `{"type": "badgerbadgerbadgerds", "path": "mushroom-mushroom"}` + err := cfg.Set("datastore", jsonBlob) + assert.NoError(t, err) + assert.Equal(t, cfg.Datastore.Type, "badgerbadgerbadgerds") + assert.Equal(t, cfg.Datastore.Path, "mushroom-mushroom") + + cfg1path, err := createConfigFile(t, fmt.Sprintf(`{"datastore": %s}`, jsonBlob)) + assert.NoError(t, err) + + cfg1, err := ReadFile(cfg1path) + assert.NoError(t, err) + assert.Equal(t, cfg1.Datastore, cfg.Datastore) + + // inline tables + jsonBlob = `{"type": "badgerbadgerbadgerds", "path": "mushroom-mushroom"}` + err = cfg.Set("datastore", jsonBlob) + assert.NoError(t, err) + + assert.Equal(t, cfg1.Datastore, cfg.Datastore) + }) + + t.Run("invalid set", func(t *testing.T) { + cfg := NewDefaultConfig() + + // bad key + err := cfg.Set("datastore.nope", `"too bad, fake key"`) + assert.Error(t, err) + + // not json + err = cfg.Set("bootstrap.addresses", `nota.json?key`) + assert.Error(t, err) + + // newlines in inline tables are invalid + tomlB := `{type = "badgerbadgerbadgerds", +path = 
"mushroom-mushroom"}` + err = cfg.Set("datastore", tomlB) + assert.Error(t, err) + + // setting values of wrong type + err = cfg.Set("datastore.type", `["not a", "string"]`) + assert.Error(t, err) + + err = cfg.Set("bootstrap.addresses", `"not a list"`) + assert.Error(t, err) + + err = cfg.Set("api", `"strings aren't structs"`) + assert.Error(t, err) + + // Corrupt address won't pass checksum + // err = cfg.Set("mining.defaultAddress", "fcqv3gmsd9gd7dqfe60d28euf4tx9v7929corrupt") + // assert.Contains(err.Error(), "invalid") + + err = cfg.Set("walletModule.defaultAddress", "corruptandtooshort") + assert.Contains(t, err.Error(), address.ErrUnknownNetwork.Error()) + }) + + t.Run("setting leaves does not interfere with neighboring leaves", func(t *testing.T) { + cfg := NewDefaultConfig() + + err := cfg.Set("bootstrap.period", `"3m"`) + assert.NoError(t, err) + err = cfg.Set("bootstrap.minPeerThreshold", `5`) + assert.NoError(t, err) + assert.Equal(t, cfg.Bootstrap.Period, "3m") + }) +} + +func createConfigFile(t *testing.T, content string) (string, error) { + cfgpath := filepath.Join(t.TempDir(), "config.json") + + if err := os.WriteFile(cfgpath, []byte(content), 0o644); err != nil { + return "", err + } + + return cfgpath, nil +} diff --git a/pkg/config/testing.go b/pkg/config/testing.go new file mode 100644 index 0000000000..763cc8b1a5 --- /dev/null +++ b/pkg/config/testing.go @@ -0,0 +1,18 @@ +package config + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Makes some basic checks of a serialized config to ascertain that it looks kind of right. +// This is instead of brittle hardcoded exact config expectations. 
+func SanityCheck(t *testing.T, cfgJSON string) { + assert.True(t, strings.Contains(cfgJSON, "accessControlAllowOrigin")) + assert.True(t, strings.Contains(cfgJSON, "http://localhost:8080")) + assert.True(t, strings.Contains(cfgJSON, "bootstrap")) + assert.True(t, strings.Contains(cfgJSON, "bootstrap")) + assert.True(t, strings.Contains(cfgJSON, "\"minPeerThreshold\": 3")) +} diff --git a/pkg/consensus/block_validator.go b/pkg/consensus/block_validator.go new file mode 100644 index 0000000000..19981f6c2d --- /dev/null +++ b/pkg/consensus/block_validator.go @@ -0,0 +1,966 @@ +package consensus + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/Gurpartap/async" + "github.com/hashicorp/go-multierror" + lru "github.com/hashicorp/golang-lru" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + pubsub "github.com/libp2p/go-libp2p-pubsub" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/trace" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + acrypto "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + + "github.com/filecoin-project/venus/pkg/beacon" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/fork" + appstate "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm/gas" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + 
"github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/power" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var ( + ErrTemporal = errors.New("temporal error") + ErrSoftFailure = errors.New("soft validation failure") + ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power") +) + +// BlockValidator used to validate a block is ok or not +type BlockValidator struct { + // TicketValidator validates ticket generation + tv TicketValidator + // chain data store + bstore blockstoreutil.Blockstore + // message store + messageStore *chain.MessageStore + drand beacon.Schedule + // cstore is used for loading state trees during message running. + cstore cbor.IpldStore + // postVerifier verifies PoSt proofs and associated data + proofVerifier ProofVerifier + // state produces snapshots + state StateViewer + // Provides and stores validated tipsets and their state roots. 
+ chainState chainReader + // Selects the heaviest of two chains + chainSelector *ChainSelector + // fork used to process fork code + fork fork.IFork + // network params + config *config.NetworkParamsConfig + // gasprice for vm + gasPirceSchedule *gas.PricesSchedule + // cache for validate block + validateBlockCache *lru.ARCCache + + Stmgr StateTransformer +} + +// NewBlockValidator create a new block validator +func NewBlockValidator(tv TicketValidator, + bstore blockstoreutil.Blockstore, + messageStore *chain.MessageStore, + drand beacon.Schedule, + cstore cbor.IpldStore, + proofVerifier ProofVerifier, + state StateViewer, + chainState chainReader, + chainSelector *ChainSelector, + fork fork.IFork, + config *config.NetworkParamsConfig, + gasPirceSchedule *gas.PricesSchedule, +) *BlockValidator { + validateBlockCache, _ := lru.NewARC(2048) + return &BlockValidator{ + tv: tv, + bstore: bstore, + messageStore: messageStore, + drand: drand, + cstore: cstore, + proofVerifier: proofVerifier, + state: state, + chainState: chainState, + chainSelector: chainSelector, + fork: fork, + config: config, + gasPirceSchedule: gasPirceSchedule, + validateBlockCache: validateBlockCache, + } +} + +// ValidateBlockMsg used to validate block from incoming. check message, signature , wincount. +// if give a reject error. local node reject this block. if give a ignore error. 
recheck this block in latest notify +func (bv *BlockValidator) ValidateBlockMsg(ctx context.Context, blk *types.BlockMsg) pubsub.ValidationResult { + validationStart := time.Now() + defer func() { + logExpect.Debugw("validate block message", "Cid", blk.Cid(), "took", time.Since(validationStart), "height", blk.Header.Height, "age", time.Since(time.Unix(int64(blk.Header.Timestamp), 0))) + }() + + return bv.validateBlockMsg(ctx, blk) +} + +// ValidateFullBlock should match up with 'Semantical Validation' in validation.md in the spec +func (bv *BlockValidator) ValidateFullBlock(ctx context.Context, blk *types.BlockHeader) error { + validationStart := time.Now() + + if _, ok := bv.validateBlockCache.Get(blk.Cid()); ok { + return nil + } + err := bv.validateBlock(ctx, blk) + if err != nil { + return err + } + bv.validateBlockCache.Add(blk.Cid(), struct{}{}) + + logExpect.Infow("block validation", "took", time.Since(validationStart), "height", blk.Height, "block", blk.Cid(), "age", time.Since(time.Unix(int64(blk.Timestamp), 0))) + + return nil +} + +func (bv *BlockValidator) validateBlock(ctx context.Context, blk *types.BlockHeader) error { + parent, err := bv.chainState.GetTipSet(ctx, types.NewTipSetKey(blk.Parents...)) + if err != nil { + return fmt.Errorf("load parent tipset failed %w", err) + } + parentWeight, err := bv.chainSelector.Weight(ctx, parent) + if err != nil { + return fmt.Errorf("calc parent weight failed %w", err) + } + + if err := blockSanityChecks(blk); err != nil { + return fmt.Errorf("incoming header failed basic sanity checks: %w", err) + } + + baseHeight := parent.Height() + nulls := blk.Height - (baseHeight + 1) + if tgtTS := parent.MinTimestamp() + bv.config.BlockDelay*uint64(nulls+1); blk.Timestamp != tgtTS { + return fmt.Errorf("block has wrong timestamp: %d != %d", blk.Timestamp, tgtTS) + } + + now := uint64(time.Now().Unix()) + if blk.Timestamp > now+AllowableClockDriftSecs { + return fmt.Errorf("block was from the future (now=%d, blk=%d): 
%v", now, blk.Timestamp, ErrTemporal) + } + if blk.Timestamp > now { + logExpect.Warn("Got block from the future, but within threshold", blk.Timestamp, time.Now().Unix()) + } + + // get parent beacon + prevBeacon, err := bv.chainState.GetLatestBeaconEntry(ctx, parent) + if err != nil { + return fmt.Errorf("failed to get latest beacon entry: %w", err) + } + + if !parentWeight.Equals(blk.ParentWeight) { + return fmt.Errorf("block %s has invalid parent weight %d expected %d", blk.Cid().String(), blk.ParentWeight, parentWeight) + } + + // get worker address + version := bv.fork.GetNetworkVersion(ctx, blk.Height) + lbTS, lbStateRoot, err := bv.chainState.GetLookbackTipSetForRound(ctx, parent, blk.Height, version) + if err != nil { + return fmt.Errorf("failed to get lookback tipset for block: %w", err) + } + + powerStateView := bv.state.PowerStateView(lbStateRoot) + workerAddr, err := powerStateView.GetMinerWorkerRaw(ctx, blk.Miner) + if err != nil { + return fmt.Errorf("query worker address failed: %w", err) + } + + minerCheck := async.Err(func() error { + stateRoot, _, err := bv.Stmgr.RunStateTransition(ctx, parent) + if err != nil { + return err + } + if !stateRoot.Equals(blk.ParentStateRoot) { + return fmt.Errorf("tipset(%s) state root does not match, computed %s, expected: %s", + parent.String(), stateRoot, blk.ParentStateRoot) + } + if err := bv.minerIsValid(ctx, blk.Miner, blk.ParentStateRoot); err != nil { + return fmt.Errorf("minerIsValid failed: %w", err) + } + return nil + }) + + baseFeeCheck := async.Err(func() error { + baseFee, err := bv.messageStore.ComputeBaseFee(ctx, parent, bv.config.ForkUpgradeParam) + if err != nil { + return fmt.Errorf("computing base fee: %w", err) + } + + if big.Cmp(baseFee, blk.ParentBaseFee) != 0 { + return fmt.Errorf("base fee doesn't match: %s (header) != %s (computed)", blk.ParentBaseFee, baseFee) + } + return nil + }) + + blockSigCheck := async.Err(func() error { + // Validate block signature + data, err := 
blk.SignatureData() + if err != nil { + return err + } + return crypto.Verify(blk.BlockSig, workerAddr, data) + }) + + beaconValuesCheck := async.Err(func() error { + parentHeight := parent.Height() + return bv.ValidateBlockBeacon(blk, parentHeight, prevBeacon) + }) + + tktsCheck := async.Err(func() error { + beaconBase, err := bv.beaconBaseEntry(ctx, blk) + if err != nil { + return fmt.Errorf("failed to get election entry %w", err) + } + + sampleEpoch := blk.Height - constants.TicketRandomnessLookback + bSmokeHeight := blk.Height > bv.config.ForkUpgradeParam.UpgradeSmokeHeight + if err := bv.tv.IsValidTicket(ctx, types.NewTipSetKey(blk.Parents...), beaconBase, bSmokeHeight, sampleEpoch, blk.Miner, workerAddr, *blk.Ticket); err != nil { + return fmt.Errorf("invalid ticket: %s in block %s %w", blk.Ticket.String(), blk.Cid(), err) + } + return nil + }) + + winnerCheck := async.Err(func() error { + return bv.ValidateBlockWinner(ctx, workerAddr, lbTS, lbStateRoot, parent, parent.At(0).ParentStateRoot, blk, prevBeacon) + }) + + winPoStNv := bv.fork.GetNetworkVersion(ctx, baseHeight) + wproofCheck := async.Err(func() error { + if err := bv.VerifyWinningPoStProof(ctx, winPoStNv, blk, prevBeacon, lbStateRoot); err != nil { + return fmt.Errorf("invalid election post: %w", err) + } + return nil + }) + + msgsCheck := async.Err(func() error { + stateRoot, _, err := bv.Stmgr.RunStateTransition(ctx, parent) + if err != nil { + return err + } + keyStateView := bv.state.PowerStateView(stateRoot) + sigValidator := appstate.NewSignatureValidator(keyStateView) + if err := bv.checkBlockMessages(ctx, sigValidator, blk, parent); err != nil { + return fmt.Errorf("block had invalid messages: %w", err) + } + return nil + }) + + stateRootCheck := async.Err(func() error { + stateRoot, receipt, err := bv.Stmgr.RunStateTransition(ctx, parent) + if err != nil { + return fmt.Errorf("get tipsetstate(%d, %s) failed: %w", blk.Height, blk.Parents, err) + } + + if 
!stateRoot.Equals(blk.ParentStateRoot) { + return fmt.Errorf("tipset(%s) state root does not match, computed %s, expected: %s, %w", + parent.String(), stateRoot, blk.ParentStateRoot, ErrStateRootMismatch) + } + + if !receipt.Equals(blk.ParentMessageReceipts) { + return fmt.Errorf("tipset(%s) receipt root does not match, computed %s, expected: %s, %w", + parent.String(), receipt, blk.ParentMessageReceipts, ErrReceiptRootMismatch) + } + + return nil + }) + + await := []async.ErrorFuture{ + minerCheck, + tktsCheck, + blockSigCheck, + beaconValuesCheck, + wproofCheck, + winnerCheck, + msgsCheck, + baseFeeCheck, + stateRootCheck, + } + + var merr error + for _, fut := range await { + if err := fut.AwaitContext(ctx); err != nil { + merr = multierror.Append(merr, err) + } + } + + if merr != nil { + mulErr := merr.(*multierror.Error) + mulErr.ErrorFormat = func(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %+v", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) + } + return mulErr + } + return nil +} + +func (bv *BlockValidator) validateBlockMsg(ctx context.Context, blk *types.BlockMsg) pubsub.ValidationResult { + // validate the block meta: the Message CID in the header must match the included messages + err := bv.validateMsgMeta(ctx, blk) + if err != nil { + logExpect.Warnf("error validating message metadata: %s", err) + return pubsub.ValidationReject + } + + // we want to ensure that it is a block from a known miner; we reject blocks from unknown miners + // to prevent spam attacks. + // the logic works as follows: we lookup the miner in the chain for its key. + // if we can find it then it's a known miner and we can validate the signature. + // if we can't find it, we check whether we are (near) synced in the chain. 
+ // if we are not synced we cannot validate the block and we must ignore it. + // if we are synced and the miner is unknown, then the block is rejcected. + key, err := bv.checkPowerAndGetWorkerKey(ctx, blk.Header) + if err != nil { + if err != ErrSoftFailure && bv.isChainNearSynced() { + logExpect.Errorf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message") + return pubsub.ValidationReject + } + + logExpect.Errorf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain") + return pubsub.ValidationIgnore + } + + err = checkBlockSignature(ctx, blk.Header, key) + if err != nil { + logExpect.Errorf("block signature verification failed: %s", err) + return pubsub.ValidationReject + } + + if blk.Header.ElectionProof.WinCount < 1 { + logExpect.Errorf("block is not claiming to be winning") + return pubsub.ValidationReject + } + + return pubsub.ValidationAccept +} + +func (bv *BlockValidator) isChainNearSynced() bool { + ts := bv.chainState.GetHead() + timestamp := ts.MinTimestamp() + timestampTime := time.Unix(int64(timestamp), 0) + return constants.Clock.Since(timestampTime) < 6*time.Hour +} + +func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { + // TODO there has to be a simpler way to do this without the blockstore dance + // block headers use adt0 + store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstoreutil.NewTemporary())) + bmArr := blockadt.MakeEmptyArray(store) + smArr := blockadt.MakeEmptyArray(store) + + for i, m := range msg.BlsMessages { + c := cbg.CborCid(m) + if err := bmArr.Set(uint64(i), &c); err != nil { + return err + } + } + + for i, m := range msg.SecpkMessages { + c := cbg.CborCid(m) + if err := smArr.Set(uint64(i), &c); err != nil { + return err + } + } + + bmroot, err := bmArr.Root() + if err != nil { + return err + } + + smroot, err := smArr.Root() + if err != nil { + return err + } + + mrcid, err := 
store.Put(store.Context(), &types.MessageRoot{ + BlsRoot: bmroot, + SecpkRoot: smroot, + }) + if err != nil { + return err + } + + if msg.Header.Messages != mrcid { + return fmt.Errorf("messages didn't match root cid in header") + } + + return nil +} + +func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *types.BlockHeader) (address.Address, error) { + // we check that the miner met the minimum power at the lookback tipset + + baseTS := bv.chainState.GetHead() + version := bv.fork.GetNetworkVersion(ctx, bh.Height) + lbts, lbst, err := bv.chainState.GetLookbackTipSetForRound(ctx, baseTS, bh.Height, version) + if err != nil { + log.Warnf("failed to load lookback tipset for incoming block: %s", err) + return address.Undef, ErrSoftFailure + } + + powerStateView := bv.state.PowerStateView(lbst) + key, err := powerStateView.GetMinerWorkerRaw(ctx, bh.Miner) + if err != nil { + log.Warnf("failed to resolve worker key for miner %s: %s", bh.Miner, err) + return address.Undef, ErrSoftFailure + } + + // NOTE: we check to see if the miner was eligible in the lookback + // tipset - 1 for historical reasons. DO NOT use the lookback state + // returned by GetLookbackTipSetForRound. 
+ + eligible, err := bv.MinerEligibleToMine(ctx, bh.Miner, baseTS.At(0).ParentStateRoot, baseTS.Height(), lbts) + if err != nil { + log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err) + return address.Undef, ErrSoftFailure + } + + if !eligible { + log.Warnf("incoming block's miner is ineligible") + return address.Undef, ErrInsufficientPower + } + + return key, nil +} + +func (bv *BlockValidator) minerIsValid(ctx context.Context, maddr address.Address, baseStateRoot cid.Cid) error { + vms := cbor.NewCborStore(bv.bstore) + sm, err := tree.LoadState(ctx, vms, baseStateRoot) + if err != nil { + return fmt.Errorf("loading state: %w", err) + } + + pact, find, err := sm.GetActor(ctx, power.Address) + if err != nil { + return fmt.Errorf("get power actor failed: %w", err) + } + + if !find { + return errors.New("power actor not found") + } + + ps, err := power.Load(adt.WrapStore(ctx, vms), pact) + if err != nil { + return err + } + + _, exist, err := ps.MinerPower(maddr) + if err != nil { + return fmt.Errorf("failed to look up miner's claim: %w", err) + } + + if !exist { + return errors.New("miner isn't valid") + } + + return nil +} + +func (bv *BlockValidator) ValidateBlockBeacon(blk *types.BlockHeader, parentEpoch abi.ChainEpoch, prevEntry *types.BeaconEntry) error { + if os.Getenv("VENUS_IGNORE_DRAND") == "_yes_" { + return nil + } + nv := bv.fork.GetNetworkVersion(context.TODO(), blk.Height) + return beacon.ValidateBlockValues(bv.drand, nv, blk, parentEpoch, prevEntry) +} + +func (bv *BlockValidator) beaconBaseEntry(ctx context.Context, blk *types.BlockHeader) (*types.BeaconEntry, error) { + if len(blk.BeaconEntries) > 0 { + return &blk.BeaconEntries[len(blk.BeaconEntries)-1], nil + } + + parent, err := bv.chainState.GetTipSet(ctx, types.NewTipSetKey(blk.Parents...)) + if err != nil { + return nil, err + } + return chain.FindLatestDRAND(ctx, parent, bv.chainState) +} + +func (bv *BlockValidator) ValidateBlockWinner(ctx context.Context, 
waddr address.Address, lbTS *types.TipSet, lbRoot cid.Cid, baseTS *types.TipSet, baseRoot cid.Cid, + blk *types.BlockHeader, prevEntry *types.BeaconEntry, +) error { + if blk.ElectionProof.WinCount < 1 { + return fmt.Errorf("block is not claiming to be a winner") + } + + baseHeight := baseTS.Height() + eligible, err := bv.MinerEligibleToMine(ctx, blk.Miner, baseRoot, baseHeight, lbTS) + if err != nil { + return fmt.Errorf("determining if miner has min power failed: %v", err) + } + + if !eligible { + return errors.New("block's miner is ineligible to mine") + } + + rBeacon := prevEntry + if len(blk.BeaconEntries) != 0 { + rBeacon = &blk.BeaconEntries[len(blk.BeaconEntries)-1] + } + buf := new(bytes.Buffer) + if err := blk.Miner.MarshalCBOR(buf); err != nil { + return fmt.Errorf("failed to marshal miner address to cbor: %s", err) + } + + vrfBase, err := chain.DrawRandomness(rBeacon.Data, acrypto.DomainSeparationTag_ElectionProofProduction, blk.Height, buf.Bytes()) + if err != nil { + return fmt.Errorf("could not draw randomness: %s", err) + } + + if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, blk.ElectionProof.VRFProof); err != nil { + return fmt.Errorf("validating block election proof failed: %s", err) + } + + view := bv.state.PowerStateView(lbRoot) + if view == nil { + return errors.New("power state view is null") + } + + _, qaPower, err := view.MinerClaimedPower(ctx, blk.Miner) + if err != nil { + return fmt.Errorf("get miner power failed: %s", err) + } + + tpow, err := view.PowerNetworkTotal(ctx) + if err != nil { + return fmt.Errorf("get network total power failed: %s", err) + } + + j := blk.ElectionProof.ComputeWinCount(qaPower, tpow.QualityAdjustedPower) + if blk.ElectionProof.WinCount != j { + return fmt.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", blk.ElectionProof.WinCount, j) + } + + return nil +} + +func (bv *BlockValidator) MinerEligibleToMine(ctx context.Context, addr address.Address, parentStateRoot cid.Cid, parentHeight 
abi.ChainEpoch, lookbackTS *types.TipSet) (bool, error) { + hmp, err := bv.minerHasMinPower(ctx, addr, lookbackTS) + + // TODO: We're blurring the lines between a "runtime network version" and a "Lotus upgrade epoch", is that unavoidable? + if bv.fork.GetNetworkVersion(ctx, parentHeight) <= network.Version3 { + return hmp, err + } + + if err != nil { + return false, err + } + + if !hmp { + return false, nil + } + + // Post actors v2, also check MinerEligibleForElection with base ts + vms := cbor.NewCborStore(bv.bstore) + sm, err := tree.LoadState(ctx, vms, parentStateRoot) + if err != nil { + return false, fmt.Errorf("loading state: %v", err) + } + + pact, find, err := sm.GetActor(ctx, power.Address) + if err != nil { + return false, fmt.Errorf("get power actor failed: %v", err) + } + + if !find { + return false, errors.New("power actor not found") + } + + pstate, err := power.Load(adt.WrapStore(ctx, bv.cstore), pact) + if err != nil { + return false, err + } + + mact, find, err := sm.GetActor(ctx, addr) + if err != nil { + return false, fmt.Errorf("loading miner actor state: %v", err) + } + + if !find { + return false, fmt.Errorf("miner actor %s not found", addr) + } + + mstate, err := miner.Load(adt.WrapStore(ctx, vms), mact) + if err != nil { + return false, err + } + + // Non-empty power claim. + if claim, found, err := pstate.MinerPower(addr); err != nil { + return false, err + } else if !found { + return false, nil + } else if claim.QualityAdjPower.LessThanEqual(big.Zero()) { + logExpect.Infof("miner address:%v", addr.String()) + logExpect.Warnf("miner quality adjust power:%v is less than zero", claim.QualityAdjPower) + return false, nil + } + + // No fee debt. + if debt, err := mstate.FeeDebt(); err != nil { + return false, err + } else if !debt.IsZero() { + logExpect.Warnf("the debt:%v is not zero", debt) + return false, nil + } + + // No active consensus faults. 
+ if mInfo, err := mstate.Info(); err != nil { + return false, err + } else if parentHeight <= mInfo.ConsensusFaultElapsed { + return false, nil + } + + return true, nil +} + +func (bv *BlockValidator) minerHasMinPower(ctx context.Context, addr address.Address, ts *types.TipSet) (bool, error) { + vms := cbor.NewCborStore(bv.bstore) + sm, err := tree.LoadState(ctx, vms, ts.Blocks()[0].ParentStateRoot) + if err != nil { + return false, fmt.Errorf("loading state: %v", err) + } + + pact, find, err := sm.GetActor(ctx, power.Address) + if err != nil { + return false, fmt.Errorf("get power actor failed: %v", err) + } + + if !find { + return false, errors.New("power actor not found") + } + + ps, err := power.Load(adt.WrapStore(ctx, vms), pact) + if err != nil { + return false, err + } + + return ps.MinerNominalPowerMeetsConsensusMinimum(addr) +} + +func (bv *BlockValidator) VerifyWinningPoStProof(ctx context.Context, nv network.Version, blk *types.BlockHeader, prevBeacon *types.BeaconEntry, lbst cid.Cid) error { + if constants.InsecurePoStValidation { + if len(blk.WinPoStProof) == 0 { + return fmt.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given") + } + + if string(blk.WinPoStProof[0].ProofBytes) == "valid proof" { + return nil + } + return fmt.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid") + } + + buf := new(bytes.Buffer) + if err := blk.Miner.MarshalCBOR(buf); err != nil { + return fmt.Errorf("failed to marshal miner address: %v", err) + } + + rbase := prevBeacon + if len(blk.BeaconEntries) > 0 { + rbase = &blk.BeaconEntries[len(blk.BeaconEntries)-1] + } + + rand, err := chain.DrawRandomness(rbase.Data, acrypto.DomainSeparationTag_WinningPoStChallengeSeed, blk.Height, buf.Bytes()) + if err != nil { + return fmt.Errorf("failed to get randomness for verifying winning post proof: %v", err) + } + + mid, err := address.IDFromAddress(blk.Miner) + if err != nil { + return fmt.Errorf("failed to get ID from miner address %s: %v", blk.Miner, err) + } + 
+ view := bv.state.PowerStateView(lbst) + if view == nil { + return errors.New("power state view is null") + } + + xsectors, err := view.GetSectorsForWinningPoSt(ctx, nv, bv.proofVerifier, blk.Miner, rand) + if err != nil { + return fmt.Errorf("getting winning post sector set: %v", err) + } + + sectors := make([]proof.SectorInfo, len(xsectors)) + for i, xsi := range xsectors { + sectors[i] = proof.SectorInfo{ + SealProof: xsi.SealProof, + SectorNumber: xsi.SectorNumber, + SealedCID: xsi.SealedCID, + } + } + + ok, err := bv.proofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{ + Randomness: rand, + Proofs: blk.WinPoStProof, + ChallengedSectors: sectors, + Prover: abi.ActorID(mid), + }) + if err != nil { + return fmt.Errorf("failed to verify election post: %w", err) + } + + if !ok { + logExpect.Errorf("invalid winning post (block: %s, %x; %v)", blk.Cid(), rand, sectors) + return fmt.Errorf("winning post was invalid") + } + + return nil +} + +// TODO: We should extract this somewhere else and make the message pool and miner use the same logic +func (bv *BlockValidator) checkBlockMessages(ctx context.Context, sigValidator *appstate.SignatureValidator, blk *types.BlockHeader, baseTS *types.TipSet) (err error) { + blksecpMsgs, blkblsMsgs, err := bv.messageStore.LoadMetaMessages(ctx, blk.Messages) + if err != nil { + return fmt.Errorf("failed loading message list %s for block %s %v", blk.Messages, blk.Cid(), err) + } + + { + // Verify that the BLS signature aggregate is correct + if err := sigValidator.ValidateBLSMessageAggregate(ctx, blkblsMsgs, blk.BLSAggregate); err != nil { + return fmt.Errorf("bls message verification failed for block %s %v", blk.Cid(), err) + } + + // Verify that all secp message signatures are correct + for i, msg := range blksecpMsgs { + if err := sigValidator.ValidateMessageSignature(ctx, msg); err != nil { + return fmt.Errorf("invalid signature for secp message %d in block %s %v", i, blk.Cid(), err) + } + } + } + + nonces := 
make(map[address.Address]uint64) + vms := cbor.NewCborStore(bv.bstore) + st, err := tree.LoadState(ctx, vms, blk.ParentStateRoot) + if err != nil { + return fmt.Errorf("loading state: %v", err) + } + + pl := bv.gasPirceSchedule.PricelistByEpoch(blk.Height) + var sumGasLimit int64 + checkMsg := func(msg types.ChainMsg) error { + m := msg.VMMessage() + + // Phase 1: syntactic validation, as defined in the spec + minGas := pl.OnChainMessage(msg.ChainLength()) + if err := m.ValidForBlockInclusion(minGas.Total(), bv.fork.GetNetworkVersion(ctx, blk.Height)); err != nil { + return err + } + + // ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit + // So below is overflow safe + sumGasLimit += m.GasLimit + if sumGasLimit > constants.BlockGasLimit { + return fmt.Errorf("block gas limit exceeded") + } + + // Phase 2: (Partial) semantic validation: + // the sender exists and is an account actor, and the nonces make sense + var sender address.Address + if bv.fork.GetNetworkVersion(ctx, blk.Height) >= network.Version13 { + sender, err = st.LookupID(m.From) + if err != nil { + return err + } + } else { + sender = m.From + } + + if _, ok := nonces[sender]; !ok { + // `GetActor` does not validate that this is an account actor. + act, find, err := st.GetActor(ctx, sender) + if err != nil { + return fmt.Errorf("failed to get actor: %v", err) + } + + if !find { + return fmt.Errorf("actor %s not found", sender) + } + + if !builtin.IsAccountActor(act.Code) { + return errors.New("sender must be an account actor") + } + nonces[sender] = act.Nonce + } + + if nonces[sender] != m.Nonce { + return fmt.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce) + } + nonces[sender]++ + + return nil + } + + // Validate message arrays in a temporary blockstore. 
+ blsMsgs := make([]types.ChainMsg, len(blkblsMsgs)) + for i, m := range blkblsMsgs { + if err := checkMsg(m); err != nil { + return fmt.Errorf("block had invalid bls message at index %d: %v", i, err) + } + + blsMsgs[i] = m + } + + secpMsgs := make([]types.ChainMsg, len(blksecpMsgs)) + for i, m := range blksecpMsgs { + if bv.fork.GetNetworkVersion(ctx, blk.Height) >= network.Version14 { + if m.Signature.Type != crypto.SigTypeSecp256k1 { + return fmt.Errorf("block had invalid secpk message at index %d: %w", i, err) + } + } + if err := checkMsg(m); err != nil { + return fmt.Errorf("block had invalid secpk message at index %d: %v", i, err) + } + + secpMsgs[i] = m + } + + bmroot, err := chain.GetChainMsgRoot(ctx, blsMsgs) + if err != nil { + return fmt.Errorf("get blsMsgs root failed: %v", err) + } + + smroot, err := chain.GetChainMsgRoot(ctx, secpMsgs) + if err != nil { + return fmt.Errorf("get secpMsgs root failed: %v", err) + } + + txMeta := &types.MessageRoot{ + BlsRoot: bmroot, + SecpkRoot: smroot, + } + b, err := chain.MakeBlock(txMeta) + if err != nil { + return fmt.Errorf("serialize tx meta failed: %v", err) + } + if blk.Messages != b.Cid() { + return fmt.Errorf("messages didnt match message root in header") + } + return nil +} + +// ValidateMsgMeta performs structural and content hash validation of the +// messages within this block. If validation passes, it stores the messages in +// the underlying IPLD block store. +func (bv *BlockValidator) ValidateMsgMeta(ctx context.Context, fblk *types.FullBlock) error { + if msgc := len(fblk.BLSMessages) + len(fblk.SECPMessages); msgc > constants.BlockMessageLimit { + return fmt.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc) + } + + // TODO: IMPORTANT(GARBAGE). These message puts and the msgmeta + // computation need to go into the 'temporary' side of the blockstore when + // we implement that + + // We use a temporary bstore here to avoid writing intermediate pieces + // into the blockstore. 
+ blockstore := blockstoreutil.NewTemporary() + var bcids, scids []cid.Cid + + for _, m := range fblk.BLSMessages { + c, err := chain.PutMessage(ctx, blockstore, m) + if err != nil { + return fmt.Errorf("putting bls message to blockstore after msgmeta computation: %v", err) + } + bcids = append(bcids, c) + } + + for _, m := range fblk.SECPMessages { + c, err := chain.PutMessage(ctx, blockstore, m) + if err != nil { + return fmt.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) + } + scids = append(scids, c) + } + + // Compute the root CID of the combined message trie. + smroot, err := chain.ComputeMsgMeta(blockstore, bcids, scids) + if err != nil { + return fmt.Errorf("validating msgmeta, compute failed: %v", err) + } + + // Check that the message trie root matches with what's in the block. + if fblk.Header.Messages != smroot { + return fmt.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot) + } + + // Finally, flush + return blockstoreutil.CopyParticial(context.TODO(), blockstore, bv.bstore, smroot) +} + +func blockSanityChecks(b *types.BlockHeader) error { + if b.ElectionProof == nil { + return fmt.Errorf("block cannot have nil election proof") + } + + if b.BlockSig == nil { + return fmt.Errorf("block had nil signature") + } + + if b.BLSAggregate == nil { + return fmt.Errorf("block had nil bls aggregate signature") + } + + return nil +} + +func checkBlockSignature(ctx context.Context, blk *types.BlockHeader, worker address.Address) error { + _, span := trace.StartSpan(ctx, "checkBlockSignature") + defer span.End() + + if blk.IsValidated() { + return nil + } + + if blk.BlockSig == nil { + return errors.New("block signature not present") + } + + sigb, err := blk.SignatureData() + if err != nil { + return err + } + err = crypto.Verify(blk.BlockSig, worker, sigb) + if err == nil { + blk.SetValidated() + } + + return err +} diff --git a/pkg/consensus/chain_selector.go 
b/pkg/consensus/chain_selector.go new file mode 100644 index 0000000000..7724cb71fe --- /dev/null +++ b/pkg/consensus/chain_selector.go @@ -0,0 +1,120 @@ +package consensus + +// This is to implement Expected Consensus protocol +// See: https://github.com/filecoin-project/specs/blob/master/expected-consensus.md + +import ( + "context" + "errors" + "fmt" + "math/big" + + logging "github.com/ipfs/go-log/v2" + + fbig "github.com/filecoin-project/go-state-types/big" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var log = logging.Logger("chain_selector") + +// ChainSelector weighs and compares chains. +type ChainSelector struct { + cstore cbor.IpldStore + state StateViewer +} + +// NewChainSelector is the constructor for Chain selection module. +func NewChainSelector(cs cbor.IpldStore, state StateViewer) *ChainSelector { + return &ChainSelector{ + cstore: cs, + state: state, + } +} + +// Weight returns the EC weight of this TipSet as a filecoin big int. +func (c *ChainSelector) Weight(ctx context.Context, ts *types.TipSet) (fbig.Int, error) { + pStateID := ts.At(0).ParentStateRoot + // Retrieve parent weight. + if !pStateID.Defined() { + return fbig.Zero(), errors.New("undefined state passed to Chain selector new weight") + } + // todo change view version + powerTableView := state.NewPowerTableView(c.state.PowerStateView(pStateID), c.state.FaultStateView(pStateID)) + networkPower, err := powerTableView.NetworkTotalPower(ctx) + if err != nil { + return fbig.Zero(), err + } + + log2P := int64(0) + if networkPower.GreaterThan(fbig.NewInt(0)) { + log2P = int64(networkPower.BitLen() - 1) + } else { + // Not really expect to be here ... + return fbig.Zero(), fmt.Errorf("all power in the net is gone. 
You network might be disconnected, or the net is dead") + } + + weight := ts.ParentWeight() + out := new(big.Int).Set(weight.Int) + out.Add(out, big.NewInt(log2P<<8)) + + // (wFunction(totalPowerAtTipset(ts)) * sum(ts.blocks[].ElectionProof.WinCount) * wRatio_num * 2^8) / (e * wRatio_den) + + totalJ := int64(0) + for _, b := range ts.Blocks() { + totalJ += b.ElectionProof.WinCount + } + + eWeight := big.NewInt(log2P * constants.WRatioNum) + eWeight = eWeight.Lsh(eWeight, 8) + eWeight = eWeight.Mul(eWeight, new(big.Int).SetInt64(totalJ)) + eWeight = eWeight.Div(eWeight, big.NewInt(int64(uint64(constants.ExpectedLeadersPerEpoch)*constants.WRatioDen))) + + out = out.Add(out, eWeight) + + return fbig.Int{Int: out}, nil +} + +// IsHeavier returns true if tipset a is heavier than tipset b, and false +// vice versa. In the rare case where two tipsets have the same weight ties +// are broken by taking the tipset with more blocks. +func (c *ChainSelector) IsHeavier(ctx context.Context, a, b *types.TipSet) (bool, error) { + aW, err := c.Weight(ctx, a) + if err != nil { + return false, err + } + bW, err := c.Weight(ctx, b) + if err != nil { + return false, err + } + + heavier := aW.GreaterThan(bW) + if aW.Equals(bW) && !a.Equals(b) { + log.Errorw("weight draw", "currTs", a, "ts", b) + heavier = breakWeightTie(a, b) + } + + return heavier, nil +} + +// true if ts1 wins according to the filecoin tie-break rule +func breakWeightTie(ts1, ts2 *types.TipSet) bool { + s := len(ts1.Blocks()) + if s > len(ts2.Blocks()) { + s = len(ts2.Blocks()) + } + + // blocks are already sorted by ticket + for i := 0; i < s; i++ { + if ts1.Blocks()[i].Ticket.Less(ts2.Blocks()[i].Ticket) { + log.Infof("weight tie broken in favour of %s", ts1.Key()) + return true + } + } + + log.Infof("weight tie left unbroken, default to %s", ts2.Key()) + return false +} diff --git a/pkg/consensus/expected.go b/pkg/consensus/expected.go new file mode 100644 index 0000000000..b2a3d1e126 --- /dev/null +++ 
package consensus

import (
	"context"
	"fmt"
	"time"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/venus/pkg/vm/vmcontext"
	blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore"
	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	logging "github.com/ipfs/go-log"
	"github.com/pkg/errors"
	"go.opencensus.io/trace"

	"github.com/filecoin-project/venus/pkg/chain"
	"github.com/filecoin-project/venus/pkg/fork"
	appstate "github.com/filecoin-project/venus/pkg/state"
	"github.com/filecoin-project/venus/pkg/state/tree"
	"github.com/filecoin-project/venus/pkg/vm"
	"github.com/filecoin-project/venus/pkg/vm/gas"
	"github.com/filecoin-project/venus/venus-shared/types"
)

// Sentinel errors returned by consensus validation.
var (
	// ErrExpensiveFork is returned when an explicit call would straddle an expensive state fork.
	ErrExpensiveFork = errors.New("refusing explicit call due to state fork at epoch")
	// ErrStateRootMismatch is returned when the computed state root doesn't match the expected result.
	ErrStateRootMismatch = errors.New("blocks state root does not match computed result")
	// ErrUnorderedTipSets is returned when weight and minticket are the same between two tipsets.
	ErrUnorderedTipSets = errors.New("trying to order two identical tipsets")
	// ErrReceiptRootMismatch is returned when the block's receipt root doesn't match the receipt root computed for the parent tipset.
	ErrReceiptRootMismatch = errors.New("blocks receipt root does not match parent tip set")
)

var logExpect = logging.Logger("consensus")

// AllowableClockDriftSecs is the tolerated clock skew, in seconds, when
// validating block timestamps.
const AllowableClockDriftSecs = uint64(1)

// A Processor processes all the messages in a block or tip set.
type Processor interface {
	// ApplyBlocks processes all messages in a tip set.
	ApplyBlocks(ctx context.Context, blocks []types.BlockMessagesInfo, ts *types.TipSet, pstate cid.Cid, parentEpoch, epoch abi.ChainEpoch, vmOpts vm.VmOption, cb vm.ExecCallBack) (cid.Cid, []types.MessageReceipt, error)
}

// TicketValidator validates that an input ticket is valid.
type TicketValidator interface {
	IsValidTicket(ctx context.Context, base types.TipSetKey, entry *types.BeaconEntry, newPeriod bool, epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, ticket types.Ticket) error
}

// Todo Delete view just use state.Viewer
// AsDefaultStateViewer adapts a state viewer to a power state viewer.
func AsDefaultStateViewer(v *appstate.Viewer) DefaultStateViewer {
	return DefaultStateViewer{v}
}

// DefaultStateViewer a state viewer to the power state view interface.
type DefaultStateViewer struct {
	*appstate.Viewer
}

// PowerStateView returns a power state view for a state root.
func (v *DefaultStateViewer) PowerStateView(root cid.Cid) appstate.PowerStateView {
	return v.Viewer.StateView(root)
}

// FaultStateView returns a fault state view for a state root.
func (v *DefaultStateViewer) FaultStateView(root cid.Cid) appstate.FaultStateView {
	return v.Viewer.StateView(root)
}

// StateViewer provides views into the Chain state.
type StateViewer interface {
	PowerStateView(root cid.Cid) appstate.PowerStateView
	FaultStateView(root cid.Cid) appstate.FaultStateView
}

// chainReader is the subset of chain.Store functionality that the consensus
// engine needs; chain.Store is asserted to satisfy it below.
type chainReader interface {
	GetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error)
	GetHead() *types.TipSet
	StateView(ctx context.Context, ts *types.TipSet) (*appstate.View, error)
	GetTipSetStateRoot(context.Context, *types.TipSet) (cid.Cid, error)
	GetTipSetReceiptsRoot(context.Context, *types.TipSet) (cid.Cid, error)
	GetGenesisBlock(context.Context) (*types.BlockHeader, error)
	GetLatestBeaconEntry(context.Context, *types.TipSet) (*types.BeaconEntry, error)
	GetTipSetByHeight(context.Context, *types.TipSet, abi.ChainEpoch, bool) (*types.TipSet, error)
	GetCirculatingSupplyDetailed(context.Context, abi.ChainEpoch, tree.Tree) (types.CirculatingSupply, error)
	GetLookbackTipSetForRound(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch, version network.Version) (*types.TipSet, cid.Cid, error)
	GetTipsetMetadata(context.Context, *types.TipSet) (*chain.TipSetMetadata, error)
	PutTipSetMetadata(context.Context, *chain.TipSetMetadata) error
}

var _ chainReader = (*chain.Store)(nil)

// Expected implements expected consensus.
type Expected struct {
	// cstore is used for loading state trees during message running.
	cstore cbor.IpldStore

	// bstore contains data referenced by actors within the state
	// during message running. Additionally bstore is used for
	// accessing the power table.
	bstore blockstoreutil.Blockstore

	// message store for message read/write
	messageStore *chain.MessageStore

	// chainState is a reference to the current Chain state
	chainState chainReader

	// processor is what we use to process messages and pay rewards
	processor Processor

	// calculate chain randomness ticket/beacon
	rnd ChainRandomness

	// fork for vm process and block validator
	fork fork.IFork

	// gas price for vm
	// NOTE(review): "gasPirceSchedule" is a pre-existing misspelling of
	// "gasPriceSchedule"; it is referenced throughout this file, so it is
	// documented rather than renamed here.
	gasPirceSchedule *gas.PricesSchedule

	// systemcall for vm
	syscallsImpl vm.SyscallsImpl

	// block validator before process tipset
	blockValidator *BlockValidator
}

// NewExpected is the constructor for the Expected consenus.Protocol module.
func NewExpected(cs cbor.IpldStore,
	bs blockstoreutil.Blockstore,
	chainState *chain.Store,
	rnd ChainRandomness,
	messageStore *chain.MessageStore,
	fork fork.IFork,
	gasPirceSchedule *gas.PricesSchedule,
	blockValidator *BlockValidator,
	syscalls vm.SyscallsImpl,
	circulatingSupplyCalculator chain.ICirculatingSupplyCalcualtor,
) *Expected {
	processor := NewDefaultProcessor(syscalls, circulatingSupplyCalculator)
	return &Expected{
		processor:        processor,
		syscallsImpl:     syscalls,
		cstore:           cs,
		bstore:           bs,
		chainState:       chainState,
		messageStore:     messageStore,
		rnd:              rnd,
		fork:             fork,
		gasPirceSchedule: gasPirceSchedule,
		blockValidator:   blockValidator,
	}
}

// RunStateTransition applies the messages in a tipset to a state, and persists that new state.
// It errors if the tipset was not mined according to the EC rules, or if any of the messages
// in the tipset results in an error.
+func (c *Expected) RunStateTransition(ctx context.Context, ts *types.TipSet) (cid.Cid, cid.Cid, error) { + begin := time.Now() + defer func() { + logExpect.Infof("process ts height %d, blocks %d, took %.4f(s)", ts.Height(), ts.Len(), time.Since(begin).Seconds()) + }() + ctx, span := trace.StartSpan(ctx, "Expected.innerRunStateTransition") + defer span.End() + span.AddAttributes(trace.StringAttribute("blocks", ts.String())) + span.AddAttributes(trace.Int64Attribute("height", int64(ts.Height()))) + blockMessageInfo, err := c.messageStore.LoadTipSetMessage(ctx, ts) + if err != nil { + return cid.Undef, cid.Undef, err + } + // process tipset + var pts *types.TipSet + if ts.Height() == 0 { + // NB: This is here because the process that executes blocks requires that the + // block miner reference a valid miner in the state tree. Unless we create some + // magical genesis miner, this won't work properly, so we short circuit here + // This avoids the question of 'who gets paid the genesis block reward' + return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil + } else if ts.Height() > 0 { + parent := ts.Parents() + if pts, err = c.chainState.GetTipSet(ctx, parent); err != nil { + return cid.Undef, cid.Undef, err + } + } else { + return cid.Undef, cid.Undef, nil + } + + vmOption := vm.VmOption{ + CircSupplyCalculator: func(ctx context.Context, epoch abi.ChainEpoch, tree tree.Tree) (abi.TokenAmount, error) { + dertail, err := c.chainState.GetCirculatingSupplyDetailed(ctx, epoch, tree) + if err != nil { + return abi.TokenAmount{}, err + } + return dertail.FilCirculating, nil + }, + LookbackStateGetter: vmcontext.LookbackStateGetterForTipset(ctx, c.chainState, c.fork, ts), + NetworkVersion: c.fork.GetNetworkVersion(ctx, ts.At(0).Height), + Rnd: NewHeadRandomness(c.rnd, ts.Key()), + BaseFee: ts.At(0).ParentBaseFee, + Fork: c.fork, + Epoch: ts.At(0).Height, + GasPriceSchedule: c.gasPirceSchedule, + Bsstore: c.bstore, + PRoot: ts.At(0).ParentStateRoot, + 
SysCallsImpl: c.syscallsImpl, + Tracing: false, + } + + var parentEpoch abi.ChainEpoch + if pts.Defined() { + parentEpoch = pts.Height() + } + + root, receipts, err := c.processor.ApplyBlocks(ctx, blockMessageInfo, ts, ts.ParentState(), parentEpoch, ts.Height(), vmOption, nil) + if err != nil { + return cid.Undef, cid.Undef, errors.Wrap(err, "error validating tipset") + } + + receiptCid, err := c.messageStore.StoreReceipts(ctx, receipts) + if err != nil { + return cid.Undef, cid.Undef, fmt.Errorf("failed to save receipt: %v", err) + } + + return root, receiptCid, nil +} diff --git a/pkg/consensus/head_randomness.go b/pkg/consensus/head_randomness.go new file mode 100644 index 0000000000..29968d2b6b --- /dev/null +++ b/pkg/consensus/head_randomness.go @@ -0,0 +1,37 @@ +package consensus + +import ( + "context" + + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + acrypto "github.com/filecoin-project/go-state-types/crypto" +) + +// ChainRandomness define randomness method in filecoin +type ChainRandomness interface { + StateGetRandomnessFromBeacon(ctx context.Context, personalization acrypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) + StateGetRandomnessFromTickets(ctx context.Context, personalization acrypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) +} + +var _ vmcontext.HeadChainRandomness = (*HeadRandomness)(nil) + +// A Chain randomness source with a fixed Head tipset key. 
type HeadRandomness struct {
	// chain supplies the underlying randomness queries.
	chain ChainRandomness
	// head is the fixed tipset key passed to every query.
	head types.TipSetKey
}

// NewHeadRandomness wraps a ChainRandomness so all queries are anchored at head.
func NewHeadRandomness(chain ChainRandomness, head types.TipSetKey) *HeadRandomness {
	return &HeadRandomness{chain: chain, head: head}
}

// ChainGetRandomnessFromBeacon delegates to the wrapped source at the fixed head key.
func (h HeadRandomness) ChainGetRandomnessFromBeacon(ctx context.Context, personalization acrypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
	return h.chain.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, h.head)
}

// ChainGetRandomnessFromTickets delegates to the wrapped source at the fixed head key.
func (h HeadRandomness) ChainGetRandomnessFromTickets(ctx context.Context, personalization acrypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
	return h.chain.StateGetRandomnessFromTickets(ctx, personalization, randEpoch, entropy, h.head)
}

package consensus

import (
	"bytes"
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
	"github.com/pkg/errors"

	"github.com/filecoin-project/venus/pkg/constants"
	"github.com/filecoin-project/venus/pkg/crypto"
	"github.com/filecoin-project/venus/pkg/metrics"
	"github.com/filecoin-project/venus/pkg/state"
	"github.com/filecoin-project/venus/venus-shared/types"
)

// Metrics counters for each rejection reason, populated in init below.
// NOTE(review): several descriptions are the truncated string "Count of";
// they are runtime strings and so are left unchanged here.
var (
	invReceiverUndefCt      *metrics.Int64Counter
	invSenderUndefCt        *metrics.Int64Counter
	invValueAboveMaxCt      *metrics.Int64Counter
	invParamsNilCt          *metrics.Int64Counter // nolint
	invGasPriceNegativeCt   *metrics.Int64Counter
	invGasBelowMinimumCt    *metrics.Int64Counter
	invNegativeValueCt      *metrics.Int64Counter
	invGasAboveBlockLimitCt *metrics.Int64Counter
)

// The maximum allowed message value.
var msgMaxValue = types.FromFil(2e9)

// These gas cost values must match those in vm/gas.
// TODO: Look up gas costs from the same place the LegacyVM gets them, keyed by epoch. https://github.com/filecoin-project/venus/issues/3955
const (
	onChainMessageBase    = int64(0)
	onChainMessagePerByte = int64(2)
)

func init() {
	invReceiverUndefCt = metrics.NewInt64Counter("consensus/msg_undef_receiver", "Count of")
	invSenderUndefCt = metrics.NewInt64Counter("consensus/msg_undef_sender", "Count of")
	invValueAboveMaxCt = metrics.NewInt64Counter("consensus/msg_value_max", "Count of")
	invParamsNilCt = metrics.NewInt64Counter("consensus/msg_params_nil", "Count of")
	invGasPriceNegativeCt = metrics.NewInt64Counter("consensus/msg_gasprice_negative", "Count of")
	invGasBelowMinimumCt = metrics.NewInt64Counter("consensus/msg_gaslimit_min", "Count of")
	invNegativeValueCt = metrics.NewInt64Counter("consensus/msg_value_negative", "Count of invalid negative messages with negative value")
	invGasAboveBlockLimitCt = metrics.NewInt64Counter("consensus/msg_gaslimit_max", "Count of invalid messages with gas above block limit")
}

// DefaultMessageSyntaxValidator checks basic conditions independent of current state
type DefaultMessageSyntaxValidator struct{}

// NewMessageSyntaxValidator returns a stateless syntax validator.
func NewMessageSyntaxValidator() *DefaultMessageSyntaxValidator {
	return &DefaultMessageSyntaxValidator{}
}

// ValidateSignedMessageSyntax validates signed message syntax and state-independent invariants.
// Used for incoming messages over pubsub and secp messages included in blocks.
func (v *DefaultMessageSyntaxValidator) ValidateSignedMessageSyntax(ctx context.Context, smsg *types.SignedMessage) error {
	msg := &smsg.Message
	var msgLen int
	if smsg.Signature.Type == crypto.SigTypeBLS {
		// BLS signatures are aggregated at the block level, so only the bare
		// message counts toward the on-chain length.
		buf := new(bytes.Buffer)
		err := smsg.Message.MarshalCBOR(buf)
		if err != nil {
			return errors.Wrapf(err, "failed to calculate message size")
		}
		msgLen = buf.Len()
	} else {
		// Other signature types are stored with the message, so the whole
		// signed envelope counts.
		buf := new(bytes.Buffer)
		err := smsg.MarshalCBOR(buf)
		if err != nil {
			return errors.Wrapf(err, "failed to calculate message size")
		}
		msgLen = buf.Len()
	}
	return v.validateMessageSyntaxShared(ctx, msg, int64(msgLen))
}

// ValidateUnsignedMessageSyntax validates unsigned message syntax and state-independent invariants.
// Used for bls messages included in blocks.
func (v *DefaultMessageSyntaxValidator) ValidateUnsignedMessageSyntax(ctx context.Context, msg *types.Message) error {
	buf := new(bytes.Buffer)
	err := msg.MarshalCBOR(buf)
	if err != nil {
		return errors.Wrapf(err, "failed to calculate message size")
	}
	return v.validateMessageSyntaxShared(ctx, msg, int64(buf.Len()))
}

// validateMessageSyntaxShared applies the state-independent checks common to
// signed and unsigned messages: version, non-empty addresses, value bounds,
// and gas-limit bounds relative to the serialized length msgLen. Each
// rejection increments the corresponding metrics counter.
func (v *DefaultMessageSyntaxValidator) validateMessageSyntaxShared(ctx context.Context, msg *types.Message, msgLen int64) error {
	if msg.Version != types.MessageVersion {
		return fmt.Errorf("version %d, expected %d", msg.Version, types.MessageVersion)
	}

	if msg.To.Empty() {
		invReceiverUndefCt.Inc(ctx, 1)
		return fmt.Errorf("empty receiver: %s", msg)
	}
	if msg.From.Empty() {
		invSenderUndefCt.Inc(ctx, 1)
		return fmt.Errorf("empty sender: %s", msg)
	}
	// The spec calls for validating a non-negative call sequence num, but by
	// the time it's decoded into a uint64 the check is already passed

	if msg.Value.LessThan(big.Zero()) {
		invNegativeValueCt.Inc(ctx, 1)
		return fmt.Errorf("negative value %s: %s", msg.Value, msg)
	}
	if msg.Value.GreaterThan(msgMaxValue) {
		invValueAboveMaxCt.Inc(ctx, 1)
		return fmt.Errorf("value %s exceeds max %s: %s", msg.Value, msgMaxValue, msg)
	}
	// The spec calls for validating a non-negative method num, but by the
	// time it's decoded into a uint64 the check is already passed

	if msg.GasFeeCap.LessThan(types.ZeroFIL) {
		invGasPriceNegativeCt.Inc(ctx, 1)
		return fmt.Errorf("negative gas price %s: %s", msg.GasFeeCap, msg)
	}
	// The minimum gas limit ensures the sender has enough balance to pay for inclusion of the message in the Chain
	// *at all*. Without this, a message could hit out-of-gas but the sender pay nothing.
	// NOTE(anorth): this check has been moved to execution time, and the miner is penalized for including
	// such a message. We can probably remove this.
	minMsgGas := onChainMessageBase + onChainMessagePerByte*msgLen
	if msg.GasLimit < minMsgGas {
		invGasBelowMinimumCt.Inc(ctx, 1)
		return fmt.Errorf("gas limit %d below minimum %d to cover message size: %s", msg.GasLimit, minMsgGas, msg)
	}
	if msg.GasLimit > constants.BlockGasLimit {
		invGasAboveBlockLimitCt.Inc(ctx, 1)
		return fmt.Errorf("gas limit %d exceeds block limit %d: %s", msg.GasLimit, constants.BlockGasLimit, msg)
	}
	return nil
}

// MessageSignatureValidator validates message signatures
type MessageSignatureValidator struct {
	api signatureValidatorAPI
}

// signatureValidatorAPI allows the validator to access state needed for signature checking
type signatureValidatorAPI interface {
	GetHead() *types.TipSet
	GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
	AccountView(ts *types.TipSet) (state.AccountView, error)
}

// NewMessageSignatureValidator builds a signature validator backed by api.
func NewMessageSignatureValidator(api signatureValidatorAPI) *MessageSignatureValidator {
	return &MessageSignatureValidator{
		api: api,
	}
}

// Validate validates the signed message signature. Errors probably mean the
// validation failed, but possibly indicate a failure to retrieve state.
+func (v *MessageSignatureValidator) Validate(ctx context.Context, smsg *types.SignedMessage) error { + head := v.api.GetHead() + view, err := v.api.AccountView(head) + if err != nil { + return errors.Wrapf(err, "failed to load state at %v", head) + } + + sigValidator := state.NewSignatureValidator(view) + + // ensure message is properly signed + if err := sigValidator.ValidateMessageSignature(ctx, smsg); err != nil { + return errors.Wrap(err, fmt.Errorf("invalid signature by sender over message data").Error()) + } + return nil +} diff --git a/pkg/consensus/message_validator_test.go b/pkg/consensus/message_validator_test.go new file mode 100644 index 0000000000..b319b3b3c3 --- /dev/null +++ b/pkg/consensus/message_validator_test.go @@ -0,0 +1,175 @@ +// stm: #unit +package consensus_test + +import ( + "context" + "fmt" + "testing" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/testhelpers" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin" + + bls "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/venus/pkg/consensus" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ "github.com/filecoin-project/venus/pkg/crypto/secp" + "github.com/filecoin-project/venus/pkg/state" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +var ( + keys = testhelpers.MustGenerateKeyInfo(2, 42) + addresses = make([]address.Address, len(keys)) +) + +var methodID = abi.MethodNum(21231) + +func init() { + for i, k := range keys { + addr, _ := k.Address() + addresses[i] = addr + } +} + +func TestBLSSignatureValidationConfiguration(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + // create bls address + pubKey := 
bls.PrivateKeyPublicKey(bls.PrivateKeyGenerate()) + from, err := address.NewBLSAddress(pubKey[:]) + require.NoError(t, err) + + msg := testhelpers.NewMeteredMessage(from, addresses[1], 0, types.ZeroFIL, methodID, []byte("params"), types.NewGasFeeCap(1), types.NewGasPremium(1), 300) + mmsgCid := msg.Cid() + + signer := testhelpers.NewMockSigner(keys) + signer.AddrKeyInfo[msg.From] = keys[0] + sig, err := signer.SignBytes(ctx, mmsgCid.Bytes(), msg.From) + require.NoError(t, err) + unsigned := &types.SignedMessage{Message: *msg, Signature: *sig} + + actor := newActor(t, 1000, 0) + + t.Run("syntax validator does not ignore missing signature", func(t *testing.T) { + api := NewMockIngestionValidatorAPI() + api.ActorAddr = from + api.Actor = actor + + validator := consensus.NewMessageSignatureValidator(api) + + // stm: @CONSENSUS_VALIDATOR_VALIDATE_001 + err := validator.Validate(ctx, unsigned) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid signature") + }) +} + +func TestMessageSyntaxValidator(t *testing.T) { + tf.UnitTest(t) + signer := testhelpers.NewMockSigner(keys) + alice := addresses[0] + bob := addresses[1] + + validator := consensus.NewMessageSyntaxValidator() + ctx := context.Background() + + t.Run("Actor not found is not an error", func(t *testing.T) { + msg, err := testhelpers.NewSignedMessage(ctx, *newMessage(t, bob, alice, 0, 0, 1, 5000), signer) + require.NoError(t, err) + // stm: @CONSENSUS_VALIDATOR_VALIDATE_SIGNED_SYNTAX_001 + assert.NoError(t, validator.ValidateSignedMessageSyntax(ctx, msg)) + // stm: @CONSENSUS_VALIDATOR_VALIDATE_UNSIGNED_SYNTAX_001 + assert.NoError(t, validator.ValidateUnsignedMessageSyntax(ctx, &msg.Message)) + }) + + t.Run("self send passes", func(t *testing.T) { + msg, err := testhelpers.NewSignedMessage(ctx, *newMessage(t, alice, alice, 100, 5, 1, 5000), signer) + require.NoError(t, err) + assert.NoError(t, validator.ValidateSignedMessageSyntax(ctx, msg), "self") + }) + + t.Run("negative value fails", func(t 
*testing.T) { + msg, err := testhelpers.NewSignedMessage(ctx, *newMessage(t, alice, alice, 100, -5, 1, 5000), signer) + require.NoError(t, err) + assert.Errorf(t, validator.ValidateSignedMessageSyntax(ctx, msg), "negative") + }) + + t.Run("block gas limit fails", func(t *testing.T) { + msg, err := testhelpers.NewSignedMessage(ctx, *newMessage(t, alice, bob, 100, 5, 1, constants.BlockGasLimit+1), signer) + require.NoError(t, err) + assert.Errorf(t, validator.ValidateSignedMessageSyntax(ctx, msg), "block limit") + }) +} + +func newActor(t *testing.T, balanceAF int, nonce uint64) *types.Actor { + actor := types.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(int64(balanceAF)), cid.Undef) + actor.Nonce = nonce + return actor +} + +func newMessage(t *testing.T, from, to address.Address, nonce uint64, valueAF int, + gasPrice int64, gasLimit int64, +) *types.Message { + val, err := types.ParseFIL(fmt.Sprintf("%d", valueAF)) + require.Nil(t, err, "invalid attofil") + return testhelpers.NewMeteredMessage( + from, + to, + nonce, + abi.TokenAmount{Int: val.Int}, + methodID, + []byte("params"), + types.NewGasFeeCap(gasPrice), + types.NewGasPremium(1), + gasLimit, + ) +} + +// FakeIngestionValidatorAPI provides a latest state +type FakeIngestionValidatorAPI struct { + Block *types.BlockHeader + ActorAddr address.Address + Actor *types.Actor +} + +// NewMockIngestionValidatorAPI creates a new FakeIngestionValidatorAPI. 
+func NewMockIngestionValidatorAPI() *FakeIngestionValidatorAPI { + block := mockBlock() + block.Height = 10 + return &FakeIngestionValidatorAPI{ + Actor: &types.Actor{}, + Block: block, + } +} + +func (api *FakeIngestionValidatorAPI) GetHead() *types.TipSet { + ts, _ := types.NewTipSet([]*types.BlockHeader{api.Block}) + return ts +} + +func (api *FakeIngestionValidatorAPI) GetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + return types.NewTipSet([]*types.BlockHeader{api.Block}) +} + +func (api *FakeIngestionValidatorAPI) GetActorAt(ctx context.Context, key *types.TipSet, a address.Address) (*types.Actor, error) { + if a == api.ActorAddr { + return api.Actor, nil + } + return &types.Actor{ + Balance: abi.NewTokenAmount(0), + }, nil +} + +func (api *FakeIngestionValidatorAPI) AccountView(ts *types.TipSet) (state.AccountView, error) { + return &state.FakeStateView{}, nil +} diff --git a/pkg/consensus/processor.go b/pkg/consensus/processor.go new file mode 100644 index 0000000000..8fc6d64877 --- /dev/null +++ b/pkg/consensus/processor.go @@ -0,0 +1,262 @@ +package consensus + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/fvm" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/reward" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/cron" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" +) + +var processLog = logging.Logger("process block") + +// ApplicationResult contains the result of successfully applying one message. 
+// ExecutionError might be set and the message can still be applied successfully. +// See ApplyMessage() for details. +type ApplicationResult struct { + Receipt *types.MessageReceipt + ExecutionError error +} + +// ApplyMessageResult is the result of applying a single message. +type ApplyMessageResult struct { + ApplicationResult // Application-level result, if error is nil. + Failure error // Failure to apply the message + FailureIsPermanent bool // Whether failure is permanent, has no chance of succeeding later. +} + +// DefaultProcessor handles all block processing. +type DefaultProcessor struct { + actors vm.ActorCodeLoader + syscalls vm.SyscallsImpl + circulatingSupplyCalculator chain.ICirculatingSupplyCalcualtor +} + +var _ Processor = (*DefaultProcessor)(nil) + +// NewDefaultProcessor creates a default processor from the given state tree and vms. +func NewDefaultProcessor(syscalls vm.SyscallsImpl, circulatingSupplyCalculator chain.ICirculatingSupplyCalcualtor) *DefaultProcessor { + return NewConfiguredProcessor(*vm.GetDefaultActors(), syscalls, circulatingSupplyCalculator) +} + +// NewConfiguredProcessor creates a default processor with custom validation and rewards. 
+func NewConfiguredProcessor(actors vm.ActorCodeLoader, syscalls vm.SyscallsImpl, circulatingSupplyCalculator chain.ICirculatingSupplyCalcualtor) *DefaultProcessor { + return &DefaultProcessor{ + actors: actors, + syscalls: syscalls, + circulatingSupplyCalculator: circulatingSupplyCalculator, + } +} + +func (p *DefaultProcessor) ApplyBlocks(ctx context.Context, + blocks []types.BlockMessagesInfo, + ts *types.TipSet, + pstate cid.Cid, + parentEpoch, epoch abi.ChainEpoch, + vmOpts vm.VmOption, + cb vm.ExecCallBack, +) (cid.Cid, []types.MessageReceipt, error) { + toProcessTipset := time.Now() + var receipts []types.MessageReceipt + var err error + + makeVMWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (vm.Interface, error) { + vmOpt := vm.VmOption{ + CircSupplyCalculator: vmOpts.CircSupplyCalculator, + LookbackStateGetter: vmOpts.LookbackStateGetter, + NetworkVersion: vmOpts.Fork.GetNetworkVersion(ctx, e), + Rnd: vmOpts.Rnd, + BaseFee: vmOpts.BaseFee, + Fork: vmOpts.Fork, + ActorCodeLoader: vmOpts.ActorCodeLoader, + Epoch: e, + GasPriceSchedule: vmOpts.GasPriceSchedule, + PRoot: base, + Bsstore: vmOpts.Bsstore, + SysCallsImpl: vmOpts.SysCallsImpl, + Tracing: vmOpts.Tracing, + } + + return fvm.NewVM(ctx, vmOpt) + } + + for i := parentEpoch; i < epoch; i++ { + if i > parentEpoch { + vmCron, err := makeVMWithBaseStateAndEpoch(pstate, i) + if err != nil { + return cid.Undef, nil, fmt.Errorf("making cron vm: %w", err) + } + + // run cron for null rounds if any + cronMessage := makeCronTickMessage() + ret, err := vmCron.ApplyImplicitMessage(ctx, cronMessage) + if err != nil { + return cid.Undef, nil, err + } + pstate, err = vmCron.Flush(ctx) + if err != nil { + return cid.Undef, nil, fmt.Errorf("can not Flush vm State To db %vs", err) + } + if cb != nil { + if err := cb(cid.Undef, cronMessage, ret); err != nil { + return cid.Undef, nil, fmt.Errorf("callback failed on cron message: %w", err) + } + } + } + // handle State forks + // XXX: The State tree + 
pstate, err = vmOpts.Fork.HandleStateForks(ctx, pstate, i, ts) + if err != nil { + return cid.Undef, nil, fmt.Errorf("hand fork error: %v", err) + } + processLog.Debugf("after fork root: %s\n", pstate) + } + + vm, err := makeVMWithBaseStateAndEpoch(pstate, epoch) + if err != nil { + return cid.Undef, nil, fmt.Errorf("making cron vm: %w", err) + } + + processLog.Debugf("process tipset fork: %v\n", time.Since(toProcessTipset).Milliseconds()) + // create message tracker + // Note: the same message could have been included by more than one miner + seenMsgs := make(map[cid.Cid]struct{}) + + // process messages on each block + for index, blkInfo := range blocks { + toProcessBlock := time.Now() + if blkInfo.Block.Miner.Protocol() != address.ID { + panic("precond failure: block miner address must be an IDAddress") + } + + // initial miner penalty and gas rewards + // Note: certain msg execution failures can cause the miner To pay for the gas + minerPenaltyTotal := big.Zero() + minerGasRewardTotal := big.Zero() + + // Process BLS messages From the block + for _, m := range append(blkInfo.BlsMessages, blkInfo.SecpkMessages...) { + // do not recompute already seen messages + mcid := m.VMMessage().Cid() + if _, found := seenMsgs[mcid]; found { + continue + } + + // apply message + ret, err := vm.ApplyMessage(ctx, m) + if err != nil { + return cid.Undef, nil, fmt.Errorf("execute message error %s : %v", mcid, err) + } + // accumulate result + minerPenaltyTotal = big.Add(minerPenaltyTotal, ret.OutPuts.MinerPenalty) + minerGasRewardTotal = big.Add(minerGasRewardTotal, ret.OutPuts.MinerTip) + receipts = append(receipts, ret.Receipt) + if cb != nil { + if err := cb(mcid, m.VMMessage(), ret); err != nil { + return cid.Undef, nil, err + } + } + // flag msg as seen + seenMsgs[mcid] = struct{}{} + } + // Pay block reward. 
+ // Dragons: missing final protocol design on if/how To determine the nominal power + rewardMessage := makeBlockRewardMessage(blkInfo.Block.Miner, minerPenaltyTotal, minerGasRewardTotal, blkInfo.Block.ElectionProof.WinCount, epoch) + ret, err := vm.ApplyImplicitMessage(ctx, rewardMessage) + if err != nil { + return cid.Undef, nil, err + } + if cb != nil { + if err := cb(cid.Undef, rewardMessage, ret); err != nil { + return cid.Undef, nil, fmt.Errorf("callback failed on reward message: %w", err) + } + } + + if ret.Receipt.ExitCode != 0 { + return cid.Undef, nil, fmt.Errorf("reward application message failed exit: %d, reason: %v", ret.Receipt, ret.ActorErr) + } + + processLog.Debugf("process block %v time %v", index, time.Since(toProcessBlock).Milliseconds()) + } + + // cron tick + toProcessCron := time.Now() + cronMessage := makeCronTickMessage() + + ret, err := vm.ApplyImplicitMessage(ctx, cronMessage) + if err != nil { + return cid.Undef, nil, err + } + if cb != nil { + if err := cb(cid.Undef, cronMessage, ret); err != nil { + return cid.Undef, nil, fmt.Errorf("callback failed on cron message: %w", err) + } + } + + processLog.Debugf("process cron: %v", time.Since(toProcessCron).Milliseconds()) + + root, err := vm.Flush(ctx) + if err != nil { + return cid.Undef, nil, err + } + + // copy to db + return root, receipts, nil +} + +func makeCronTickMessage() *types.Message { + return &types.Message{ + To: cron.Address, + From: builtin.SystemActorAddr, + Value: types.NewInt(0), + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + GasLimit: constants.BlockGasLimit * 10000, // Make super sure this is never too little + Method: cron.Methods.EpochTick, + Params: nil, + } +} + +func makeBlockRewardMessage(blockMiner address.Address, + penalty abi.TokenAmount, + gasReward abi.TokenAmount, + winCount int64, + epoch abi.ChainEpoch, +) *types.Message { + params := &reward.AwardBlockRewardParams{ + Miner: blockMiner, + Penalty: penalty, + GasReward: gasReward, + 
WinCount: winCount, + } + buf := new(bytes.Buffer) + err := params.MarshalCBOR(buf) + if err != nil { + panic(fmt.Errorf("failed To encode built-in block reward. %s", err)) + } + return &types.Message{ + From: builtin.SystemActorAddr, + To: reward.Address, + Nonce: uint64(epoch), + Value: types.NewInt(0), + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + GasLimit: 1 << 30, + Method: reward.Methods.AwardBlockReward, + Params: buf.Bytes(), + } +} diff --git a/pkg/consensus/proof_verifier.go b/pkg/consensus/proof_verifier.go new file mode 100644 index 0000000000..69980f1cae --- /dev/null +++ b/pkg/consensus/proof_verifier.go @@ -0,0 +1,56 @@ +package consensus + +import ( + "context" + "fmt" + + "go.opencensus.io/trace" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + + "github.com/filecoin-project/venus/pkg/constants" + crypto2 "github.com/filecoin-project/venus/pkg/crypto" +) + +// Interface to PoSt verification, modify by force EPoStVerifier -> ProofVerifier +type ProofVerifier interface { + VerifySeal(proof7.SealVerifyInfo) (bool, error) + VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) + VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) + VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) (bool, error) + + GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) +} + +// SignFunc common interface for sign +type SignFunc func(context.Context, address.Address, []byte) (*crypto.Signature, error) + +// VerifyVRF checkout block vrf value +func VerifyVRF(ctx context.Context, worker 
address.Address, vrfBase, vrfproof []byte) error { + _, span := trace.StartSpan(ctx, "VerifyVRF") + defer span.End() + + sig := &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: vrfproof, + } + + if err := crypto2.Verify(sig, worker, vrfBase); err != nil { + return fmt.Errorf("vrf was invalid: %w", err) + } + + return nil +} + +// VerifyElectionPoStVRF verify election post value in block +func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error { + if constants.InsecurePoStValidation { + return nil + } + return VerifyVRF(ctx, worker, rand, evrf) +} diff --git a/pkg/consensus/proof_verifier_mock.go b/pkg/consensus/proof_verifier_mock.go new file mode 100644 index 0000000000..56c5bff261 --- /dev/null +++ b/pkg/consensus/proof_verifier_mock.go @@ -0,0 +1,39 @@ +package consensus + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" +) + +type genFakeVerifier struct{} + +var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil) + +func (m genFakeVerifier) VerifySeal(proof7.SealVerifyInfo) (bool, error) { + return true, nil +} + +func (m genFakeVerifier) VerifyAggregateSeals(proof7.AggregateSealVerifyProofAndInfos) (bool, error) { + panic("implement me") +} + +func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) 
([]uint64, error) { + panic("not supported") +} diff --git a/pkg/consensus/protocol.go b/pkg/consensus/protocol.go new file mode 100644 index 0000000000..678b4f4bfb --- /dev/null +++ b/pkg/consensus/protocol.go @@ -0,0 +1,40 @@ +package consensus + +// This interface is (mostly) stateless. All of its methods are +// pure functions that only depend on their inputs. + +// Note: state does creep in through the cbor and block stores used to keep state tree and +// actor storage data in the Expected implementation. However those stores +// are global to the filecoin node so accessing the correct state is simple. +// Furthermore these stores are providing content addressed values. +// The output of these interface functions does not change based on the store state +// except for errors in the case the stores do not have a mapping. +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +// Protocol is an interface defining a blockchain consensus protocol. The +// methods here were arrived at after significant work fitting consensus into +// the system and the implementation level. The method set is not necessarily +// the most theoretically obvious or pleasing and should not be considered +// finalized. +/* +type Protocol interface { + StateTransformer + // Call compute message result of specify message + Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*vm.Ret, error) + + // CallWithGas compute message result of specify message base on messages in mpool + CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet) (*vm.Ret, error) +} +*/ + +type StateTransformer interface { + // RunStateTransition returns the state root CID resulting from applying the input ts to the + // prior `stateID`. It returns an error if the transition is invalid. 
+ // RunStateTransition(ctx context.Context, ts *types.TipSet) (root cid.Cid, receipt cid.Cid, err error) + RunStateTransition(ctx context.Context, ts *types.TipSet) (root cid.Cid, receipt cid.Cid, err error) +} diff --git a/pkg/consensus/testing.go b/pkg/consensus/testing.go new file mode 100644 index 0000000000..45ba1eaa24 --- /dev/null +++ b/pkg/consensus/testing.go @@ -0,0 +1,138 @@ +package consensus + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + acrypto "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// RequireNewTipSet instantiates and returns a new tipset of the given blocks +// and requires that the setup validation succeed. +func RequireNewTipSet(require *require.Assertions, blks ...*types.BlockHeader) *types.TipSet { + ts, err := types.NewTipSet(blks) + require.NoError(err) + return ts +} + +// FakeConsensusStateViewer is a fake power state viewer. +type FakeConsensusStateViewer struct { + Views map[cid.Cid]*state.FakeStateView +} + +// PowerStateView returns the state view for a root. +func (f *FakeConsensusStateViewer) PowerStateView(root cid.Cid) state.PowerStateView { + return f.Views[root] +} + +// FaultStateView returns the state view for a root. +func (f *FakeConsensusStateViewer) FaultStateView(root cid.Cid) state.FaultStateView { + return f.Views[root] +} + +// FakeMessageValidator is a validator that doesn't validate to simplify message creation in tests. 
+type FakeMessageValidator struct{} + +func (mv *FakeMessageValidator) ValidateSignedMessageSyntax(ctx context.Context, smsg *types.SignedMessage) error { + return nil +} + +func (mv *FakeMessageValidator) ValidateUnsignedMessageSyntax(ctx context.Context, msg *types.Message) error { + return nil +} + +// FakeTicketMachine generates fake tickets and verifies all tickets +type FakeTicketMachine struct{} + +// MakeTicket returns a fake ticket +func (ftm *FakeTicketMachine) MakeTicket(ctx context.Context, base types.TipSetKey, epoch abi.ChainEpoch, miner address.Address, entry *types.BeaconEntry, newPeriod bool, worker address.Address, signer types.Signer) (types.Ticket, error) { + return *MakeFakeTicketForTest(), nil +} + +// IsValidTicket always returns true +func (ftm *FakeTicketMachine) IsValidTicket(ctx context.Context, base types.TipSetKey, entry *types.BeaconEntry, newPeriod bool, + epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, ticket types.Ticket, +) error { + return nil +} + +// FailingTicketValidator marks all tickets as invalid +type FailingTicketValidator struct{} + +// IsValidTicket always returns false +func (ftv *FailingTicketValidator) IsValidTicket(ctx context.Context, base types.TipSetKey, entry *types.BeaconEntry, newPeriod bool, + epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, ticket types.Ticket, +) error { + return fmt.Errorf("invalid ticket") +} + +// MakeFakeTicketForTest creates a fake ticket +func MakeFakeTicketForTest() *types.Ticket { + val := make([]byte, 65) + val[0] = 200 + return &types.Ticket{ + VRFProof: val[:], + } +} + +// MakeFakeVRFProofForTest creates a fake election proof +func MakeFakeVRFProofForTest() []byte { + proof := make([]byte, 65) + proof[0] = 42 + return proof +} + +// MakeFakePoStForTest creates a fake post +//func MakeFakePoStsForTest() []block.PoStProof { +// return []block.PoStProof{{ +// RegisteredProof: constants.DevRegisteredWinningPoStProof, +// 
ProofBytes: []byte{0xe}, +// }} +//} +// +//// NFakeSectorInfos returns numSectors fake sector infos +//func RequireFakeSectorInfos(t *testing.T, numSectors uint64) []abi.SectorInfo { +// var infos []abi.SectorInfo +// for i := uint64(0); i < numSectors; i++ { +// infos = append(infos, abi.SectorInfo{ +// RegisteredProof: constants.DevRegisteredSealProof, +// SectorNumber: abi.SectorNumber(i), +// SealedCID: types.CidFromString(t, fmt.Sprintf("fake-sector-%d", i)), +// }) +// } +// +// return infos +//} + +///// Sampler ///// + +// FakeChainRandomness generates deterministic values that are a function of a seed and the provided +// tag, epoch, and entropy (but *not* the Chain Head key). +type FakeChainRandomness struct { + Seed uint +} + +func (s *FakeChainRandomness) GetChainRandomness(ctx context.Context, tsk types.TipSetKey, pers acrypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, _ bool) ([]byte, error) { + return []byte(fmt.Sprintf("s=%d,e=%d,t=%d,p=%s", s.Seed, round, pers, string(entropy))), nil +} + +func (s *FakeChainRandomness) GetBeaconRandomness(ctx context.Context, tsk types.TipSetKey, personalization acrypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, _ bool) (abi.Randomness, error) { + return []byte(""), nil +} + +type FakeSampler struct { + Seed uint +} + +func (s *FakeSampler) SampleTicket(_ context.Context, _ types.TipSetKey, epoch abi.ChainEpoch, _ bool) (types.Ticket, error) { + return types.Ticket{ + VRFProof: []byte(fmt.Sprintf("s=%d,e=%d", s.Seed, epoch)), + }, nil +} diff --git a/pkg/consensus/testing_poster.go b/pkg/consensus/testing_poster.go new file mode 100644 index 0000000000..37e272d89c --- /dev/null +++ b/pkg/consensus/testing_poster.go @@ -0,0 +1,28 @@ +package consensus + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" +) + +// TestElectionPoster 
generates and verifies electoin PoSts +type TestElectionPoster struct{} + +//var _ EPoStVerifier = new(TestElectionPoster) +//var _ postgenerator.PoStGenerator = new(TestElectionPoster) +// + +func (ep *TestElectionPoster) GenerateWinningPoSt(ctx context.Context, + minerID abi.ActorID, + sectorInfo []builtin.SectorInfo, + randomness abi.PoStRandomness, +) ([]builtin.PoStProof, error) { + return []builtin.PoStProof{{ + PoStProof: constants.DevRegisteredWinningPoStProof, + ProofBytes: []byte{0xe}, + }}, nil +} diff --git a/pkg/consensus/ticket.go b/pkg/consensus/ticket.go new file mode 100644 index 0000000000..b45a8e18fc --- /dev/null +++ b/pkg/consensus/ticket.go @@ -0,0 +1,87 @@ +package consensus + +import ( + "bytes" + "context" + + "github.com/filecoin-project/venus/pkg/chain" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + acrypto "github.com/filecoin-project/go-state-types/crypto" + "github.com/minio/blake2b-simd" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type ChainSampler interface { + SampleTicket(ctx context.Context, head types.TipSetKey, epoch abi.ChainEpoch, lookback bool) (types.Ticket, error) +} + +type tipsetLoader interface { + GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +// TicketMachine uses a VRF and VDF to generate deterministic, unpredictable +// and time delayed tickets and validates these tickets. +type TicketMachine struct { + tipsetLoader tipsetLoader +} + +func NewTicketMachine(tipsetLoader tipsetLoader) *TicketMachine { + return &TicketMachine{tipsetLoader: tipsetLoader} +} + +// MakeTicket creates a new ticket from a Chain and target epoch by running a verifiable +// randomness function on the prior ticket. 
+func (tm TicketMachine) MakeTicket(ctx context.Context, base types.TipSetKey, epoch abi.ChainEpoch, miner address.Address, entry *types.BeaconEntry, newPeriod bool, worker address.Address, signer types.Signer) (types.Ticket, error) { + randomness, err := tm.ticketVRFRandomness(ctx, base, entry, newPeriod, miner, epoch) + if err != nil { + return types.Ticket{}, errors.Wrap(err, "failed to generate ticket randomness") + } + vrfProof, err := signer.SignBytes(ctx, randomness, worker) + if err != nil { + return types.Ticket{}, errors.Wrap(err, "failed to sign election post randomness") + } + return types.Ticket{ + VRFProof: vrfProof.Data, + }, nil +} + +// IsValidTicket verifies that the ticket's proof of randomness is valid with respect to its parent. +func (tm TicketMachine) IsValidTicket(ctx context.Context, base types.TipSetKey, entry *types.BeaconEntry, bSmokeHeight bool, + epoch abi.ChainEpoch, miner address.Address, workerSigner address.Address, ticket types.Ticket, +) error { + randomness, err := tm.ticketVRFRandomness(ctx, base, entry, bSmokeHeight, miner, epoch) + if err != nil { + return errors.Wrap(err, "failed to generate ticket randomness") + } + + return crypto.Verify(&crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: ticket.VRFProof, + }, workerSigner, randomness) +} + +func (tm TicketMachine) ticketVRFRandomness(ctx context.Context, base types.TipSetKey, entry *types.BeaconEntry, bSmokeHeight bool, miner address.Address, epoch abi.ChainEpoch) (abi.Randomness, error) { + entropyBuf := new(bytes.Buffer) + err := miner.MarshalCBOR(entropyBuf) + if err != nil { + return nil, errors.Wrapf(err, "failed to encode miner entropy") + } + + if bSmokeHeight { // todo + ts, err := tm.tipsetLoader.GetTipSet(ctx, base) + if err != nil { + return nil, err + } + _, err = entropyBuf.Write(ts.MinTicket().VRFProof) + if err != nil { + return nil, err + } + } + seed := blake2b.Sum256(entry.Data) + return chain.BlendEntropy(acrypto.DomainSeparationTag_TicketProduction, 
seed[:], epoch, entropyBuf.Bytes()) +} diff --git a/pkg/consensus/ticket_test.go b/pkg/consensus/ticket_test.go new file mode 100644 index 0000000000..3e35bad873 --- /dev/null +++ b/pkg/consensus/ticket_test.go @@ -0,0 +1,129 @@ +// stm: #unit +package consensus_test + +import ( + "context" + "testing" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" + + fbig "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/crypto" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/filecoin-project/venus/pkg/consensus" +) + +func TestGenValidTicketChain(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + head, _ := types.NewTipSet([]*types.BlockHeader{mockBlock()}) // Tipset key is unused by fake randomness + loader := newMockTipsetLoader(head) + + // Interleave 3 signers + kis := testhelpers.MustGenerateBLSKeyInfo(3, 0) + + miner, err := address.NewIDAddress(uint64(1)) + require.NoError(t, err) + signer := testhelpers.NewMockSigner(kis) + addr1 := requireAddress(t, &kis[0]) + addr2 := requireAddress(t, &kis[1]) + addr3 := requireAddress(t, &kis[2]) + + schedule := struct { + Addrs []address.Address + }{ + Addrs: []address.Address{addr1, addr1, addr1, addr2, addr3, addr3, addr1, addr2}, + } + + tm := consensus.NewTicketMachine(loader) + + // Grow the specified ticket Chain without error + for i := 0; i < len(schedule.Addrs); i++ { + // stm: @CONSENSUS_TICKET_MAKE_001, @CONSENSUS_TICKET_IS_VALID_001 + requireValidTicket(ctx, t, tm, head.Key(), abi.ChainEpoch(i), miner, schedule.Addrs[i], signer) + } +} + +func requireValidTicket(ctx context.Context, t *testing.T, tm *consensus.TicketMachine, head 
types.TipSetKey, epoch abi.ChainEpoch, + miner, worker address.Address, signer types.Signer, +) { + electionEntry := &types.BeaconEntry{} + newPeriod := false + ticket, err := tm.MakeTicket(ctx, head, epoch, miner, electionEntry, newPeriod, worker, signer) + require.NoError(t, err) + + err = tm.IsValidTicket(ctx, head, electionEntry, newPeriod, epoch, miner, worker, ticket) + require.NoError(t, err) +} + +func TestNextTicketFailsWithInvalidSigner(t *testing.T) { + ctx := context.Background() + head, _ := types.NewTipSet([]*types.BlockHeader{mockBlock()}) // Tipset key is unused by fake randomness + loader := newMockTipsetLoader(head) + miner, err := address.NewIDAddress(uint64(1)) + require.NoError(t, err) + + signer, _ := testhelpers.NewMockSignersAndKeyInfo(1) + badAddr := testhelpers.RequireIDAddress(t, 100) + tm := consensus.NewTicketMachine(loader) + electionEntry := &types.BeaconEntry{} + newPeriod := false + badTicket, err := tm.MakeTicket(ctx, head.Key(), abi.ChainEpoch(1), miner, electionEntry, newPeriod, badAddr, signer) + assert.Error(t, err) + assert.Nil(t, badTicket.VRFProof) +} + +func requireAddress(t *testing.T, ki *crypto.KeyInfo) address.Address { + addr, err := ki.Address() + require.NoError(t, err) + return addr +} + +func mockBlock() *types.BlockHeader { + mockCid, _ := constants.DefaultCidBuilder.Sum([]byte("mock")) + return &types.BlockHeader{ + Miner: testhelpers.NewForTestGetter()(), + Ticket: &types.Ticket{VRFProof: []byte{0x01, 0x02, 0x03}}, + ElectionProof: &types.ElectionProof{VRFProof: []byte{0x0a, 0x0b}}, + BeaconEntries: []types.BeaconEntry{ + { + Round: 5, + Data: []byte{0x0c}, + }, + }, + Height: 2, + ParentWeight: fbig.NewInt(1000), + ForkSignaling: 3, + Timestamp: 1, + ParentBaseFee: abi.NewTokenAmount(10), + BlockSig: &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: []byte{0x3}, + }, + ParentStateRoot: mockCid, + ParentMessageReceipts: mockCid, + Messages: mockCid, + } +} + +type mockTipsetLoader struct { + tsk 
*types.TipSet +} + +func newMockTipsetLoader(tsk *types.TipSet) *mockTipsetLoader { + return &mockTipsetLoader{tsk: tsk} +} + +func (m *mockTipsetLoader) GetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + return m.tsk, nil +} diff --git a/pkg/consensus/weight_test.go b/pkg/consensus/weight_test.go new file mode 100644 index 0000000000..b9dd615ea8 --- /dev/null +++ b/pkg/consensus/weight_test.go @@ -0,0 +1,163 @@ +// stm: #unit +package consensus_test + +import ( + "context" + "testing" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + fbig "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/consensus" + appstate "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/state/tree" +) + +func TestWeight(t *testing.T) { + cst := cbor.NewMemCborStore() + ctx := context.Background() + fakeTree := tree.NewFromString(t, "test-Weight-StateCid", cst) + fakeRoot, err := fakeTree.Flush(ctx) + require.NoError(t, err) + addrGetter := testhelpers.NewForTestGetter() + minerAddr := addrGetter() + // We only care about total power for the weight function + // Total is 16, so bitlen is 5, log2b is 4 + viewer := makeStateViewer(fakeRoot, abi.NewStoragePower(16)) + ticket := consensus.MakeFakeTicketForTest() + toWeigh := testhelpers.RequireNewTipSet(t, &types.BlockHeader{ + Miner: minerAddr, + ParentWeight: fbig.Zero(), + Ticket: ticket, + ElectionProof: &types.ElectionProof{ + WinCount: 1, + }, + ParentStateRoot: fakeRoot, + Messages: testhelpers.EmptyMessagesCID, + ParentMessageReceipts: testhelpers.EmptyReceiptsCID, + }) + + sel := consensus.NewChainSelector(cst, &viewer) + // sel := consensus.NewChainSelector(cst, &viewer, 
types.CidFromString(t, "genesisCid")) + + t.Run("basic happy path", func(t *testing.T) { + // 0 + (4*256 + (4*1*1*256/5*2)) + // 1024 + 102 = 1126 + // stm: @CONSENSUS_CHAIN_SELECTOR_WEIGHT_001 + w, err := sel.Weight(ctx, toWeigh) + // w, err := sel.Weight(ctx, toWeigh, fakeRoot) + assert.NoError(t, err) + assert.Equal(t, fbig.NewInt(1126), w) + }) + + t.Run("total power adjusts as expected", func(t *testing.T) { + asLowerX := makeStateViewer(fakeRoot, abi.NewStoragePower(15)) + asSameX := makeStateViewer(fakeRoot, abi.NewStoragePower(31)) + asHigherX := makeStateViewer(fakeRoot, abi.NewStoragePower(32)) + + // 0 + (3*256) + (3*1*1*256/2*5) = 844 (truncating not rounding division) + selLower := consensus.NewChainSelector(cst, &asLowerX) + fixWeight, err := selLower.Weight(ctx, toWeigh) + assert.NoError(t, err) + assert.Equal(t, fbig.NewInt(844), fixWeight) + + // Weight is same when total bytes = 16 as when total bytes = 31 + selSame := consensus.NewChainSelector(cst, &asSameX) + fixWeight, err = selSame.Weight(ctx, toWeigh) + assert.NoError(t, err) + assert.Equal(t, fbig.NewInt(1126), fixWeight) + + // 0 + (5*256) + (5*1*1*256/2*5) = 1408 + selHigher := consensus.NewChainSelector(cst, &asHigherX) + fixWeight, err = selHigher.Weight(ctx, toWeigh) + assert.NoError(t, err) + assert.Equal(t, fbig.NewInt(1408), fixWeight) + }) + + t.Run("non-zero parent weight", func(t *testing.T) { + parentWeight := fbig.NewInt(int64(49)) + toWeighWithParent := testhelpers.RequireNewTipSet(t, &types.BlockHeader{ + Miner: minerAddr, + ParentWeight: parentWeight, + Ticket: ticket, + ElectionProof: &types.ElectionProof{ + WinCount: 1, + }, + ParentStateRoot: fakeRoot, + Messages: testhelpers.EmptyMessagesCID, + ParentMessageReceipts: testhelpers.EmptyReceiptsCID, + }) + + // 49 + (4*256) + (4*1*1*256/2*5) = 1175 + w, err := sel.Weight(ctx, toWeighWithParent) + assert.NoError(t, err) + assert.Equal(t, fbig.NewInt(1175), w) + }) + + t.Run("many blocks", func(t *testing.T) { + 
toWeighThreeBlock := testhelpers.RequireNewTipSet(t, + &types.BlockHeader{ + Miner: minerAddr, + ParentWeight: fbig.Zero(), + Ticket: ticket, + Timestamp: 0, + ElectionProof: &types.ElectionProof{ + WinCount: 1, + }, + ParentStateRoot: fakeRoot, + Messages: testhelpers.EmptyMessagesCID, + ParentMessageReceipts: testhelpers.EmptyReceiptsCID, + }, + &types.BlockHeader{ + Miner: minerAddr, + ParentWeight: fbig.Zero(), + Ticket: ticket, + Timestamp: 1, + ElectionProof: &types.ElectionProof{ + WinCount: 1, + }, + ParentStateRoot: fakeRoot, + Messages: testhelpers.EmptyMessagesCID, + ParentMessageReceipts: testhelpers.EmptyReceiptsCID, + }, + &types.BlockHeader{ + Miner: minerAddr, + ParentWeight: fbig.Zero(), + Ticket: ticket, + Timestamp: 2, + ElectionProof: &types.ElectionProof{ + WinCount: 1, + }, + ParentStateRoot: fakeRoot, + Messages: testhelpers.EmptyMessagesCID, + ParentMessageReceipts: testhelpers.EmptyReceiptsCID, + }, + ) + // 0 + (4*256) + (4*3*1*256/2*5) = 1331 + w, err := sel.Weight(ctx, toWeighThreeBlock) + assert.NoError(t, err) + assert.Equal(t, fbig.NewInt(1331), w) + + // stm: @CONSENSUS_CHAIN_SELECTOR_WEIGHT_001 + toWeighTwoBlock := testhelpers.RequireNewTipSet(t, toWeighThreeBlock.At(0), toWeighThreeBlock.At(1)) + isHeavier, err := sel.IsHeavier(ctx, toWeighThreeBlock, toWeighTwoBlock) + assert.NoError(t, err) + assert.True(t, isHeavier) + }) +} + +func makeStateViewer(stateRoot cid.Cid, networkPower abi.StoragePower) consensus.FakeConsensusStateViewer { + return consensus.FakeConsensusStateViewer{ + Views: map[cid.Cid]*appstate.FakeStateView{ + stateRoot: appstate.NewFakeStateView(networkPower, networkPower, 0, 0), + }, + } +} diff --git a/pkg/consensusfault/check.go b/pkg/consensusfault/check.go new file mode 100644 index 0000000000..674cf5d105 --- /dev/null +++ b/pkg/consensusfault/check.go @@ -0,0 +1,186 @@ +package consensusfault + +import ( + "bytes" + "context" + "fmt" + + cbornode "github.com/ipfs/go-ipld-cbor" + "github.com/pkg/errors" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/actors/policy" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type FaultStateView interface { + ResolveToKeyAddr(ctx context.Context, address address.Address) (address.Address, error) + MinerInfo(ctx context.Context, maddr address.Address, nv network.Version) (*miner.MinerInfo, error) +} + +// Chain state required for checking consensus fault reports. +type chainReader interface { + GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +// Checks the validity of reported consensus faults. +type ConsensusFaultChecker struct { //nolint + chain chainReader + fork fork.IFork +} + +func NewFaultChecker(chain chainReader, fork fork.IFork) *ConsensusFaultChecker { + return &ConsensusFaultChecker{chain: chain, fork: fork} +} + +// Checks validity of the submitted consensus fault with the two block headers needed to prove the fault +// and an optional extra one to check common ancestry (as needed). +// Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). 
+func (s *ConsensusFaultChecker) VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, curEpoch abi.ChainEpoch, msg vm.VmMessage, gasIpld cbornode.IpldStore, view vm.SyscallsStateView, getter vmcontext.LookbackStateGetter) (*runtime7.ConsensusFault, error) { + if bytes.Equal(h1, h2) { + return nil, fmt.Errorf("no consensus fault: blocks identical") + } + + var b1, b2, b3 types.BlockHeader + innerErr := b1.UnmarshalCBOR(bytes.NewReader(h1)) + if innerErr != nil { + return nil, errors.Wrapf(innerErr, "failed to decode h1") + } + innerErr = b2.UnmarshalCBOR(bytes.NewReader(h2)) + if innerErr != nil { + return nil, errors.Wrapf(innerErr, "failed to decode h2") + } + + // workaround chain halt + forkUpgrade := s.fork.GetForkUpgrade() + if config.IsNearUpgrade(b1.Height, forkUpgrade.UpgradeOrangeHeight) { + return nil, fmt.Errorf("consensus reporting disabled around Upgrade Orange") + } + if config.IsNearUpgrade(b2.Height, forkUpgrade.UpgradeOrangeHeight) { + return nil, fmt.Errorf("consensus reporting disabled around Upgrade Orange") + } + + // BlockHeader syntax is not validated. This implements the strictest check possible, and is also the simplest check + // possible. + // This means that blocks that could never have been included in the chain (e.g. with an empty parent state) + // are still fault-able. + + if b1.Miner != b2.Miner { + return nil, fmt.Errorf("no consensus fault: miners differ") + } + if b1.Height > b2.Height { + return nil, fmt.Errorf("no consensus fault: first block is higher than second") + } + + // Check the basic fault conditions first, defer the (expensive) signature and chain history check until last. + var fault *runtime7.ConsensusFault + + // Double-fork mining fault: two blocks at the same epoch. + // It is not necessary to present a common ancestor of the blocks. 
+ if b1.Height == b2.Height { + fault = &runtime7.ConsensusFault{ + Target: b1.Miner, + Epoch: b2.Height, + Type: runtime7.ConsensusFaultDoubleForkMining, + } + } + // Time-offset mining fault: two blocks with the same parent but different epochs. + // The curEpoch check is redundant at time of writing, but included for robustness to future changes to this method. + // The blocks have a common ancestor by definition (the parent). + b1PKey := types.NewTipSetKey(b1.Parents...) + b2PKey := types.NewTipSetKey(b2.Parents...) + if b1PKey.Equals(b2PKey) && b1.Height != b2.Height { + fault = &runtime7.ConsensusFault{ + Target: b1.Miner, + Epoch: b2.Height, + Type: runtime7.ConsensusFaultTimeOffsetMining, + } + } + // Parent-grinding fault: one block’s parent is a tipset that provably should have included some block but does not. + // The provable case is that two blocks are mined and the later one does not include the + // earlier one as a parent even though it could have. + // B3 must prove that the higher block (B2) could have been included in B1's tipset. + if len(extra) > 0 { + innerErr = b3.UnmarshalCBOR(bytes.NewReader(extra)) + if innerErr != nil { + return nil, errors.Wrapf(innerErr, "failed to decode extra") + } + b3PKey := types.NewTipSetKey(b3.Parents...) + if b1.Height == b3.Height && b3PKey.Equals(b1PKey) && !b2PKey.Has(b1.Cid()) && b2PKey.Has(b3.Cid()) { + fault = &runtime7.ConsensusFault{ + Target: b1.Miner, + Epoch: b2.Height, + Type: runtime7.ConsensusFaultParentGrinding, + } + } + } + + if fault == nil { + return nil, fmt.Errorf("no consensus fault: blocks are ok") + } + + // Expensive validation: signatures. 
+ b1Version := s.fork.GetNetworkVersion(ctx, b1.Height) + err := verifyBlockSignature(ctx, b1, b1Version, curEpoch, msg.To, gasIpld, view, getter) + if err != nil { + return nil, err + } + b2Version := s.fork.GetNetworkVersion(ctx, b2.Height) + err = verifyBlockSignature(ctx, b2, b2Version, curEpoch, msg.To, gasIpld, view, getter) + if err != nil { + return nil, err + } + + return fault, nil +} + +// Checks whether a block header is correctly signed in the context of the parent state to which it refers. +func verifyBlockSignature(ctx context.Context, blk types.BlockHeader, nv network.Version, curEpoch abi.ChainEpoch, receiver address.Address, gasIpld cbornode.IpldStore, view FaultStateView, getter vmcontext.LookbackStateGetter) error { + if nv >= network.Version7 && blk.Height < curEpoch-policy.ChainFinality { + return fmt.Errorf("cannot get worker key (currEpoch %d, height %d)", curEpoch, blk.Height) + } + + lbstate, err := getter(ctx, blk.Height) + if err != nil { + return fmt.Errorf("fialed to look back state at height %d", blk.Height) + } + + act, err := lbstate.LoadActor(ctx, receiver) + if err != nil { + return errors.Wrapf(err, "failed to get miner actor") + } + + mas, err := miner.Load(adt.WrapStore(ctx, gasIpld), act) + if err != nil { + return fmt.Errorf("failed to load state for miner %s", receiver) + } + + info, err := mas.Info() + if err != nil { + return fmt.Errorf("failed to get miner info for miner %s", receiver) + } + + if blk.BlockSig == nil { + return errors.Errorf("no consensus fault: block %s has nil signature", blk.Cid()) + } + + sd, err := blk.SignatureData() + if err != nil { + return err + } + err = state.NewSignatureValidator(view).ValidateSignature(ctx, sd, info.Worker, *blk.BlockSig) + if err != nil { + return errors.Wrapf(err, "no consensus fault: block %s signature invalid", blk.Cid()) + } + return err +} diff --git a/pkg/constants/chain_parameters.go b/pkg/constants/chain_parameters.go new file mode 100644 index 0000000000..339fd8ab04 
--- /dev/null +++ b/pkg/constants/chain_parameters.go @@ -0,0 +1,43 @@ +package constants + +import ( + "math" + + "github.com/filecoin-project/go-state-types/abi" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" +) + +const ( + DefaultConfidence = uint64(5) + DefaultMessageWaitLookback = abi.ChainEpoch(100) // in most cases, this should be enough to avoid races. + LookbackNoLimit = abi.ChainEpoch(-1) +) + +const BlockMessageLimit = 10000 + +// Epochs +const TicketRandomnessLookback = abi.ChainEpoch(1) + +// expect blocks number in a tipset +var ExpectedLeadersPerEpoch = builtin0.ExpectedLeadersPerEpoch + +// BlockGasLimit is the maximum amount of gas that can be used to execute messages in a single block. +const ( + BlockGasLimit = 10_000_000_000 + BlockGasTarget = BlockGasLimit / 2 + BaseFeeMaxChangeDenom = 8 // 12.5% + InitialBaseFee = 100e6 + MinimumBaseFee = 100 + PackingEfficiencyNum = 4 + PackingEfficiencyDenom = 5 +) + +const MainNetBlockDelaySecs = uint64(builtin0.EpochDurationSeconds) + +// todo move this value to config +var InsecurePoStValidation = false + +const ( + NoTimeout = math.MaxInt64 + NoHeight = abi.ChainEpoch(-1) +) diff --git a/pkg/constants/clock.go b/pkg/constants/clock.go new file mode 100644 index 0000000000..11c072a4ac --- /dev/null +++ b/pkg/constants/clock.go @@ -0,0 +1,11 @@ +package constants + +import "github.com/raulk/clock" + +// Clock is the global clock for the system. In standard builds, +// we use a real-time clock, which maps to the `time` package. +// +// Tests that need control of time can replace this variable with +// clock.NewMock(). Always use real time for socket/stream deadlines. +// todo move this clock to clock package. 
constant package should refer other as little as possible +var Clock = clock.New() diff --git a/pkg/constants/common.go b/pkg/constants/common.go new file mode 100644 index 0000000000..d45a3bec1b --- /dev/null +++ b/pkg/constants/common.go @@ -0,0 +1,3 @@ +package constants + +const StringEmpty = "" diff --git a/internal/pkg/constants/hash.go b/pkg/constants/hash.go similarity index 100% rename from internal/pkg/constants/hash.go rename to pkg/constants/hash.go diff --git a/pkg/constants/registered_proofs.go b/pkg/constants/registered_proofs.go new file mode 100644 index 0000000000..bad6364610 --- /dev/null +++ b/pkg/constants/registered_proofs.go @@ -0,0 +1,11 @@ +package constants + +import "github.com/filecoin-project/go-state-types/abi" + +// just for test +var DevRegisteredSealProof = abi.RegisteredSealProof_StackedDrg2KiBV1 + +var ( + DevRegisteredWinningPoStProof = abi.RegisteredPoStProof_StackedDrgWinning2KiBV1 + DevRegisteredWindowPoStProof = abi.RegisteredPoStProof_StackedDrgWindow2KiBV1 +) diff --git a/internal/pkg/constants/sector_size.go b/pkg/constants/sector_size.go similarity index 81% rename from internal/pkg/constants/sector_size.go rename to pkg/constants/sector_size.go index f2e03d6e44..ecca0ec64e 100644 --- a/internal/pkg/constants/sector_size.go +++ b/pkg/constants/sector_size.go @@ -1,8 +1,8 @@ package constants -import "github.com/filecoin-project/specs-actors/actors/abi" +import "github.com/filecoin-project/go-state-types/abi" -const DevSealProofType = abi.RegisteredProof_StackedDRG2KiBSeal +const DevSealProofType = abi.RegisteredSealProof_StackedDrg2KiBV1 // DevSectorSize is a tiny sector useful only for testing. 
var DevSectorSize abi.SectorSize diff --git a/pkg/constants/shared_vals.go b/pkg/constants/shared_vals.go new file mode 100644 index 0000000000..e4ba095e6b --- /dev/null +++ b/pkg/constants/shared_vals.go @@ -0,0 +1,77 @@ +package constants + +import ( + "math/big" + + "github.com/filecoin-project/venus/venus-shared/actors/policy" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/go-state-types/network" +) + +// ///// +// Consensus / Network + +const AllowableClockDriftSecs = uint64(1) + +/* inline-gen template + +const TestNetworkVersion = network.Version{{.latestNetworkVersion}} + +/* inline-gen start */ + +const TestNetworkVersion = network.Version17 + +/* inline-gen end */ + +// constants for Weight calculation +// The ratio of weight contributed by short-term vs long-term factors in a given round +const ( + WRatioNum = int64(1) + WRatioDen = uint64(2) +) + +const ( + FilBase = uint64(2_000_000_000) + FilAllocStorageMining = uint64(1_100_000_000) +) + +const ( + FilecoinPrecision = uint64(1_000_000_000_000_000_000) + FilReserved = uint64(300_000_000) +) + +var ( + InitialRewardBalance *big.Int + InitialFilReserved *big.Int +) + +func SetAddressNetwork(n address.Network) { + address.CurrentNetwork = n +} + +func init() { + InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining)) + InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision))) + + InitialFilReserved = big.NewInt(int64(FilReserved)) + InitialFilReserved = InitialFilReserved.Mul(InitialFilReserved, big.NewInt(int64(FilecoinPrecision))) +} + +// assuming 4000 messages per round, this lets us not lose any messages across a +// 10 block reorg. 
+const BlsSignatureCacheSize = 40000 + +// Epochs +const ForkLengthThreshold = Finality + +// Size of signature verification cache +// 32k keeps the cache around 10MB in size, max +const ( + VerifSigCacheSize = 32000 + Finality = policy.ChainFinality +) + +// Epochs +const MessageConfidence = uint64(5) diff --git a/pkg/constants/version.go b/pkg/constants/version.go new file mode 100644 index 0000000000..e655e5c95b --- /dev/null +++ b/pkg/constants/version.go @@ -0,0 +1,19 @@ +package constants + +import ( + "os" +) + +// BuildVersion is the local build version, set by build system +const BuildVersion = "1.9.0" + +var CurrentCommit string + +// software version +func UserVersion() string { + if os.Getenv("VENUS_VERSION_IGNORE_COMMIT") == "1" { + return BuildVersion + } + + return BuildVersion + CurrentCommit +} diff --git a/pkg/crypto/bls/bls_bench_test.go b/pkg/crypto/bls/bls_bench_test.go new file mode 100644 index 0000000000..9bea06df35 --- /dev/null +++ b/pkg/crypto/bls/bls_bench_test.go @@ -0,0 +1,43 @@ +package bls + +import ( + "crypto/rand" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/filecoin-project/go-address" +) + +func BenchmarkBLSSign(b *testing.B) { + tf.BenchUnitTest(b) + signer := blsSigner{} + for i := 0; i < b.N; i++ { + b.StopTimer() + pk, _ := signer.GenPrivate() + randMsg := make([]byte, 32) + _, _ = rand.Read(randMsg) + b.StartTimer() + + _, _ = signer.Sign(pk, randMsg) + } +} + +func BenchmarkBLSVerify(b *testing.B) { + tf.BenchUnitTest(b) + signer := blsSigner{} + for i := 0; i < b.N; i++ { + b.StopTimer() + randMsg := make([]byte, 32) + _, _ = rand.Read(randMsg) + + priv, _ := signer.GenPrivate() + pk, _ := signer.ToPublic(priv) + addr, _ := address.NewBLSAddress(pk) + sig, _ := signer.Sign(priv, randMsg) + + b.StartTimer() + + _ = signer.Verify(sig, addr, randMsg) + } +} diff --git a/pkg/crypto/bls/init.go b/pkg/crypto/bls/init.go new file mode 100644 index 0000000000..ee320792f4 --- 
/dev/null +++ b/pkg/crypto/bls/init.go @@ -0,0 +1,120 @@ +package bls + +import ( + "crypto/rand" + "fmt" + "io" + + crypto2 "github.com/filecoin-project/venus/pkg/crypto" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + + ffi "github.com/filecoin-project/filecoin-ffi" +) + +const DST = string("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_") + +type ( + SecretKey = ffi.PrivateKey + PublicKey = ffi.PublicKey + Signature = ffi.Signature + AggregateSignature = ffi.Signature +) + +type blsSigner struct{} + +func (s blsSigner) VerifyAggregate(pubKeys, msgs [][]byte, signature []byte) bool { + digests := []ffi.Digest{} + for _, msg := range msgs { + digests = append(digests, ffi.Hash(msg)) + } + + keys := []ffi.PublicKey{} + for _, pubKey := range pubKeys { + var blsPubKey ffi.PublicKey + copy(blsPubKey[:], pubKey) + keys = append(keys, blsPubKey) + } + + var blsSig ffi.Signature + copy(blsSig[:], signature) + return ffi.Verify(&blsSig, digests, keys) +} + +func (blsSigner) GenPrivate() ([]byte, error) { + // Generate 32 bytes of randomness + var ikm [32]byte + _, err := rand.Read(ikm[:]) + if err != nil { + return nil, fmt.Errorf("bls signature error generating random data") + } + // Note private keys seem to be serialized little-endian! 
+ sk := ffi.PrivateKeyGenerateWithSeed(ikm) + return sk[:], nil +} + +func (blsSigner) GenPrivateFromSeed(seed io.Reader) ([]byte, error) { + var seedBytes ffi.PrivateKeyGenSeed + read, err := seed.Read(seedBytes[:]) + if err != nil { + return nil, err + } + if read != len(seedBytes) { + return nil, fmt.Errorf("read only %d bytes of %d required from seed", read, len(seedBytes)) + } + priKey := ffi.PrivateKeyGenerateWithSeed(seedBytes) + return priKey[:], nil +} + +func (blsSigner) ToPublic(priv []byte) ([]byte, error) { + if priv == nil || len(priv) != ffi.PrivateKeyBytes { + return nil, fmt.Errorf("bls signature invalid private key") + } + + sk := new(SecretKey) + copy(sk[:], priv[:ffi.PrivateKeyBytes]) + + pubkey := ffi.PrivateKeyPublicKey(*sk) + + return pubkey[:], nil +} + +func (blsSigner) Sign(p []byte, msg []byte) ([]byte, error) { + if p == nil || len(p) != ffi.PrivateKeyBytes { + return nil, fmt.Errorf("bls signature invalid private key") + } + + sk := new(SecretKey) + copy(sk[:], p[:ffi.PrivateKeyBytes]) + + sig := ffi.PrivateKeySign(*sk, msg) + + return sig[:], nil +} + +func (blsSigner) Verify(sig []byte, a address.Address, msg []byte) error { + payload := a.Payload() + if sig == nil || len(sig) != ffi.SignatureBytes || len(payload) != ffi.PublicKeyBytes { + return fmt.Errorf("bls signature failed to verify") + } + + pk := new(PublicKey) + copy(pk[:], payload[:ffi.PublicKeyBytes]) + + sigS := new(Signature) + copy(sigS[:], sig[:ffi.SignatureBytes]) + + msgs := [1]ffi.Message{msg} + pks := [1]PublicKey{*pk} + + if !ffi.HashVerify(sigS, msgs[:], pks[:]) { + return fmt.Errorf("bls signature failed to verify") + } + + return nil +} + +func init() { + crypto2.RegisterSignature(crypto.SigTypeBLS, blsSigner{}) +} diff --git a/pkg/crypto/crypto.go b/pkg/crypto/crypto.go new file mode 100644 index 0000000000..b896cc0010 --- /dev/null +++ b/pkg/crypto/crypto.go @@ -0,0 +1,38 @@ +package crypto + +import ( + "io" + + 
"github.com/filecoin-project/go-state-types/crypto" +) + +// +// Abstract SECP and BLS crypto operations. +// + +// NewSecpKeyFromSeed generates a new key from the given reader. +func NewSecpKeyFromSeed(seed io.Reader) (KeyInfo, error) { + k, err := sigs[crypto.SigTypeSecp256k1].GenPrivateFromSeed(seed) + if err != nil { + return KeyInfo{}, err + } + ki := &KeyInfo{ + SigType: SigTypeSecp256k1, + } + ki.SetPrivateKey(k) + copy(k, make([]byte, len(k))) // wipe with zero bytes + return *ki, nil +} + +func NewBLSKeyFromSeed(seed io.Reader) (KeyInfo, error) { + k, err := sigs[crypto.SigTypeBLS].GenPrivateFromSeed(seed) + if err != nil { + return KeyInfo{}, err + } + ki := &KeyInfo{ + SigType: SigTypeBLS, + } + ki.SetPrivateKey(k) + copy(k, make([]byte, len(k))) // wipe with zero bytes + return *ki, nil +} diff --git a/pkg/crypto/crypto_test.go b/pkg/crypto/crypto_test.go new file mode 100644 index 0000000000..76e6136b7c --- /dev/null +++ b/pkg/crypto/crypto_test.go @@ -0,0 +1,169 @@ +// stm: #unit +package crypto_test + +import ( + "bytes" + "fmt" + "testing" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/go-address" + + "crypto/rand" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/crypto" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ "github.com/filecoin-project/venus/pkg/crypto/secp" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestGenerateSecpKey(t *testing.T) { + tf.UnitTest(t) + + token := bytes.Repeat([]byte{42}, 512) + // stm: @CRYPTO_CRYPTO_NEW_BLS_KEY_001 + ki, err := crypto.NewSecpKeyFromSeed(bytes.NewReader(token)) + assert.NoError(t, err) + sk := ki.Key() + t.Logf("%x", sk) + assert.Equal(t, len(sk), 32) + + msg := make([]byte, 32) + for i := 0; i < len(msg); i++ { + msg[i] = byte(i) + } + + // stm: @CRYPTO_SIG_SIGN_001 + signature, err := crypto.Sign(msg, sk, crypto.SigTypeSecp256k1) + 
assert.NoError(t, err) + assert.Equal(t, len(signature.Data), 65) + pk, err := crypto.ToPublic(crypto.SigTypeSecp256k1, sk) + assert.NoError(t, err) + addr, err := address.NewSecp256k1Address(pk) + assert.NoError(t, err) + t.Logf("%x", pk) + // valid signature + // stm: @CRYPTO_SIG_VERIFY_001 + assert.True(t, crypto.Verify(signature, addr, msg) == nil) + + // invalid signature - different message (too short) + assert.False(t, crypto.Verify(signature, addr, msg[3:]) == nil) + + // invalid signature - different message + msg2 := make([]byte, 32) + copy(msg2, msg) + msg2[0] = 42 + assert.False(t, crypto.Verify(signature, addr, msg2) == nil) + + // invalid signature - different digest + digest2 := make([]byte, 65) + copy(digest2, signature.Data) + digest2[0] = 42 + assert.False(t, crypto.Verify(&crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: digest2}, addr, msg) == nil) + + // invalid signature - digest too short + assert.False(t, crypto.Verify(&crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: signature.Data[3:]}, addr, msg) == nil) + assert.False(t, crypto.Verify(&crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: signature.Data[:29]}, addr, msg) == nil) + + // invalid signature - digest too long + digest3 := make([]byte, 70) + copy(digest3, signature.Data) + assert.False(t, crypto.Verify(&crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: digest3}, addr, msg) == nil) +} + +func TestBLSSigning(t *testing.T) { + token := bytes.Repeat([]byte{42}, 512) + // stm: @CRYPTO_CRYPTO_NEW_BLS_KEY_001 + ki, err := crypto.NewBLSKeyFromSeed(bytes.NewReader(token)) + assert.NoError(t, err) + + data := []byte("data to be signed") + // stm: @CRYPTO_KEYINFO_PRIVATE_KEY_001 + privateKey := ki.Key() + // stm: @CRYPTO_KEYINFO_PUBLIC_KEY_001 + publicKey, err := ki.PublicKey() + assert.NoError(t, err) + t.Logf("%x", privateKey) + t.Logf("%x", publicKey) + + signature, err := crypto.Sign(data, privateKey[:], crypto.SigTypeBLS) + require.NoError(t, err) + + // stm: 
@CRYPTO_KEYINFO_ADDRESS_001 + addr, err := ki.Address() + require.NoError(t, err) + + err = crypto.Verify(signature, addr, data) + require.NoError(t, err) + + // invalid signature fails + err = crypto.Verify(&crypto.Signature{Type: crypto.SigTypeBLS, Data: signature.Data[3:]}, addr, data) + require.Error(t, err) + + // invalid digest fails + err = crypto.Verify(signature, addr, data[3:]) + require.Error(t, err) +} + +func aggregateSignatures(sigs []*crypto.Signature) (*crypto.Signature, error) { + sigsS := make([]ffi.Signature, len(sigs)) + for i := 0; i < len(sigs); i++ { + copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes]) + } + + aggSig := ffi.Aggregate(sigsS) + if aggSig == nil { + if len(sigs) > 0 { + return nil, fmt.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs)) + } + + zeroSig := ffi.CreateZeroSignature() + + // Note: for blst this condition should not happen - nil should not + // be returned + return &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: zeroSig[:], + }, nil + } + return &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: aggSig[:], + }, nil +} + +func TestVerifyAggregate(t *testing.T) { + var ( + size = 10 + messages = make([][]byte, size) + blsSigs = make([]*crypto.Signature, size) + kis = make([]*crypto.KeyInfo, size) + pubKeys = make([][]byte, size) + ) + + for idx := 0; idx < size; idx++ { + ki, err := crypto.NewBLSKeyFromSeed(rand.Reader) + assert.NoError(t, err) + + msg := make([]byte, 32) + _, err = rand.Read(msg) + require.NoError(t, err) + + blsSigs[idx], err = crypto.Sign(msg, ki.Key(), crypto.SigTypeBLS) + require.NoError(t, err) + + messages[idx] = msg + kis[idx] = &ki + pubKeys[idx], err = ki.PublicKey() + require.NoError(t, err) + } + + blsSig, err := aggregateSignatures(blsSigs) + require.NoError(t, err) + + // stm: @CRYPTO_SIG_VERIFY_AGGREGATE_001 + assert.NoError(t, crypto.VerifyAggregate(pubKeys, messages, blsSig.Data)) +} diff --git a/pkg/crypto/keyinfo.go b/pkg/crypto/keyinfo.go new file mode 
100644 index 0000000000..bb4b473f92 --- /dev/null +++ b/pkg/crypto/keyinfo.go @@ -0,0 +1,179 @@ +package crypto + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/awnumar/memguard" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + logging "github.com/ipfs/go-log/v2" + "github.com/pkg/errors" +) + +const ( + stBLS = "bls" + stSecp256k1 = "secp256k1" +) + +var log = logging.Logger("keyinfo") + +// KeyInfo is a key and its type used for signing. +type KeyInfo struct { + // Private key. + PrivateKey *memguard.Enclave `json:"privateKey"` + // Cryptographic system used to generate private key. + SigType SigType `json:"type"` +} + +type keyInfo struct { + // Private key. + PrivateKey []byte `json:"privateKey"` + // Cryptographic system used to generate private key. + SigType interface{} `json:"type"` +} + +func (ki *KeyInfo) UnmarshalJSON(data []byte) error { + k := keyInfo{} + err := json.Unmarshal(data, &k) + if err != nil { + return err + } + + switch k.SigType.(type) { + case string: + // compatible with lotus + st := k.SigType.(string) + if st == stBLS { + ki.SigType = crypto.SigTypeBLS + } else if st == stSecp256k1 { + ki.SigType = crypto.SigTypeSecp256k1 + } else { + return fmt.Errorf("unknown sig type value: %s", st) + } + case byte: + ki.SigType = crypto.SigType(k.SigType.(byte)) + case float64: + ki.SigType = crypto.SigType(k.SigType.(float64)) + case int: + ki.SigType = crypto.SigType(k.SigType.(int)) + case int64: + ki.SigType = crypto.SigType(k.SigType.(int64)) + default: + return fmt.Errorf("unknown sig type: %T", k.SigType) + } + ki.SetPrivateKey(k.PrivateKey) + + return nil +} + +func (ki KeyInfo) MarshalJSON() ([]byte, error) { + var err error + var b []byte + err = ki.UsePrivateKey(func(privateKey []byte) error { + k := keyInfo{} + k.PrivateKey = privateKey + if ki.SigType == crypto.SigTypeBLS { + k.SigType = stBLS + } else if ki.SigType == crypto.SigTypeSecp256k1 { + k.SigType = stSecp256k1 + 
} else { + return fmt.Errorf("unsupport keystore types %T", k.SigType) + } + b, err = json.Marshal(k) + return err + }) + + return b, err +} + +// Key returns the private key of KeyInfo +// This method makes the key escape from memguard's protection, so use caution +func (ki *KeyInfo) Key() []byte { + var pk []byte + err := ki.UsePrivateKey(func(privateKey []byte) error { + pk = make([]byte, len(privateKey)) + copy(pk, privateKey[:]) + return nil + }) + if err != nil { + log.Errorf("got private key failed %v", err) + return []byte{} + } + return pk +} + +// Type returns the type of curve used to generate the private key +func (ki *KeyInfo) Type() SigType { + return ki.SigType +} + +// Equals returns true if the KeyInfo is equal to other. +func (ki *KeyInfo) Equals(other *KeyInfo) bool { + if ki == nil && other == nil { + return true + } + if ki == nil || other == nil { + return false + } + if ki.SigType != other.SigType { + return false + } + + pk, err := ki.PrivateKey.Open() + if err != nil { + return false + } + defer pk.Destroy() + + otherPK, err := other.PrivateKey.Open() + if err != nil { + return false + } + defer otherPK.Destroy() + + return bytes.Equal(pk.Bytes(), otherPK.Bytes()) +} + +// Address returns the address for this keyinfo +func (ki *KeyInfo) Address() (address.Address, error) { + pubKey, err := ki.PublicKey() + if err != nil { + return address.Undef, err + } + if ki.SigType == SigTypeBLS { + return address.NewBLSAddress(pubKey) + } + if ki.SigType == SigTypeSecp256k1 { + return address.NewSecp256k1Address(pubKey) + } + return address.Undef, errors.Errorf("can not generate address for unknown crypto system: %d", ki.SigType) +} + +// Returns the public key part as uncompressed bytes. 
+func (ki *KeyInfo) PublicKey() ([]byte, error) { + var pubKey []byte + err := ki.UsePrivateKey(func(privateKey []byte) error { + var err error + pubKey, err = ToPublic(ki.SigType, privateKey) + return err + }) + + return pubKey, err +} + +func (ki *KeyInfo) UsePrivateKey(f func([]byte) error) error { + buf, err := ki.PrivateKey.Open() + if err != nil { + return err + } + defer buf.Destroy() + + return f(buf.Bytes()) +} + +func (ki *KeyInfo) SetPrivateKey(privateKey []byte) { + // will wipes privateKey with zeroes + ki.PrivateKey = memguard.NewEnclave(privateKey) +} diff --git a/pkg/crypto/keyinfo_test.go b/pkg/crypto/keyinfo_test.go new file mode 100644 index 0000000000..c4ab2b41f8 --- /dev/null +++ b/pkg/crypto/keyinfo_test.go @@ -0,0 +1,45 @@ +package crypto_test + +import ( + "encoding/hex" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/filecoin-project/venus/pkg/crypto" +) + +func TestKeyInfoAddress(t *testing.T) { + prv, _ := hex.DecodeString("2a2a2a2a2a2a2a2a5fbf0ed0f8364c01ff27540ecd6669ff4cc548cbe60ef5ab") + ki := &crypto.KeyInfo{ + SigType: crypto.SigTypeSecp256k1, + } + ki.SetPrivateKey(prv) + + sign, _ := crypto.Sign([]byte("hello filecoin"), prv, crypto.SigTypeSecp256k1) + t.Logf("%x", sign) +} + +func TestKeyInfoUnmarshalAndMarshal(t *testing.T) { + prv := []byte("marshal_and_unmarshal") + prvCp := make([]byte, len(prv)) + copy(prvCp, prv) + ki := &crypto.KeyInfo{ + SigType: crypto.SigTypeSecp256k1, + } + ki.SetPrivateKey(prv) + + assert.NotNil(t, ki.PrivateKey) + t.Log(string(prv)) + assert.Equal(t, prvCp, ki.Key()) + + kiByte, err := json.Marshal(ki) + assert.NoError(t, err) + + var newKI crypto.KeyInfo + assert.NoError(t, json.Unmarshal(kiByte, &newKI)) + + assert.Equal(t, ki.Key(), newKI.Key()) + assert.Equal(t, ki.SigType, newKI.SigType) +} diff --git a/pkg/crypto/secp/init.go b/pkg/crypto/secp/init.go new file mode 100644 index 0000000000..cd4e8e9332 --- /dev/null +++ b/pkg/crypto/secp/init.go @@ -0,0 
+1,68 @@ +package secp + +import ( + "fmt" + "io" + + crypto3 "github.com/filecoin-project/venus/pkg/crypto" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-crypto" + crypto2 "github.com/filecoin-project/go-state-types/crypto" + "github.com/minio/blake2b-simd" +) + +type secpSigner struct{} + +func (s secpSigner) VerifyAggregate(pubKeys, msgs [][]byte, signature []byte) bool { + panic("not support") +} + +func (secpSigner) GenPrivate() ([]byte, error) { + priv, err := crypto.GenerateKey() + if err != nil { + return nil, err + } + return priv, nil +} + +func (secpSigner) GenPrivateFromSeed(seed io.Reader) ([]byte, error) { + return crypto.GenerateKeyFromSeed(seed) +} + +func (secpSigner) ToPublic(pk []byte) ([]byte, error) { + return crypto.PublicKey(pk), nil +} + +func (secpSigner) Sign(pk []byte, msg []byte) ([]byte, error) { + b2sum := blake2b.Sum256(msg) + sig, err := crypto.Sign(pk, b2sum[:]) + if err != nil { + return nil, err + } + + return sig, nil +} + +func (secpSigner) Verify(sig []byte, a address.Address, msg []byte) error { + b2sum := blake2b.Sum256(msg) + pubk, err := crypto.EcRecover(b2sum[:], sig) + if err != nil { + return err + } + + maybeaddr, err := address.NewSecp256k1Address(pubk) + if err != nil { + return err + } + + if a != maybeaddr { + return fmt.Errorf("signature did not match") + } + + return nil +} + +func init() { + crypto3.RegisterSignature(crypto2.SigTypeSecp256k1, secpSigner{}) +} diff --git a/pkg/crypto/sigs.go b/pkg/crypto/sigs.go new file mode 100644 index 0000000000..660922e052 --- /dev/null +++ b/pkg/crypto/sigs.go @@ -0,0 +1,119 @@ +package crypto + +import ( + "fmt" + "io" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" +) + +// +// address-based signature validation +// + +type ( + Signature = crypto.Signature + SigType = crypto.SigType +) + +const ( + SigTypeSecp256k1 = crypto.SigTypeSecp256k1 + SigTypeBLS = crypto.SigTypeBLS +) + +const ( + 
BLSSignatureBytes = 96 +) + +// Sign takes in signature type, private key and message. Returns a signature for that message. +// Valid sigTypes are: "secp256k1" and "bls" +func Sign(msg []byte, privkey []byte, sigType SigType) (*crypto.Signature, error) { + sv, ok := sigs[sigType] + if !ok { + return nil, fmt.Errorf("cannot sign message with signature of unsupported type: %v", sigType) + } + + sb, err := sv.Sign(privkey, msg) + if err != nil { + return nil, err + } + return &crypto.Signature{ + Type: sigType, + Data: sb, + }, nil +} + +// Verify verifies signatures +func Verify(sig *crypto.Signature, addr address.Address, msg []byte) error { + if sig == nil { + return fmt.Errorf("signature is nil") + } + + if addr.Protocol() == address.ID { + return fmt.Errorf("must resolve ID addresses before using them to verify a signature") + } + + sv, ok := sigs[sig.Type] + if !ok { + return fmt.Errorf("cannot verify signature of unsupported type: %v", sig.Type) + } + + return sv.Verify(sig.Data, addr, msg) +} + +func VerifyAggregate(pubKeys, msgs [][]byte, signature []byte) error { + if signature == nil { + return fmt.Errorf("signature is nil") + } + + sv, ok := sigs[crypto.SigTypeBLS] + if !ok { + return fmt.Errorf("bls not register") + } + + if !sv.VerifyAggregate(pubKeys, msgs, signature) { + return fmt.Errorf("verify aggregate message fail") + } + return nil +} + +// Generate generates private key of given type +func Generate(sigType crypto.SigType) ([]byte, error) { + sv, ok := sigs[sigType] + if !ok { + return nil, fmt.Errorf("cannot generate private key of unsupported type: %v", sigType) + } + + return sv.GenPrivate() +} + +// ToPublic converts private key to public key +func ToPublic(sigType crypto.SigType, pk []byte) ([]byte, error) { + sv, ok := sigs[sigType] + if !ok { + return nil, fmt.Errorf("cannot generate public key of unsupported type: %v", sigType) + } + + return sv.ToPublic(pk) +} + +// SigShim is used for introducing signature functions +type SigShim 
interface { + GenPrivate() ([]byte, error) + GenPrivateFromSeed(seed io.Reader) ([]byte, error) + ToPublic(pk []byte) ([]byte, error) + Sign(pk []byte, msg []byte) ([]byte, error) + Verify(sig []byte, a address.Address, msg []byte) error + VerifyAggregate(pubKeys, msgs [][]byte, signature []byte) bool +} + +var sigs map[crypto.SigType]SigShim + +// RegisterSignature should be only used during init +func RegisterSignature(typ crypto.SigType, vs SigShim) { + if sigs == nil { + sigs = make(map[crypto.SigType]SigShim) + } + sigs[typ] = vs +} diff --git a/pkg/events/cache.go b/pkg/events/cache.go new file mode 100644 index 0000000000..9153e49deb --- /dev/null +++ b/pkg/events/cache.go @@ -0,0 +1,39 @@ +package events + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type uncachedAPI interface { + ChainNotify(context.Context) (<-chan []*types.HeadChange, error) + ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*types.HeadChange, error) + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) + + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg + + ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + ChainHead(context.Context) (*types.TipSet, error) +} + +type cache struct { + //*tipSetCache + *messageCache + uncachedAPI +} + +func newCache(api IEvent, gcConfidence abi.ChainEpoch) *cache { + return &cache{ + // newTSCache(api, gcConfidence), + newMessageCache(api), + api, + } +} diff --git a/pkg/events/eventAPI.go 
b/pkg/events/eventAPI.go new file mode 100644 index 0000000000..4492e92044 --- /dev/null +++ b/pkg/events/eventAPI.go @@ -0,0 +1,30 @@ +package events + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +// A TipSetObserver receives notifications of tipsets +type TipSetObserver interface { + Apply(ctx context.Context, from, to *types.TipSet) error + Revert(ctx context.Context, from, to *types.TipSet) error +} + +type IEvent interface { + ChainNotify(context.Context) (<-chan []*types.HeadChange, error) + ChainGetBlockMessages(context.Context, cid.Cid) (*types.BlockMessages, error) + ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainHead(context.Context) (*types.TipSet, error) + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*types.HeadChange, error) + + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg +} diff --git a/pkg/events/events.go b/pkg/events/events.go new file mode 100644 index 0000000000..b2c7523d98 --- /dev/null +++ b/pkg/events/events.go @@ -0,0 +1,44 @@ +package events + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var log = logging.Logger("events") + +// HeightHandler `curH`-`ts.Height` = `confidence` +type ( + HeightHandler func(ctx context.Context, ts 
*types.TipSet, curH abi.ChainEpoch) error + RevertHandler func(ctx context.Context, ts *types.TipSet) error +) + +type Events struct { + *observer + *heightEvents + *hcEvents +} + +func NewEventsWithConfidence(ctx context.Context, api IEvent, gcConfidence abi.ChainEpoch) (*Events, error) { + cache := newCache(api, gcConfidence) + + ob := newObserver(cache, gcConfidence) + if err := ob.start(ctx); err != nil { + return nil, err + } + + he := newHeightEvents(cache, ob, gcConfidence) + headChange := newHCEvents(cache, ob) + + return &Events{ob, he, headChange}, nil +} + +func NewEvents(ctx context.Context, api IEvent) (*Events, error) { + gcConfidence := 2 * constants.ForkLengthThreshold + return NewEventsWithConfidence(ctx, api, gcConfidence) +} diff --git a/pkg/events/events_called.go b/pkg/events/events_called.go new file mode 100644 index 0000000000..9b0173cb40 --- /dev/null +++ b/pkg/events/events_called.go @@ -0,0 +1,571 @@ +package events + +import ( + "context" + "fmt" + "math" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" +) + +const ( + NoTimeout = math.MaxInt64 + NoHeight = abi.ChainEpoch(-1) +) + +type triggerID = uint64 + +// msgH is the block height at which a message was present / event has happened +type msgH = abi.ChainEpoch + +// triggerH is the block height at which the listener will be notified about the +// +// message (msgH+confidence) +type triggerH = abi.ChainEpoch + +type eventData interface{} + +// EventHandler arguments: +// `prevTs` is the previous tipset, eg the "from" tipset for a state change. +// `ts` is the event tipset, eg the tipset in which the `msg` is included. +// `curH`-`ts.Height` = `confidence` +type EventHandler func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) + +// CheckFunc is used for atomicity guarantees. 
If the condition the callbacks +// wait for has already happened in tipset `ts` +// +// If `done` is true, timeout won't be triggered +// If `more` is false, no messages will be sent to EventHandler (RevertHandler +// +// may still be called) +type CheckFunc func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) + +// Keep track of information for an event handler +type handlerInfo struct { + confidence int + timeout abi.ChainEpoch + + disabled bool // TODO: GC after gcConfidence reached + + handle EventHandler + revert RevertHandler +} + +// When a change occurs, a queuedEvent is created and put into a queue +// until the required confidence is reached +type queuedEvent struct { + trigger triggerID + data eventData + + prevTipset, tipset *types.TipSet + + called bool +} + +// Manages chain head change events, which may be forward (new tipset added to +// chain) or backward (chain branch discarded in favour of heavier branch) +type hcEvents struct { + cs IEvent + + lk sync.Mutex + lastTS *types.TipSet + + ctr triggerID + + // TODO: get rid of trigger IDs and just use pointers as keys. + triggers map[triggerID]*handlerInfo + + // TODO: instead of scheduling events in the future, look at the chain in the past. We can sip the "confidence" queue entirely. 
+ // maps block heights to events + // [triggerH][msgH][event] + confQueue map[triggerH]map[msgH][]*queuedEvent + + // [msgH][triggerH] + revertQueue map[msgH][]triggerH + + // [timeoutH+confidence][triggerID]{calls} + timeouts map[abi.ChainEpoch]map[triggerID]int + + messageEvents + watcherEvents +} + +func newHCEvents(api IEvent, obs *observer) *hcEvents { + e := &hcEvents{ + cs: api, + confQueue: map[triggerH]map[msgH][]*queuedEvent{}, + revertQueue: map[msgH][]triggerH{}, + triggers: map[triggerID]*handlerInfo{}, + timeouts: map[abi.ChainEpoch]map[triggerID]int{}, + } + + e.messageEvents = newMessageEvents(e, api) + e.watcherEvents = newWatcherEvents(e, api) + + // We need to take the lock as the observer could immediately try calling us. + e.lk.Lock() + e.lastTS = obs.Observe((*hcEventsObserver)(e)) + e.lk.Unlock() + + return e +} + +type hcEventsObserver hcEvents + +func (e *hcEventsObserver) Apply(ctx context.Context, from, to *types.TipSet) error { + e.lk.Lock() + defer e.lk.Unlock() + + defer func() { e.lastTS = to }() + + // Check if the head change caused any state changes that we were + // waiting for + stateChanges := e.checkStateChanges(from, to) + + // Queue up calls until there have been enough blocks to reach + // confidence on the state changes + for tid, data := range stateChanges { + e.queueForConfidence(tid, data, from, to) + } + + // Check if the head change included any new message calls + newCalls := e.checkNewCalls(ctx, from, to) + + // Queue up calls until there have been enough blocks to reach + // confidence on the message calls + for tid, calls := range newCalls { + for _, data := range calls { + e.queueForConfidence(tid, data, nil, to) + } + } + + for at := from.Height() + 1; at <= to.Height(); at++ { + // Apply any queued events and timeouts that were targeted at the + // current chain height + e.applyWithConfidence(ctx, at) + e.applyTimeouts(ctx, at, to) + } + return nil +} + +func (e *hcEventsObserver) Revert(ctx context.Context, 
from, to *types.TipSet) error { + e.lk.Lock() + defer e.lk.Unlock() + + defer func() { e.lastTS = to }() + + reverts, ok := e.revertQueue[from.Height()] + if !ok { + return nil // nothing to do + } + + for _, triggerH := range reverts { + toRevert := e.confQueue[triggerH][from.Height()] + for _, event := range toRevert { + if !event.called { + continue // event wasn't apply()-ied yet + } + + trigger := e.triggers[event.trigger] + + if err := trigger.revert(ctx, from); err != nil { + log.Errorf("reverting chain trigger (@H %d, triggered @ %d) failed: %s", from.Height(), triggerH, err) + } + } + delete(e.confQueue[triggerH], from.Height()) + } + delete(e.revertQueue, from.Height()) + return nil +} + +// Queue up events until the chain has reached a height that reflects the +// desired confidence +func (e *hcEventsObserver) queueForConfidence(trigID uint64, data eventData, prevTS, ts *types.TipSet) { + trigger := e.triggers[trigID] + + appliedH := ts.Height() + + triggerH := appliedH + abi.ChainEpoch(trigger.confidence) + + byOrigH, ok := e.confQueue[triggerH] + if !ok { + byOrigH = map[abi.ChainEpoch][]*queuedEvent{} + e.confQueue[triggerH] = byOrigH + } + + byOrigH[appliedH] = append(byOrigH[appliedH], &queuedEvent{ + trigger: trigID, + data: data, + tipset: ts, + prevTipset: prevTS, + }) + + e.revertQueue[appliedH] = append(e.revertQueue[appliedH], triggerH) +} + +// Apply any events that were waiting for this chain height for confidence +func (e *hcEventsObserver) applyWithConfidence(ctx context.Context, height abi.ChainEpoch) { + byOrigH, ok := e.confQueue[height] + if !ok { + return // no triggers at this height + } + + for origH, events := range byOrigH { + for _, event := range events { + if event.called { + continue + } + + trigger := e.triggers[event.trigger] + if trigger.disabled { + continue + } + + more, err := trigger.handle(ctx, event.data, event.prevTipset, event.tipset, height) + if err != nil { + log.Errorf("chain trigger (@H %d, triggered @ %d) 
failed: %s", origH, height, err) + continue // don't revert failed calls + } + + event.called = true + + touts, ok := e.timeouts[trigger.timeout] + if ok { + touts[event.trigger]++ + } + + trigger.disabled = !more + } + } +} + +// Apply any timeouts that expire at this height +func (e *hcEventsObserver) applyTimeouts(ctx context.Context, at abi.ChainEpoch, ts *types.TipSet) { + triggers, ok := e.timeouts[at] + if !ok { + return // nothing to do + } + + for triggerID, calls := range triggers { + if calls > 0 { + continue // don't timeout if the method was called + } + trigger := e.triggers[triggerID] + if trigger.disabled { + continue + } + + // This should be cached. + timeoutTS, err := e.cs.ChainGetTipSetAfterHeight(ctx, at-abi.ChainEpoch(trigger.confidence), ts.Key()) + if err != nil { + log.Errorf("events: applyTimeouts didn't find tipset for event; wanted %d; current %d", at-abi.ChainEpoch(trigger.confidence), at) + } + + more, err := trigger.handle(ctx, nil, nil, timeoutTS, at) + if err != nil { + log.Errorf("chain trigger (call @H %d, called @ %d) failed: %s", timeoutTS.Height(), at, err) + continue // don't revert failed calls + } + + trigger.disabled = !more // allows messages after timeout + } +} + +// Listen for an event +// - CheckFunc: immediately checks if the event already occurred +// - EventHandler: called when the event has occurred, after confidence tipsets +// - RevertHandler: called if the chain head changes causing the event to revert +// - confidence: wait this many tipsets before calling EventHandler +// - timeout: at this chain height, timeout on waiting for this event +func (e *hcEvents) onHeadChanged(ctx context.Context, check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) { + e.lk.Lock() + defer e.lk.Unlock() + + // Check if the event has already occurred + done, more, err := check(ctx, e.lastTS) + if err != nil { + return 0, fmt.Errorf("called check error (h: %d): %w", 
e.lastTS.Height(), err) + } + if done { + timeout = NoTimeout + } + + id := e.ctr + e.ctr++ + + e.triggers[id] = &handlerInfo{ + confidence: confidence, + timeout: timeout + abi.ChainEpoch(confidence), + + disabled: !more, + + handle: hnd, + revert: rev, + } + + // If there's a timeout, set up a timeout check at that height + if timeout != NoTimeout { + if e.timeouts[timeout+abi.ChainEpoch(confidence)] == nil { + e.timeouts[timeout+abi.ChainEpoch(confidence)] = map[uint64]int{} + } + e.timeouts[timeout+abi.ChainEpoch(confidence)][id] = 0 + } + + return id, nil +} + +// headChangeAPI is used to allow the composed event APIs to call back to hcEvents +// to listen for changes +type headChangeAPI interface { + onHeadChanged(ctx context.Context, check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) +} + +// watcherEvents watches for a state change +type watcherEvents struct { + cs IEvent + hcAPI headChangeAPI + + lk sync.RWMutex + matchers map[triggerID]StateMatchFunc +} + +func newWatcherEvents(hcAPI headChangeAPI, cs IEvent) watcherEvents { + return watcherEvents{ + cs: cs, + hcAPI: hcAPI, + matchers: make(map[triggerID]StateMatchFunc), + } +} + +// Run each of the matchers against the previous and current state to see if +// there's a change +func (we *watcherEvents) checkStateChanges(oldState, newState *types.TipSet) map[triggerID]eventData { + we.lk.RLock() + defer we.lk.RUnlock() + + res := make(map[triggerID]eventData) + for tid, matchFn := range we.matchers { + ok, data, err := matchFn(oldState, newState) + if err != nil { + log.Errorf("event diff fn failed: %s", err) + continue + } + + if ok { + res[tid] = data + } + } + return res +} + +// StateChange represents a change in state +type StateChange interface{} + +// StateChangeHandler arguments: +// `oldTs` is the state "from" tipset +// `newTs` is the state "to" tipset +// `states` is the change in state +// `curH`-`ts.Height` = `confidence` +type 
StateChangeHandler func(oldTs, newTs *types.TipSet, states StateChange, curH abi.ChainEpoch) (more bool, err error) + +type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error) + +// StateChanged registers a callback which is triggered when a specified state +// change occurs or a timeout is reached. +// +// - `CheckFunc` callback is invoked immediately with a recent tipset, it +// returns two booleans - `done`, and `more`. +// +// - `done` should be true when some on-chain state change we are waiting +// for has happened. When `done` is set to true, timeout trigger is disabled. +// +// - `more` should be false when we don't want to receive new notifications +// through StateChangeHandler. Note that notifications may still be delivered to +// RevertHandler +// +// - `StateChangeHandler` is called when the specified state change was observed +// on-chain, and a confidence threshold was reached, or the specified `timeout` +// height was reached with no state change observed. When this callback is +// invoked on a timeout, `oldTs` and `states are set to nil. +// This callback returns a boolean specifying whether further notifications +// should be sent, like `more` return param from `CheckFunc` above. +// +// - `RevertHandler` is called after apply handler, when we drop the tipset +// containing the message. The tipset passed as the argument is the tipset +// that is being dropped. Note that the event dropped may be re-applied +// in a different tipset in small amount of time. +// +// - `StateMatchFunc` is called against each tipset state. 
If there is a match, +// the state change is queued up until the confidence interval has elapsed (and +// `StateChangeHandler` is called) +func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error { + hnd := func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) { + states, ok := data.(StateChange) + if data != nil && !ok { + panic("expected StateChange") + } + + return scHnd(prevTs, ts, states, height) + } + + id, err := we.hcAPI.onHeadChanged(context.TODO(), check, hnd, rev, confidence, timeout) + if err != nil { + return err + } + + we.lk.Lock() + defer we.lk.Unlock() + we.matchers[id] = mf + + return nil +} + +// messageEvents watches for message calls to actors +type messageEvents struct { + cs IEvent + hcAPI headChangeAPI + + lk sync.RWMutex + matchers map[triggerID]MsgMatchFunc +} + +func newMessageEvents(hcAPI headChangeAPI, cs IEvent) messageEvents { + return messageEvents{ + cs: cs, + hcAPI: hcAPI, + matchers: make(map[triggerID]MsgMatchFunc), + } +} + +// Check if there are any new actor calls +func (me *messageEvents) checkNewCalls(ctx context.Context, from, to *types.TipSet) map[triggerID][]eventData { + me.lk.RLock() + defer me.lk.RUnlock() + + // For each message in the tipset + res := make(map[triggerID][]eventData) + me.messagesForTs(from, func(msg *types.Message) { + // TODO: provide receipts + + // Run each trigger's matcher against the message + for tid, matchFn := range me.matchers { + matched, err := matchFn(msg) + if err != nil { + log.Errorf("event matcher failed: %s", err) + continue + } + + // If there was a match, include the message in the results for the + // trigger + if matched { + res[tid] = append(res[tid], msg) + } + } + }) + + return res +} + +// Get the messages in a tipset +func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Message)) { + seen := 
map[cid.Cid]struct{}{} + + for i, tsb := range ts.Cids() { + msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb) + if err != nil { + log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", + ts.Height(), tsb, ts.Blocks()[i].Messages, err) + continue + } + for i, c := range msgs.Cids { + // We iterate over the CIDs to avoid having to recompute them. + _, ok := seen[c] + if ok { + continue + } + seen[c] = struct{}{} + if i < len(msgs.BlsMessages) { + consume(msgs.BlsMessages[i]) + } else { + consume(&msgs.SecpkMessages[i-len(msgs.BlsMessages)].Message) + } + } + } +} + +// MsgHandler arguments: +// `ts` is the tipset, in which the `msg` is included. +// `curH`-`ts.Height` = `confidence` +type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) + +type MsgMatchFunc func(msg *types.Message) (matched bool, err error) + +// Called registers a callback which is triggered when a specified method is +// +// called on an actor, or a timeout is reached. +// +// - `CheckFunc` callback is invoked immediately with a recent tipset, it +// returns two booleans - `done`, and `more`. +// +// - `done` should be true when some on-chain action we are waiting for has +// happened. When `done` is set to true, timeout trigger is disabled. +// +// - `more` should be false when we don't want to receive new notifications +// through MsgHandler. Note that notifications may still be delivered to +// RevertHandler +// +// - `MsgHandler` is called when the specified event was observed on-chain, +// and a confidence threshold was reached, or the specified `timeout` height +// was reached with no events observed. When this callback is invoked on a +// timeout, `msg` is set to nil. This callback returns a boolean specifying +// whether further notifications should be sent, like `more` return param +// from `CheckFunc` above. 
+// +// - `RevertHandler` is called after apply handler, when we drop the tipset +// containing the message. The tipset passed as the argument is the tipset +// that is being dropped. Note that the message dropped may be re-applied +// in a different tipset in small amount of time. +// +// - `MsgMatchFunc` is called against each message. If there is a match, the +// message is queued up until the confidence interval has elapsed (and +// `MsgHandler` is called) +func (me *messageEvents) Called(ctx context.Context, check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error { + hnd := func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) { + msg, ok := data.(*types.Message) + if data != nil && !ok { + panic("expected msg") + } + + ml, err := me.cs.StateSearchMsg(ctx, ts.Key(), msg.Cid(), constants.LookbackNoLimit, true) + if err != nil { + return false, err + } + + if ml == nil { + return msgHnd(msg, nil, ts, height) + } + + return msgHnd(msg, &ml.Receipt, ts, height) + } + + id, err := me.hcAPI.onHeadChanged(ctx, check, hnd, rev, confidence, timeout) + if err != nil { + return fmt.Errorf("on head changed error: %w", err) + } + + me.lk.Lock() + defer me.lk.Unlock() + me.matchers[id] = mf + + return nil +} + +// Convenience function for checking and matching messages +func (me *messageEvents) CalledMsg(ctx context.Context, hnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, msg types.ChainMsg) error { + return me.Called(ctx, me.CheckMsg(msg, hnd), hnd, rev, confidence, timeout, me.MatchMsg(msg.VMMessage())) +} diff --git a/pkg/events/events_height.go b/pkg/events/events_height.go new file mode 100644 index 0000000000..49d76584e1 --- /dev/null +++ b/pkg/events/events_height.go @@ -0,0 +1,244 @@ +package events + +import ( + "context" + "fmt" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/venus/venus-shared/types" + "go.opencensus.io/trace" +) + +type heightHandler struct { + ts *types.TipSet + height abi.ChainEpoch + called bool + + handle HeightHandler + revert RevertHandler +} + +type heightEvents struct { + api IEvent + gcConfidence abi.ChainEpoch + + lk sync.Mutex + head *types.TipSet + tsHeights, triggerHeights map[abi.ChainEpoch][]*heightHandler + lastGc abi.ChainEpoch //nolint:structcheck +} + +func newHeightEvents(api IEvent, obs *observer, gcConfidence abi.ChainEpoch) *heightEvents { + he := &heightEvents{ + api: api, + gcConfidence: gcConfidence, + tsHeights: map[abi.ChainEpoch][]*heightHandler{}, + triggerHeights: map[abi.ChainEpoch][]*heightHandler{}, + } + he.lk.Lock() + he.head = obs.Observe((*heightEventsObserver)(he)) + he.lk.Unlock() + return he +} + +// ChainAt invokes the specified `HeightHandler` when the chain reaches the +// specified height+confidence threshold. If the chain is rolled-back under the +// specified height, `RevertHandler` will be called. +// +// ts passed to handlers is the tipset at the specified epoch, or above if lower tipsets were null. +// +// The context governs cancellations of this call, it won't cancel the event handler. +func (e *heightEvents) ChainAt(ctx context.Context, hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error { + if abi.ChainEpoch(confidence) > e.gcConfidence { + // Need this to be able to GC effectively. + return fmt.Errorf("confidence cannot be greater than gcConfidence: %d > %d", confidence, e.gcConfidence) + } + handler := &heightHandler{ + height: h, + handle: hnd, + revert: rev, + } + triggerAt := h + abi.ChainEpoch(confidence) + + // Here we try to jump onto a moving train. To avoid stopping the train, we release the lock + // while calling the API and/or the trigger functions. 
Unfortunately, it's entirely possible + // (although unlikely) to go back and forth across the trigger heights, so we need to keep + // going back and forth here till we're synced. + // + // TODO: Consider using a worker goroutine so we can just drop the handler in a channel? The + // downside is that we'd either need a tipset cache, or we'd need to potentially fetch + // tipsets in-line inside the event loop. + e.lk.Lock() + for { + head := e.head + if head.Height() >= h { + // Head is past the handler height. We at least need to stash the tipset to + // avoid doing this from the main event loop. + e.lk.Unlock() + + var ts *types.TipSet + if head.Height() == h { + ts = head + } else { + var err error + ts, err = e.api.ChainGetTipSetAfterHeight(ctx, handler.height, head.Key()) + if err != nil { + return fmt.Errorf("events.ChainAt: failed to get tipset: %s", err) + } + } + + // If we've applied the handler on the wrong tipset, revert. + if handler.called && !ts.Equals(handler.ts) { + ctx, span := trace.StartSpan(ctx, "events.HeightRevert") + span.AddAttributes(trace.BoolAttribute("immediate", true)) + err := handler.revert(ctx, handler.ts) + span.End() + if err != nil { + return err + } + handler.called = false + } + + // Save the tipset. + handler.ts = ts + + // If we've reached confidence and haven't called, call. + if !handler.called && head.Height() >= triggerAt { + ctx, span := trace.StartSpan(ctx, "events.HeightApply") + span.AddAttributes(trace.BoolAttribute("immediate", true)) + err := handler.handle(ctx, handler.ts, head.Height()) + span.End() + if err != nil { + return err + } + + handler.called = true + + // If we've reached gcConfidence, return without saving anything. + if head.Height() >= h+e.gcConfidence { + return nil + } + } + + e.lk.Lock() + } else if handler.called { + // We're not passed the head (anymore) but have applied the handler. Revert, try again. 
+ e.lk.Unlock() + ctx, span := trace.StartSpan(ctx, "events.HeightRevert") + span.AddAttributes(trace.BoolAttribute("immediate", true)) + err := handler.revert(ctx, handler.ts) + span.End() + if err != nil { + return err + } + handler.called = false + e.lk.Lock() + } // otherwise, we changed heads but the change didn't matter. + + // If we managed to get through this without the head changing, we're finally done. + if head.Equals(e.head) { + e.triggerHeights[triggerAt] = append(e.triggerHeights[triggerAt], handler) + e.tsHeights[h] = append(e.tsHeights[h], handler) + e.lk.Unlock() + return nil + } + } +} + +// Updates the head and garbage collects if we're 2x over our garbage collection confidence period. +func (e *heightEventsObserver) updateHead(h *types.TipSet) { + e.lk.Lock() + defer e.lk.Unlock() + e.head = h + + if e.head.Height() < e.lastGc+e.gcConfidence*2 { + return + } + e.lastGc = h.Height() + + targetGcHeight := e.head.Height() - e.gcConfidence + for h := range e.tsHeights { + if h >= targetGcHeight { + continue + } + delete(e.tsHeights, h) + } + for h := range e.triggerHeights { + if h >= targetGcHeight { + continue + } + delete(e.triggerHeights, h) + } +} + +type heightEventsObserver heightEvents + +func (e *heightEventsObserver) Revert(ctx context.Context, from, to *types.TipSet) error { + // Update the head first so we don't accidental skip reverting a concurrent call to ChainAt. + e.updateHead(to) + + // Call revert on all hights between the two tipsets, handling empty tipsets. + for h := from.Height(); h > to.Height(); h-- { + e.lk.Lock() + triggers := e.tsHeights[h] + e.lk.Unlock() + + // 1. Triggers are only invoked from the global event loop, we don't need to hold the lock while calling. + // 2. We only ever append to or replace the trigger slice, so it's safe to iterate over it without the lock. 
+ for _, handler := range triggers { + handler.ts = nil // invalidate + if !handler.called { + // We haven't triggered this yet, or there has been a concurrent call to ChainAt. + continue + } + ctx, span := trace.StartSpan(ctx, "events.HeightRevert") + err := handler.revert(ctx, from) + span.End() + + if err != nil { + log.Errorf("reverting chain trigger (@H %d): %s", h, err) + } + handler.called = false + } + } + return nil +} + +func (e *heightEventsObserver) Apply(ctx context.Context, from, to *types.TipSet) error { + // Update the head first so we don't accidental skip applying a concurrent call to ChainAt. + e.updateHead(to) + + for h := from.Height() + 1; h <= to.Height(); h++ { + e.lk.Lock() + triggers := e.triggerHeights[h] + tipsets := e.tsHeights[h] + e.lk.Unlock() + + // Stash the tipset for future triggers. + for _, handler := range tipsets { + handler.ts = to + } + + // Trigger the ready triggers. + for _, handler := range triggers { + if handler.called { + // We may have reverted past the trigger point, but not past the call point. + // Or there has been a concurrent call to ChainAt. 
+ continue + } + + ctx, span := trace.StartSpan(ctx, "events.HeightApply") + span.AddAttributes(trace.BoolAttribute("immediate", false)) + err := handler.handle(ctx, handler.ts, h) + span.End() + + if err != nil { + log.Errorf("chain trigger (@H %d, called @ %d) failed: %+v", h, to.Height(), err) + } + + handler.called = true + } + } + return nil +} diff --git a/pkg/events/events_test.go b/pkg/events/events_test.go new file mode 100644 index 0000000000..ad9ad66a8e --- /dev/null +++ b/pkg/events/events_test.go @@ -0,0 +1,1634 @@ +package events + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "gotest.tools/assert" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/constants" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var dummyCid cid.Cid + +func init() { + dummyCid, _ = cid.Parse("bafkqaaa") + ObserveDuration = time.Millisecond * 100 +} + +type fakeMsg struct { + bmsgs []*types.Message + smsgs []*types.SignedMessage +} + +var _ IEvent = &fakeCS{} + +type fakeCS struct { + t *testing.T + h abi.ChainEpoch + tsc *tipSetCache + + msgs map[cid.Cid]fakeMsg + blkMsgs map[cid.Cid]cid.Cid + + tipsetLk sync.Mutex + tipsets map[types.TipSetKey]*types.TipSet + + mu sync.Mutex + waitSub chan struct{} + subCh chan<- []*types.HeadChange + callNumber map[string]int + + cancel context.CancelFunc +} + +func newFakeCS(t *testing.T) *fakeCS { + ctx, cancel := context.WithCancel(context.TODO()) + fcs := &fakeCS{ + t: t, + h: 1, + msgs: make(map[cid.Cid]fakeMsg), + blkMsgs: make(map[cid.Cid]cid.Cid), + tipsets: make(map[types.TipSetKey]*types.TipSet), + tsc: newTSCache(&fakeTSCacheAPI{}, 
2*constants.ForkLengthThreshold), + callNumber: map[string]int{}, + waitSub: make(chan struct{}, 1), + cancel: cancel, + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) + require.NoError(t, fcs.loopNotify(ctx)) + return fcs +} + +func (fcs *fakeCS) stop() { + if fcs.cancel == nil { + return + } + fcs.cancel() +} + +// our observe use a timer and call 'chainhead' to observe chain head change +// to 'PASS' these tests, we must call 'ChainNotify' to start 'waitSub' +func (fcs *fakeCS) loopNotify(ctx context.Context) error { + head, err := fcs.ChainNotify(ctx) + if err != nil { + return err + } + go func() { + for { + select { + case <-head: + case <-ctx.Done(): + return + } + } + }() + + return nil +} + +func (fcs *fakeCS) ChainHead(ctx context.Context) (*types.TipSet, error) { + fcs.mu.Lock() + fcs.callNumber["ChainHead"] = fcs.callNumber["ChainHead"] + 1 + fcs.mu.Unlock() + + return fcs.tsc.ChainHead(ctx) +} + +func (fcs *fakeCS) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*types.HeadChange, error) { + fcs.mu.Lock() + fcs.callNumber["ChainGetPath"] = fcs.callNumber["ChainGetPath"] + 1 + fcs.mu.Unlock() + + fromTS, err := fcs.ChainGetTipSet(ctx, from) + if err != nil { + return nil, err + } + + toTS, err := fcs.ChainGetTipSet(ctx, to) + if err != nil { + return nil, err + } + + // copied from the chainstore + revert, apply, err := chain.ReorgOps(func(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + return fcs.ChainGetTipSet(ctx, tsk) + }, fromTS, toTS) + if err != nil { + return nil, err + } + + path := make([]*types.HeadChange, len(revert)+len(apply)) + for i, r := range revert { + path[i] = &types.HeadChange{Type: types.HCRevert, Val: r} + } + for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 { + path[j+len(revert)] = &types.HeadChange{Type: types.HCApply, Val: apply[i]} + } + return path, nil +} + +func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + 
fcs.mu.Lock() + fcs.callNumber["ChainGetTipSet"] = fcs.callNumber["ChainGetTipSet"] + 1 + fcs.mu.Unlock() + + fcs.tipsetLk.Lock() + defer fcs.tipsetLk.Unlock() + return fcs.tipsets[key], nil +} + +func (fcs *fakeCS) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) { + fcs.mu.Lock() + fcs.callNumber["StateSearchMsg"] = fcs.callNumber["StateSearchMsg"] + 1 + fcs.mu.Unlock() + + return nil, nil +} + +func (fcs *fakeCS) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + fcs.mu.Lock() + fcs.callNumber["StateGetActor"] = fcs.callNumber["StateGetActor"] + 1 + fcs.mu.Unlock() + + panic("Not Implemented") +} + +func (fcs *fakeCS) ChainGetTipSetByHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { + fcs.mu.Lock() + fcs.callNumber["ChainGetTipSetByHeight"] = fcs.callNumber["ChainGetTipSetByHeight"] + 1 + fcs.mu.Unlock() + + return fcs.tsc.ChainGetTipSetByHeight(ctx, height, tsk) +} + +func (fcs *fakeCS) ChainGetTipSetAfterHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { + fcs.mu.Lock() + fcs.callNumber["ChainGetTipSetAfterHeight"] = fcs.callNumber["ChainGetTipSetAfterHeight"] + 1 + fcs.mu.Unlock() + + return fcs.tsc.ChainGetTipSetAfterHeight(ctx, height, tsk) +} + +func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msgcid cid.Cid) *types.TipSet { + a, _ := address.NewFromString("t00") + b, _ := address.NewFromString("t02") + ts, err := types.NewTipSet([]*types.BlockHeader{ + { + Height: h, + Miner: a, + + Parents: parents, + + Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, + + ParentStateRoot: dummyCid, + Messages: msgcid, + ParentMessageReceipts: dummyCid, + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + }, + { + Height: h, + Miner: b, + + 
Parents: parents, + + Ticket: &types.Ticket{VRFProof: []byte{byte((h + 1) % 2)}}, + + ParentStateRoot: dummyCid, + Messages: msgcid, + ParentMessageReceipts: dummyCid, + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + }, + }) + + fcs.tipsetLk.Lock() + if fcs.tipsets == nil { + fcs.tipsets = map[types.TipSetKey]*types.TipSet{} + } + fcs.tipsets[ts.Key()] = ts + fcs.tipsetLk.Unlock() + require.NoError(t, err) + + return ts +} + +func (fcs *fakeCS) ChainNotify(ctx context.Context) (<-chan []*types.HeadChange, error) { + fcs.mu.Lock() + fcs.callNumber["ChainNotify"] = fcs.callNumber["ChainNotify"] + 1 + fcs.mu.Unlock() + + out := make(chan []*types.HeadChange, 1) + if fcs.subCh != nil { + close(out) + return out, fmt.Errorf("already subscribed to notifications") + } + + best, err := fcs.tsc.ChainHead(ctx) + if err != nil { + panic(err) + } + + out <- []*types.HeadChange{{Type: types.HCCurrent, Val: best}} + fcs.subCh = out + close(fcs.waitSub) + + return out, nil +} + +func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*types.BlockMessages, error) { + fcs.mu.Lock() + defer fcs.mu.Unlock() + fcs.callNumber["ChainGetBlockMessages"] = fcs.callNumber["ChainGetBlockMessages"] + 1 + messages, ok := fcs.blkMsgs[blk] + if !ok { + return &types.BlockMessages{}, nil + } + + ms, ok := fcs.msgs[messages] + if !ok { + return &types.BlockMessages{}, nil + } + + cids := make([]cid.Cid, len(ms.bmsgs)+len(ms.smsgs)) + for i, m := range ms.bmsgs { + cids[i] = m.Cid() + } + for i, m := range ms.smsgs { + cids[i+len(ms.bmsgs)] = m.Cid() + } + + return &types.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs, Cids: cids}, nil +} + +func (fcs *fakeCS) fakeMsgs(m fakeMsg) cid.Cid { + n := len(fcs.msgs) + c, err := cid.Prefix{ + Version: 1, + Codec: cid.Raw, + MhType: multihash.IDENTITY, + MhLength: -1, + }.Sum([]byte(fmt.Sprintf("%d", n))) + require.NoError(fcs.t, err) + + fcs.msgs[c] = 
m + return c +} + +func (fcs *fakeCS) dropSub() { + fcs.mu.Lock() + + if fcs.subCh == nil { + fcs.mu.Unlock() + fcs.t.Fatal("sub not be nil") + } + + waitCh := make(chan struct{}) + fcs.waitSub = waitCh + close(fcs.subCh) + fcs.subCh = nil + fcs.mu.Unlock() + + <-waitCh +} + +func (fcs *fakeCS) sub(rev, app []*types.TipSet) { + <-fcs.waitSub + notif := make([]*types.HeadChange, len(rev)+len(app)) + + for i, r := range rev { + notif[i] = &types.HeadChange{ + Type: types.HCRevert, + Val: r, + } + } + for i, r := range app { + notif[i+len(rev)] = &types.HeadChange{ + Type: types.HCApply, + Val: r, + } + } + + fcs.subCh <- notif +} + +func (fcs *fakeCS) advance(rev, app, drop int, msgs map[int]cid.Cid, nulls ...int) { + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + time.Sleep(ObserveDuration * 2) + }() + + nullm := map[int]struct{}{} + for _, v := range nulls { + nullm[v] = struct{}{} + } + + var revs []*types.TipSet + for i := 0; i < rev; i++ { + fcs.t.Log("revert", fcs.h) + from, err := fcs.tsc.ChainHead(ctx) + require.NoError(fcs.t, err) + + if _, ok := nullm[int(from.Height())]; !ok { + require.NoError(fcs.t, fcs.tsc.revert(from)) + + if drop == 0 { + revs = append(revs, from) + } + } + if drop > 0 { + drop-- + if drop == 0 { + fcs.dropSub() + } + } + fcs.h-- + } + + var apps []*types.TipSet + for i := 0; i < app; i++ { + fcs.h++ + fcs.t.Log("apply", fcs.h) + + mc, hasMsgs := msgs[i] + if !hasMsgs { + mc = dummyCid + } + + if _, ok := nullm[int(fcs.h)]; !ok { + best, err := fcs.tsc.ChainHead(ctx) + require.NoError(fcs.t, err) + ts := fcs.makeTs(fcs.t, best.Key().Cids(), fcs.h, mc) + require.NoError(fcs.t, fcs.tsc.add(ts)) + + if hasMsgs { + fcs.blkMsgs[ts.Blocks()[0].Cid()] = mc + } + + if drop == 0 { + apps = append(apps, ts) + } + } + + if drop > 0 { + drop-- + if drop == 0 { + fcs.dropSub() + } + } + } + + fcs.sub(revs, apps) + // Wait for the last round to finish. 
+ fcs.sub(nil, nil) + fcs.sub(nil, nil) +} + +var _ IEvent = &fakeCS{} + +type ObserveMode string + +const ( + ObModeTimer = ObserveMode("timer") + ObModeSubscription = ObserveMode("subscription") +) + +var CurObserveMode = ObModeTimer + +func TestAt(t *testing.T) { + tf.UnitTest(t) + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + var applied bool + var reverted bool + + err = events.ChainAt(context.Background(), func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + require.Equal(t, 5, int(ts.Height())) + require.Equal(t, 8, int(curH)) + applied = true + return nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 5) + require.NoError(t, err) + + fcs.advance(0, 3, 0, nil) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + fcs.advance(0, 3, 0, nil) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + fcs.advance(0, 3, 0, nil) + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + fcs.advance(0, 3, 0, nil) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + fcs.advance(10, 0, 0, nil) + require.Equal(t, false, applied) + require.Equal(t, true, reverted) + + reverted = false + fcs.advance(0, 5, 0, nil) + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + + // observe chain head in timer mode, this kind of observe is not supported + if CurObserveMode == ObModeSubscription { + fcs.advance(10, 10, 0, nil) + require.Equal(t, true, applied) + require.Equal(t, true, reverted) + applied = false + reverted = false + + fcs.advance(10, 1, 0, nil) + require.Equal(t, false, applied) + require.Equal(t, true, reverted) + reverted = false + + fcs.advance(0, 1, 0, nil) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + fcs.advance(0, 2, 0, nil) + require.Equal(t, false, applied) + require.Equal(t, 
false, reverted) + + fcs.advance(0, 1, 0, nil) // 8 + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + } +} + +func TestAtNullTrigger(t *testing.T) { + tf.UnitTest(t) + + fcs := newFakeCS(t) + defer fcs.stop() + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + var applied bool + var reverted bool + + err = events.ChainAt(context.Background(), func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + require.Equal(t, abi.ChainEpoch(6), ts.Height()) + require.Equal(t, 8, int(curH)) + applied = true + return nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 5) + require.NoError(t, err) + + fcs.advance(0, 6, 0, nil, 5) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + fcs.advance(0, 3, 0, nil) + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false +} + +func TestAtNullConf(t *testing.T) { + tf.UnitTest(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(ctx, fcs) + require.NoError(t, err) + + var applied bool + var reverted bool + + err = events.ChainAt(ctx, func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + require.Equal(t, 5, int(ts.Height())) + require.Equal(t, 8, int(curH)) + applied = true + return nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 5) + require.NoError(t, err) + + fcs.advance(0, 6, 0, nil) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + fcs.advance(0, 3, 0, nil, 8) + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + fcs.advance(7, 1, 0, nil) + require.Equal(t, false, applied) + require.Equal(t, true, reverted) + reverted = false +} + +var _ tsCacheAPI = &fakeTSCacheAPI{} + +type fakeTSCacheAPI struct{} + +func (f *fakeTSCacheAPI) 
ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) { + panic("implement me") +} + +func (f *fakeTSCacheAPI) ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) { + panic("implement me") +} + +func (f *fakeTSCacheAPI) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) { + panic("implement me") +} + +func (f *fakeTSCacheAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { + panic("implement me") +} + +func TestAtStart(t *testing.T) { + tf.UnitTest(t) + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + fcs.advance(0, 5, 0, nil) // 6 + + var applied bool + var reverted bool + + err = events.ChainAt(context.Background(), func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + require.Equal(t, 5, int(ts.Height())) + require.Equal(t, 8, int(curH)) + applied = true + return nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 5) + require.NoError(t, err) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + fcs.advance(0, 5, 0, nil) // 11 + require.Equal(t, true, applied) + require.Equal(t, false, reverted) +} + +func TestAtStartConfidence(t *testing.T) { + tf.UnitTest(t) + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + fcs.advance(0, 10, 0, nil) // 11 + + var applied bool + var reverted bool + + err = events.ChainAt(context.Background(), func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + require.Equal(t, 5, int(ts.Height())) + require.Equal(t, 11, int(curH)) + applied = true + return nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 5) + require.NoError(t, err) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) +} + +func 
TestAtChained(t *testing.T) { + tf.UnitTest(t) + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + var applied bool + var reverted bool + + err = events.ChainAt(context.Background(), func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + return events.ChainAt(context.Background(), func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + require.Equal(t, 10, int(ts.Height())) + applied = true + return nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 10) + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 5) + require.NoError(t, err) + + fcs.advance(0, 15, 0, nil) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) +} + +func TestAtChainedConfidence(t *testing.T) { + tf.UnitTest(t) + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + fcs.advance(0, 15, 0, nil) + + var applied bool + var reverted bool + + err = events.ChainAt(context.Background(), func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + return events.ChainAt(context.Background(), func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + require.Equal(t, 10, int(ts.Height())) + applied = true + return nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 10) + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 5) + require.NoError(t, err) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) +} + +func TestAtChainedConfidenceNull(t *testing.T) { + tf.UnitTest(t) + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + fcs.advance(0, 15, 0, nil, 5) + + var applied bool + var reverted bool + + err = 
events.ChainAt(context.Background(), func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + applied = true + require.Equal(t, 6, int(ts.Height())) + return nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 5) + require.NoError(t, err) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) +} + +func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Message) (matched bool, err error) { + return func(msg *types.Message) (matched bool, err error) { + return to == msg.To && m == msg.Method, nil + } +} + +func TestCalled(t *testing.T) { + tf.UnitTest(t) + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + more := true + var applied, reverted bool + var appliedMsg *types.Message + var appliedTS *types.TipSet + var appliedH abi.ChainEpoch + + err = events.Called(context.Background(), func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + require.Equal(t, false, applied) + applied = true + appliedMsg = msg + appliedTS = ts + appliedH = curH + return more, nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, err) + + // create few blocks to make sure nothing get's randomly called + + fcs.advance(0, 4, 0, nil) // H=5 + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create blocks with message (but below confidence threshold) + + fcs.advance(0, 3, 0, map[int]cid.Cid{ // msg at H=6; H=8 (confidence=2) + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 1}, + }, + }), + }) + + require.Equal(t, false, 
applied) + require.Equal(t, false, reverted) + + // create additional block so we are above confidence threshold + + fcs.advance(0, 2, 0, nil) // H=10 (confidence=3, apply) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + // dip below confidence + fcs.advance(2, 2, 0, nil) // H=10 (confidence=3, apply) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + require.Equal(t, abi.ChainEpoch(7), appliedTS.Height()) + require.Equal(t, "bafkqaaa", appliedTS.Blocks()[0].Messages.String()) + require.Equal(t, abi.ChainEpoch(10), appliedH) + require.Equal(t, t0123, appliedMsg.To) + require.Equal(t, uint64(1), appliedMsg.Nonce) + require.Equal(t, abi.MethodNum(5), appliedMsg.Method) + + // revert some blocks, keep the message + + fcs.advance(3, 1, 0, nil) // H=8 (confidence=1) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // revert the message + t.Skipf("not support in timer observe mode") + fcs.advance(2, 1, 0, nil) // H=7, we reverted ts with the msg execution, but not the msg itself + + require.Equal(t, false, applied) + require.Equal(t, true, reverted) + reverted = false + + // send new message on different height + + n2msg := fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 2}, + }, + }) + + fcs.advance(0, 3, 0, map[int]cid.Cid{ // (n2msg confidence=1) + 0: n2msg, + }) + + require.Equal(t, true, applied) // msg from H=7, which had reverted execution + require.Equal(t, false, reverted) + require.Equal(t, abi.ChainEpoch(10), appliedH) + applied = false + + fcs.advance(0, 2, 0, nil) // (confidence=3) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + require.Equal(t, abi.ChainEpoch(9), appliedTS.Height()) + require.Equal(t, "bafkqaaa", appliedTS.Blocks()[0].Messages.String()) + require.Equal(t, abi.ChainEpoch(12), appliedH) + require.Equal(t, t0123, appliedMsg.To) + require.Equal(t, uint64(2), 
appliedMsg.Nonce) + require.Equal(t, abi.MethodNum(5), appliedMsg.Method) + + // revert and apply at different height + + fcs.advance(8, 6, 0, map[int]cid.Cid{ // (confidence=3) + 1: n2msg, + }) + + // TODO: We probably don't want to call revert/apply, as restarting certain + // actions may be expensive, and in this case the message is still + // on-chain, just at different height + require.Equal(t, true, applied) + require.Equal(t, true, reverted) + reverted = false + applied = false + + require.Equal(t, abi.ChainEpoch(7), appliedTS.Height()) + require.Equal(t, "bafkqaaa", appliedTS.Blocks()[0].Messages.String()) + require.Equal(t, abi.ChainEpoch(10), appliedH) + require.Equal(t, t0123, appliedMsg.To) + require.Equal(t, uint64(2), appliedMsg.Nonce) + require.Equal(t, abi.MethodNum(5), appliedMsg.Method) + + // call method again + + fcs.advance(0, 5, 0, map[int]cid.Cid{ + 0: n2msg, + }) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + // send and revert below confidence, then cross confidence + fcs.advance(0, 2, 0, map[int]cid.Cid{ + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 3}, + }, + }), + }) + + fcs.advance(2, 5, 0, nil) // H=19, but message reverted + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // test timeout (it's set to 20 in the call to `events.Called` above) + + fcs.advance(0, 6, 0, nil) + + require.Equal(t, false, applied) // not calling timeout as we received messages + require.Equal(t, false, reverted) + + // test unregistering with more + + more = false + fcs.advance(0, 5, 0, map[int]cid.Cid{ + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 4}, // this signals we don't want more + }, + }), + }) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + fcs.advance(0, 5, 0, map[int]cid.Cid{ + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: 
[]*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 5}, + }, + }), + }) + + require.Equal(t, false, applied) // should not get any further notifications + require.Equal(t, false, reverted) + + // revert after disabled + + fcs.advance(5, 1, 0, nil) // try reverting msg sent after disabling + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + fcs.advance(5, 1, 0, nil) // try reverting msg sent before disabling + + require.Equal(t, false, applied) + require.Equal(t, true, reverted) +} + +func TestCalledTimeout(t *testing.T) { + tf.UnitTest(t) + t.Skipf("not support in timer observe mode") + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + called := false + + err = events.Called(context.Background(), func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + called = true + require.Nil(t, msg) + require.Equal(t, abi.ChainEpoch(20), ts.Height()) + require.Equal(t, abi.ChainEpoch(23), curH) + return false, nil + }, func(_ context.Context, ts *types.TipSet) error { + t.Fatal("revert on timeout") + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, err) + + fcs.advance(0, 21, 0, nil) + require.False(t, called) + + fcs.advance(0, 5, 0, nil) + require.True(t, called) + called = false + + // with check func reporting done + + fcs = newFakeCS(t) + defer fcs.stop() + + events, err = NewEvents(context.Background(), fcs) + require.NoError(t, err) + + err = events.Called(context.Background(), func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return true, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + called = true + 
require.Nil(t, msg) + require.Equal(t, abi.ChainEpoch(20), ts.Height()) + require.Equal(t, abi.ChainEpoch(23), curH) + return false, nil + }, func(_ context.Context, ts *types.TipSet) error { + t.Fatal("revert on timeout") + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, err) + + fcs.advance(0, 21, 0, nil) + require.False(t, called) + + fcs.advance(0, 5, 0, nil) + require.False(t, called) +} + +func TestCalledOrder(t *testing.T) { + tf.UnitTest(t) + t.Skipf("not support in timer observe mode") + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + at := 0 + + err = events.Called(context.Background(), func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + switch at { + case 0: + require.Equal(t, uint64(1), msg.Nonce) + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + case 1: + require.Equal(t, uint64(2), msg.Nonce) + require.Equal(t, abi.ChainEpoch(5), ts.Height()) + default: + t.Fatal("apply should only get called twice, at: ", at) + } + at++ + return true, nil + }, func(_ context.Context, ts *types.TipSet) error { + switch at { + case 2: + require.Equal(t, abi.ChainEpoch(5), ts.Height()) + case 3: + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + default: + t.Fatal("revert should only get called twice, at: ", at) + } + at++ + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, err) + + fcs.advance(0, 10, 0, map[int]cid.Cid{ + 1: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 1}, + }, + }), + 2: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 2}, + }, + }), + }) + + fcs.advance(9, 1, 0, nil) +} + +func TestCalledNull(t 
*testing.T) { + tf.UnitTest(t) + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + more := true + var applied, reverted bool + + err = events.Called(context.Background(), func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + require.Equal(t, false, applied) + applied = true + return more, nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, err) + + // create few blocks to make sure nothing get's randomly called + + fcs.advance(0, 4, 0, nil) // H=5 + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create blocks with message (but below confidence threshold) + + fcs.advance(0, 3, 0, map[int]cid.Cid{ // msg at H=6; H=8 (confidence=2) + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 1}, + }, + }), + }) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create additional blocks so we are above confidence threshold, but with null tipset at the height + // of application + + fcs.advance(0, 3, 0, nil, 10) // H=11 (confidence=3, apply) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + fcs.advance(5, 1, 0, nil, 10) + + require.Equal(t, false, applied) + require.Equal(t, true, reverted) +} + +func TestRemoveTriggersOnMessage(t *testing.T) { + tf.UnitTest(t) + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + more := true + var applied, reverted bool + + err = 
events.Called(context.Background(), func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + require.Equal(t, false, applied) + applied = true + return more, nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, err) + + // create few blocks to make sure nothing get's randomly called + + fcs.advance(0, 4, 0, nil) // H=5 + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create blocks with message (but below confidence threshold) + + fcs.advance(0, 3, 0, map[int]cid.Cid{ // msg occurs at H=5, applied at H=6; H=8 (confidence=2) + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 1}, + }, + }), + }) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // revert applied TS & message TS + fcs.advance(3, 1, 0, nil) // H=6 (tipset message applied in reverted, AND message reverted) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create additional blocks so we are above confidence threshold, but message not applied + // as it was reverted + fcs.advance(0, 5, 0, nil) // H=11 (confidence=3, apply) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create blocks with message again (but below confidence threshold) + + fcs.advance(0, 3, 0, map[int]cid.Cid{ // msg occurs at H=12, applied at H=13; H=15 (confidence=2) + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 2}, + }, + }), + }) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // revert applied height TS, but don't remove message trigger + fcs.advance(2, 1, 0, nil) // H=13 (tipset message applied in reverted, by tipset with message not reverted) + 
require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create additional blocks so we are above confidence threshold + fcs.advance(0, 4, 0, nil) // H=18 (confidence=3, apply) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) +} + +type testStateChange struct { + from string + to string +} + +func TestStateChanged(t *testing.T) { + tf.UnitTest(t) + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + more := true + var applied, reverted bool + var appliedData StateChange + var appliedOldTS *types.TipSet + var appliedNewTS *types.TipSet + var appliedH abi.ChainEpoch + var matchData StateChange + + confidence := 3 + timeout := abi.ChainEpoch(20) + + err = events.StateChanged(func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { + if data != nil { + require.Equal(t, oldTs.Key(), newTs.Parents()) + } + require.Equal(t, false, applied) + applied = true + appliedData = data + appliedOldTS = oldTs + appliedNewTS = newTs + appliedH = curH + return more, nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, confidence, timeout, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { + require.Equal(t, oldTs.Key(), newTs.Parents()) + if matchData == nil { + return false, matchData, nil + } + + d := matchData + matchData = nil + return true, d, nil + }) + require.NoError(t, err) + + // create few blocks to make sure nothing get's randomly called + + fcs.advance(0, 4, 0, nil) // H=5 + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create state change (but below confidence threshold) + matchData = testStateChange{from: "a", to: "b"} + fcs.advance(0, 3, 0, nil) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create additional 
block so we are above confidence threshold + + fcs.advance(0, 2, 0, nil) // H=10 (confidence=3, apply) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + // dip below confidence (should not apply again) + fcs.advance(2, 2, 0, nil) // H=10 (confidence=3, apply) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // Change happens from 5 -> 6 + require.Equal(t, abi.ChainEpoch(5), appliedOldTS.Height()) + require.Equal(t, abi.ChainEpoch(6), appliedNewTS.Height()) + + // Actually applied (with confidence) at 9 + require.Equal(t, abi.ChainEpoch(9), appliedH) + + // Make sure the state change was correctly passed through + rcvd := appliedData.(testStateChange) + require.Equal(t, "a", rcvd.from) + require.Equal(t, "b", rcvd.to) +} + +func TestStateChangedRevert(t *testing.T) { + tf.UnitTest(t) + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + more := true + var applied, reverted bool + var matchData StateChange + + confidence := 1 + timeout := abi.ChainEpoch(20) + + err = events.StateChanged(func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { + if data != nil { + require.Equal(t, oldTs.Key(), newTs.Parents()) + } + require.Equal(t, false, applied) + applied = true + return more, nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, confidence, timeout, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { + require.Equal(t, oldTs.Key(), newTs.Parents()) + + if matchData == nil { + return false, matchData, nil + } + + d := matchData + matchData = nil + return true, d, nil + }) + require.NoError(t, err) + + fcs.advance(0, 2, 0, nil) // H=3 + + // Make a state change from TS at height 3 to TS at height 4 + matchData = testStateChange{from: "a", to: 
"b"} + fcs.advance(0, 1, 0, nil) // H=4 + + // Haven't yet reached confidence + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // Advance to reach confidence level + fcs.advance(0, 1, 0, nil) // H=5 + + // Should now have called the handler + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + // Advance 3 more TS + fcs.advance(0, 3, 0, nil) // H=8 + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + t.Skipf("not support following tests in timer observe mode") + // Regress but not so far as to cause a revert + fcs.advance(3, 1, 0, nil) // H=6 + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // Regress back to state where change happened + fcs.advance(3, 1, 0, nil) // H=4 + + // Expect revert to have happened + require.Equal(t, false, applied) + require.Equal(t, true, reverted) +} + +func TestStateChangedTimeout(t *testing.T) { + tf.UnitTest(t) + t.Skipf("not support in timer observe mode") + + timeoutHeight := abi.ChainEpoch(20) + confidence := 3 + + testCases := []struct { + name string + checkFn CheckFunc + nilBlocks []int + expectTimeout bool + }{{ + // Verify that the state changed timeout is called at the expected height + name: "state changed timeout", + checkFn: func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, + expectTimeout: true, + }, { + // Verify that the state changed timeout is called even if the timeout + // falls on nil block + name: "state changed timeout falls on nil block", + checkFn: func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, + nilBlocks: []int{20, 21, 22, 23}, + expectTimeout: true, + }, { + // Verify that the state changed timeout is not called if the check + // function reports that it's complete + name: "no timeout callback if check func reports done", + checkFn: func(ctx context.Context, ts *types.TipSet) (d bool, 
m bool, e error) { + return true, true, nil + }, + expectTimeout: false, + }} + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + // Track whether the callback was called + called := false + + // Set up state change tracking that will timeout at the given height + err = events.StateChanged( + tc.checkFn, + func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { + // Expect the callback to be called at the timeout height with nil data + called = true + require.Nil(t, data) + require.Equal(t, timeoutHeight, newTs.Height()) + require.Equal(t, timeoutHeight+abi.ChainEpoch(confidence), curH) + return false, nil + }, func(_ context.Context, ts *types.TipSet) error { + t.Fatal("revert on timeout") + return nil + }, confidence, timeoutHeight, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { + return false, nil, nil + }) + + require.NoError(t, err) + + // Advance to timeout height + fcs.advance(0, int(timeoutHeight)+1, 0, nil) + require.False(t, called) + + // Advance past timeout height + fcs.advance(0, 5, 0, nil, tc.nilBlocks...) 
+ require.Equal(t, tc.expectTimeout, called) + called = false + }) + } +} + +func TestCalledMultiplePerEpoch(t *testing.T) { + tf.UnitTest(t) + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + at := 0 + + err = events.Called(context.Background(), func(ctx context.Context, ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + switch at { + case 0: + require.Equal(t, uint64(1), msg.Nonce) + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + case 1: + require.Equal(t, uint64(2), msg.Nonce) + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + default: + t.Fatal("apply should only get called twice, at: ", at) + } + at++ + return true, nil + }, func(_ context.Context, ts *types.TipSet) error { + switch at { + case 2: + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + case 3: + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + default: + t.Fatal("revert should only get called twice, at: ", at) + } + at++ + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, err) + + fcs.advance(0, 10, 0, map[int]cid.Cid{ + 1: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 1}, + {To: t0123, From: t0123, Method: 5, Nonce: 2}, + }, + }), + }) + + fcs.advance(9, 1, 0, nil) +} + +func TestCachedSameBlock(t *testing.T) { + tf.UnitTest(t) + fcs := newFakeCS(t) + defer fcs.stop() + + _, err := NewEvents(context.Background(), fcs) + require.NoError(t, err) + + fcs.advance(0, 10, 0, map[int]cid.Cid{}) + assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 20, "expect call ChainGetBlockMessages %d but got ", 20, fcs.callNumber["ChainGetBlockMessages"]) + + fcs.advance(5, 10, 0, map[int]cid.Cid{}) + assert.Assert(t, 
fcs.callNumber["ChainGetBlockMessages"] == 30, "expect call ChainGetBlockMessages %d but got ", 30, fcs.callNumber["ChainGetBlockMessages"]) +} + +// nolint +type testObserver struct { + t *testing.T + head *types.TipSet +} + +// nolint +func (t *testObserver) Apply(_ context.Context, from, to *types.TipSet) error { + if t.head != nil { + require.True(t.t, t.head.Equals(from)) + } + t.head = to + return nil +} + +// nolint +func (t *testObserver) Revert(_ context.Context, from, to *types.TipSet) error { + if t.head != nil { + require.True(t.t, t.head.Equals(from)) + } + t.head = to + return nil +} + +func TestReconnect(t *testing.T) { + tf.UnitTest(t) + t.Skipf("not support in timer observe mode") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + fcs := newFakeCS(t) + defer fcs.stop() + + events, err := NewEvents(ctx, fcs) + require.NoError(t, err) + + fcs.advance(0, 1, 0, nil) + + events.Observe(&testObserver{t: t}) + + fcs.advance(0, 3, 0, nil) + + // Drop on apply + fcs.advance(0, 6, 2, nil) + require.True(t, fcs.callNumber["ChainGetPath"] == 1) + + // drop across revert/apply boundary + fcs.advance(4, 2, 3, nil) + require.True(t, fcs.callNumber["ChainGetPath"] == 2) + fcs.advance(0, 6, 0, nil) + + // drop on revert + fcs.advance(3, 0, 2, nil) + require.True(t, fcs.callNumber["ChainGetPath"] == 3) + + // drop with nulls + fcs.advance(0, 5, 2, nil, 0, 1, 3) + require.True(t, fcs.callNumber["ChainGetPath"] == 4) +} + +func TestUnregister(t *testing.T) { + tf.UnitTest(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + fcs := newFakeCS(t) + + events, err := NewEvents(ctx, fcs) + require.NoError(t, err) + + tsObs := &testObserver{t: t} + events.Observe(tsObs) + + // observer receives heads as the chain advances + fcs.advance(0, 1, 0, nil) + headBeforeDeregister := events.lastTS + require.Equal(t, tsObs.head, headBeforeDeregister) + + // observer unregistered successfully + found := events.Unregister(tsObs) 
+ require.True(t, found) + + // observer stops receiving heads as the chain advances + fcs.advance(0, 1, 0, nil) + require.Equal(t, tsObs.head, headBeforeDeregister) + require.NotEqual(t, tsObs.head, events.lastTS) + + // unregistering an invalid observer returns false + dneObs := &testObserver{t: t} + found = events.Unregister(dneObs) + require.False(t, found) +} diff --git a/pkg/events/message_cache.go b/pkg/events/message_cache.go new file mode 100644 index 0000000000..fb8b33a0a2 --- /dev/null +++ b/pkg/events/message_cache.go @@ -0,0 +1,43 @@ +package events + +import ( + "context" + "sync" + + "github.com/filecoin-project/venus/venus-shared/types" + + lru "github.com/hashicorp/golang-lru" + "github.com/ipfs/go-cid" +) + +type messageCache struct { + api IEvent + + blockMsgLk sync.Mutex + blockMsgCache *lru.ARCCache +} + +func newMessageCache(api IEvent) *messageCache { + blsMsgCache, _ := lru.NewARC(500) + + return &messageCache{ + api: api, + blockMsgCache: blsMsgCache, + } +} + +func (c *messageCache) ChainGetBlockMessages(ctx context.Context, blkCid cid.Cid) (*types.BlockMessages, error) { + c.blockMsgLk.Lock() + defer c.blockMsgLk.Unlock() + + msgsI, ok := c.blockMsgCache.Get(blkCid) + var err error + if !ok { + msgsI, err = c.api.ChainGetBlockMessages(ctx, blkCid) + if err != nil { + return nil, err + } + c.blockMsgCache.Add(blkCid, msgsI) + } + return msgsI.(*types.BlockMessages), nil +} diff --git a/pkg/events/observer.go b/pkg/events/observer.go new file mode 100644 index 0000000000..564dd8c70d --- /dev/null +++ b/pkg/events/observer.go @@ -0,0 +1,251 @@ +package events + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + "go.opencensus.io/trace" + + "github.com/filecoin-project/venus/pkg/constants" +) + +var ObserveDuration = time.Second * 45 + +type observer struct { + api IEvent + + gcConfidence abi.ChainEpoch + + ready chan struct{} + + lk 
sync.Mutex + head *types.TipSet + maxHeight abi.ChainEpoch + observers []TipSetObserver +} + +func newObserver(api *cache, gcConfidence abi.ChainEpoch) *observer { + obs := &observer{ + api: api, + gcConfidence: gcConfidence, + + ready: make(chan struct{}), + observers: []TipSetObserver{}, + } + // obs.Observe(api.observer()) todo has ignore cache + return obs +} + +func (o *observer) start(ctx context.Context) error { + go o.listenHeadChanges(ctx) + + // Wait for the first tipset to be seen or bail if shutting down + select { + case <-o.ready: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (o *observer) listenHeadChanges(ctx context.Context) { + for { + if err := o.listenHeadChangesOnce(ctx); err != nil { + log.Errorf("listen head changes errored: %s", err) + } else { + log.Debugf("listenHeadChanges quit") + } + + select { + case <-constants.Clock.After(ObserveDuration): + case <-ctx.Done(): + log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err()) + return + } + + log.Debugf("restarting listenHeadChanges") + } +} + +func (o *observer) listenHeadChangesOnce(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + curHead, err := o.api.ChainHead(ctx) + if err != nil { + return fmt.Errorf("listenHeadChanges ChainHead call failed: %w", err) + } + + o.lk.Lock() + if o.head == nil { + o.head = curHead + close(o.ready) + } + startHead := o.head + o.lk.Unlock() + + if startHead != nil && !startHead.Equals(curHead) { + changes, err := o.api.ChainGetPath(ctx, startHead.Key(), curHead.Key()) + if err != nil { + return fmt.Errorf("failed to get path from last applied tipset to head: %w", err) + } + + if err := o.applyChanges(ctx, changes); err != nil { + return fmt.Errorf("failed catch-up head changes: %w", err) + } + } + + return nil +} + +func (o *observer) applyChanges(ctx context.Context, changes []*types.HeadChange) error { + // Used to wait for a prior notification round to finish (by tests) + 
 if len(changes) == 0 { + return nil + } + + var rev, app []*types.TipSet + for _, changes := range changes { + switch changes.Type { + case types.HCRevert: + rev = append(rev, changes.Val) + case types.HCApply: + app = append(app, changes.Val) + default: + log.Errorf("unexpected head change notification type: '%s'", changes.Type) + } + } + + if err := o.headChange(ctx, rev, app); err != nil { + return fmt.Errorf("failed to apply head changes: %w", err) + } + return nil +} + +func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) error { + ctx, span := trace.StartSpan(ctx, "events.HeadChange") + span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev)))) + span.AddAttributes(trace.Int64Attribute("applies", int64(len(app)))) + + o.lk.Lock() + head := o.head + o.lk.Unlock() + + defer func() { + span.AddAttributes(trace.Int64Attribute("endHeight", int64(head.Height()))) + span.End() + }() + + // NOTE: bailing out here if the head isn't what we expected is fine. We'll re-start the + // entire process and handle any strange reorgs. + for i, from := range rev { + if !from.Equals(head) { + return fmt.Errorf( + "expected to revert %s (%d), reverting %s (%d)", + head.Key(), head.Height(), from.Key(), from.Height(), + ) + } + var to *types.TipSet + if i+1 < len(rev) { + // If we have more reverts, the next revert is the next head. + to = rev[i+1] + } else { + // At the end of the revert sequence, we need to lookup the joint tipset + // between the revert sequence and the apply sequence. + var err error + to, err = o.api.ChainGetTipSet(ctx, from.Parents()) + if err != nil { + // Well, this sucks. We'll bail and restart. + return fmt.Errorf("failed to get tipset when reverting due to a SetHead: %w", err) + } + } + + // Get the current observers and atomically set the head. + // + // 1. We need to get the observers every time in case some registered/deregistered. + // 2. 
We need to atomically set the head so new observers don't see events twice or + // skip them. + o.lk.Lock() + observers := o.observers + o.head = to + o.lk.Unlock() + + for _, obs := range observers { + if err := obs.Revert(ctx, from, to); err != nil { + log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err) + } + } + + if to.Height() < o.maxHeight-o.gcConfidence { + log.Errorf("reverted past finality, from %d to %d", o.maxHeight, to.Height()) + } + + head = to + } + + for _, to := range app { + if to.Parents() != head.Key() { + return fmt.Errorf( + "cannot apply %s (%d) with parents %s on top of %s (%d)", + to.Key(), to.Height(), to.Parents(), head.Key(), head.Height(), + ) + } + + o.lk.Lock() + observers := o.observers + o.head = to + o.lk.Unlock() + + for _, obs := range observers { + if err := obs.Apply(ctx, head, to); err != nil { + log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err) + } + } + if to.Height() > o.maxHeight { + o.maxHeight = to.Height() + } + + head = to + } + return nil +} + +// Observe registers the observer, and returns the current tipset. The observer is guaranteed to +// observe events starting at this tipset. +// +// Returns nil if the observer hasn't started yet (but still registers). +func (o *observer) Observe(obs TipSetObserver) *types.TipSet { + o.lk.Lock() + defer o.lk.Unlock() + o.observers = append(o.observers, obs) + return o.head +} + +// Unregister unregisters an observer. Returns true if we successfully removed the observer. +// +// NOTE: The observer _may_ be called after being removed. Observers MUST handle this case +// internally. +func (o *observer) Unregister(obs TipSetObserver) (found bool) { + o.lk.Lock() + defer o.lk.Unlock() + // We _copy_ the observers list because we may be concurrently reading it from a headChange + // handler. + // + // This should happen infrequently, so it's fine if we spend a bit of time here. 
+ newObservers := make([]TipSetObserver, 0, len(o.observers)) + for _, existingObs := range o.observers { + if existingObs == obs { + found = true + continue + } + newObservers = append(newObservers, existingObs) + } + + o.observers = newObservers + return found +} diff --git a/pkg/events/state/ctxstore.go b/pkg/events/state/ctxstore.go new file mode 100644 index 0000000000..a942315e20 --- /dev/null +++ b/pkg/events/state/ctxstore.go @@ -0,0 +1,29 @@ +package state + +import ( + "context" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" +) + +// nolint +type contextStore struct { + ctx context.Context + cst *cbor.BasicIpldStore +} + +// nolint +func (cs *contextStore) Context() context.Context { + return cs.ctx +} + +// nolint +func (cs *contextStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { + return cs.cst.Get(ctx, c, out) +} + +// nolint +func (cs *contextStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { + return cs.cst.Put(ctx, v) +} diff --git a/pkg/events/state/fastapi.go b/pkg/events/state/fastapi.go new file mode 100644 index 0000000000..a4a47f023d --- /dev/null +++ b/pkg/events/state/fastapi.go @@ -0,0 +1,31 @@ +package state + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type FastChainAPI interface { + ChainAPI + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +type fastAPI struct { + FastChainAPI +} + +func WrapFastAPI(api FastChainAPI) ChainAPI { + return &fastAPI{ + api, + } +} + +func (a *fastAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + ts, err := a.FastChainAPI.ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, err + } + return a.FastChainAPI.StateGetActor(ctx, actor, ts.Parents()) +} diff --git a/pkg/events/state/mock/api.go b/pkg/events/state/mock/api.go new file mode 100644 index 0000000000..b837b5ad07 --- 
/dev/null +++ b/pkg/events/state/mock/api.go @@ -0,0 +1,71 @@ +package test + +import ( + "context" + "fmt" + "sync" + + "github.com/filecoin-project/go-address" + + "github.com/ipfs/go-cid" + + blockstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type MockAPI struct { + bs blockstore.Blockstore + + lk sync.Mutex + ts map[types.TipSetKey]*types.Actor + stateGetActorCalled int +} + +func NewMockAPI(bs blockstore.Blockstore) *MockAPI { + return &MockAPI{ + bs: bs, + ts: make(map[types.TipSetKey]*types.Actor), + } +} + +func (m *MockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { + return m.bs.Has(ctx, c) +} + +func (m *MockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { + blk, err := m.bs.Get(ctx, c) + if err != nil { + return nil, fmt.Errorf("blockstore get: %w", err) + } + + return blk.RawData(), nil +} + +func (m *MockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + m.lk.Lock() + defer m.lk.Unlock() + + m.stateGetActorCalled++ + return m.ts[tsk], nil +} + +func (m *MockAPI) StateGetActorCallCount() int { + m.lk.Lock() + defer m.lk.Unlock() + + return m.stateGetActorCalled +} + +func (m *MockAPI) ResetCallCounts() { + m.lk.Lock() + defer m.lk.Unlock() + + m.stateGetActorCalled = 0 +} + +func (m *MockAPI) SetActor(tsk types.TipSetKey, act *types.Actor) { + m.lk.Lock() + defer m.lk.Unlock() + + m.ts[tsk] = act +} diff --git a/pkg/events/state/mock/state.go b/pkg/events/state/mock/state.go new file mode 100644 index 0000000000..bac06b59fc --- /dev/null +++ b/pkg/events/state/mock/state.go @@ -0,0 +1,32 @@ +package test + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/filecoin-project/specs-actors/v2/actors/util/adt" + 
"github.com/stretchr/testify/require" +) + +func CreateEmptyMarketState(t *testing.T, store adt.Store) *market.State { + emptyArrayCid, err := adt.MakeEmptyArray(store).Root() + require.NoError(t, err) + emptyMap, err := adt.MakeEmptyMap(store).Root() + require.NoError(t, err) + return market.ConstructState(emptyArrayCid, emptyMap, emptyMap) +} + +func CreateDealAMT(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market.DealState) cid.Cid { + root := adt.MakeEmptyArray(store) + for dealID, dealState := range deals { + err := root.Set(uint64(dealID), dealState) + require.NoError(t, err) + } + rootCid, err := root.Root() + require.NoError(t, err) + return rootCid +} diff --git a/pkg/events/state/mock/tipset.go b/pkg/events/state/mock/tipset.go new file mode 100644 index 0000000000..abfb1c68c8 --- /dev/null +++ b/pkg/events/state/mock/tipset.go @@ -0,0 +1,27 @@ +package test + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +var dummyCid cid.Cid + +func init() { + dummyCid, _ = cid.Parse("bafkqaaa") +} + +func MockTipset(minerAddr address.Address, timestamp uint64) (*types.TipSet, error) { + return types.NewTipSet([]*types.BlockHeader{{ + Miner: minerAddr, + Height: 5, + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + Timestamp: timestamp, + }}) +} diff --git a/pkg/events/state/predicates.go b/pkg/events/state/predicates.go new file mode 100644 index 0000000000..55031d280e --- /dev/null +++ b/pkg/events/state/predicates.go @@ -0,0 +1,434 @@ +package state + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + cbor 
"github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/venus/venus-shared/blockstore" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// UserData is the data returned from the DiffTipSetKeyFunc +type UserData interface{} + +// ChainAPI abstracts out calls made by this class to external APIs +type ChainAPI interface { + blockstore.ChainIO + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) +} + +// StatePredicates has common predicates for responding to state changes +type StatePredicates struct { //nolint + api ChainAPI + cst *cbor.BasicIpldStore +} + +func NewStatePredicates(api ChainAPI) *StatePredicates { + return &StatePredicates{ + api: api, + cst: cbor.NewCborStore(blockstore.NewAPIBlockstore(api)), + } +} + +// DiffTipSetKeyFunc check if there's a change form oldState to newState, and returns +// - changed: was there a change +// - user: user-defined data representing the state change +// - err +type DiffTipSetKeyFunc func(ctx context.Context, oldState, newState types.TipSetKey) (changed bool, user UserData, err error) + +type DiffActorStateFunc func(ctx context.Context, oldActorState *types.Actor, newActorState *types.Actor) (changed bool, user UserData, err error) + +// OnActorStateChanged calls diffStateFunc when the state changes for the given actor +func (sp *StatePredicates) OnActorStateChanged(addr address.Address, diffStateFunc DiffActorStateFunc) DiffTipSetKeyFunc { + return func(ctx context.Context, oldState, newState types.TipSetKey) (changed bool, user UserData, err error) { + oldActor, err := sp.api.StateGetActor(ctx, 
addr, oldState) + if err != nil { + return false, nil, err + } + newActor, err := sp.api.StateGetActor(ctx, addr, newState) + if err != nil { + return false, nil, err + } + + if oldActor.Head.Equals(newActor.Head) { + return false, nil, nil + } + return diffStateFunc(ctx, oldActor, newActor) + } +} + +type DiffStorageMarketStateFunc func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) + +// OnStorageMarketActorChanged calls diffStorageMarketState when the state changes for the market actor +func (sp *StatePredicates) OnStorageMarketActorChanged(diffStorageMarketState DiffStorageMarketStateFunc) DiffTipSetKeyFunc { + return sp.OnActorStateChanged(market.Address, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) { + oldState, err := market.Load(adt.WrapStore(ctx, sp.cst), oldActorState) + if err != nil { + return false, nil, err + } + newState, err := market.Load(adt.WrapStore(ctx, sp.cst), newActorState) + if err != nil { + return false, nil, err + } + return diffStorageMarketState(ctx, oldState, newState) + }) +} + +type BalanceTables struct { + EscrowTable market.BalanceTable + LockedTable market.BalanceTable +} + +// DiffBalanceTablesFunc compares two balance tables +type DiffBalanceTablesFunc func(ctx context.Context, oldBalanceTable, newBalanceTable BalanceTables) (changed bool, user UserData, err error) + +// OnBalanceChanged runs when the escrow table for available balances changes +func (sp *StatePredicates) OnBalanceChanged(diffBalances DiffBalanceTablesFunc) DiffStorageMarketStateFunc { + return func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) { + bc, err := oldState.BalancesChanged(newState) + if err != nil { + return false, nil, err + } + + if !bc { + return false, nil, nil + } + + oldEscrowRoot, err := oldState.EscrowTable() + if err != nil { + return false, nil, err + } + 
+ oldLockedRoot, err := oldState.LockedTable() + if err != nil { + return false, nil, err + } + + newEscrowRoot, err := newState.EscrowTable() + if err != nil { + return false, nil, err + } + + newLockedRoot, err := newState.LockedTable() + if err != nil { + return false, nil, err + } + + return diffBalances(ctx, BalanceTables{oldEscrowRoot, oldLockedRoot}, BalanceTables{newEscrowRoot, newLockedRoot}) + } +} + +type ( + DiffDealStatesFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot market.DealStates) (changed bool, user UserData, err error) + DiffDealProposalsFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot market.DealProposals) (changed bool, user UserData, err error) + DiffAdtArraysFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot adt.Array) (changed bool, user UserData, err error) +) + +// OnDealStateChanged calls diffDealStates when the market deal state changes +func (sp *StatePredicates) OnDealStateChanged(diffDealStates DiffDealStatesFunc) DiffStorageMarketStateFunc { + return func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) { + sc, err := oldState.StatesChanged(newState) + if err != nil { + return false, nil, err + } + + if !sc { + return false, nil, nil + } + + oldRoot, err := oldState.States() + if err != nil { + return false, nil, err + } + newRoot, err := newState.States() + if err != nil { + return false, nil, err + } + + return diffDealStates(ctx, oldRoot, newRoot) + } +} + +// OnDealProposalChanged calls diffDealProps when the market proposal state changes +func (sp *StatePredicates) OnDealProposalChanged(diffDealProps DiffDealProposalsFunc) DiffStorageMarketStateFunc { + return func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) { + pc, err := oldState.ProposalsChanged(newState) + if err != nil { + return false, nil, err + } + + if !pc { + return false, nil, nil + } + + 
oldRoot, err := oldState.Proposals() + if err != nil { + return false, nil, err + } + newRoot, err := newState.Proposals() + if err != nil { + return false, nil, err + } + + return diffDealProps(ctx, oldRoot, newRoot) + } +} + +// OnDealProposalAmtChanged detects changes in the deal proposal AMT for all deal proposals and returns a MarketProposalsChanges structure containing: +// - Added Proposals +// - Modified Proposals +// - Removed Proposals +func (sp *StatePredicates) OnDealProposalAmtChanged() DiffDealProposalsFunc { + return func(ctx context.Context, oldDealProps, newDealProps market.DealProposals) (changed bool, user UserData, err error) { + proposalChanges, err := market.DiffDealProposals(oldDealProps, newDealProps) + if err != nil { + return false, nil, err + } + + if len(proposalChanges.Added)+len(proposalChanges.Removed) == 0 { + return false, nil, nil + } + + return true, proposalChanges, nil + } +} + +// OnDealStateAmtChanged detects changes in the deal state AMT for all deal states and returns a MarketDealStateChanges structure containing: +// - Added Deals +// - Modified Deals +// - Removed Deals +func (sp *StatePredicates) OnDealStateAmtChanged() DiffDealStatesFunc { + return func(ctx context.Context, oldDealStates, newDealStates market.DealStates) (changed bool, user UserData, err error) { + dealStateChanges, err := market.DiffDealStates(oldDealStates, newDealStates) + if err != nil { + return false, nil, err + } + + if len(dealStateChanges.Added)+len(dealStateChanges.Modified)+len(dealStateChanges.Removed) == 0 { + return false, nil, nil + } + + return true, dealStateChanges, nil + } +} + +// ChangedDeals is a set of changes to deal state +type ChangedDeals map[abi.DealID]market.DealStateChange + +// DealStateChangedForIDs detects changes in the deal state AMT for the given deal IDs +func (sp *StatePredicates) DealStateChangedForIDs(dealIds []abi.DealID) DiffDealStatesFunc { + return func(ctx context.Context, oldDealStates, newDealStates 
market.DealStates) (changed bool, user UserData, err error) { + changedDeals := make(ChangedDeals) + for _, dealID := range dealIds { + + // If the deal has been removed, we just set it to nil + oldDeal, oldFound, err := oldDealStates.Get(dealID) + if err != nil { + return false, nil, err + } + + newDeal, newFound, err := newDealStates.Get(dealID) + if err != nil { + return false, nil, err + } + + existenceChanged := oldFound != newFound + valueChanged := (oldFound && newFound) && *oldDeal != *newDeal + if existenceChanged || valueChanged { + changedDeals[dealID] = market.DealStateChange{ID: dealID, From: oldDeal, To: newDeal} + } + } + if len(changedDeals) > 0 { + return true, changedDeals, nil + } + return false, nil, nil + } +} + +// ChangedBalances is a set of changes to deal state +type ChangedBalances map[address.Address]BalanceChange + +// BalanceChange is a change in balance from -> to +type BalanceChange struct { + From abi.TokenAmount + To abi.TokenAmount +} + +// AvailableBalanceChangedForAddresses detects changes in the escrow table for the given addresses +func (sp *StatePredicates) AvailableBalanceChangedForAddresses(getAddrs func() []address.Address) DiffBalanceTablesFunc { + return func(ctx context.Context, oldBalances, newBalances BalanceTables) (changed bool, user UserData, err error) { + changedBalances := make(ChangedBalances) + addrs := getAddrs() + for _, addr := range addrs { + // If the deal has been removed, we just set it to nil + oldEscrowBalance, err := oldBalances.EscrowTable.Get(addr) + if err != nil { + return false, nil, err + } + + oldLockedBalance, err := oldBalances.LockedTable.Get(addr) + if err != nil { + return false, nil, err + } + + oldBalance := big.Sub(oldEscrowBalance, oldLockedBalance) + + newEscrowBalance, err := newBalances.EscrowTable.Get(addr) + if err != nil { + return false, nil, err + } + + newLockedBalance, err := newBalances.LockedTable.Get(addr) + if err != nil { + return false, nil, err + } + + newBalance := 
big.Sub(newEscrowBalance, newLockedBalance) + + if !oldBalance.Equals(newBalance) { + changedBalances[addr] = BalanceChange{oldBalance, newBalance} + } + } + if len(changedBalances) > 0 { + return true, changedBalances, nil + } + return false, nil, nil + } +} + +type DiffMinerActorStateFunc func(ctx context.Context, oldState miner.State, newState miner.State) (changed bool, user UserData, err error) + +func (sp *StatePredicates) OnInitActorChange(diffInitActorState DiffInitActorStateFunc) DiffTipSetKeyFunc { + return sp.OnActorStateChanged(init_.Address, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) { + oldState, err := init_.Load(adt.WrapStore(ctx, sp.cst), oldActorState) + if err != nil { + return false, nil, err + } + newState, err := init_.Load(adt.WrapStore(ctx, sp.cst), newActorState) + if err != nil { + return false, nil, err + } + return diffInitActorState(ctx, oldState, newState) + }) +} + +func (sp *StatePredicates) OnMinerActorChange(minerAddr address.Address, diffMinerActorState DiffMinerActorStateFunc) DiffTipSetKeyFunc { + return sp.OnActorStateChanged(minerAddr, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) { + oldState, err := miner.Load(adt.WrapStore(ctx, sp.cst), oldActorState) + if err != nil { + return false, nil, err + } + newState, err := miner.Load(adt.WrapStore(ctx, sp.cst), newActorState) + if err != nil { + return false, nil, err + } + return diffMinerActorState(ctx, oldState, newState) + }) +} + +func (sp *StatePredicates) OnMinerSectorChange() DiffMinerActorStateFunc { + return func(ctx context.Context, oldState, newState miner.State) (changed bool, user UserData, err error) { + sectorChanges, err := miner.DiffSectors(oldState, newState) + if err != nil { + return false, nil, err + } + // nothing changed + if len(sectorChanges.Added)+len(sectorChanges.Extended)+len(sectorChanges.Removed) == 0 { + return false, 
nil, nil + } + + return true, sectorChanges, nil + } +} + +func (sp *StatePredicates) OnMinerPreCommitChange() DiffMinerActorStateFunc { + return func(ctx context.Context, oldState, newState miner.State) (changed bool, user UserData, err error) { + precommitChanges, err := miner.DiffPreCommits(oldState, newState) + if err != nil { + return false, nil, err + } + + if len(precommitChanges.Added)+len(precommitChanges.Removed) == 0 { + return false, nil, nil + } + + return true, precommitChanges, nil + } +} + +// DiffPaymentChannelStateFunc is a function that compares two states for the payment channel +type DiffPaymentChannelStateFunc func(ctx context.Context, oldState paych.State, newState paych.State) (changed bool, user UserData, err error) + +// OnPaymentChannelActorChanged calls diffPaymentChannelState when the state changes for the payment channel actor +func (sp *StatePredicates) OnPaymentChannelActorChanged(paychAddr address.Address, diffPaymentChannelState DiffPaymentChannelStateFunc) DiffTipSetKeyFunc { + return sp.OnActorStateChanged(paychAddr, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) { + oldState, err := paych.Load(adt.WrapStore(ctx, sp.cst), oldActorState) + if err != nil { + return false, nil, err + } + newState, err := paych.Load(adt.WrapStore(ctx, sp.cst), newActorState) + if err != nil { + return false, nil, err + } + return diffPaymentChannelState(ctx, oldState, newState) + }) +} + +// PayChToSendChange is a difference in the amount to send on a payment channel when the money is collected +type PayChToSendChange struct { + OldToSend abi.TokenAmount + NewToSend abi.TokenAmount +} + +// OnToSendAmountChanges monitors changes on the total amount to send from one party to the other on a payment channel +func (sp *StatePredicates) OnToSendAmountChanges() DiffPaymentChannelStateFunc { + return func(ctx context.Context, oldState paych.State, newState paych.State) (changed bool, user 
UserData, err error) { + ots, err := oldState.ToSend() + if err != nil { + return false, nil, err + } + + nts, err := newState.ToSend() + if err != nil { + return false, nil, err + } + + if ots.Equals(nts) { + return false, nil, nil + } + return true, &PayChToSendChange{ + OldToSend: ots, + NewToSend: nts, + }, nil + } +} + +type AddressPair struct { + ID address.Address + PK address.Address +} + +type DiffInitActorStateFunc func(ctx context.Context, oldState init_.State, newState init_.State) (changed bool, user UserData, err error) + +func (sp *StatePredicates) OnAddressMapChange() DiffInitActorStateFunc { + return func(ctx context.Context, oldState, newState init_.State) (changed bool, user UserData, err error) { + addressChanges, err := init_.DiffAddressMap(oldState, newState) + if err != nil { + return false, nil, err + } + if len(addressChanges.Added)+len(addressChanges.Modified)+len(addressChanges.Removed) == 0 { + return false, nil, nil + } + return true, addressChanges, nil + } +} diff --git a/pkg/events/state/predicates_test.go b/pkg/events/state/predicates_test.go new file mode 100644 index 0000000000..877b376d95 --- /dev/null +++ b/pkg/events/state/predicates_test.go @@ -0,0 +1,577 @@ +package state + +import ( + "context" + "testing" + + test "github.com/filecoin-project/venus/pkg/events/state/mock" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/ipfs/go-cid" + cbornode "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + builtin2 
"github.com/filecoin-project/specs-actors/v2/actors/builtin" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" + tutils "github.com/filecoin-project/specs-actors/v6/support/testing" +) + +var dummyCid cid.Cid + +func init() { + dummyCid, _ = cid.Parse("bafkqaaa") +} + +func TestMarketPredicates(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + bs := bstore.NewTemporarySync() + store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) + + oldDeal1 := &market2.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + SlashEpoch: 0, + } + oldDeal2 := &market2.DealState{ + SectorStartEpoch: 4, + LastUpdatedEpoch: 5, + SlashEpoch: 0, + } + oldDeals := map[abi.DealID]*market2.DealState{ + abi.DealID(1): oldDeal1, + abi.DealID(2): oldDeal2, + } + + oldProp1 := &market2.DealProposal{ + PieceCID: dummyCid, + PieceSize: 0, + VerifiedDeal: false, + Client: tutils.NewIDAddr(t, 1), + Provider: tutils.NewIDAddr(t, 1), + StartEpoch: 1, + EndEpoch: 2, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: big.Zero(), + ClientCollateral: big.Zero(), + } + oldProp2 := &market2.DealProposal{ + PieceCID: dummyCid, + PieceSize: 0, + VerifiedDeal: false, + Client: tutils.NewIDAddr(t, 1), + Provider: tutils.NewIDAddr(t, 1), + StartEpoch: 2, + EndEpoch: 3, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: big.Zero(), + ClientCollateral: big.Zero(), + } + oldProps := map[abi.DealID]*market2.DealProposal{ + abi.DealID(1): oldProp1, + abi.DealID(2): oldProp2, + } + + oldBalances := map[address.Address]balance{ + tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(1000), abi.NewTokenAmount(1000)}, + tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)}, + tutils.NewIDAddr(t, 3): {abi.NewTokenAmount(3000), abi.NewTokenAmount(2000)}, + tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(3000), 
abi.NewTokenAmount(1000)}, + } + + oldStateC := createMarketState(ctx, t, store, oldDeals, oldProps, oldBalances) + + newDeal1 := &market2.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 3, + SlashEpoch: 0, + } + + // deal 2 removed + + // added + newDeal3 := &market2.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + SlashEpoch: 3, + } + newDeals := map[abi.DealID]*market2.DealState{ + abi.DealID(1): newDeal1, + // deal 2 was removed + abi.DealID(3): newDeal3, + } + + // added + newProp3 := &market2.DealProposal{ + PieceCID: dummyCid, + PieceSize: 0, + VerifiedDeal: false, + Client: tutils.NewIDAddr(t, 1), + Provider: tutils.NewIDAddr(t, 1), + StartEpoch: 4, + EndEpoch: 4, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: big.Zero(), + ClientCollateral: big.Zero(), + } + newProps := map[abi.DealID]*market2.DealProposal{ + abi.DealID(1): oldProp1, // 1 was persisted + // prop 2 was removed + abi.DealID(3): newProp3, // new + // NB: DealProposals cannot be modified, so don't test that case. 
+ } + newBalances := map[address.Address]balance{ + tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(3000), abi.NewTokenAmount(0)}, + tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)}, + tutils.NewIDAddr(t, 4): {abi.NewTokenAmount(5000), abi.NewTokenAmount(0)}, + tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(1000), abi.NewTokenAmount(3000)}, + } + + newStateC := createMarketState(ctx, t, store, newDeals, newProps, newBalances) + + minerAddr, err := address.NewFromString("t00") + require.NoError(t, err) + oldState, err := test.MockTipset(minerAddr, 1) + require.NoError(t, err) + newState, err := test.MockTipset(minerAddr, 2) + require.NoError(t, err) + + api := test.NewMockAPI(bs) + api.SetActor(oldState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: oldStateC}) + api.SetActor(newState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: newStateC}) + + t.Run("deal ID predicate", func(t *testing.T) { + preds := NewStatePredicates(api) + + dealIds := []abi.DealID{abi.DealID(1), abi.DealID(2)} + diffIDFn := preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.DealStateChangedForIDs(dealIds))) + + // Diff a state against itself: expect no change + changed, _, err := diffIDFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, changed) + + // Diff old state against new state + changed, valIDs, err := diffIDFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.True(t, changed) + + changedDealIDs, ok := valIDs.(ChangedDeals) + require.True(t, ok) + require.Len(t, changedDealIDs, 2) + require.Contains(t, changedDealIDs, abi.DealID(1)) + require.Contains(t, changedDealIDs, abi.DealID(2)) + deal1 := changedDealIDs[abi.DealID(1)] + if deal1.From.LastUpdatedEpoch != 2 || deal1.To.LastUpdatedEpoch != 3 { + t.Fatal("Unexpected change to LastUpdatedEpoch") + } + deal2 := changedDealIDs[abi.DealID(2)] + if deal2.From.LastUpdatedEpoch != 5 || deal2.To != nil { + 
t.Fatal("Expected To to be nil") + } + + // Diff with non-existent deal. + noDeal := []abi.DealID{4} + diffNoDealFn := preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.DealStateChangedForIDs(noDeal))) + changed, _, err = diffNoDealFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.False(t, changed) + + // Test that OnActorStateChanged does not call the callback if the state has not changed + mockAddr, err := address.NewFromString("t01") + require.NoError(t, err) + actorDiffFn := preds.OnActorStateChanged(mockAddr, func(context.Context, *types.Actor, *types.Actor) (bool, UserData, error) { + t.Fatal("No state change so this should not be called") + return false, nil, nil + }) + changed, _, err = actorDiffFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, changed) + + // Test that OnDealStateChanged does not call the callback if the state has not changed + diffDealStateFn := preds.OnDealStateChanged(func(context.Context, market.DealStates, market.DealStates) (bool, UserData, error) { + t.Fatal("No state change so this should not be called") + return false, nil, nil + }) + marketState0 := test.CreateEmptyMarketState(t, store) + marketCid, err := store.Put(ctx, marketState0) + require.NoError(t, err) + marketState, err := market.Load(store, &types.Actor{ + Code: builtin2.StorageMarketActorCodeID, + Head: marketCid, + }) + require.NoError(t, err) + changed, _, err = diffDealStateFn(ctx, marketState, marketState) + require.NoError(t, err) + require.False(t, changed) + }) + + t.Run("deal state array predicate", func(t *testing.T) { + preds := NewStatePredicates(api) + diffArrFn := preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.OnDealStateAmtChanged())) + + changed, _, err := diffArrFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, changed) + + changed, valArr, err := diffArrFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + 
require.True(t, changed) + + changedDeals, ok := valArr.(*market.DealStateChanges) + require.True(t, ok) + require.Len(t, changedDeals.Added, 1) + require.Equal(t, abi.DealID(3), changedDeals.Added[0].ID) + require.True(t, dealEquality(*newDeal3, changedDeals.Added[0].Deal)) + + require.Len(t, changedDeals.Removed, 1) + + require.Len(t, changedDeals.Modified, 1) + require.Equal(t, abi.DealID(1), changedDeals.Modified[0].ID) + require.True(t, dealEquality(*newDeal1, *changedDeals.Modified[0].To)) + require.True(t, dealEquality(*oldDeal1, *changedDeals.Modified[0].From)) + + require.Equal(t, abi.DealID(2), changedDeals.Removed[0].ID) + }) + + t.Run("deal proposal array predicate", func(t *testing.T) { + preds := NewStatePredicates(api) + diffArrFn := preds.OnStorageMarketActorChanged(preds.OnDealProposalChanged(preds.OnDealProposalAmtChanged())) + changed, _, err := diffArrFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, changed) + + changed, valArr, err := diffArrFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.True(t, changed) + + changedProps, ok := valArr.(*market.DealProposalChanges) + require.True(t, ok) + require.Len(t, changedProps.Added, 1) + require.Equal(t, abi.DealID(3), changedProps.Added[0].ID) + + // proposals cannot be modified -- no modified testing + + require.Len(t, changedProps.Removed, 1) + require.Equal(t, abi.DealID(2), changedProps.Removed[0].ID) + }) + + t.Run("balances predicate", func(t *testing.T) { + preds := NewStatePredicates(api) + + getAddresses := func() []address.Address { + return []address.Address{tutils.NewIDAddr(t, 1), tutils.NewIDAddr(t, 2), tutils.NewIDAddr(t, 3), tutils.NewIDAddr(t, 4)} + } + diffBalancesFn := preds.OnStorageMarketActorChanged(preds.OnBalanceChanged(preds.AvailableBalanceChangedForAddresses(getAddresses))) + + // Diff a state against itself: expect no change + changed, _, err := diffBalancesFn(ctx, oldState.Key(), oldState.Key()) + 
require.NoError(t, err) + require.False(t, changed) + + // Diff old state against new state + changed, valIDs, err := diffBalancesFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.True(t, changed) + + changedBalances, ok := valIDs.(ChangedBalances) + require.True(t, ok) + require.Len(t, changedBalances, 3) + require.Contains(t, changedBalances, tutils.NewIDAddr(t, 1)) + require.Contains(t, changedBalances, tutils.NewIDAddr(t, 3)) + require.Contains(t, changedBalances, tutils.NewIDAddr(t, 4)) + + balance1 := changedBalances[tutils.NewIDAddr(t, 1)] + if !balance1.From.Equals(abi.NewTokenAmount(1000)) || !balance1.To.Equals(abi.NewTokenAmount(3000)) { + t.Fatal("Unexpected change to balance") + } + balance3 := changedBalances[tutils.NewIDAddr(t, 3)] + if !balance3.From.Equals(abi.NewTokenAmount(3000)) || !balance3.To.Equals(abi.NewTokenAmount(0)) { + t.Fatal("Unexpected change to balance") + } + balance4 := changedBalances[tutils.NewIDAddr(t, 4)] + if !balance4.From.Equals(abi.NewTokenAmount(0)) || !balance4.To.Equals(abi.NewTokenAmount(5000)) { + t.Fatal("Unexpected change to balance") + } + + // Diff with non-existent address. 
+ getNoAddress := func() []address.Address { return []address.Address{tutils.NewIDAddr(t, 6)} } + diffNoAddressFn := preds.OnStorageMarketActorChanged(preds.OnBalanceChanged(preds.AvailableBalanceChangedForAddresses(getNoAddress))) + changed, _, err = diffNoAddressFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.False(t, changed) + + // Test that OnBalanceChanged does not call the callback if the state has not changed + diffDealBalancesFn := preds.OnBalanceChanged(func(context.Context, BalanceTables, BalanceTables) (bool, UserData, error) { + t.Fatal("No state change so this should not be called") + return false, nil, nil + }) + marketState0 := test.CreateEmptyMarketState(t, store) + marketCid, err := store.Put(ctx, marketState0) + require.NoError(t, err) + marketState, err := market.Load(store, &types.Actor{ + Code: builtin2.StorageMarketActorCodeID, + Head: marketCid, + }) + require.NoError(t, err) + changed, _, err = diffDealBalancesFn(ctx, marketState, marketState) + require.NoError(t, err) + require.False(t, changed) + }) +} + +func TestMinerSectorChange(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + bs := bstore.NewTemporarySync() + store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) + + nextID := uint64(0) + nextIDAddrF := func() address.Address { + defer func() { nextID++ }() + return tutils.NewIDAddr(t, nextID) + } + + owner, worker := nextIDAddrF(), nextIDAddrF() + si0 := newSectorOnChainInfo(0, tutils.MakeCID("0", &miner2.SealedCIDPrefix), big.NewInt(0), abi.ChainEpoch(0), abi.ChainEpoch(10)) + si1 := newSectorOnChainInfo(1, tutils.MakeCID("1", &miner2.SealedCIDPrefix), big.NewInt(1), abi.ChainEpoch(1), abi.ChainEpoch(11)) + si2 := newSectorOnChainInfo(2, tutils.MakeCID("2", &miner2.SealedCIDPrefix), big.NewInt(2), abi.ChainEpoch(2), abi.ChainEpoch(11)) + oldMinerC := createMinerState(ctx, t, store, owner, worker, []miner.SectorOnChainInfo{si0, si1, si2}) + + si3 := newSectorOnChainInfo(3, 
tutils.MakeCID("3", &miner2.SealedCIDPrefix), big.NewInt(3), abi.ChainEpoch(3), abi.ChainEpoch(12)) + // 0 delete + // 1 extend + // 2 same + // 3 added + si1Ext := si1 + si1Ext.Expiration++ + newMinerC := createMinerState(ctx, t, store, owner, worker, []miner.SectorOnChainInfo{si1Ext, si2, si3}) + + minerAddr := nextIDAddrF() + oldState, err := test.MockTipset(minerAddr, 1) + require.NoError(t, err) + newState, err := test.MockTipset(minerAddr, 2) + require.NoError(t, err) + + api := test.NewMockAPI(bs) + api.SetActor(oldState.Key(), &types.Actor{Head: oldMinerC, Code: builtin2.StorageMinerActorCodeID}) + api.SetActor(newState.Key(), &types.Actor{Head: newMinerC, Code: builtin2.StorageMinerActorCodeID}) + + preds := NewStatePredicates(api) + + minerDiffFn := preds.OnMinerActorChange(minerAddr, preds.OnMinerSectorChange()) + change, val, err := minerDiffFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.True(t, change) + require.NotNil(t, val) + + sectorChanges, ok := val.(*miner.SectorChanges) + require.True(t, ok) + + require.Equal(t, len(sectorChanges.Added), 1) + require.Equal(t, 1, len(sectorChanges.Added)) + require.Equal(t, si3, sectorChanges.Added[0]) + + require.Equal(t, 1, len(sectorChanges.Removed)) + require.Equal(t, si0, sectorChanges.Removed[0]) + + require.Equal(t, 1, len(sectorChanges.Extended)) + require.Equal(t, si1, sectorChanges.Extended[0].From) + require.Equal(t, si1Ext, sectorChanges.Extended[0].To) + + change, val, err = minerDiffFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, change) + require.Nil(t, val) + + change, val, err = minerDiffFn(ctx, newState.Key(), oldState.Key()) + require.NoError(t, err) + require.True(t, change) + require.NotNil(t, val) + + sectorChanges, ok = val.(*miner.SectorChanges) + require.True(t, ok) + + require.Equal(t, 1, len(sectorChanges.Added)) + require.Equal(t, si0, sectorChanges.Added[0]) + + require.Equal(t, 1, len(sectorChanges.Removed)) + 
require.Equal(t, si3, sectorChanges.Removed[0]) + + require.Equal(t, 1, len(sectorChanges.Extended)) + require.Equal(t, si1, sectorChanges.Extended[0].To) + require.Equal(t, si1Ext, sectorChanges.Extended[0].From) +} + +type balance struct { + available abi.TokenAmount + locked abi.TokenAmount +} + +func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState, props map[abi.DealID]*market2.DealProposal, balances map[address.Address]balance) cid.Cid { + dealRootCid := test.CreateDealAMT(ctx, t, store, deals) + propRootCid := createProposalAMT(ctx, t, store, props) + balancesCids := createBalanceTable(ctx, t, store, balances) + state := test.CreateEmptyMarketState(t, store) + state.States = dealRootCid + state.Proposals = propRootCid + state.EscrowTable = balancesCids[0] + state.LockedTable = balancesCids[1] + + stateC, err := store.Put(ctx, state) + require.NoError(t, err) + return stateC +} + +func createProposalAMT(ctx context.Context, t *testing.T, store adt2.Store, props map[abi.DealID]*market2.DealProposal) cid.Cid { + root := adt2.MakeEmptyArray(store) + for dealID, prop := range props { + err := root.Set(uint64(dealID), prop) + require.NoError(t, err) + } + rootCid, err := root.Root() + require.NoError(t, err) + return rootCid +} + +func createBalanceTable(ctx context.Context, t *testing.T, store adt2.Store, balances map[address.Address]balance) [2]cid.Cid { + escrowMapRoot := adt2.MakeEmptyMap(store) + escrowMapRootCid, err := escrowMapRoot.Root() + require.NoError(t, err) + escrowRoot, err := adt2.AsBalanceTable(store, escrowMapRootCid) + require.NoError(t, err) + lockedMapRoot := adt2.MakeEmptyMap(store) + lockedMapRootCid, err := lockedMapRoot.Root() + require.NoError(t, err) + lockedRoot, err := adt2.AsBalanceTable(store, lockedMapRootCid) + require.NoError(t, err) + + for addr, balance := range balances { + err := escrowRoot.Add(addr, big.Add(balance.available, balance.locked)) + require.NoError(t, 
err) + err = lockedRoot.Add(addr, balance.locked) + require.NoError(t, err) + + } + escrowRootCid, err := escrowRoot.Root() + require.NoError(t, err) + lockedRootCid, err := lockedRoot.Root() + require.NoError(t, err) + return [2]cid.Cid{escrowRootCid, lockedRootCid} +} + +func createMinerState(ctx context.Context, t *testing.T, store adt2.Store, owner, worker address.Address, sectors []miner.SectorOnChainInfo) cid.Cid { + rootCid := createSectorsAMT(ctx, t, store, sectors) + + state := createEmptyMinerState(ctx, t, store, owner, worker) + state.Sectors = rootCid + + stateC, err := store.Put(ctx, state) + require.NoError(t, err) + return stateC +} + +func createEmptyMinerState(ctx context.Context, t *testing.T, store adt2.Store, owner, worker address.Address) *miner2.State { + emptyArrayCid, err := adt2.MakeEmptyArray(store).Root() + require.NoError(t, err) + emptyMap, err := adt2.MakeEmptyMap(store).Root() + require.NoError(t, err) + + emptyDeadline, err := store.Put(store.Context(), miner2.ConstructDeadline(emptyArrayCid)) + require.NoError(t, err) + + emptyVestingFunds := miner2.ConstructVestingFunds() + emptyVestingFundsCid, err := store.Put(store.Context(), emptyVestingFunds) + require.NoError(t, err) + + emptyDeadlines := miner2.ConstructDeadlines(emptyDeadline) + emptyDeadlinesCid, err := store.Put(store.Context(), emptyDeadlines) + require.NoError(t, err) + + minerInfo := emptyMap + + emptyBitfield := bitfield.NewFromSet(nil) + emptyBitfieldCid, err := store.Put(store.Context(), emptyBitfield) + require.NoError(t, err) + + state, err := miner2.ConstructState(minerInfo, 123, 4, emptyBitfieldCid, emptyArrayCid, emptyMap, emptyDeadlinesCid, emptyVestingFundsCid) + require.NoError(t, err) + return state +} + +func createSectorsAMT(ctx context.Context, t *testing.T, store adt2.Store, sectors []miner.SectorOnChainInfo) cid.Cid { + root := adt2.MakeEmptyArray(store) + for _, sector := range sectors { + sector := miner2.SectorOnChainInfo{ + SectorNumber: 
sector.SectorNumber, + SealProof: sector.SealProof, + SealedCID: sector.SealedCID, + DealIDs: sector.DealIDs, + Activation: sector.Activation, + Expiration: sector.Expiration, + DealWeight: sector.DealWeight, + VerifiedDealWeight: sector.VerifiedDealWeight, + InitialPledge: sector.InitialPledge, + ExpectedDayReward: sector.ExpectedDayReward, + ExpectedStoragePledge: sector.ExpectedStoragePledge, + ReplacedSectorAge: 0, + ReplacedDayReward: big.NewInt(0), + } + err := root.Set(uint64(sector.SectorNumber), §or) + require.NoError(t, err) + } + rootCid, err := root.Root() + require.NoError(t, err) + return rootCid +} + +// returns a unique SectorOnChainInfo with each invocation with SectorNumber set to `sectorNo`. +func newSectorOnChainInfo(sectorNo abi.SectorNumber, sealed cid.Cid, weight big.Int, activation, expiration abi.ChainEpoch) miner.SectorOnChainInfo { + info := newSectorPreCommitInfo(sectorNo, sealed, expiration) + return miner.SectorOnChainInfo{ + SectorNumber: info.SectorNumber, + SealProof: info.SealProof, + SealedCID: info.SealedCID, + DealIDs: info.DealIDs, + Expiration: info.Expiration, + + Activation: activation, + DealWeight: weight, + VerifiedDealWeight: weight, + InitialPledge: big.Zero(), + ExpectedDayReward: big.Zero(), + ExpectedStoragePledge: big.Zero(), + } +} + +const ( + sectorSealRandEpochValue = abi.ChainEpoch(1) +) + +// returns a unique SectorPreCommitInfo with each invocation with SectorNumber set to `sectorNo`. 
+func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiration abi.ChainEpoch) *miner2.SectorPreCommitInfo { + return &miner2.SectorPreCommitInfo{ + SealProof: abi.RegisteredSealProof_StackedDrg32GiBV1, + SectorNumber: sectorNo, + SealedCID: sealed, + SealRandEpoch: sectorSealRandEpochValue, + DealIDs: nil, + Expiration: expiration, + } +} + +func dealEquality(expected market2.DealState, actual market.DealState) bool { + return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch && + expected.SectorStartEpoch == actual.SectorStartEpoch && + expected.SlashEpoch == actual.SlashEpoch +} diff --git a/pkg/events/tscache.go b/pkg/events/tscache.go new file mode 100644 index 0000000000..867707078b --- /dev/null +++ b/pkg/events/tscache.go @@ -0,0 +1,217 @@ +package events + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type tsCacheAPI interface { + ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + ChainHead(context.Context) (*types.TipSet, error) +} + +// tipSetCache implements a simple ring-buffer cache to keep track of recent +// tipsets +type tipSetCache struct { + mu sync.RWMutex + + byKey map[types.TipSetKey]*types.TipSet + byHeight []*types.TipSet + start int // chain head (end) + len int + + storage tsCacheAPI +} + +func newTSCache(storage tsCacheAPI, cap abi.ChainEpoch) *tipSetCache { + return &tipSetCache{ + byKey: make(map[types.TipSetKey]*types.TipSet, cap), + byHeight: make([]*types.TipSet, cap), + start: 0, + len: 0, + + storage: storage, + } +} + +func (tsc *tipSetCache) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + if ts, ok := tsc.byKey[tsk]; ok { + return ts, 
nil + } + return tsc.storage.ChainGetTipSet(ctx, tsk) +} + +func (tsc *tipSetCache) ChainGetTipSetByHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { + return tsc.get(ctx, height, tsk, true) +} + +func (tsc *tipSetCache) ChainGetTipSetAfterHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { + return tsc.get(ctx, height, tsk, false) +} + +func (tsc *tipSetCache) get(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey, prev bool) (*types.TipSet, error) { + fallback := tsc.storage.ChainGetTipSetAfterHeight + if prev { + fallback = tsc.storage.ChainGetTipSetByHeight + } + tsc.mu.RLock() + + // Nothing in the cache? + if tsc.len == 0 { + tsc.mu.RUnlock() + log.Warnf("tipSetCache.get: cache is empty, requesting from storage (h=%d)", height) + return fallback(ctx, height, tsk) + } + + // Resolve the head. + head := tsc.byHeight[tsc.start] + if !tsk.IsEmpty() { + // Not on this chain? + var ok bool + head, ok = tsc.byKey[tsk] + if !ok { + tsc.mu.RUnlock() + return fallback(ctx, height, tsk) + } + } + + headH := head.Height() + tailH := headH - abi.ChainEpoch(tsc.len) + + if headH == height { + tsc.mu.RUnlock() + return head, nil + } else if headH < height { + tsc.mu.RUnlock() + // If the user doesn't pass a tsk, we assume "head" is the last tipset we processed. 
+ return nil, fmt.Errorf("requested epoch is in the future") + } else if height < tailH { + log.Warnf("tipSetCache.get: requested tipset not in cache, requesting from storage (h=%d; tail=%d)", height, tailH) + tsc.mu.RUnlock() + return fallback(ctx, height, head.Key()) + } + + direction := 1 + if prev { + direction = -1 + } + var ts *types.TipSet + for i := 0; i < tsc.len && ts == nil; i += direction { + ts = tsc.byHeight[normalModulo(tsc.start-int(headH-height)+i, len(tsc.byHeight))] + } + tsc.mu.RUnlock() + return ts, nil +} + +func (tsc *tipSetCache) ChainHead(ctx context.Context) (*types.TipSet, error) { + tsc.mu.RLock() + best := tsc.byHeight[tsc.start] + tsc.mu.RUnlock() + if best == nil { + return tsc.storage.ChainHead(ctx) + } + return best, nil +} + +func (tsc *tipSetCache) add(to *types.TipSet) error { + tsc.mu.Lock() + defer tsc.mu.Unlock() + + if tsc.len > 0 { + best := tsc.byHeight[tsc.start] + if best.Height() >= to.Height() { + return fmt.Errorf("tipSetCache.add: expected new tipset height to be at least %d, was %d", tsc.byHeight[tsc.start].Height()+1, to.Height()) + } + if best.Key() != to.Parents() { + return fmt.Errorf( + "tipSetCache.add: expected new tipset %s (%d) to follow %s (%d), its parents are %s", + to.Key(), to.Height(), best.Key(), best.Height(), best.Parents(), + ) + } + } + + nextH := to.Height() + if tsc.len > 0 { + nextH = tsc.byHeight[tsc.start].Height() + 1 + } + + // fill null blocks + for nextH != to.Height() { + tsc.start = normalModulo(tsc.start+1, len(tsc.byHeight)) + was := tsc.byHeight[tsc.start] + if was != nil { + tsc.byHeight[tsc.start] = nil + delete(tsc.byKey, was.Key()) + } + if tsc.len < len(tsc.byHeight) { + tsc.len++ + } + nextH++ + } + + tsc.start = normalModulo(tsc.start+1, len(tsc.byHeight)) + was := tsc.byHeight[tsc.start] + if was != nil { + delete(tsc.byKey, was.Key()) + } + tsc.byHeight[tsc.start] = to + if tsc.len < len(tsc.byHeight) { + tsc.len++ + } + tsc.byKey[to.Key()] = to + return nil +} + +func (tsc 
*tipSetCache) revert(from *types.TipSet) error { + tsc.mu.Lock() + defer tsc.mu.Unlock() + + return tsc.revertUnlocked(from) +} + +func (tsc *tipSetCache) revertUnlocked(ts *types.TipSet) error { + if tsc.len == 0 { + return nil // this can happen, and it's fine + } + + was := tsc.byHeight[tsc.start] + + if !was.Equals(ts) { + return errors.New("tipSetCache.revert: revert tipset didn't match cache head") + } + delete(tsc.byKey, was.Key()) + + tsc.byHeight[tsc.start] = nil + tsc.start = normalModulo(tsc.start-1, len(tsc.byHeight)) + tsc.len-- + + _ = tsc.revertUnlocked(nil) // revert null block gap + return nil +} + +func (tsc *tipSetCache) observer() TipSetObserver { //nolint + return (*tipSetCacheObserver)(tsc) +} + +type tipSetCacheObserver tipSetCache + +var _ TipSetObserver = new(tipSetCacheObserver) + +func (tsc *tipSetCacheObserver) Apply(_ context.Context, _, to *types.TipSet) error { + return (*tipSetCache)(tsc).add(to) +} + +func (tsc *tipSetCacheObserver) Revert(ctx context.Context, from, _ *types.TipSet) error { + return (*tipSetCache)(tsc).revert(from) +} + +func normalModulo(n, m int) int { + return ((n % m) + m) % m +} diff --git a/pkg/events/tscache_test.go b/pkg/events/tscache_test.go new file mode 100644 index 0000000000..de7222dff4 --- /dev/null +++ b/pkg/events/tscache_test.go @@ -0,0 +1,222 @@ +package events + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type tsCacheAPIFailOnStorageCall struct { + t *testing.T +} + +func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSetAfterHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) { + tc.t.Fatal("storage call") + return 
&types.TipSet{}, nil +} + +func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) { + tc.t.Fatal("storage call") + return &types.TipSet{}, nil +} + +func (tc *tsCacheAPIFailOnStorageCall) ChainHead(ctx context.Context) (*types.TipSet, error) { + tc.t.Fatal("storage call") + return &types.TipSet{}, nil +} + +func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + tc.t.Fatal("storage call") + return &types.TipSet{}, nil +} + +type cacheHarness struct { + t *testing.T + + miner address.Address + tsc *tipSetCache + height abi.ChainEpoch +} + +func newCacheharness(t *testing.T) *cacheHarness { + a, err := address.NewFromString("t00") + require.NoError(t, err) + + h := &cacheHarness{ + t: t, + tsc: newTSCache(&tsCacheAPIFailOnStorageCall{t: t}, 50), + height: 75, + miner: a, + } + h.addWithParents(nil) + return h +} + +func (h *cacheHarness) addWithParents(parents []cid.Cid) { + ts, err := types.NewTipSet([]*types.BlockHeader{{ + Miner: h.miner, + Height: h.height, + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + Parents: parents, + }}) + require.NoError(h.t, err) + require.NoError(h.t, h.tsc.add(ts)) + h.height++ +} + +func (h *cacheHarness) add() { + last, err := h.tsc.ChainHead(context.Background()) + require.NoError(h.t, err) + h.addWithParents(last.Cids()) +} + +func (h *cacheHarness) revert() { + best, err := h.tsc.ChainHead(context.Background()) + require.NoError(h.t, err) + err = h.tsc.revert(best) + require.NoError(h.t, err) + h.height-- +} + +func (h *cacheHarness) skip(n abi.ChainEpoch) { + h.height += n +} + +func TestTsCache(t *testing.T) { + tf.UnitTest(t) + h := newCacheharness(t) + + for i := 0; i < 9000; i++ { + if i%90 > 60 { + h.revert() + } 
else { + h.add() + } + } +} + +func TestTsCacheNulls(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + h := newCacheharness(t) + + h.add() + h.add() + h.add() + h.skip(5) + + h.add() + h.add() + + best, err := h.tsc.ChainHead(ctx) + require.NoError(t, err) + require.Equal(t, h.height-1, best.Height()) + + ts, err := h.tsc.ChainGetTipSetByHeight(ctx, h.height-1, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, h.height-1, ts.Height()) + + ts, err = h.tsc.ChainGetTipSetByHeight(ctx, h.height-2, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, h.height-2, ts.Height()) + + // Should skip the nulls and walk back to the last tipset. + ts, err = h.tsc.ChainGetTipSetByHeight(ctx, h.height-3, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, h.height-8, ts.Height()) + + ts, err = h.tsc.ChainGetTipSetByHeight(ctx, h.height-8, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, h.height-8, ts.Height()) + + best, err = h.tsc.ChainHead(ctx) + require.NoError(t, err) + require.NoError(t, h.tsc.revert(best)) + + best, err = h.tsc.ChainHead(ctx) + require.NoError(t, err) + require.NoError(t, h.tsc.revert(best)) + + best, err = h.tsc.ChainHead(ctx) + require.NoError(t, err) + require.Equal(t, h.height-8, best.Height()) + + h.skip(50) + h.add() + + ts, err = h.tsc.ChainGetTipSetByHeight(ctx, h.height-1, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, h.height-1, ts.Height()) +} + +type tsCacheAPIStorageCallCounter struct { + t *testing.T + chainGetTipSetByHeight int + chainGetTipSetAfterHeight int + chainGetTipSet int + chainHead int +} + +func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) { + tc.chainGetTipSetByHeight++ + return &types.TipSet{}, nil +} + +func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSetAfterHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) { + 
tc.chainGetTipSetAfterHeight++ + return &types.TipSet{}, nil +} + +func (tc *tsCacheAPIStorageCallCounter) ChainHead(ctx context.Context) (*types.TipSet, error) { + tc.chainHead++ + return &types.TipSet{}, nil +} + +func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + tc.chainGetTipSet++ + return &types.TipSet{}, nil +} + +func TestTsCacheEmpty(t *testing.T) { + tf.UnitTest(t) + + // Calling best on an empty cache should just call out to the chain API + callCounter := &tsCacheAPIStorageCallCounter{t: t} + tsc := newTSCache(callCounter, 50) + _, err := tsc.ChainHead(context.Background()) + require.NoError(t, err) + require.Equal(t, 1, callCounter.chainHead) +} + +func TestTsCacheSkip(t *testing.T) { + tf.UnitTest(t) + + h := newCacheharness(t) + + ts, err := types.NewTipSet([]*types.BlockHeader{{ + Miner: h.miner, + Height: h.height, + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + // With parents that don't match the last block. 
+ Parents: types.EmptyTSK.Cids(), + }}) + require.NoError(h.t, err) + err = h.tsc.add(ts) + require.Error(t, err) +} diff --git a/pkg/events/utils.go b/pkg/events/utils.go new file mode 100644 index 0000000000..69cf9b6ae5 --- /dev/null +++ b/pkg/events/utils.go @@ -0,0 +1,50 @@ +package events + +import ( + "context" + "fmt" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// CheckMsg convenience function for checking and matching messages +func (me *messageEvents) CheckMsg(smsg types.ChainMsg, hnd MsgHandler) CheckFunc { + msg := smsg.VMMessage() + + return func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { + fa, err := me.cs.StateGetActor(ctx, msg.From, ts.Key()) + if err != nil { + return false, true, err + } + + // >= because actor nonce is actually the next nonce that is expected to appear on chain + if msg.Nonce >= fa.Nonce { + return false, true, nil + } + + ml, err := me.cs.StateSearchMsg(ctx, ts.Key(), msg.Cid(), constants.LookbackNoLimit, true) + if err != nil { + return false, true, fmt.Errorf("getting receipt in CheckMsg: %w", err) + } + + if ml == nil { + more, err = hnd(msg, nil, ts, ts.Height()) + } else { + more, err = hnd(msg, &ml.Receipt, ts, ts.Height()) + } + + return true, more, err + } +} + +// MatchMsg check that a specific message is in a block message +func (me *messageEvents) MatchMsg(inmsg *types.Message) MsgMatchFunc { + return func(msg *types.Message) (matched bool, err error) { + if msg.From == inmsg.From && msg.Nonce == inmsg.Nonce && !inmsg.Equals(msg) { + return false, fmt.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce) + } + + return inmsg.Equals(msg), nil + } +} diff --git a/pkg/fork/FVMLiftoff.txt b/pkg/fork/FVMLiftoff.txt new file mode 100644 index 0000000000..4bff5c25b0 --- /dev/null +++ b/pkg/fork/FVMLiftoff.txt @@ -0,0 +1,57 @@ + . + + . ' ` . + . ' . 
+ + . ' . ' | + . ' . ' | ++ . ' . + +| ` . . ' . ' . +| + . ' . + ++ | . ' . ' | + ` . | . ' . ' | ++ + . ' . + +| ` . . ' . ' +| + . ' ++ | . ' . + ` . | '. ` . + + ` . ` . + ` . ` . ` . --- --- + ` . ` . . + /\__\ ___ /\ \ + ` . + ' | /:/ _/_ /\ \ |::\ \ + ` . | | /:/ /\__\ \:\ \ |:|:\ \ + ` . | . + /:/ /:/ / \:\ \ __|:|\:\ \ + + ' /:/_/:/ / ___ \:\__\ /::::|_\:\__\ + \:\/:/ / /\ \ |:| | \:\~~\ \/__/ + \::/__/ \:\ \|:| | \:\ \ + \:\ \ \:\__|:|__| \:\ \ + \:\__\ \::::/__/ \:\__\ + \/__/ ~~~~ \/__/ + ___ ___ ___ ___ + /\__\ /\ \ /\__\ /\__\ + ___ /:/ _/_ ___ /::\ \ /:/ _/_ /:/ _/_ + /\__\ /:/ /\__\ /\__\ /:/\:\ \ /:/ /\__\ /:/ /\__\ + ___ ___ /:/__/ /:/ /:/ / /:/ / /:/ \:\ \ /:/ /:/ / /:/ /:/ / + /\ \ /\__\ /::\ \ /:/_/:/ / /:/__/ /:/__/ \:\__\ /:/_/:/ / /:/_/:/ / + \:\ \ /:/ / \/\:\ \__ \:\/:/ / /::\ \ \:\ \ /:/ / \:\/:/ / \:\/:/ / + \:\ /:/ / ~~\:\/\__\ \::/__/ /:/\:\ \ \:\ /:/ / \::/__/ \::/__/ . + + \:\/:/ / \::/ / \:\ \ \/__\:\ \ \:\/:/ / \:\ \ \:\ \ . ' ` . + \::/ / /:/ / \:\__\ \:\__\ \::/ / \:\__\ \:\__\ . ' . + + \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ . ' . ' | + . ' . ' | + + . ' . + + | ` . . ' . ' . + | + . ' . + + + | . ' . ' | + ` . | . ' . ' | + + + . ' . + + | ` . . ' . ' + | + . ' + + | . ' . + ` . | '. ` . + + ` . ` . + ` . ` . ` . + ` . ` . . + + ` . + ' | + ` . | | + ` . | . 
+ + + diff --git a/pkg/fork/fork.go b/pkg/fork/fork.go new file mode 100644 index 0000000000..400c16a2d0 --- /dev/null +++ b/pkg/fork/fork.go @@ -0,0 +1,2488 @@ +package fork + +import ( + "bytes" + "context" + _ "embed" + "encoding/binary" + "errors" + "fmt" + "os" + "runtime" + "sort" + "strconv" + "sync" + "time" + + "github.com/docker/go-units" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/manifest" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-state-types/rt" + gstStore "github.com/filecoin-project/go-state-types/store" + ipfsblock "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + mh "github.com/multiformats/go-multihash" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/trace" + + nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration" + "github.com/filecoin-project/specs-actors/actors/migration/nv3" + "github.com/filecoin-project/specs-actors/v2/actors/migration/nv4" + "github.com/filecoin-project/specs-actors/v2/actors/migration/nv7" + "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" + "github.com/filecoin-project/specs-actors/v4/actors/migration/nv12" + "github.com/filecoin-project/specs-actors/v5/actors/migration/nv13" + "github.com/filecoin-project/specs-actors/v6/actors/migration/nv14" + "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15" + "github.com/filecoin-project/specs-actors/v8/actors/migration/nv16" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + multisig0 
"github.com/filecoin-project/specs-actors/actors/builtin/multisig" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + vmstate "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/multisig" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/system" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +//go:embed FVMLiftoff.txt +var fvmLiftoffBanner string + +var log = logging.Logger("fork") + +var ErrExpensiveFork = errors.New("refusing explicit call due to state fork at epoch") + +var ( + MigrationMaxWorkerCount int + EnvMigrationMaxWorkerCount = "VENUS_MIGRATION_MAX_WORKER_COUNT" +) + +func init() { + // the default calculation used for migration worker count + MigrationMaxWorkerCount = runtime.NumCPU() + // check if an alternative value was request by environment + if mwcs := os.Getenv(EnvMigrationMaxWorkerCount); mwcs != "" { + mwc, err := strconv.ParseInt(mwcs, 10, 32) + if err != nil { + log.Warnf("invalid value for %s (%s) defaulting to %d: %s", EnvMigrationMaxWorkerCount, mwcs, MigrationMaxWorkerCount, err) + return + } + // use value from environment + log.Infof("migration worker cound set from %s (%d)", EnvMigrationMaxWorkerCount, mwc) + MigrationMaxWorkerCount = int(mwc) + return + } + log.Infof("migration worker count: %d", MigrationMaxWorkerCount) +} + +// MigrationCache can be used to cache 
information used by a migration. This is primarily useful to +// "pre-compute" some migration state ahead of time, and make it accessible in the migration itself. +type MigrationCache interface { + Write(key string, value cid.Cid) error + Read(key string) (bool, cid.Cid, error) + Load(key string, loadFunc func() (cid.Cid, error)) (cid.Cid, error) +} + +// MigrationFunc is a migration function run at every upgrade. +// +// - The cache is a per-upgrade cache, pre-populated by pre-migrations. +// - The oldState is the state produced by the upgrade epoch. +// - The returned newState is the new state that will be used by the next epoch. +// - The height is the upgrade epoch height (already executed). +// - The tipset is the tipset for the last non-null block before the upgrade. Do +// not assume that ts.Height() is the upgrade height. +type MigrationFunc func( + ctx context.Context, + cache MigrationCache, + oldState cid.Cid, + height abi.ChainEpoch, + ts *types.TipSet, +) (newState cid.Cid, err error) + +// PreMigrationFunc is a function run _before_ a network upgrade to pre-compute part of the network +// upgrade and speed it up. +type PreMigrationFunc func( + ctx context.Context, + cache MigrationCache, + oldState cid.Cid, + height abi.ChainEpoch, + ts *types.TipSet, +) error + +// PreMigration describes a pre-migration step to prepare for a network state upgrade. Pre-migrations +// are optimizations, are not guaranteed to run, and may be canceled and/or run multiple times. +type PreMigration struct { + // PreMigration is the pre-migration function to run at the specified time. This function is + // run asynchronously and must abort promptly when canceled. + PreMigration PreMigrationFunc + + // StartWithin specifies that this pre-migration should be started at most StartWithin + // epochs before the upgrade. 
+ StartWithin abi.ChainEpoch + + // DontStartWithin specifies that this pre-migration should not be started DontStartWithin + // epochs before the final upgrade epoch. + // + // This should be set such that the pre-migration is likely to complete before StopWithin. + DontStartWithin abi.ChainEpoch + + // StopWithin specifies that this pre-migration should be stopped StopWithin epochs of the + // final upgrade epoch. + StopWithin abi.ChainEpoch +} + +type Upgrade struct { + Height abi.ChainEpoch + Network network.Version + Expensive bool + Migration MigrationFunc + + // PreMigrations specifies a set of pre-migration functions to run at the indicated epochs. + // These functions should fill the given cache with information that can speed up the + // eventual full migration at the upgrade epoch. + PreMigrations []PreMigration +} + +type UpgradeSchedule []Upgrade + +type migrationLogger struct{} + +func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) { + switch level { + case rt.DEBUG: + log.Debugf(msg, args...) + case rt.INFO: + log.Infof(msg, args...) + case rt.WARN: + log.Warnf(msg, args...) + case rt.ERROR: + log.Errorf(msg, args...) 
+ } +} + +func DefaultUpgradeSchedule(cf *ChainFork, upgradeHeight *config.ForkUpgradeConfig) UpgradeSchedule { + var us UpgradeSchedule + + updates := []Upgrade{ + { + Height: upgradeHeight.UpgradeBreezeHeight, + Network: network.Version1, + Migration: cf.UpgradeFaucetBurnRecovery, + }, + { + Height: upgradeHeight.UpgradeSmokeHeight, + Network: network.Version2, + Migration: nil, + }, + { + Height: upgradeHeight.UpgradeIgnitionHeight, + Network: network.Version3, + Migration: cf.UpgradeIgnition, + }, + { + Height: upgradeHeight.UpgradeRefuelHeight, + Network: network.Version3, + Migration: cf.UpgradeRefuel, + }, + { + Height: upgradeHeight.UpgradeAssemblyHeight, + Network: network.Version4, + Expensive: true, + Migration: cf.UpgradeActorsV2, + }, + { + Height: upgradeHeight.UpgradeTapeHeight, + Network: network.Version5, + Migration: nil, + }, + { + Height: upgradeHeight.UpgradeLiftoffHeight, + Network: network.Version5, + Migration: cf.UpgradeLiftoff, + }, + { + Height: upgradeHeight.UpgradeKumquatHeight, + Network: network.Version6, + Migration: nil, + }, + //{ + // Height: upgradeHeight.UpgradePriceListOopsHeight, + // Network: network.Version6AndAHalf, + // Migration: nil, + //}, + { + Height: upgradeHeight.UpgradeCalicoHeight, + Network: network.Version7, + Migration: cf.UpgradeCalico, + }, + { + Height: upgradeHeight.UpgradePersianHeight, + Network: network.Version8, + Migration: nil, + }, + { + Height: upgradeHeight.UpgradeOrangeHeight, + Network: network.Version9, + Migration: nil, + }, + { + Height: upgradeHeight.UpgradeTrustHeight, + Network: network.Version10, + Migration: cf.UpgradeActorsV3, + PreMigrations: []PreMigration{{ + PreMigration: cf.PreUpgradeActorsV3, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: cf.PreUpgradeActorsV3, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, + }, + { + Height: upgradeHeight.UpgradeNorwegianHeight, + Network: network.Version11, + Migration: 
nil, + }, + { + Height: upgradeHeight.UpgradeTurboHeight, + Network: network.Version12, + Migration: cf.UpgradeActorsV4, + PreMigrations: []PreMigration{{ + PreMigration: cf.PreUpgradeActorsV4, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: cf.PreUpgradeActorsV4, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, + }, + { + Height: upgradeHeight.UpgradeHyperdriveHeight, + Network: network.Version13, + Migration: cf.UpgradeActorsV5, + PreMigrations: []PreMigration{{ + PreMigration: cf.PreUpgradeActorsV5, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: cf.PreUpgradeActorsV5, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, + }, + { + Height: upgradeHeight.UpgradeChocolateHeight, + Network: network.Version14, + Migration: cf.UpgradeActorsV6, + PreMigrations: []PreMigration{{ + PreMigration: cf.PreUpgradeActorsV6, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: cf.PreUpgradeActorsV6, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, + }, + { + Height: upgradeHeight.UpgradeOhSnapHeight, + Network: network.Version15, + Migration: cf.UpgradeActorsV7, + PreMigrations: []PreMigration{{ + PreMigration: cf.PreUpgradeActorsV7, + StartWithin: 180, + DontStartWithin: 60, + StopWithin: 5, + }}, + Expensive: true, + }, + { + Height: upgradeHeight.UpgradeSkyrHeight, + Network: network.Version16, + Migration: cf.UpgradeActorsV8, + PreMigrations: []PreMigration{{ + PreMigration: cf.PreUpgradeActorsV8, + StartWithin: 180, + DontStartWithin: 60, + StopWithin: 5, + }}, + Expensive: true, + }, + { + Height: upgradeHeight.UpgradeSharkHeight, + Network: network.Version17, + Migration: cf.UpgradeActorsV9, + PreMigrations: []PreMigration{{ + PreMigration: cf.PreUpgradeActorsV9, + StartWithin: 240, + DontStartWithin: 60, + StopWithin: 20, + }, { + PreMigration: cf.PreUpgradeActorsV9, + 
StartWithin: 15, + DontStartWithin: 10, + StopWithin: 5, + }}, + Expensive: true, + }, + } + + for _, u := range updates { + if u.Height < 0 { + // upgrade disabled + continue + } + us = append(us, u) + } + return us +} + +func (us UpgradeSchedule) Validate() error { + // Make sure each upgrade is valid. + for _, u := range us { + if u.Network <= 0 { + return fmt.Errorf("cannot upgrade to version <= 0: %d", u.Network) + } + + for _, m := range u.PreMigrations { + if m.StartWithin <= 0 { + return fmt.Errorf("pre-migration must specify a positive start-within epoch") + } + + if m.DontStartWithin < 0 || m.StopWithin < 0 { + return fmt.Errorf("pre-migration must specify non-negative epochs") + } + + if m.StartWithin <= m.StopWithin { + return fmt.Errorf("pre-migration start-within must come before stop-within") + } + + // If we have a dont-start-within. + if m.DontStartWithin != 0 { + if m.DontStartWithin < m.StopWithin { + return fmt.Errorf("pre-migration dont-start-within must come before stop-within") + } + if m.StartWithin <= m.DontStartWithin { + return fmt.Errorf("pre-migration start-within must come after dont-start-within") + } + } + } + if !sort.SliceIsSorted(u.PreMigrations, func(i, j int) bool { + return u.PreMigrations[i].StartWithin > u.PreMigrations[j].StartWithin //nolint:scopelint,gosec + }) { + return fmt.Errorf("pre-migrations must be sorted by start epoch") + } + } + + // Make sure the upgrade order makes sense. + for i := 1; i < len(us); i++ { + prev := &us[i-1] + curr := &us[i] + if !(prev.Network <= curr.Network) { + return fmt.Errorf("cannot downgrade from version %d to version %d", prev.Network, curr.Network) + } + // Make sure the heights make sense. + if prev.Height < 0 { + // Previous upgrade was disabled. 
+ continue + } + if !(prev.Height < curr.Height) { + return fmt.Errorf("upgrade heights must be strictly increasing: upgrade %d was at height %d, followed by upgrade %d at height %d", i-1, prev.Height, i, curr.Height) + } + } + return nil +} + +type chainReader interface { + GetHead() *types.TipSet + GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + GetTipSetByHeight(context.Context, *types.TipSet, abi.ChainEpoch, bool) (*types.TipSet, error) + GetTipSetState(context.Context, *types.TipSet) (vmstate.Tree, error) + GetGenesisBlock(context.Context) (*types.BlockHeader, error) + GetLookbackTipSetForRound(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch, version network.Version) (*types.TipSet, cid.Cid, error) + SubHeadChanges(context.Context) chan []*types.HeadChange +} + +type IFork interface { + HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) + GetNetworkVersion(ctx context.Context, height abi.ChainEpoch) network.Version + HasExpensiveFork(ctx context.Context, height abi.ChainEpoch) bool + HasExpensiveForkBetween(parent, height abi.ChainEpoch) bool + GetForkUpgrade() *config.ForkUpgradeConfig + Start(ctx context.Context) error +} + +var _ = IFork((*ChainFork)(nil)) + +type versionSpec struct { + networkVersion network.Version + atOrBelow abi.ChainEpoch +} + +type migration struct { + upgrade MigrationFunc + preMigrations []PreMigration + cache *nv16.MemMigrationCache +} + +type ChainFork struct { + cr chainReader + bs blockstoreutil.Blockstore + ipldstore cbor.IpldStore + + // Determines the network version at any given epoch. + networkVersions []versionSpec + latestVersion network.Version + + // Maps chain epochs to upgrade functions. + stateMigrations map[abi.ChainEpoch]*migration + // A set of potentially expensive/time consuming upgrades. Explicit + // calls for, e.g., gas estimation fail against this epoch with + // ErrExpensiveFork. 
+ expensiveUpgrades map[abi.ChainEpoch]struct{} + + // upgrade param + networkType types.NetworkType + forkUpgrade *config.ForkUpgradeConfig +} + +func NewChainFork(ctx context.Context, cr chainReader, ipldstore cbor.IpldStore, bs blockstoreutil.Blockstore, networkParams *config.NetworkParamsConfig) (*ChainFork, error) { + fork := &ChainFork{ + cr: cr, + bs: bs, + ipldstore: ipldstore, + networkType: networkParams.NetworkType, + forkUpgrade: networkParams.ForkUpgradeParam, + } + + // If we have upgrades, make sure they're in-order and make sense. + us := DefaultUpgradeSchedule(fork, networkParams.ForkUpgradeParam) + if err := us.Validate(); err != nil { + return nil, err + } + + stateMigrations := make(map[abi.ChainEpoch]*migration, len(us)) + expensiveUpgrades := make(map[abi.ChainEpoch]struct{}, len(us)) + var networkVersions []versionSpec + lastVersion := networkParams.GenesisNetworkVersion + if len(us) > 0 { + // If we have any upgrades, process them and create a version schedule. + for _, upgrade := range us { + if upgrade.Migration != nil || upgrade.PreMigrations != nil { + migration := &migration{ + upgrade: upgrade.Migration, + preMigrations: upgrade.PreMigrations, + cache: nv16.NewMemMigrationCache(), + } + stateMigrations[upgrade.Height] = migration + } + if upgrade.Expensive { + expensiveUpgrades[upgrade.Height] = struct{}{} + } + networkVersions = append(networkVersions, versionSpec{ + networkVersion: lastVersion, + atOrBelow: upgrade.Height, + }) + lastVersion = upgrade.Network + } + } + + fork.networkVersions = networkVersions + fork.latestVersion = lastVersion + fork.stateMigrations = stateMigrations + fork.expensiveUpgrades = expensiveUpgrades + + return fork, nil +} + +func (c *ChainFork) Start(ctx context.Context) error { + log.Info("preMigrationWorker start ...") + go c.preMigrationWorker(ctx) + + return nil +} + +func (c *ChainFork) StateTree(ctx context.Context, st cid.Cid) (*vmstate.State, error) { + return vmstate.LoadState(ctx, c.ipldstore, 
st) +} + +func (c *ChainFork) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + retCid := root + var err error + u := c.stateMigrations[height] + if u != nil && u.upgrade != nil { + startTime := time.Now() + log.Warnw("STARTING migration", "height", height, "from", root) + // Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may + // have to migrate multiple times. + tmpCache := u.cache.Clone() + retCid, err = u.upgrade(ctx, tmpCache, root, height, ts) + if err != nil { + log.Errorw("FAILED migration", "height", height, "from", root, "error", err) + return cid.Undef, err + } + // Yes, we update the cache, even for the final upgrade epoch. Why? Reverts. This + // can save us a _lot_ of time because very few actors will have changed if we + // do a small revert then need to re-run the migration. + u.cache.Update(tmpCache) + log.Warnw("COMPLETED migration", + "height", height, + "from", root, + "to", retCid, + "duration", time.Since(startTime), + ) + } + + return retCid, nil +} + +func (c *ChainFork) HasExpensiveFork(ctx context.Context, height abi.ChainEpoch) bool { + _, ok := c.expensiveUpgrades[height] + return ok +} + +// Returns true executing tipsets between the specified heights would trigger an expensive +// migration. NOTE: migrations occurring _at_ the target height are not included, as they're +// executed _after_ the target height. +func (c *ChainFork) HasExpensiveForkBetween(parent, height abi.ChainEpoch) bool { + for h := parent; h < height; h++ { + if _, ok := c.expensiveUpgrades[h]; ok { + return true + } + } + return false +} + +func (c *ChainFork) GetNetworkVersion(ctx context.Context, height abi.ChainEpoch) network.Version { + // The epochs here are the _last_ epoch for every version, or -1 if the + // version is disabled. 
+ for _, spec := range c.networkVersions { + if height <= spec.atOrBelow { + return spec.networkVersion + } + } + return c.latestVersion +} + +func runPreMigration(ctx context.Context, fn PreMigrationFunc, cache *nv16.MemMigrationCache, ts *types.TipSet) { + height := ts.Height() + parent := ts.Blocks()[0].ParentStateRoot + + startTime := time.Now() + + log.Warn("STARTING pre-migration") + // Clone the cache so we don't actually _update_ it + // till we're done. Otherwise, if we fail, the next + // migration to use the cache may assume that + // certain blocks exist, even if they don't. + tmpCache := cache.Clone() + err := fn(ctx, tmpCache, parent, height, ts) + if err != nil { + log.Errorw("FAILED pre-migration", "error", err) + return + } + // Finally, if everything worked, update the cache. + cache.Update(tmpCache) + log.Warnw("COMPLETED pre-migration", "duration", time.Since(startTime)) +} + +func (c *ChainFork) preMigrationWorker(ctx context.Context) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + type op struct { + after abi.ChainEpoch + notAfter abi.ChainEpoch + run func(ts *types.TipSet) + } + + var wg sync.WaitGroup + defer wg.Wait() + + // Turn each pre-migration into an operation in a schedule. + var schedule []op + for upgradeEpoch, migration := range c.stateMigrations { + cache := migration.cache + for _, prem := range migration.preMigrations { + preCtx, preCancel := context.WithCancel(ctx) + migrationFunc := prem.PreMigration + + afterEpoch := upgradeEpoch - prem.StartWithin + notAfterEpoch := upgradeEpoch - prem.DontStartWithin + stopEpoch := upgradeEpoch - prem.StopWithin + // We can't start after we stop. + if notAfterEpoch > stopEpoch { + notAfterEpoch = stopEpoch - 1 + } + + // Add an op to start a pre-migration. + schedule = append(schedule, op{ + after: afterEpoch, + notAfter: notAfterEpoch, + + // TODO: are these values correct? 
+ run: func(ts *types.TipSet) { + wg.Add(1) + go func() { + defer wg.Done() + runPreMigration(preCtx, migrationFunc, cache, ts) + }() + }, + }) + + // Add an op to cancel the pre-migration if it's still running. + schedule = append(schedule, op{ + after: stopEpoch, + notAfter: -1, + run: func(ts *types.TipSet) { preCancel() }, + }) + } + } + + // Then sort by epoch. + sort.Slice(schedule, func(i, j int) bool { + return schedule[i].after < schedule[j].after + }) + + // Finally, when the head changes, see if there's anything we need to do. + // + // We're intentionally ignoring reorgs as they don't matter for our purposes. + for change := range c.cr.SubHeadChanges(ctx) { + for _, head := range change { + for len(schedule) > 0 { + op := &schedule[0] + if head.Val.Height() < op.after { + break + } + + // If we haven't passed the pre-migration height... + if op.notAfter < 0 || head.Val.Height() < op.notAfter { + op.run(head.Val) + } + schedule = schedule[1:] + } + } + } +} + +func doTransfer(tree vmstate.Tree, from, to address.Address, amt abi.TokenAmount) error { + fromAct, found, err := tree.GetActor(context.TODO(), from) + if err != nil { + return fmt.Errorf("failed to get 'from' actor for transfer: %v", err) + } + if !found { + return fmt.Errorf("did not find 'from' actor for transfer: %v", from.String()) + } + + fromAct.Balance = big.Sub(fromAct.Balance, amt) + if fromAct.Balance.Sign() < 0 { + return fmt.Errorf("(sanity) deducted more funds from target account than it had (%s, %s)", from, types.FIL(amt)) + } + + if err := tree.SetActor(context.TODO(), from, fromAct); err != nil { + return fmt.Errorf("failed to persist from actor: %v", err) + } + + toAct, found, err := tree.GetActor(context.TODO(), to) + if err != nil { + return fmt.Errorf("failed to get 'to' actor for transfer: %v", err) + } + if !found { + return fmt.Errorf("did not find 'to' actor for transfer: %v", from.String()) + } + + toAct.Balance = big.Add(toAct.Balance, amt) + + if err := 
tree.SetActor(context.TODO(), to, toAct); err != nil { + return fmt.Errorf("failed to persist to actor: %v", err) + } + + return nil +} + +func (c *ChainFork) ParentState(ts *types.TipSet) cid.Cid { + if ts == nil { + tts := c.cr.GetHead() + return tts.Blocks()[0].ParentStateRoot + } + return ts.Blocks()[0].ParentStateRoot +} + +func (c *ChainFork) UpgradeFaucetBurnRecovery(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Some initial parameters + FundsForMiners := types.FromFil(1_000_000) + LookbackEpoch := abi.ChainEpoch(32000) + AccountCap := types.FromFil(0) + BaseMinerBalance := types.FromFil(20) + DesiredReimbursementBalance := types.FromFil(5_000_000) + + isSystemAccount := func(addr address.Address) (bool, error) { + id, err := address.IDFromAddress(addr) + if err != nil { + return false, fmt.Errorf("id address: %v", err) + } + + if id < 1000 { + return true, nil + } + return false, nil + } + + minerFundsAlloc := func(pow, tpow abi.StoragePower) abi.TokenAmount { + return big.Div(big.Mul(pow, FundsForMiners), tpow) + } + + // Grab lookback state for account checks + lbts, err := c.cr.GetTipSetByHeight(ctx, ts, LookbackEpoch, false) + if err != nil { + return cid.Undef, fmt.Errorf("failed to get tipset at lookback height: %v", err) + } + + pts, err := c.cr.GetTipSet(ctx, lbts.Parents()) + if err != nil { + return cid.Undef, fmt.Errorf("failed to get tipset : %v", err) + } + + lbtree, err := c.cr.GetTipSetState(ctx, pts) + if err != nil { + return cid.Undef, fmt.Errorf("loading state tree failed: %v", err) + } + + tree, err := c.StateTree(ctx, root) + if err != nil { + return cid.Undef, fmt.Errorf("getting state tree: %v", err) + } + + type transfer struct { + From address.Address + To address.Address + Amt abi.TokenAmount + } + + // todo not needed + var transfers []transfer + //subcalls := make([]types.ExecutionTrace, 0) + //transferCb := func(trace types.ExecutionTrace) { + // subcalls 
= append(subcalls, trace) + //} + + // Take all excess funds away, put them into the reserve account + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + switch act.Code { + case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID: + sysAcc, err := isSystemAccount(addr) + if err != nil { + return fmt.Errorf("checking system account: %v", err) + } + + if !sysAcc { + transfers = append(transfers, transfer{ + From: addr, + To: builtin.ReserveAddress, + Amt: act.Balance, + }) + } + case builtin0.StorageMinerActorCodeID: + var st miner0.State + if err := c.ipldstore.Get(ctx, act.Head, &st); err != nil { + return fmt.Errorf("failed to load miner state: %v", err) + } + + var available abi.TokenAmount + { + defer func() { + if err := recover(); err != nil { + log.Warnf("Get available balance failed (%s, %s, %s): %s", addr, act.Head, act.Balance, err) + } + available = abi.NewTokenAmount(0) + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available = st.GetAvailableBalance(act.Balance) + } + + if !available.IsZero() { + transfers = append(transfers, transfer{ + From: addr, + To: builtin.ReserveAddress, + Amt: available, + }) + } + } + return nil + }) + if err != nil { + return cid.Undef, fmt.Errorf("foreach over state tree failed: %v", err) + } + + // Execute transfers from previous step + // fmt.Printf("num:%v, transfers:%v\n", len(transfers), transfers) + for _, t := range transfers { + if err := doTransfer(tree, t.From, t.To, t.Amt); err != nil { + return cid.Undef, fmt.Errorf("transfer %s %s->%s failed: %v", t.Amt, t.From, t.To, err) + } + } + + // pull up power table to give miners back some funds proportional to their power + var ps power0.State + powAct, find, err := tree.GetActor(ctx, builtin0.StoragePowerActorAddr) + if err != nil { + return cid.Undef, fmt.Errorf("failed to load power actor: %v", err) + } + + if !find { + return cid.Undef, errors.New("did not 
find power actor") + } + + if err := c.ipldstore.Get(ctx, powAct.Head, &ps); err != nil { + return cid.Undef, fmt.Errorf("failed to get power actor state: %v", err) + } + + totalPower := ps.TotalBytesCommitted + + var transfersBack []transfer + // Now, we return some funds to places where they are needed + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + lbact, _, err := lbtree.GetActor(ctx, addr) + if err != nil { + return fmt.Errorf("failed to get actor in lookback state") + } + + prevBalance := abi.NewTokenAmount(0) + if lbact != nil { + prevBalance = lbact.Balance + } + + switch act.Code { + case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID: + nbalance := big.Min(prevBalance, AccountCap) + if nbalance.Sign() != 0 { + transfersBack = append(transfersBack, transfer{ + From: builtin.ReserveAddress, + To: addr, + Amt: nbalance, + }) + } + case builtin0.StorageMinerActorCodeID: + var st miner0.State + if err := c.ipldstore.Get(ctx, act.Head, &st); err != nil { + return fmt.Errorf("failed to load miner state: %v", err) + } + + var minfo miner0.MinerInfo + if err := c.ipldstore.Get(ctx, st.Info, &minfo); err != nil { + return fmt.Errorf("failed to get miner info: %v", err) + } + + sectorsArr, err := adt0.AsArray(adt.WrapStore(ctx, c.ipldstore), st.Sectors) + if err != nil { + return fmt.Errorf("failed to load sectors array: %v", err) + } + + slen := sectorsArr.Length() + + power := big.Mul(big.NewInt(int64(slen)), big.NewInt(int64(minfo.SectorSize))) + + mfunds := minerFundsAlloc(power, totalPower) + transfersBack = append(transfersBack, transfer{ + From: builtin.ReserveAddress, + To: minfo.Worker, + Amt: mfunds, + }) + + // Now make sure to give each miner who had power at the lookback some FIL + lbact, found, err := lbtree.GetActor(ctx, addr) + if err == nil { + if found { + var lbst miner0.State + if err := c.ipldstore.Get(ctx, lbact.Head, &lbst); err != nil { + return fmt.Errorf("failed to 
load miner state: %v", err) + } + + lbsectors, err := adt0.AsArray(adt.WrapStore(ctx, c.ipldstore), lbst.Sectors) + if err != nil { + return fmt.Errorf("failed to load lb sectors array: %v", err) + } + + if lbsectors.Length() > 0 { + transfersBack = append(transfersBack, transfer{ + From: builtin.ReserveAddress, + To: minfo.Worker, + Amt: BaseMinerBalance, + }) + } + } else { + log.Warnf("did not find actor: %s", addr.String()) + } + } else { + log.Warnf("failed to get miner in lookback state: %s", err) + } + } + return nil + }) + if err != nil { + return cid.Undef, fmt.Errorf("foreach over state tree failed: %v", err) + } + + for _, t := range transfersBack { + if err := doTransfer(tree, t.From, t.To, t.Amt); err != nil { + return cid.Undef, fmt.Errorf("transfer %s %s->%s failed: %v", t.Amt, t.From, t.To, err) + } + } + + // transfer all burnt funds back to the reserve account + burntAct, find, err := tree.GetActor(ctx, builtin0.BurntFundsActorAddr) + if err != nil { + return cid.Undef, fmt.Errorf("failed to load burnt funds actor: %v", err) + } + if !find { + return cid.Undef, errors.New("did not find burnt funds actor") + } + if err := doTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance); err != nil { + return cid.Undef, fmt.Errorf("failed to unburn funds: %v", err) + } + + // Top up the reimbursement service + reimbAddr, err := address.NewFromString("t0111") + if err != nil { + return cid.Undef, fmt.Errorf("failed to parse reimbursement service address") + } + + reimb, find, err := tree.GetActor(ctx, reimbAddr) + if err != nil { + return cid.Undef, fmt.Errorf("failed to load reimbursement account actor: %v", err) + } + if !find { + return cid.Undef, errors.New("did not find reimbursement actor") + } + + difference := big.Sub(DesiredReimbursementBalance, reimb.Balance) + if err := doTransfer(tree, builtin.ReserveAddress, reimbAddr, difference); err != nil { + return cid.Undef, fmt.Errorf("failed to top up reimbursement 
account: %v", err) + } + + // Now, a final sanity check to make sure the balances all check out + total := abi.NewTokenAmount(0) + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + total = big.Add(total, act.Balance) + return nil + }) + if err != nil { + return cid.Undef, fmt.Errorf("checking final state balance failed: %v", err) + } + + exp := types.FromFil(constants.FilBase) + if !exp.Equals(total) { + return cid.Undef, fmt.Errorf("resultant state tree account balance was not correct: %s", total) + } + + return tree.Flush(ctx) +} + +func setNetworkName(ctx context.Context, store adt.Store, tree *vmstate.State, name string) error { + ia, find, err := tree.GetActor(ctx, builtin0.InitActorAddr) + if err != nil { + return fmt.Errorf("getting init actor: %v", err) + } + if !find { + return errors.New("did not find init actor") + } + + initState, err := init_.Load(store, ia) + if err != nil { + return fmt.Errorf("reading init state: %v", err) + } + + if err := initState.SetNetworkName(name); err != nil { + return fmt.Errorf("setting network name: %v", err) + } + + c, err := store.Put(ctx, initState) + if err != nil { + return fmt.Errorf("writing new init state: %v", err) + } + ia.Head = c + + if err := tree.SetActor(ctx, builtin0.InitActorAddr, ia); err != nil { + return fmt.Errorf("setting init actor: %v", err) + } + + return nil +} + +// TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting +func resetGenesisMsigs0(ctx context.Context, sm *ChainFork, store adt0.Store, tree *vmstate.State, startEpoch abi.ChainEpoch) error { + gb, err := sm.cr.GetGenesisBlock(ctx) + if err != nil { + return fmt.Errorf("getting genesis block: %v", err) + } + + gts, err := types.NewTipSet([]*types.BlockHeader{gb}) + if err != nil { + return fmt.Errorf("getting genesis tipset: %v", err) + } + + genesisTree, err := sm.StateTree(ctx, gts.Blocks()[0].ParentStateRoot) + if err != nil { + return fmt.Errorf("loading state tree: %v", err) + } + + err = 
genesisTree.ForEach(func(addr address.Address, genesisActor *types.Actor) error { + if genesisActor.Code == builtin0.MultisigActorCodeID { + currActor, find, err := tree.GetActor(ctx, addr) + if err != nil { + return fmt.Errorf("loading actor: %v", err) + } + if !find { + return fmt.Errorf("did not find actor: %s", addr.String()) + } + + var currState multisig0.State + if err := store.Get(ctx, currActor.Head, &currState); err != nil { + return fmt.Errorf("reading multisig state: %v", err) + } + + currState.StartEpoch = startEpoch + + head, err := store.Put(ctx, &currState) + if err != nil { + return fmt.Errorf("writing new multisig state: %v", err) + } + currActor.Head = head + + if err := tree.SetActor(ctx, addr, currActor); err != nil { + return fmt.Errorf("setting multisig actor: %v", err) + } + } + return nil + }) + + if err != nil { + return fmt.Errorf("iterating over genesis actors: %v", err) + } + + return nil +} + +func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) { + var b bytes.Buffer + if err := splitAddr.MarshalCBOR(&b); err != nil { + return address.Undef, fmt.Errorf("marshalling split address: %v", err) + } + + if err := binary.Write(&b, binary.BigEndian, count); err != nil { + return address.Undef, fmt.Errorf("writing count into a buffer: %v", err) + } + + if err := binary.Write(&b, binary.BigEndian, []byte("Ignition upgrade")); err != nil { + return address.Undef, fmt.Errorf("writing fork name into a buffer: %v", err) + } + + addr, err := address.NewActorAddress(b.Bytes()) + if err != nil { + return address.Undef, fmt.Errorf("create actor address: %v", err) + } + + return addr, nil +} + +func splitGenesisMultisig0(ctx context.Context, addr address.Address, store adt0.Store, tree *vmstate.State, portions uint64, epoch abi.ChainEpoch) error { + if portions < 1 { + return fmt.Errorf("cannot split into 0 portions") + } + + mact, find, err := tree.GetActor(ctx, addr) + if err != nil { + return fmt.Errorf("getting msig 
actor: %v", err) + } + if !find { + return fmt.Errorf("did not find actor: %s", addr.String()) + } + + mst, err := multisig.Load(store, mact) + if err != nil { + return fmt.Errorf("getting msig state: %v", err) + } + + signers, err := mst.Signers() + if err != nil { + return fmt.Errorf("getting msig signers: %v", err) + } + + thresh, err := mst.Threshold() + if err != nil { + return fmt.Errorf("getting msig threshold: %v", err) + } + + ibal, err := mst.InitialBalance() + if err != nil { + return fmt.Errorf("getting msig initial balance: %v", err) + } + + se, err := mst.StartEpoch() + if err != nil { + return fmt.Errorf("getting msig start epoch: %v", err) + } + + ud, err := mst.UnlockDuration() + if err != nil { + return fmt.Errorf("getting msig unlock duration: %v", err) + } + + pending, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return fmt.Errorf("failed to create empty map: %v", err) + } + + newIbal := big.Div(ibal, big.NewInt(int64(portions))) + newState := &multisig0.State{ + Signers: signers, + NumApprovalsThreshold: thresh, + NextTxnID: 0, + InitialBalance: newIbal, + StartEpoch: se, + UnlockDuration: ud, + PendingTxns: pending, + } + + scid, err := store.Put(ctx, newState) + if err != nil { + return fmt.Errorf("storing new state: %v", err) + } + + newActor := types.Actor{ + Code: builtin0.MultisigActorCodeID, + Head: scid, + Nonce: 0, + Balance: big.Zero(), + } + + i := uint64(0) + for i < portions { + keyAddr, err := makeKeyAddr(addr, i) + if err != nil { + return fmt.Errorf("creating key address: %v", err) + } + + idAddr, err := tree.RegisterNewAddress(keyAddr) + if err != nil { + return fmt.Errorf("registering new address: %v", err) + } + + err = tree.SetActor(ctx, idAddr, &newActor) + if err != nil { + return fmt.Errorf("setting new msig actor state: %v", err) + } + + if err := doTransfer(tree, addr, idAddr, newIbal); err != nil { + return fmt.Errorf("transferring split msig balance: %v", err) + } + + i++ + } + + return nil +} + +func 
resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *vmstate.State, addr address.Address, startEpoch abi.ChainEpoch, duration abi.ChainEpoch, balance abi.TokenAmount) error { + act, find, err := tree.GetActor(ctx, addr) + if err != nil { + return fmt.Errorf("getting actor: %v", err) + } + if !find { + return fmt.Errorf("did not find actor: %s", addr.String()) + } + + if !builtin.IsMultisigActor(act.Code) { + return fmt.Errorf("actor wasn't msig: %v", err) + } + + var msigState multisig0.State + if err := store.Get(ctx, act.Head, &msigState); err != nil { + return fmt.Errorf("reading multisig state: %v", err) + } + + msigState.StartEpoch = startEpoch + msigState.UnlockDuration = duration + msigState.InitialBalance = balance + + head, err := store.Put(ctx, &msigState) + if err != nil { + return fmt.Errorf("writing new multisig state: %v", err) + } + act.Head = head + + if err := tree.SetActor(ctx, addr, act); err != nil { + return fmt.Errorf("setting multisig actor: %v", err) + } + + return nil +} + +func (c *ChainFork) UpgradeIgnition(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + store := adt.WrapStore(ctx, c.ipldstore) + + if c.forkUpgrade.UpgradeLiftoffHeight <= epoch { + return cid.Undef, fmt.Errorf("liftoff height must be beyond ignition height") + } + + nst, err := nv3.MigrateStateTree(ctx, store, root, epoch) + if err != nil { + return cid.Undef, fmt.Errorf("migrating actors state: %v", err) + } + + tree, err := c.StateTree(ctx, nst) + if err != nil { + return cid.Undef, fmt.Errorf("getting state tree: %v", err) + } + + err = setNetworkName(ctx, store, tree, "ignition") + if err != nil { + return cid.Undef, fmt.Errorf("setting network name: %v", err) + } + + split1, err := address.NewFromString("t0115") + if err != nil { + return cid.Undef, fmt.Errorf("first split address: %v", err) + } + + split2, err := address.NewFromString("t0116") + if err != nil { + return cid.Undef, 
fmt.Errorf("second split address: %v", err) + } + + err = resetGenesisMsigs0(ctx, c, store, tree, c.forkUpgrade.UpgradeLiftoffHeight) + if err != nil { + return cid.Undef, fmt.Errorf("resetting genesis msig start epochs: %v", err) + } + + err = splitGenesisMultisig0(ctx, split1, store, tree, 50, epoch) + if err != nil { + return cid.Undef, fmt.Errorf("splitting first msig: %v", err) + } + + err = splitGenesisMultisig0(ctx, split2, store, tree, 50, epoch) + if err != nil { + return cid.Undef, fmt.Errorf("splitting second msig: %v", err) + } + + err = nv3.CheckStateTree(ctx, store, nst, epoch, builtin0.TotalFilecoin) + if err != nil { + return cid.Undef, fmt.Errorf("sanity check after ignition upgrade failed: %v", err) + } + + return tree.Flush(ctx) +} + +func (c *ChainFork) UpgradeRefuel(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + store := adt.WrapStore(ctx, c.ipldstore) + tree, err := c.StateTree(ctx, root) + if err != nil { + return cid.Undef, fmt.Errorf("getting state tree: %v", err) + } + + err = resetMultisigVesting0(ctx, store, tree, builtin.SaftAddress, 0, 0, big.Zero()) + if err != nil { + return cid.Undef, fmt.Errorf("tweaking msig vesting: %v", err) + } + + err = resetMultisigVesting0(ctx, store, tree, builtin.ReserveAddress, 0, 0, big.Zero()) + if err != nil { + return cid.Undef, fmt.Errorf("tweaking msig vesting: %v", err) + } + + err = resetMultisigVesting0(ctx, store, tree, builtin.RootVerifierAddress, 0, 0, big.Zero()) + if err != nil { + return cid.Undef, fmt.Errorf("tweaking msig vesting: %v", err) + } + + return tree.Flush(ctx) +} + +func linksForObj(blk ipfsblock.Block, cb func(cid.Cid)) error { + switch blk.Cid().Prefix().Codec { + case cid.DagCBOR: + err := cbg.ScanForLinks(bytes.NewReader(blk.RawData()), cb) + if err != nil { + return fmt.Errorf("cbg.ScanForLinks: %v", err) + } + return nil + case cid.Raw: + // We implicitly have all children of raw blocks. 
+ return nil + default: + return fmt.Errorf("vm flush copy method only supports dag cbor") + } +} + +func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid, cp func(ipfsblock.Block) error) error { + if root.Prefix().MhType == 0 { + // identity cid, skip + return nil + } + + blk, err := from.Get(ctx, root) + if err != nil { + return fmt.Errorf("get %s failed: %v", root, err) + } + + var lerr error + err = linksForObj(blk, func(link cid.Cid) { + if lerr != nil { + // Theres no erorr return on linksForObj callback :( + return + } + + prefix := link.Prefix() + if prefix.Codec == cid.FilCommitmentSealed || prefix.Codec == cid.FilCommitmentUnsealed { + return + } + + // We always have blocks inlined into CIDs, but we may not have their children. + if prefix.MhType == mh.IDENTITY { + // Unless the inlined block has no children. + if prefix.Codec == cid.Raw { + return + } + } else { + // If we have an object, we already have its children, skip the object. + has, err := to.Has(ctx, link) + if err != nil { + lerr = fmt.Errorf("has: %v", err) + return + } + if has { + return + } + } + + if err := copyRec(ctx, from, to, link, cp); err != nil { + lerr = err + return + } + }) + if err != nil { + return fmt.Errorf("linksForObj (%x): %v", blk.RawData(), err) + } + if lerr != nil { + return lerr + } + + if err := cp(blk); err != nil { + return fmt.Errorf("copy: %v", err) + } + return nil +} + +func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) error { + ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint + defer span.End() + + var numBlocks int + var totalCopySize int + + const batchSize = 128 + const bufCount = 3 + freeBufs := make(chan []ipfsblock.Block, bufCount) + toFlush := make(chan []ipfsblock.Block, bufCount) + for i := 0; i < bufCount; i++ { + freeBufs <- make([]ipfsblock.Block, 0, batchSize) + } + + errFlushChan := make(chan error) + + go func() { + for b := range toFlush { + if err := to.PutMany(ctx, b); err != nil { + 
close(freeBufs) + errFlushChan <- fmt.Errorf("batch put in copy: %v", err) + return + } + freeBufs <- b[:0] + } + close(errFlushChan) + close(freeBufs) + }() + + batch := <-freeBufs + batchCp := func(blk ipfsblock.Block) error { + numBlocks++ + totalCopySize += len(blk.RawData()) + + batch = append(batch, blk) + + if len(batch) >= batchSize { + toFlush <- batch + var ok bool + batch, ok = <-freeBufs + if !ok { + return <-errFlushChan + } + } + return nil + } + + if err := copyRec(ctx, from, to, root, batchCp); err != nil { + return fmt.Errorf("copyRec: %v", err) + } + + if len(batch) > 0 { + toFlush <- batch + } + close(toFlush) // close the toFlush triggering the loop to end + err := <-errFlushChan // get error out or get nil if it was closed + if err != nil { + return err + } + + span.AddAttributes( + trace.Int64Attribute("numBlocks", int64(numBlocks)), + trace.Int64Attribute("copySize", int64(totalCopySize)), + ) + + return nil +} + +func (c *ChainFork) UpgradeActorsV2(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + buf := blockstoreutil.NewTieredBstore(c.bs, blockstoreutil.NewTemporarySync()) + store := chain.ActorStore(ctx, buf) + + info, err := store.Put(ctx, new(vmstate.StateInfo0)) + if err != nil { + return cid.Undef, fmt.Errorf("failed to create new state info for actors v2: %v", err) + } + + newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig()) + if err != nil { + return cid.Undef, fmt.Errorf("upgrading to actors v2: %v", err) + } + + newRoot, err := store.Put(ctx, &vmstate.StateRoot{ + Version: vmstate.StateTreeVersion1, + Actors: newHamtRoot, + Info: info, + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to persist new state root: %v", err) + } + + // perform some basic sanity checks to make sure everything still works. 
+ if newSm, err := vmstate.LoadState(ctx, store, newRoot); err != nil { + return cid.Undef, fmt.Errorf("state tree sanity load failed: %v", err) + } else if newRoot2, err := newSm.Flush(ctx); err != nil { + return cid.Undef, fmt.Errorf("state tree sanity flush failed: %v", err) + } else if newRoot2 != newRoot { + return cid.Undef, fmt.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2) + } else if _, _, err := newSm.GetActor(ctx, builtin0.InitActorAddr); err != nil { + return cid.Undef, fmt.Errorf("failed to load init actor after upgrade: %v", err) + } + + { + from := buf + to := buf.Read() + + if err := Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, fmt.Errorf("copying migrated tree: %v", err) + } + } + + return newRoot, nil +} + +func (c *ChainFork) UpgradeLiftoff(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + tree, err := c.StateTree(ctx, root) + if err != nil { + return cid.Undef, fmt.Errorf("getting state tree: %v", err) + } + + err = setNetworkName(ctx, adt.WrapStore(ctx, c.ipldstore), tree, "mainnet") + if err != nil { + return cid.Undef, fmt.Errorf("setting network name: %v", err) + } + + return tree.Flush(ctx) +} + +func (c *ChainFork) UpgradeCalico(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + if c.networkType != types.NetworkMainnet { + return root, nil + } + + store := chain.ActorStore(ctx, c.bs) + var stateRoot vmstate.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, fmt.Errorf("failed to decode state root: %v", err) + } + + if stateRoot.Version != vmstate.StateTreeVersion1 { + return cid.Undef, fmt.Errorf( + "expected state root version 1 for calico upgrade, got %d", + stateRoot.Version, + ) + } + + newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig()) + if err != nil { + return cid.Undef, 
fmt.Errorf("running nv7 migration: %v", err) + } + + newRoot, err := store.Put(ctx, &vmstate.StateRoot{ + Version: stateRoot.Version, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to persist new state root: %v", err) + } + + // perform some basic sanity checks to make sure everything still works. + if newSm, err := vmstate.LoadState(ctx, store, newRoot); err != nil { + return cid.Undef, fmt.Errorf("state tree sanity load failed: %v", err) + } else if newRoot2, err := newSm.Flush(ctx); err != nil { + return cid.Undef, fmt.Errorf("state tree sanity flush failed: %v", err) + } else if newRoot2 != newRoot { + return cid.Undef, fmt.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2) + } else if _, _, err := newSm.GetActor(ctx, builtin0.InitActorAddr); err != nil { + return cid.Undef, fmt.Errorf("failed to load init actor after upgrade: %v", err) + } + + return newRoot, nil +} + +func terminateActor(ctx context.Context, tree *vmstate.State, addr address.Address, epoch abi.ChainEpoch) error { + a, found, err := tree.GetActor(context.TODO(), addr) + if err != nil { + return fmt.Errorf("failed to get actor to delete: %v", err) + } + if !found { + return types.ErrActorNotFound + } + + if err := doTransfer(tree, addr, builtin.BurntFundsActorAddr, a.Balance); err != nil { + return fmt.Errorf("transferring terminated actor's balance: %v", err) + } + + err = tree.DeleteActor(ctx, addr) + if err != nil { + return fmt.Errorf("deleting actor from tree: %v", err) + } + + ia, found, err := tree.GetActor(ctx, init_.Address) + if err != nil { + return fmt.Errorf("loading init actor: %v", err) + } + if !found { + return types.ErrActorNotFound + } + + ias, err := init_.Load(&vmstate.AdtStore{IpldStore: tree.Store}, ia) + if err != nil { + return fmt.Errorf("loading init actor state: %v", err) + } + + if err := ias.Remove(addr); err != nil { + return fmt.Errorf("deleting entry from address map: %v", err) + } + + nih, 
err := tree.Store.Put(ctx, ias) + if err != nil { + return fmt.Errorf("writing new init actor state: %v", err) + } + + ia.Head = nih + + return tree.SetActor(ctx, init_.Address, ia) +} + +func (c *ChainFork) UpgradeActorsV3(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + + cfg := nv10.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + newRoot, err := c.upgradeActorsV3Common(ctx, cache, root, epoch, ts, cfg) + if err != nil { + return cid.Undef, fmt.Errorf("migrating actors v3 state: %v", err) + } + + tree, err := c.StateTree(ctx, newRoot) + if err != nil { + return cid.Undef, fmt.Errorf("getting state tree: %v", err) + } + + if c.networkType == types.NetworkMainnet { + err := terminateActor(ctx, tree, types.ZeroAddress, epoch) + if err != nil && !errors.Is(err, types.ErrActorNotFound) { + return cid.Undef, fmt.Errorf("deleting zero bls actor: %v", err) + } + + newRoot, err = tree.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing state tree: %v", err) + } + } + + return newRoot, nil +} + +func (c *ChainFork) PreUpgradeActorsV3(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + log.Info("PreUpgradeActorsV3 ......") + // Use half the CPUs for pre-migration, but leave at least 3. 
+ workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + cfg := nv10.Config{MaxWorkers: uint(workerCount)} + _, err := c.upgradeActorsV3Common(ctx, cache, root, epoch, ts, cfg) + return err +} + +func (c *ChainFork) upgradeActorsV3Common( + ctx context.Context, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv10.Config, +) (cid.Cid, error) { + buf := blockstoreutil.NewTieredBstore(c.bs, blockstoreutil.NewTemporarySync()) + store := chain.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot vmstate.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, fmt.Errorf("failed to decode state root: %v", err) + } + + if stateRoot.Version != vmstate.StateTreeVersion1 { + return cid.Undef, fmt.Errorf( + "expected state root version 1 for actors v3 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, fmt.Errorf("upgrading to actors v3: %v", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &vmstate.StateRoot{ + Version: vmstate.StateTreeVersion2, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to persist new state root: %v", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, fmt.Errorf("copying migrated tree: %v", err) + } + } + + return newRoot, nil +} + +func (c *ChainFork) UpgradeActorsV4(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. 
+ workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv12.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := c.upgradeActorsV4Common(ctx, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, fmt.Errorf("migrating actors v4 state: %v", err) + } + + return newRoot, nil +} + +func (c *ChainFork) PreUpgradeActorsV4(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv12.Config{MaxWorkers: uint(workerCount)} + _, err := c.upgradeActorsV4Common(ctx, cache, root, epoch, ts, config) + return err +} + +func (c *ChainFork) upgradeActorsV4Common( + ctx context.Context, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv12.Config, +) (cid.Cid, error) { + buf := blockstoreutil.NewTieredBstore(c.bs, blockstoreutil.NewTemporarySync()) + store := chain.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot vmstate.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, fmt.Errorf("failed to decode state root: %v", err) + } + + if stateRoot.Version != vmstate.StateTreeVersion2 { + return cid.Undef, fmt.Errorf( + "expected state root version 2 for actors v4 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, fmt.Errorf("upgrading to actors v4: %v", err) + } + + // Persist the result. 
+ newRoot, err := store.Put(ctx, &vmstate.StateRoot{ + Version: vmstate.StateTreeVersion3, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to persist new state root: %v", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, fmt.Errorf("copying migrated tree: %v", err) + } + } + + return newRoot, nil +} + +func (c *ChainFork) UpgradeActorsV5(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv13.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := c.upgradeActorsV5Common(ctx, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, fmt.Errorf("migrating actors v5 state: %v", err) + } + + return newRoot, nil +} + +func (c *ChainFork) PreUpgradeActorsV5(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv13.Config{MaxWorkers: uint(workerCount)} + _, err := c.upgradeActorsV5Common(ctx, cache, root, epoch, ts, config) + return err +} + +func (c *ChainFork) upgradeActorsV5Common( + ctx context.Context, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv13.Config, +) (cid.Cid, error) { + buf := blockstoreutil.NewTieredBstore(c.bs, blockstoreutil.NewTemporarySync()) + store := chain.ActorStore(ctx, buf) + + // Load the state root. 
+ var stateRoot vmstate.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, fmt.Errorf("failed to decode state root: %v", err) + } + + if stateRoot.Version != vmstate.StateTreeVersion3 { + return cid.Undef, fmt.Errorf( + "expected state root version 3 for actors v5 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, fmt.Errorf("upgrading to actors v5: %v", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &vmstate.StateRoot{ + Version: vmstate.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to persist new state root: %v", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, fmt.Errorf("copying migrated tree: %v", err) + } + } + + return newRoot, nil +} + +func (c *ChainFork) UpgradeActorsV6(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv14.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := c.upgradeActorsV6Common(ctx, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, fmt.Errorf("migrating actors v5 state: %w", err) + } + + return newRoot, nil +} + +func (c *ChainFork) PreUpgradeActorsV6(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. 
+ workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv14.Config{MaxWorkers: uint(workerCount)} + _, err := c.upgradeActorsV6Common(ctx, cache, root, epoch, ts, config) + return err +} + +func (c *ChainFork) upgradeActorsV6Common( + ctx context.Context, + cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, + ts *types.TipSet, + config nv14.Config, +) (cid.Cid, error) { + buf := blockstoreutil.NewTieredBstore(c.bs, blockstoreutil.NewTemporarySync()) + store := chain.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot vmstate.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, fmt.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != vmstate.StateTreeVersion4 { + return cid.Undef, fmt.Errorf( + "expected state root version 4 for actors v6 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv14.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, fmt.Errorf("upgrading to actors v5: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &vmstate.StateRoot{ + Version: vmstate.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, fmt.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func (c *ChainFork) UpgradeActorsV7(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. 
+ workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv15.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := c.upgradeActorsV7Common(ctx, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, fmt.Errorf("migrating actors v6 state: %w", err) + } + + return newRoot, nil +} + +func (c *ChainFork) PreUpgradeActorsV7(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + + ver := c.GetNetworkVersion(ctx, epoch) + lbts, lbRoot, err := c.cr.GetLookbackTipSetForRound(ctx, ts, epoch, ver) + if err != nil { + return fmt.Errorf("error getting lookback ts for premigration: %w", err) + } + + config := nv15.Config{ + MaxWorkers: uint(workerCount), + ProgressLogPeriod: time.Minute * 5, + } + + _, err = c.upgradeActorsV7Common(ctx, cache, lbRoot, epoch, lbts, config) + return err +} + +func (c *ChainFork) upgradeActorsV7Common( + ctx context.Context, + cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, + ts *types.TipSet, + config nv15.Config, +) (cid.Cid, error) { + writeStore := blockstoreutil.NewAutobatch(ctx, c.bs, units.GiB/4) + // TODO: pretty sure we'd achieve nothing by doing this, confirm in review + // buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), writeStore) + store := chain.ActorStore(ctx, writeStore) + // Load the state root. 
+ var stateRoot vmstate.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, fmt.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != vmstate.StateTreeVersion4 { + return cid.Undef, fmt.Errorf( + "expected state root version 4 for actors v7 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv15.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, fmt.Errorf("upgrading to actors v7: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &vmstate.StateRoot{ + Version: vmstate.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to persist new state root: %w", err) + } + + // Persists the new tree and shuts down the flush worker + if err := writeStore.Flush(ctx); err != nil { + return cid.Undef, fmt.Errorf("writeStore flush failed: %w", err) + } + + if err := writeStore.Shutdown(ctx); err != nil { + return cid.Undef, fmt.Errorf("writeStore shutdown failed: %w", err) + } + return newRoot, nil +} + +func (c *ChainFork) UpgradeActorsV8(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. 
+ workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv16.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := c.upgradeActorsV8Common(ctx, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, fmt.Errorf("migrating actors v8 state: %w", err) + } + + fmt.Print(fvmLiftoffBanner) + + return newRoot, nil +} + +func (c *ChainFork) PreUpgradeActorsV8(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + + ver := c.GetNetworkVersion(ctx, epoch) + lbts, lbRoot, err := c.cr.GetLookbackTipSetForRound(ctx, ts, epoch, ver) + if err != nil { + return fmt.Errorf("error getting lookback ts for premigration: %w", err) + } + + config := nv16.Config{ + MaxWorkers: uint(workerCount), + ProgressLogPeriod: time.Minute * 5, + } + + _, err = c.upgradeActorsV8Common(ctx, cache, lbRoot, epoch, lbts, config) + return err +} + +func (c *ChainFork) upgradeActorsV8Common( + ctx context.Context, cache MigrationCache, + root cid.Cid, + epoch abi.ChainEpoch, + ts *types.TipSet, + config nv16.Config, +) (cid.Cid, error) { + buf := blockstoreutil.NewTieredBstore(c.bs, blockstoreutil.NewTemporarySync()) + store := chain.ActorStore(ctx, buf) + + // ensure that the manifest is loaded in the blockstore + if err := actors.LoadBundles(ctx, buf, actorstypes.Version8); err != nil { + return cid.Undef, fmt.Errorf("failed to load manifest bundle: %w", err) + } + + // Load the state root. 
+ var stateRoot vmstate.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, fmt.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != vmstate.StateTreeVersion4 { + return cid.Undef, fmt.Errorf( + "expected state root version 4 for actors v8 upgrade, got %d", + stateRoot.Version, + ) + } + + manifest, ok := actors.GetManifest(actorstypes.Version8) + if !ok { + return cid.Undef, fmt.Errorf("no manifest CID for v8 upgrade") + } + + // Perform the migration + newHamtRoot, err := nv16.MigrateStateTree(ctx, store, manifest, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, fmt.Errorf("upgrading to actors v8: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &vmstate.StateRoot{ + Version: vmstate.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, fmt.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func (c *ChainFork) UpgradeActorsV9(ctx context.Context, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. 
+ workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv17.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := c.upgradeActorsV9Common(ctx, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, fmt.Errorf("migrating actors v8 state: %w", err) + } + + return newRoot, nil +} + +func (c *ChainFork) PreUpgradeActorsV9(ctx context.Context, + cache MigrationCache, + root cid.Cid, + epoch abi.ChainEpoch, + ts *types.TipSet, +) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + + ver := c.GetNetworkVersion(ctx, epoch) + lbts, lbRoot, err := c.cr.GetLookbackTipSetForRound(ctx, ts, epoch, ver) + if err != nil { + return fmt.Errorf("error getting lookback ts for premigration: %w", err) + } + + config := nv17.Config{ + MaxWorkers: uint(workerCount), + ProgressLogPeriod: time.Minute * 5, + } + + _, err = c.upgradeActorsV9Common(ctx, cache, lbRoot, epoch, lbts, config) + return err +} + +func (c *ChainFork) upgradeActorsV9Common(ctx context.Context, + cache MigrationCache, + root cid.Cid, + epoch abi.ChainEpoch, + ts *types.TipSet, + config nv17.Config, +) (cid.Cid, error) { + writeStore := blockstoreutil.NewAutobatch(ctx, c.bs, units.GiB/4) + store := chain.ActorStore(ctx, writeStore) + + // ensure that the manifest is loaded in the blockstore + if err := actors.LoadBundles(ctx, c.bs, actorstypes.Version9); err != nil { + return cid.Undef, fmt.Errorf("failed to load manifest bundle: %w", err) + } + + // Load the state root. 
+ var stateRoot vmstate.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, fmt.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != vmstate.StateTreeVersion4 { + return cid.Undef, fmt.Errorf("expected state root version 4 for actors v9 upgrade, got %d", stateRoot.Version) + } + + manifest, ok := actors.GetManifest(actorstypes.Version9) + if !ok { + return cid.Undef, fmt.Errorf("no manifest CID for v9 upgrade") + } + + // Perform the migration + newHamtRoot, err := nv17.MigrateStateTree(ctx, store, manifest, stateRoot.Actors, epoch, config, + migrationLogger{}, cache) + if err != nil { + return cid.Undef, fmt.Errorf("upgrading to actors v9: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &vmstate.StateRoot{ + Version: vmstate.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to persist new state root: %w", err) + } + + // Persists the new tree and shuts down the flush worker + if err := writeStore.Flush(ctx); err != nil { + return cid.Undef, fmt.Errorf("writeStore flush failed: %w", err) + } + + if err := writeStore.Shutdown(ctx); err != nil { + return cid.Undef, fmt.Errorf("writeStore shutdown failed: %w", err) + } + + return newRoot, nil +} + +func (c *ChainFork) GetForkUpgrade() *config.ForkUpgradeConfig { + return c.forkUpgrade +} + +// Example upgrade function if upgrade requires only code changes +// func (c *ChainFork) upgradeActorsV9Common( +// ctx context.Context, cache MigrationCache, +// root cid.Cid, +// epoch abi.ChainEpoch, +// ts *types.TipSet, +// config nv16.Config, +// ) (cid.Cid, error) { +// buf := blockstoreutil.NewTieredBstore(c.bs, blockstoreutil.NewTemporarySync()) + +// av := actors.Version9 +// // This may change for upgrade +// newStateTreeVersion := vmstate.StateTreeVersion4 + +// // ensure that the manifest is loaded in the blockstore +// if err := actors.LoadBundles(ctx, 
buf, actors.Version9); err != nil { +// return cid.Undef, fmt.Errorf("failed to load manifest bundle: %w", err) +// } + +// newActorsManifestCid, ok := actors.GetManifest(av) +// if !ok { +// return cid.Undef, fmt.Errorf("no manifest CID for v8 upgrade") +// } + +// bstore := c.bs +// return LiteMigration(ctx, bstore, newActorsManifestCid, root, av, vmstate.StateTreeVersion4, newStateTreeVersion) +// } + +func LiteMigration(ctx context.Context, bstore blockstoreutil.Blockstore, newActorsManifestCid cid.Cid, root cid.Cid, oldAv actorstypes.Version, newAv actorstypes.Version, oldStateTreeVersion vmstate.StateTreeVersion, newStateTreeVersion vmstate.StateTreeVersion) (cid.Cid, error) { + buf := blockstoreutil.NewTieredBstore(bstore, blockstoreutil.NewTemporarySync()) + store := chain.ActorStore(ctx, buf) + adtStore := gstStore.WrapStore(ctx, store) + + // Load the state root. + var stateRoot vmstate.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, fmt.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != oldStateTreeVersion { + return cid.Undef, fmt.Errorf( + "expected state tree version %d for actors code upgrade, got %d", + oldStateTreeVersion, + stateRoot.Version, + ) + } + + st, err := vmstate.LoadState(ctx, store, root) + if err != nil { + return cid.Undef, fmt.Errorf("failed to load state tree: %w", err) + } + + oldManifestData, err := getManifestData(ctx, st) + if err != nil { + return cid.Undef, fmt.Errorf("error loading old actor manifest: %w", err) + } + + // load new manifest + newManifest, err := actors.LoadManifest(ctx, newActorsManifestCid, store) + if err != nil { + return cid.Undef, fmt.Errorf("error loading new manifest: %w", err) + } + + newManifestData := manifest.ManifestData{} + if err := store.Get(ctx, newManifest.Data, &newManifestData); err != nil { + return cid.Undef, fmt.Errorf("error loading new manifest data: %w", err) + } + + if len(oldManifestData.Entries) != 
len(actors.GetBuiltinActorsKeys(oldAv)) { + return cid.Undef, fmt.Errorf("incomplete old manifest with %d code CIDs", len(oldManifestData.Entries)) + } + if len(newManifestData.Entries) != len(actors.GetBuiltinActorsKeys(newAv)) { + return cid.Undef, fmt.Errorf("incomplete new manifest with %d code CIDs", len(newManifestData.Entries)) + } + + // Maps prior version code CIDs to migration functions. + migrations := make(map[cid.Cid]cid.Cid) + + for _, entry := range oldManifestData.Entries { + newCodeCid, ok := newManifest.Get(entry.Name) + if !ok { + return cid.Undef, fmt.Errorf("code cid for %s actor not found in new manifest", entry.Name) + } + + migrations[entry.Code] = newCodeCid + } + + startTime := time.Now() + + // Load output state tree + actorsOut, err := vmstate.NewState(adtStore, newStateTreeVersion) + if err != nil { + return cid.Undef, err + } + + // Insert migrated records in output state tree. + err = st.ForEach(func(addr address.Address, actorIn *types.Actor) error { + newCid, ok := migrations[actorIn.Code] + if !ok { + return fmt.Errorf("new code cid not found in migrations for actor %s", addr) + } + var head cid.Cid + if addr == system.Address { + newSystemState, err := system.MakeState(store, newAv, newManifest.Data) + if err != nil { + return fmt.Errorf("could not make system actor state: %w", err) + } + head, err = store.Put(ctx, newSystemState) + if err != nil { + return fmt.Errorf("could not set system actor state head: %w", err) + } + } else { + head = actorIn.Head + } + newActor := types.Actor{ + Code: newCid, + Head: head, + Nonce: actorIn.Nonce, + Balance: actorIn.Balance, + } + err = actorsOut.SetActor(ctx, addr, &newActor) + if err != nil { + return fmt.Errorf("could not set actor at address %s: %w", addr, err) + } + + return nil + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed update actor states: %w", err) + } + + elapsed := time.Since(startTime) + log.Infof("All done after %v. 
Flushing state tree root.", elapsed) + newRoot, err := actorsOut.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("failed to flush new actors: %w", err) + } + + // Persist the new tree. + { + from := buf + to := buf.Read() + + if err := Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, fmt.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func getManifestData(ctx context.Context, st *vmstate.State) (*manifest.ManifestData, error) { + wrapStore := gstStore.WrapStore(ctx, st.Store) + + systemActor, found, err := st.GetActor(ctx, system.Address) + if err != nil { + return nil, fmt.Errorf("failed to get system actor: %w", err) + } + if !found { + return nil, fmt.Errorf("not found actor") + } + systemActorState, err := system.Load(wrapStore, systemActor) + if err != nil { + return nil, fmt.Errorf("failed to load system actor state: %w", err) + } + + actorsManifestDataCid := systemActorState.GetBuiltinActors() + + var mfData manifest.ManifestData + if err := wrapStore.Get(ctx, actorsManifestDataCid, &mfData); err != nil { + return nil, fmt.Errorf("error fetching data: %w", err) + } + + return &mfData, nil +} diff --git a/pkg/fork/mock.go b/pkg/fork/mock.go new file mode 100644 index 0000000000..93e5ab60a6 --- /dev/null +++ b/pkg/fork/mock.go @@ -0,0 +1,59 @@ +package fork + +import ( + "context" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" +) + +var _ = IFork((*MockFork)(nil)) + +type MockFork struct{} + +// NewMockFork mock for test +func NewMockFork() *MockFork { + return &MockFork{} +} + +func (mockFork *MockFork) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + return root, nil +} + +func (mockFork *MockFork) GetNetworkVersion(ctx context.Context, height 
abi.ChainEpoch) network.Version { + return network.Version0 +} + +func (mockFork *MockFork) HasExpensiveFork(ctx context.Context, height abi.ChainEpoch) bool { + return false +} + +func (mockFork *MockFork) HasExpensiveForkBetween(parent, height abi.ChainEpoch) bool { + return false +} + +func (mockFork *MockFork) GetForkUpgrade() *config.ForkUpgradeConfig { + return &config.ForkUpgradeConfig{ + UpgradeSmokeHeight: -1, + UpgradeBreezeHeight: -1, + UpgradeIgnitionHeight: -1, + UpgradeLiftoffHeight: -1, + UpgradeAssemblyHeight: -1, + UpgradeRefuelHeight: -1, + UpgradeTapeHeight: -1, + UpgradeKumquatHeight: -1, + BreezeGasTampingDuration: -1, + UpgradeCalicoHeight: -1, + UpgradePersianHeight: -1, + UpgradeOrangeHeight: -1, + UpgradeClausHeight: -1, + } +} + +func (mockFork *MockFork) Start(ctx context.Context) error { + return nil +} diff --git a/pkg/fvm/cbor_gen.go b/pkg/fvm/cbor_gen.go new file mode 100644 index 0000000000..0d2dbd2026 --- /dev/null +++ b/pkg/fvm/cbor_gen.go @@ -0,0 +1,390 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package fvm + +import ( + "fmt" + "io" + "math" + "sort" + + types "github.com/filecoin-project/venus/venus-shared/types" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufFvmExecutionTrace = []byte{133} + +func (t *FvmExecutionTrace) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufFvmExecutionTrace); err != nil { + return err + } + + // t.Msg (types.Message) (struct) + if err := t.Msg.MarshalCBOR(cw); err != nil { + return err + } + + // t.MsgRct (types.MessageReceipt) (struct) + if err := t.MsgRct.MarshalCBOR(cw); err != nil { + return err + } + + // t.Error (string) (string) + if len(t.Error) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Error was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Error))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Error)); err != nil { + return err + } + + // t.GasCharges ([]fvm.FvmGasCharge) (slice) + if len(t.GasCharges) > 1000000000 { + return xerrors.Errorf("Slice value in field t.GasCharges was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.GasCharges))); err != nil { + return err + } + for _, v := range t.GasCharges { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.Subcalls ([]fvm.FvmExecutionTrace) (slice) + if len(t.Subcalls) > 1000000000 { + return xerrors.Errorf("Slice value in field t.Subcalls was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Subcalls))); err != nil { + return err + } + for _, v := range t.Subcalls { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { + *t = 
FvmExecutionTrace{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 5 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Msg (types.Message) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Msg = new(types.Message) + if err := t.Msg.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Msg pointer: %w", err) + } + } + + } + // t.MsgRct (types.MessageReceipt) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.MsgRct = new(types.MessageReceipt) + if err := t.MsgRct.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MsgRct pointer: %w", err) + } + } + + } + // t.Error (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Error = string(sval) + } + // t.GasCharges ([]fvm.FvmGasCharge) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 1000000000 { + return fmt.Errorf("t.GasCharges: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.GasCharges = make([]FvmGasCharge, extra) + } + + for i := 0; i < int(extra); i++ { + + var v FvmGasCharge + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.GasCharges[i] = v + } + + // t.Subcalls ([]fvm.FvmExecutionTrace) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 1000000000 { + return fmt.Errorf("t.Subcalls: array too large (%d)", extra) + } + + if maj != cbg.MajArray { 
+ return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Subcalls = make([]FvmExecutionTrace, extra) + } + + for i := 0; i < int(extra); i++ { + + var v FvmExecutionTrace + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Subcalls[i] = v + } + + return nil +} + +var lengthBufFvmGasCharge = []byte{132} + +func (t *FvmGasCharge) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufFvmGasCharge); err != nil { + return err + } + + // t.Name (string) (string) + if len(t.Name) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Name was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Name)); err != nil { + return err + } + + // t.TotalGas (int64) (int64) + if t.TotalGas >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalGas)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TotalGas-1)); err != nil { + return err + } + } + + // t.ComputeGas (int64) (int64) + if t.ComputeGas >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ComputeGas)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ComputeGas-1)); err != nil { + return err + } + } + + // t.StorageGas (int64) (int64) + if t.StorageGas >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StorageGas)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StorageGas-1)); err != nil { + return err + } + } + return nil +} + +func (t *FvmGasCharge) UnmarshalCBOR(r io.Reader) (err error) { + *t = FvmGasCharge{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer 
func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Name (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Name = string(sval) + } + // t.TotalGas (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.TotalGas = int64(extraI) + } + // t.ComputeGas (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.ComputeGas = int64(extraI) + } + // t.StorageGas (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StorageGas = int64(extraI) + } + return nil +} diff --git a/pkg/fvm/fvm.go b/pkg/fvm/fvm.go new file 
mode 100644 index 0000000000..b9a5c74050 --- /dev/null +++ b/pkg/fvm/fvm.go @@ -0,0 +1,734 @@ +package fvm + +import ( + "bytes" + "context" + "fmt" + "io" + "math" + "os" + "sort" + "sync" + "sync/atomic" + "time" + + ffi "github.com/filecoin-project/filecoin-ffi" + ffi_cgo "github.com/filecoin-project/filecoin-ffi/cgo" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/aerrors" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/account" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/actors/policy" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + cbg "github.com/whyrusleeping/cbor-gen" +) + +// stat counters +var ( + StatApplied uint64 +) + +var fvmLog = logging.Logger("fvm") + +var ( + _ vm.Interface = (*FVM)(nil) + _ ffi_cgo.Externs = (*FvmExtern)(nil) +) + +type FvmExtern struct { // nolint + Rand + blockstoreutil.Blockstore + epoch abi.ChainEpoch + lbState vm.LookbackStateGetter + base cid.Cid + gasPriceSchedule *gas.PricesSchedule +} + +type FvmGasCharge struct { // nolint + Name string + TotalGas 
int64 + ComputeGas int64 + StorageGas int64 +} + +// This may eventually become identical to ExecutionTrace, but we can make incremental progress towards that +type FvmExecutionTrace struct { // nolint + Msg *types.Message + MsgRct *types.MessageReceipt + Error string + + GasCharges []FvmGasCharge `cborgen:"maxlen=1000000000"` + Subcalls []FvmExecutionTrace `cborgen:"maxlen=1000000000"` +} + +func (t *FvmExecutionTrace) ToExecutionTrace() types.ExecutionTrace { + if t == nil { + return types.ExecutionTrace{} + } + + ret := types.ExecutionTrace{ + Msg: t.Msg, + MsgRct: t.MsgRct, + Error: t.Error, + Duration: 0, + GasCharges: nil, + Subcalls: nil, // Should be nil when there are no subcalls for backwards compatibility + } + + if len(t.GasCharges) > 0 { + ret.GasCharges = make([]*types.GasTrace, len(t.GasCharges)) + for i, v := range t.GasCharges { + ret.GasCharges[i] = &types.GasTrace{ + Name: v.Name, + TotalGas: v.TotalGas, + ComputeGas: v.ComputeGas, + StorageGas: v.StorageGas, + } + } + } + + if len(t.Subcalls) > 0 { + ret.Subcalls = make([]types.ExecutionTrace, len(t.Subcalls)) + + for i, v := range t.Subcalls { + ret.Subcalls[i] = v.ToExecutionTrace() + } + } + + return ret +} + +// VerifyConsensusFault is similar to the one in syscalls.go used by the LegacyVM, except it never errors +// Errors are logged and "no fault" is returned, which is functionally what go-actors does anyway +func (x *FvmExtern) VerifyConsensusFault(ctx context.Context, a, b, extra []byte) (*ffi_cgo.ConsensusFault, int64) { + totalGas := int64(0) + ret := &ffi_cgo.ConsensusFault{ + Type: ffi_cgo.ConsensusFaultNone, + } + + // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. + // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. 
+ // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so + // (which runs a syntactic check), we do it directly on the CIDs. + + // (0) cheap preliminary checks + + // can blocks be decoded properly? + var blockA, blockB types.BlockHeader + if decodeErr := blockA.UnmarshalCBOR(bytes.NewReader(a)); decodeErr != nil { + fvmLog.Infof("invalid consensus fault: cannot decode first block header: %w", decodeErr) + return ret, totalGas + } + + if decodeErr := blockB.UnmarshalCBOR(bytes.NewReader(b)); decodeErr != nil { + fvmLog.Infof("invalid consensus fault: cannot decode second block header: %w", decodeErr) + return ret, totalGas + } + + // are blocks the same? + if blockA.Cid().Equals(blockB.Cid()) { + fvmLog.Infof("invalid consensus fault: submitted blocks are the same") + return ret, totalGas + } + // (1) check conditions necessary to any consensus fault + + // were blocks mined by same miner? + if blockA.Miner != blockB.Miner { + fvmLog.Infof("invalid consensus fault: blocks not mined by the same miner") + return ret, totalGas + } + + // block a must be earlier or equal to block b, epoch wise (ie at least as early in the chain). + if blockB.Height < blockA.Height { + fvmLog.Infof("invalid consensus fault: first block must not be of higher height than second") + return ret, totalGas + } + + ret.Epoch = blockB.Height + + faultType := ffi_cgo.ConsensusFaultNone + + // (2) check for the consensus faults themselves + // (a) double-fork mining fault + if blockA.Height == blockB.Height { + faultType = ffi_cgo.ConsensusFaultDoubleForkMining + } + + // (b) time-offset mining fault + // strictly speaking no need to compare heights based on double fork mining check above, + // but at same height this would be a different fault. 
+ if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { + faultType = ffi_cgo.ConsensusFaultTimeOffsetMining + } + + // (c) parent-grinding fault + // Here extra is the "witness", a third block that shows the connection between A and B as + // A's sibling and B's parent. + // Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset + // + // B + // | + // [A, C] + var blockC types.BlockHeader + if len(extra) > 0 { + if decodeErr := blockC.UnmarshalCBOR(bytes.NewReader(extra)); decodeErr != nil { + fvmLog.Infof("invalid consensus fault: cannot decode extra: %w", decodeErr) + return ret, totalGas + } + + if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && + types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { + faultType = ffi_cgo.ConsensusFaultParentGrinding + } + } + + // (3) return if no consensus fault by now + if faultType == ffi_cgo.ConsensusFaultNone { + fvmLog.Infof("invalid consensus fault: no fault detected") + return ret, totalGas + } + + // else + // (4) expensive final checks + + // check blocks are properly signed by their respective miner + // note we do not need to check extra's: it is a parent to block b + // which itself is signed, so it was willingly included by the miner + gasA, sigErr := x.VerifyBlockSig(ctx, &blockA) + totalGas += gasA + if sigErr != nil { + fvmLog.Infof("invalid consensus fault: cannot verify first block sig: %w", sigErr) + return ret, totalGas + } + + gas2, sigErr := x.VerifyBlockSig(ctx, &blockB) + totalGas += gas2 + if sigErr != nil { + fvmLog.Infof("invalid consensus fault: cannot verify second block sig: %w", sigErr) + return ret, totalGas + } + + ret.Type = faultType + ret.Target = blockA.Miner + + return ret, totalGas +} + +func (x *FvmExtern) VerifyBlockSig(ctx context.Context, blk *types.BlockHeader) (int64, error) { + waddr, gasUsed, err := 
x.workerKeyAtLookback(ctx, blk.Miner, blk.Height) + if err != nil { + return gasUsed, err + } + + if blk.BlockSig == nil { + return 0, fmt.Errorf("no consensus fault: block %s has nil signature", blk.Cid()) + } + + sd, err := blk.SignatureData() + if err != nil { + return 0, err + } + + return gasUsed, crypto.Verify(blk.BlockSig, waddr, sd) +} + +func (x *FvmExtern) workerKeyAtLookback(ctx context.Context, minerID address.Address, height abi.ChainEpoch) (address.Address, int64, error) { + if height < x.epoch-policy.ChainFinality { + return address.Undef, 0, fmt.Errorf("cannot get worker key (currEpoch %d, height %d)", x.epoch, height) + } + gasTank := gas.NewGasTracker(constants.BlockGasLimit * 10000) + cstWithoutGas := cbor.NewCborStore(x.Blockstore) + cbb := vmcontext.NewGasChargeBlockStore(gasTank, x.gasPriceSchedule.PricelistByEpoch(x.epoch), x.Blockstore) + cstWithGas := cbor.NewCborStore(cbb) + + lbState, err := x.lbState(ctx, height) + if err != nil { + return address.Undef, 0, err + } + // get appropriate miner actor + act, err := lbState.LoadActor(ctx, minerID) + if err != nil { + return address.Undef, 0, err + } + + // use that to get the miner state + mas, err := miner.Load(adt.WrapStore(ctx, cstWithGas), act) + if err != nil { + return address.Undef, 0, err + } + + info, err := mas.Info() + if err != nil { + return address.Undef, 0, err + } + + st, err := tree.LoadState(ctx, cstWithoutGas, x.base) + if err != nil { + return address.Undef, 0, err + } + raddr, err := resolveToKeyAddr(st, info.Worker, cstWithGas) + if err != nil { + return address.Undef, 0, err + } + + return raddr, gasTank.GasUsed, nil +} + +func resolveToKeyAddr(state tree.Tree, addr address.Address, cst cbor.IpldStore) (address.Address, error) { + if addr.Protocol() == address.BLS || addr.Protocol() == address.SECP256K1 { + return addr, nil + } + + act, found, err := state.GetActor(context.TODO(), addr) + if err != nil { + return address.Undef, fmt.Errorf("failed to find actor: %s", 
addr) + } + if !found { + return address.Undef, fmt.Errorf("signer resolution found no such actor %s", addr) + } + + aast, err := account.Load(adt.WrapStore(context.TODO(), cst), act) + if err != nil { + return address.Undef, fmt.Errorf("failed to get account actor state for %s: %w", addr, err) + } + + return aast.PubkeyAddress() +} + +type FVM struct { + fvm *ffi.FVM +} + +func defaultFVMOpts(ctx context.Context, opts *vm.VmOption) (*ffi.FVMOpts, error) { + state, err := tree.LoadState(ctx, cbor.NewCborStore(opts.Bsstore), opts.PRoot) + if err != nil { + return nil, fmt.Errorf("loading state tree: %w", err) + } + + circToReport, err := opts.CircSupplyCalculator(ctx, opts.Epoch, state) + if err != nil { + return nil, fmt.Errorf("calculating circ supply: %w", err) + } + return &ffi.FVMOpts{ + FVMVersion: 0, + Externs: &FvmExtern{ + Rand: NewWrapperRand(opts.Rnd), + Blockstore: opts.Bsstore, + epoch: opts.Epoch, + lbState: opts.LookbackStateGetter, + base: opts.PRoot, gasPriceSchedule: opts.GasPriceSchedule, + }, + Epoch: opts.Epoch, + BaseFee: opts.BaseFee, + BaseCircSupply: circToReport, + NetworkVersion: opts.NetworkVersion, + StateBase: opts.PRoot, + Tracing: opts.Tracing || gas.EnableDetailedTracing, + }, nil +} + +func NewFVM(ctx context.Context, opts *vm.VmOption) (*FVM, error) { + fvmOpts, err := defaultFVMOpts(ctx, opts) + if err != nil { + return nil, fmt.Errorf("creating fvm opts: %w", err) + } + if os.Getenv("VENUS_USE_FVM_CUSTOM_BUNDLE") == "1" { + av, err := actorstypes.VersionForNetwork(opts.NetworkVersion) + if err != nil { + return nil, fmt.Errorf("mapping network version to actors version: %w", err) + } + + c, ok := actors.GetManifest(av) + if !ok { + return nil, fmt.Errorf("no manifest for custom bundle (actors version %d)", av) + } + + fvmOpts.Manifest = c + } + + fvm, err := ffi.CreateFVM(fvmOpts) + if err != nil { + return nil, err + } + + return &FVM{ + fvm: fvm, + }, nil +} + +func NewDebugFVM(ctx context.Context, opts *vm.VmOption) (*FVM, 
error) { + baseBstore := opts.Bsstore + overlayBstore := blockstoreutil.NewTemporarySync() + cborStore := cbor.NewCborStore(overlayBstore) + vmBstore := blockstoreutil.NewTieredBstore(overlayBstore, baseBstore) + + opts.Bsstore = vmBstore + fvmOpts, err := defaultFVMOpts(ctx, opts) + if err != nil { + return nil, fmt.Errorf("creating fvm opts: %w", err) + } + + fvmOpts.Debug = true + + putMapping := func(ar map[cid.Cid]cid.Cid) (cid.Cid, error) { + var mapping xMapping + + mapping.redirects = make([]xRedirect, 0, len(ar)) + for from, to := range ar { + mapping.redirects = append(mapping.redirects, xRedirect{from: from, to: to}) + } + sort.Slice(mapping.redirects, func(i, j int) bool { + return bytes.Compare(mapping.redirects[i].from.Bytes(), mapping.redirects[j].from.Bytes()) < 0 + }) + + // Passing this as a pointer of structs has proven to be an enormous PiTA; hence this code. + mappingCid, err := cborStore.Put(context.TODO(), &mapping) + if err != nil { + return cid.Undef, err + } + + return mappingCid, nil + } + + createMapping := func(debugBundlePath string) error { + mfCid, err := actors.LoadBundleFromFile(ctx, overlayBstore, debugBundlePath) + if err != nil { + return fmt.Errorf("loading debug bundle: %w", err) + } + + mf, err := actors.LoadManifest(ctx, mfCid, adt.WrapStore(ctx, cborStore)) + if err != nil { + return fmt.Errorf("loading debug manifest: %w", err) + } + + av, err := actorstypes.VersionForNetwork(opts.NetworkVersion) + if err != nil { + return fmt.Errorf("getting actors version: %w", err) + } + + // create actor redirect mapping + actorRedirect := make(map[cid.Cid]cid.Cid) + for _, key := range actors.GetBuiltinActorsKeys(av) { + from, ok := actors.GetActorCodeID(av, key) + if !ok { + fvmLog.Warnf("actor missing in the from manifest %s", key) + continue + } + + to, ok := mf.Get(key) + if !ok { + fvmLog.Warnf("actor missing in the to manifest %s", key) + continue + } + + actorRedirect[from] = to + } + + if len(actorRedirect) > 0 { + mappingCid, 
err := putMapping(actorRedirect) + if err != nil { + return fmt.Errorf("error writing redirect mapping: %w", err) + } + fvmOpts.ActorRedirect = mappingCid + } + + return nil + } + + av, err := actorstypes.VersionForNetwork(opts.NetworkVersion) + if err != nil { + return nil, fmt.Errorf("error determining actors version for network version %d: %w", opts.NetworkVersion, err) + } + + debugBundlePath := os.Getenv(fmt.Sprintf("VENUS_FVM_DEBUG_BUNDLE_V%d", av)) + if debugBundlePath != "" { + if err := createMapping(debugBundlePath); err != nil { + fvmLog.Errorf("failed to create v%d debug mapping", av) + } + } + + fvm, err := ffi.CreateFVM(fvmOpts) + if err != nil { + return nil, err + } + + return &FVM{ + fvm: fvm, + }, nil +} + +func (fvm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*vm.Ret, error) { + start := constants.Clock.Now() + defer atomic.AddUint64(&StatApplied, 1) + vmMsg := cmsg.VMMessage() + msgBytes, err := vmMsg.Serialize() + if err != nil { + return nil, fmt.Errorf("serializing msg: %w", err) + } + + ret, err := fvm.fvm.ApplyMessage(msgBytes, uint(cmsg.ChainLength())) + if err != nil { + return nil, fmt.Errorf("applying msg: %w", err) + } + + duration := time.Since(start) + receipt := types.MessageReceipt{ + Return: ret.Return, + ExitCode: exitcode.ExitCode(ret.ExitCode), + GasUsed: ret.GasUsed, + } + + var aerr aerrors.ActorError + if ret.ExitCode != 0 { + amsg := ret.FailureInfo + if amsg == "" { + amsg = "unknown error" + } + aerr = aerrors.New(exitcode.ExitCode(ret.ExitCode), amsg) + } + + var et types.ExecutionTrace + if len(ret.ExecTraceBytes) != 0 { + var fvmEt FvmExecutionTrace + if err = fvmEt.UnmarshalCBOR(bytes.NewReader(ret.ExecTraceBytes)); err != nil { + return nil, fmt.Errorf("failed to unmarshal exectrace: %w", err) + } + et = fvmEt.ToExecutionTrace() + } + + // Set the top-level exectrace info from the message and receipt for backwards compatibility + et.Msg = vmMsg + et.MsgRct = &receipt + et.Duration = duration + if 
aerr != nil { + et.Error = aerr.Error() + } + + return &vm.Ret{ + Receipt: receipt, + OutPuts: gas.GasOutputs{ + BaseFeeBurn: ret.BaseFeeBurn, + OverEstimationBurn: ret.OverEstimationBurn, + MinerPenalty: ret.MinerPenalty, + MinerTip: ret.MinerTip, + Refund: ret.Refund, + GasRefund: ret.GasRefund, + GasBurned: ret.GasBurned, + }, + ActorErr: aerr, + GasTracker: &gas.GasTracker{ + ExecutionTrace: et, + }, + Duration: time.Since(start), + }, nil +} + +func (fvm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg types.ChainMsg) (*vm.Ret, error) { + start := constants.Clock.Now() + defer atomic.AddUint64(&StatApplied, 1) + vmMsg := cmsg.VMMessage() + vmMsg.GasLimit = math.MaxInt64 / 2 + msgBytes, err := vmMsg.Serialize() + if err != nil { + return nil, fmt.Errorf("serializing msg: %w", err) + } + + ret, err := fvm.fvm.ApplyImplicitMessage(msgBytes) + if err != nil { + return nil, fmt.Errorf("applying msg: %w", err) + } + + duration := time.Since(start) + receipt := types.MessageReceipt{ + Return: ret.Return, + ExitCode: exitcode.ExitCode(ret.ExitCode), + GasUsed: ret.GasUsed, + } + + var aerr aerrors.ActorError + if ret.ExitCode != 0 { + amsg := ret.FailureInfo + if amsg == "" { + amsg = "unknown error" + } + aerr = aerrors.New(exitcode.ExitCode(ret.ExitCode), amsg) + } + + var et types.ExecutionTrace + if len(ret.ExecTraceBytes) != 0 { + var fvmEt FvmExecutionTrace + if err = fvmEt.UnmarshalCBOR(bytes.NewReader(ret.ExecTraceBytes)); err != nil { + return nil, fmt.Errorf("failed to unmarshal exectrace: %w", err) + } + et = fvmEt.ToExecutionTrace() + } else { + et.Msg = vmMsg + et.MsgRct = &receipt + et.Duration = duration + if aerr != nil { + et.Error = aerr.Error() + } + } + + applyRet := &vm.Ret{ + Receipt: receipt, + OutPuts: gas.GasOutputs{}, + ActorErr: aerr, + GasTracker: &gas.GasTracker{ + ExecutionTrace: et, + }, + Duration: time.Since(start), + } + + if ret.ExitCode != 0 { + return applyRet, fmt.Errorf("implicit message failed with exit code: %d and error: 
%w", ret.ExitCode, applyRet.ActorErr) + } + + return applyRet, nil +} + +func (fvm *FVM) Flush(ctx context.Context) (cid.Cid, error) { + return fvm.fvm.Flush() +} + +type dualExecutionFVM struct { + main *FVM + debug *FVM +} + +var _ vm.Interface = (*dualExecutionFVM)(nil) + +func NewDualExecutionFVM(ctx context.Context, opts *vm.VmOption) (vm.Interface, error) { + main, err := NewFVM(ctx, opts) + if err != nil { + return nil, err + } + + debug, err := NewDebugFVM(ctx, opts) + if err != nil { + return nil, err + } + + return &dualExecutionFVM{ + main: main, + debug: debug, + }, nil +} + +func (vm *dualExecutionFVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (ret *vmcontext.Ret, err error) { + var wg sync.WaitGroup + + wg.Add(2) + + go func() { + defer wg.Done() + ret, err = vm.main.ApplyMessage(ctx, cmsg) + }() + + go func() { + defer wg.Done() + if _, err := vm.debug.ApplyMessage(ctx, cmsg); err != nil { + fvmLog.Errorf("debug execution failed: %w", err) + } + }() + + wg.Wait() + return ret, err +} + +func (vm *dualExecutionFVM) ApplyImplicitMessage(ctx context.Context, msg types.ChainMsg) (ret *vmcontext.Ret, err error) { + var wg sync.WaitGroup + + wg.Add(2) + + go func() { + defer wg.Done() + ret, err = vm.main.ApplyImplicitMessage(ctx, msg) + }() + + go func() { + defer wg.Done() + if _, err := vm.debug.ApplyImplicitMessage(ctx, msg); err != nil { + fvmLog.Errorf("debug execution failed: %s", err) + } + }() + + wg.Wait() + return ret, err +} + +func (vm *dualExecutionFVM) Flush(ctx context.Context) (cid.Cid, error) { + return vm.main.Flush(ctx) +} + +// Passing this as a pointer of structs has proven to be an enormous PiTA; hence this code. 
+type ( + xRedirect struct{ from, to cid.Cid } + xMapping struct{ redirects []xRedirect } +) + +func (m *xMapping) MarshalCBOR(w io.Writer) error { + scratch := make([]byte, 9) + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(m.redirects))); err != nil { + return err + } + + for _, v := range m.redirects { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + return nil +} + +func (r *xRedirect) MarshalCBOR(w io.Writer) error { + scratch := make([]byte, 9) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(2)); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, r.from); err != nil { + return fmt.Errorf("failed to write cid field from: %w", err) + } + + if err := cbg.WriteCidBuf(scratch, w, r.to); err != nil { + return fmt.Errorf("failed to write cid field from: %w", err) + } + + return nil +} + +// WARNING: You will not affect your node's execution by misusing this feature, but you will confuse yourself thoroughly! +// An envvar that allows the user to specify debug actors bundles to be used by the FVM +// alongside regular execution. This is basically only to be used to print out specific logging information. +// Message failures, unexpected terminations,gas costs, etc. should all be ignored. 
+var useFvmDebug = os.Getenv("VENUS_FVM_DEVELOPER_DEBUG") == "1" + +func NewVM(ctx context.Context, opts vm.VmOption) (vm.Interface, error) { + if opts.NetworkVersion >= network.Version16 { + if useFvmDebug { + return NewDualExecutionFVM(ctx, &opts) + } + return NewFVM(ctx, &opts) + } + + return vm.NewLegacyVM(ctx, opts) +} diff --git a/pkg/fvm/rand_wrapper.go b/pkg/fvm/rand_wrapper.go new file mode 100644 index 0000000000..d753914c8e --- /dev/null +++ b/pkg/fvm/rand_wrapper.go @@ -0,0 +1,32 @@ +package fvm + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + acrypto "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/venus/pkg/vm" +) + +type Rand interface { + GetChainRandomness(ctx context.Context, pers acrypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomness(ctx context.Context, pers acrypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) +} + +var _ Rand = (*wrapperRand)(nil) + +type wrapperRand struct { + vm.ChainRandomness +} + +func NewWrapperRand(r vm.ChainRandomness) Rand { + return wrapperRand{ChainRandomness: r} +} + +func (r wrapperRand) GetChainRandomness(ctx context.Context, pers acrypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.ChainGetRandomnessFromTickets(ctx, pers, round, entropy) +} + +func (r wrapperRand) GetBeaconRandomness(ctx context.Context, pers acrypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.ChainGetRandomnessFromBeacon(ctx, pers, round, entropy) +} diff --git a/pkg/gen/gen.go b/pkg/gen/gen.go new file mode 100644 index 0000000000..75472305a7 --- /dev/null +++ b/pkg/gen/gen.go @@ -0,0 +1,50 @@ +package gen + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" + + 
"github.com/filecoin-project/venus/pkg/gen/genesis" +) + +// CarWalkFunc get each child node under the node (nd) +func CarWalkFunc(nd format.Node) (out []*format.Link, err error) { + for _, link := range nd.Links() { + pref := link.Cid.Prefix() + if pref.Codec == cid.FilCommitmentSealed || pref.Codec == cid.FilCommitmentUnsealed { + continue + } + out = append(out, link) + } + + return out, nil +} + +var rootkeyMultisig = genesis.MultisigMeta{ + Signers: []address.Address{remAccTestKey}, + Threshold: 1, + VestingDuration: 0, + VestingStart: 0, +} + +var DefaultVerifregRootkeyActor = genesis.Actor{ + Type: genesis.TMultisig, + Balance: big.NewInt(0), + Meta: rootkeyMultisig.ActorMeta(), +} + +var ( + remAccTestKey, _ = address.NewFromString("t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy") + remAccMeta = genesis.MultisigMeta{ + Signers: []address.Address{remAccTestKey}, + Threshold: 1, + } +) + +var DefaultRemainderAccountActor = genesis.Actor{ + Type: genesis.TMultisig, + Balance: big.NewInt(0), + Meta: remAccMeta.ActorMeta(), +} diff --git a/pkg/gen/genesis/f00_system.go b/pkg/gen/genesis/f00_system.go new file mode 100644 index 0000000000..16fb563e32 --- /dev/null +++ b/pkg/gen/genesis/f00_system.go @@ -0,0 +1,67 @@ +package genesis + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-state-types/big" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + systemtypes "github.com/filecoin-project/go-state-types/builtin/v8/system" + + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/system" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func SetupSystemActor(ctx context.Context, bs bstore.Blockstore, 
av actorstypes.Version) (*types.Actor, error) { + var st system.State + + cst := cbor.NewCborStore(bs) + // TODO pass in built-in actors cid for V8 and later + st, err := system.MakeState(adt.WrapStore(ctx, cst), av, cid.Undef) + if err != nil { + return nil, err + } + + if av >= actorstypes.Version8 { + mfCid, ok := actors.GetManifest(av) + if !ok { + return nil, fmt.Errorf("missing manifest for actors version %d", av) + } + + mf := manifest.Manifest{} + if err := cst.Get(ctx, mfCid, &mf); err != nil { + return nil, fmt.Errorf("loading manifest for actors version %d: %w", av, err) + } + + st8 := st.GetState().(*systemtypes.State) + st8.BuiltinActors = mf.Data + } + + statecid, err := cst.Put(ctx, st.GetState()) + if err != nil { + return nil, err + } + + actcid, found := actors.GetActorCodeID(av, actors.SystemKey) + if !found { + return nil, fmt.Errorf("failed to get system actor code ID for actors version %d", av) + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/pkg/gen/genesis/f01_init.go b/pkg/gen/genesis/f01_init.go new file mode 100644 index 0000000000..bf92a20e9f --- /dev/null +++ b/pkg/gen/genesis/f01_init.go @@ -0,0 +1,189 @@ +package genesis + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/specs-actors/actors/util/adt" + + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func SetupInitActor(ctx context.Context, bs bstore.Blockstore, 
netname string, initialActors []Actor, rootVerifier Actor, remainder Actor, av actorstypes.Version) (int64, *types.Actor, map[address.Address]address.Address, error) { + if len(initialActors) > MaxAccounts { + return 0, nil, nil, errors.New("too many initial actors") + } + + cst := cbor.NewCborStore(bs) + ist, err := init_.MakeState(adt.WrapStore(ctx, cst), av, netname) + if err != nil { + return 0, nil, nil, err + } + + if err = ist.SetNextID(MinerStart); err != nil { + return 0, nil, nil, err + } + + amap, err := ist.AddressMap() + if err != nil { + return 0, nil, nil, err + } + + keyToID := map[address.Address]address.Address{} + counter := int64(AccountStart) + + for _, a := range initialActors { + if a.Type == TMultisig { + var ainfo MultisigMeta + if err := json.Unmarshal(a.Meta, &ainfo); err != nil { + return 0, nil, nil, fmt.Errorf("unmarshaling account meta: %w", err) + } + for _, e := range ainfo.Signers { + + if _, ok := keyToID[e]; ok { + continue + } + + fmt.Printf("init set %s t0%d\n", e, counter) + + value := cbg.CborInt(counter) + if err := amap.Put(abi.AddrKey(e), &value); err != nil { + return 0, nil, nil, err + } + counter = counter + 1 + var err error + keyToID[e], err = address.NewIDAddress(uint64(value)) + if err != nil { + return 0, nil, nil, err + } + + } + // Need to add actors for all multisigs too + continue + } + + if a.Type != TAccount { + return 0, nil, nil, fmt.Errorf("unsupported account type: %s", a.Type) + } + + var ainfo AccountMeta + if err := json.Unmarshal(a.Meta, &ainfo); err != nil { + return 0, nil, nil, fmt.Errorf("unmarshaling account meta: %w", err) + } + + fmt.Printf("init set %s t0%d\n", ainfo.Owner, counter) + + value := cbg.CborInt(counter) + if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil { + return 0, nil, nil, err + } + counter = counter + 1 + + var err error + keyToID[ainfo.Owner], err = address.NewIDAddress(uint64(value)) + if err != nil { + return 0, nil, nil, err + } + } + + setupMsig := 
func(meta json.RawMessage) error { + var ainfo MultisigMeta + if err := json.Unmarshal(meta, &ainfo); err != nil { + return fmt.Errorf("unmarshaling account meta: %w", err) + } + for _, e := range ainfo.Signers { + if _, ok := keyToID[e]; ok { + continue + } + fmt.Printf("init set %s t0%d\n", e, counter) + + value := cbg.CborInt(counter) + if err := amap.Put(abi.AddrKey(e), &value); err != nil { + return err + } + counter = counter + 1 + var err error + keyToID[e], err = address.NewIDAddress(uint64(value)) + if err != nil { + return err + } + + } + + return nil + } + + if rootVerifier.Type == TAccount { + var ainfo AccountMeta + if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil { + return 0, nil, nil, fmt.Errorf("unmarshaling account meta: %w", err) + } + value := cbg.CborInt(80) + if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil { + return 0, nil, nil, err + } + } else if rootVerifier.Type == TMultisig { + err := setupMsig(rootVerifier.Meta) + if err != nil { + return 0, nil, nil, fmt.Errorf("setting up root verifier msig: %w", err) + } + } + + if remainder.Type == TAccount { + var ainfo AccountMeta + if err := json.Unmarshal(remainder.Meta, &ainfo); err != nil { + return 0, nil, nil, fmt.Errorf("unmarshaling account meta: %w", err) + } + + // TODO: Use builtin.ReserveAddress... 
+ value := cbg.CborInt(90) + if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil { + return 0, nil, nil, err + } + } else if remainder.Type == TMultisig { + err := setupMsig(remainder.Meta) + if err != nil { + return 0, nil, nil, fmt.Errorf("setting up remainder msig: %w", err) + } + } + + amapaddr, err := amap.Root() + if err != nil { + return 0, nil, nil, err + } + + if err = ist.SetAddressMap(amapaddr); err != nil { + return 0, nil, nil, err + } + + statecid, err := cst.Put(ctx, ist.GetState()) + if err != nil { + return 0, nil, nil, err + } + + actcid, found := actors.GetActorCodeID(av, actors.InitKey) + if !found { + return 0, nil, nil, fmt.Errorf("failed to get init actor code ID for actors version %d", av) + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return counter, act, keyToID, nil +} diff --git a/pkg/gen/genesis/f02_reward.go b/pkg/gen/genesis/f02_reward.go new file mode 100644 index 0000000000..1da5891b3a --- /dev/null +++ b/pkg/gen/genesis/f02_reward.go @@ -0,0 +1,44 @@ +package genesis + +import ( + "context" + "fmt" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/reward" + "github.com/filecoin-project/venus/venus-shared/types" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + cbor "github.com/ipfs/go-ipld-cbor" + + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" +) + +func SetupRewardActor(ctx context.Context, bs bstore.Blockstore, qaPower big.Int, av actorstypes.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + rst, err := reward.MakeState(adt.WrapStore(ctx, cst), av, qaPower) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, rst.GetState()) + if err != nil { + return nil, 
err + } + + actcid, found := actors.GetActorCodeID(av, actors.RewardKey) + if !found { + return nil, fmt.Errorf("failed to get reward actor code ID for actors version %d", av) + } + + act := &types.Actor{ + Code: actcid, + Balance: types.BigInt{Int: constants.InitialRewardBalance}, + Head: statecid, + } + + return act, nil +} diff --git a/pkg/gen/genesis/f03_cron.go b/pkg/gen/genesis/f03_cron.go new file mode 100644 index 0000000000..c72c0aeb67 --- /dev/null +++ b/pkg/gen/genesis/f03_cron.go @@ -0,0 +1,43 @@ +package genesis + +import ( + "context" + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/cron" + + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func SetupCronActor(ctx context.Context, bs bstore.Blockstore, av actorstypes.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + st, err := cron.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, st.GetState()) + if err != nil { + return nil, err + } + + actcid, found := actors.GetActorCodeID(av, actors.CronKey) + if !found { + return nil, fmt.Errorf("failed to get cron actor code ID for actors version %d", av) + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/pkg/gen/genesis/f04_power.go b/pkg/gen/genesis/f04_power.go new file mode 100644 index 0000000000..cc47a783dc --- /dev/null +++ b/pkg/gen/genesis/f04_power.go @@ -0,0 +1,44 @@ +package genesis + +import ( + "context" + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + 
"github.com/filecoin-project/venus/venus-shared/actors/builtin/power" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-actors/actors/util/adt" + + cbor "github.com/ipfs/go-ipld-cbor" + + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func SetupStoragePowerActor(ctx context.Context, bs bstore.Blockstore, av actorstypes.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + pst, err := power.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, pst.GetState()) + if err != nil { + return nil, err + } + + actcid, found := actors.GetActorCodeID(av, actors.PowerKey) + if !found { + return nil, fmt.Errorf("failed to get power actor code ID for actors version %d", av) + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/pkg/gen/genesis/f05_market.go b/pkg/gen/genesis/f05_market.go new file mode 100644 index 0000000000..f0800900ff --- /dev/null +++ b/pkg/gen/genesis/f05_market.go @@ -0,0 +1,43 @@ +package genesis + +import ( + "context" + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func SetupStorageMarketActor(ctx context.Context, bs bstore.Blockstore, av actorstypes.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + mst, err := market.MakeState(adt.WrapStore(ctx, 
cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return nil, err + } + + actcid, found := actors.GetActorCodeID(av, actors.MarketKey) + if !found { + return nil, fmt.Errorf("failed to get market actor code ID for actors version %d", av) + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/pkg/gen/genesis/f06_vreg.go b/pkg/gen/genesis/f06_vreg.go new file mode 100644 index 0000000000..e17262cd64 --- /dev/null +++ b/pkg/gen/genesis/f06_vreg.go @@ -0,0 +1,57 @@ +package genesis + +import ( + "context" + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/verifreg" + + "github.com/filecoin-project/go-address" + cbor "github.com/ipfs/go-ipld-cbor" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/specs-actors/actors/util/adt" + + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var RootVerifierID address.Address + +func init() { + idk, err := address.NewFromString("t080") + if err != nil { + panic(err) + } + + RootVerifierID = idk +} + +func SetupVerifiedRegistryActor(ctx context.Context, bs bstore.Blockstore, av actorstypes.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + vst, err := verifreg.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av, RootVerifierID) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, vst.GetState()) + if err != nil { + return nil, err + } + + actcid, found := actors.GetActorCodeID(av, actors.VerifregKey) + if !found { + return nil, fmt.Errorf("failed to get verifreg actor code ID for actors version %d", av) + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: 
big.Zero(), + } + + return act, nil +} diff --git a/pkg/gen/genesis/f07_dcap.go b/pkg/gen/genesis/f07_dcap.go new file mode 100644 index 0000000000..1d4377e098 --- /dev/null +++ b/pkg/gen/genesis/f07_dcap.go @@ -0,0 +1,56 @@ +package genesis + +import ( + "context" + + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/datacap" + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var GovernorID address.Address + +func init() { + idk, err := address.NewFromString("t06") + if err != nil { + panic(err) + } + + GovernorID = idk +} + +func SetupDatacapActor(ctx context.Context, bs bstore.Blockstore, av actorstypes.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + dst, err := datacap.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av, GovernorID, builtin.DefaultTokenActorBitwidth) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, dst.GetState()) + if err != nil { + return nil, err + } + + actcid, ok := actors.GetActorCodeID(av, actors.DatacapKey) + if !ok { + return nil, xerrors.Errorf("failed to get datacap actor code ID for actors version %d", av) + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/pkg/gen/genesis/genblock.go b/pkg/gen/genesis/genblock.go new file mode 100644 index 0000000000..e66b1c5f88 --- /dev/null +++ b/pkg/gen/genesis/genblock.go @@ -0,0 +1,43 @@ +package genesis + +import ( + "encoding/hex" + + blocks "github.com/ipfs/go-block-format" + 
"github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" +) + +const ( + genesisMultihashString = "1220107d821c25dc0735200249df94a8bebc9c8e489744f86a4ca8919e81f19dcd72" + genesisBlockHex = "a5684461746574696d6573323031372d30352d30352030313a32373a3531674e6574776f726b6846696c65636f696e65546f6b656e6846696c65636f696e6c546f6b656e416d6f756e7473a36b546f74616c537570706c796d322c3030302c3030302c303030664d696e6572736d312c3430302c3030302c3030306c50726f746f636f6c4c616273a36b446576656c6f706d656e746b3330302c3030302c3030306b46756e6472616973696e676b3230302c3030302c3030306a466f756e646174696f6e6b3130302c3030302c303030674d657373616765784854686973206973207468652047656e6573697320426c6f636b206f66207468652046696c65636f696e20446563656e7472616c697a65642053746f72616765204e6574776f726b2e" +) + +var cidBuilder = cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.SHA2_256} + +func expectedCid() cid.Cid { + mh, err := multihash.FromHexString(genesisMultihashString) + if err != nil { + panic(err) + } + return cid.NewCidV1(cidBuilder.Codec, mh) +} + +func getGenesisBlock() (blocks.Block, error) { + genesisBlockData, err := hex.DecodeString(genesisBlockHex) + if err != nil { + return nil, err + } + + genesisCid, err := cidBuilder.Sum(genesisBlockData) + if err != nil { + return nil, err + } + + block, err := blocks.NewBlockWithCid(genesisBlockData, genesisCid) + if err != nil { + return nil, err + } + + return block, nil +} diff --git a/pkg/gen/genesis/genesis.go b/pkg/gen/genesis/genesis.go new file mode 100644 index 0000000000..26e9a940b4 --- /dev/null +++ b/pkg/gen/genesis/genesis.go @@ -0,0 +1,686 @@ +package genesis + +import ( + "context" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + + "github.com/filecoin-project/venus/pkg/consensusfault" + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/fvm" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/impl" + "github.com/filecoin-project/venus/pkg/vmsupport" + 
"github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/vm/gas" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/state/tree" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/datacap" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/multisig" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/account" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin/verifreg" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin/power" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin/cron" + + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/reward" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin/system" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/venus/pkg/chain" + sigs "github.com/filecoin-project/venus/pkg/crypto" + 
"github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/vm" + bstore "github.com/filecoin-project/venus/venus-shared/blockstore" +) + +const ( + AccountStart = 100 + MinerStart = 1000 + MaxAccounts = MinerStart - AccountStart +) + +var log = logging.Logger("genesis") + +type GenesisBootstrap struct { //nolint + Genesis *types.BlockHeader +} + +/* +From a list of parameters, create a genesis block / initial state + +The process: +- Bootstrap state (MakeInitialStateTree) + - Create empty state + - Create system actor + - Make init actor + - Create accounts mappings + - Set NextID to MinerStart + - Setup Reward (1.4B fil) + - Setup Cron + - Create empty power actor + - Create empty market + - Create verified registry + - Setup burnt fund address + - Initialize account / msig balances +- Instantiate early vm with genesis syscalls + - Create miners + - Each: + - power.CreateMiner, set msg value to PowerBalance + - market.AddFunds with correct value + - market.PublishDeals for related sectors + - Set network power in the power actor to what we'll have after genesis creation + - Recreate reward actor state with the right power + - For each precommitted sector + - Get deal weight + - Calculate QA Power + - Remove fake power from the power actor + - Calculate pledge + - Precommit + - Confirm valid + +Data Types: + +PreSeal :{ + CommR CID + CommD CID + SectorID SectorNumber + Deal market.DealProposal # Start at 0, self-deal! +} + +Genesis: { + Accounts: [ # non-miner, non-singleton actors, max len = MaxAccounts + { + Type: "account" / "multisig", + Value: "attofil", + [Meta: {msig settings, account key..}] + },... + ], + Miners: [ + { + Owner, Worker Addr # ID + MarketBalance, PowerBalance TokenAmount + SectorSize uint64 + PreSeals []PreSeal + },... 
+ ], +} + +*/ + +func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template Template) (*tree.State, map[address.Address]address.Address, error) { + // Create empty state tree + + cst := cbor.NewCborStore(bs) + _, err := cst.Put(context.TODO(), []struct{}{}) + if err != nil { + return nil, nil, fmt.Errorf("putting empty object: %w", err) + } + + sv, err := tree.VersionForNetwork(template.NetworkVersion) + if err != nil { + return nil, nil, fmt.Errorf("getting state tree version: %w", err) + } + + state, err := tree.NewState(cst, sv) + if err != nil { + return nil, nil, fmt.Errorf("making new state tree: %w", err) + } + + av, err := actorstypes.VersionForNetwork(template.NetworkVersion) + if err != nil { + return nil, nil, fmt.Errorf("get actor version: %w", err) + } + + if err := actors.LoadBundles(ctx, bs, av); err != nil { + return nil, nil, fmt.Errorf("loading actors for genesis block: %w", err) + } + // Create system actor + + sysact, err := SetupSystemActor(ctx, bs, av) + if err != nil { + return nil, nil, fmt.Errorf("setup system actor: %w", err) + } + if err := state.SetActor(ctx, system.Address, sysact); err != nil { + return nil, nil, fmt.Errorf("set system actor: %w", err) + } + + // Create init actor + + idStart, initact, keyIDs, err := SetupInitActor(ctx, bs, template.NetworkName, template.Accounts, template.VerifregRootKey, template.RemainderAccount, av) + if err != nil { + return nil, nil, fmt.Errorf("setup init actor: %w", err) + } + if err := state.SetActor(ctx, init_.Address, initact); err != nil { + return nil, nil, fmt.Errorf("set init actor: %w", err) + } + + // Setup reward + // RewardActor's state is overwritten by SetupStorageMiners, but needs to exist for miner creation messages + rewact, err := SetupRewardActor(ctx, bs, big.Zero(), av) + if err != nil { + return nil, nil, fmt.Errorf("setup reward actor: %w", err) + } + + err = state.SetActor(ctx, reward.Address, rewact) + if err != nil { + return nil, nil, fmt.Errorf("set 
reward actor: %w", err) + } + + // Setup cron + cronact, err := SetupCronActor(ctx, bs, av) + if err != nil { + return nil, nil, fmt.Errorf("setup cron actor: %w", err) + } + if err := state.SetActor(ctx, cron.Address, cronact); err != nil { + return nil, nil, fmt.Errorf("set cron actor: %w", err) + } + + // Create empty power actor + spact, err := SetupStoragePowerActor(ctx, bs, av) + if err != nil { + return nil, nil, fmt.Errorf("setup storage power actor: %w", err) + } + if err := state.SetActor(ctx, power.Address, spact); err != nil { + return nil, nil, fmt.Errorf("set storage power actor: %w", err) + } + + // Create empty market actor + marketact, err := SetupStorageMarketActor(ctx, bs, av) + if err != nil { + return nil, nil, fmt.Errorf("setup storage market actor: %w", err) + } + if err := state.SetActor(ctx, market.Address, marketact); err != nil { + return nil, nil, fmt.Errorf("set storage market actor: %w", err) + } + + // Create verified registry + verifact, err := SetupVerifiedRegistryActor(ctx, bs, av) + if err != nil { + return nil, nil, fmt.Errorf("setup verified registry market actor: %w", err) + } + if err := state.SetActor(ctx, verifreg.Address, verifact); err != nil { + return nil, nil, fmt.Errorf("set verified registry actor: %w", err) + } + + // Create datacap actor + if av >= 9 { + dcapact, err := SetupDatacapActor(ctx, bs, av) + if err != nil { + return nil, nil, fmt.Errorf("setup datacap actor: %w", err) + } + if err := state.SetActor(ctx, datacap.Address, dcapact); err != nil { + return nil, nil, fmt.Errorf("set datacap actor: %w", err) + } + } + + bact, err := makeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero()) + if err != nil { + return nil, nil, fmt.Errorf("setup burnt funds actor state: %w", err) + } + if err := state.SetActor(ctx, builtin.BurntFundsActorAddr, bact); err != nil { + return nil, nil, fmt.Errorf("set burnt funds actor: %w", err) + } + + // Create accounts + for _, info := range template.Accounts { + 
switch info.Type { + case TAccount: + if err := createAccountActor(ctx, cst, state, info, keyIDs, av); err != nil { + return nil, nil, fmt.Errorf("failed to create account actor: %w", err) + } + + case TMultisig: + + ida, err := address.NewIDAddress(uint64(idStart)) + if err != nil { + return nil, nil, err + } + idStart++ + + if err := createMultisigAccount(ctx, cst, state, ida, info, keyIDs, av); err != nil { + return nil, nil, err + } + default: + return nil, nil, errors.New("unsupported account type") + } + } + + switch template.VerifregRootKey.Type { + case TAccount: + var ainfo AccountMeta + if err := json.Unmarshal(template.VerifregRootKey.Meta, &ainfo); err != nil { + return nil, nil, fmt.Errorf("unmarshaling account meta: %w", err) + } + + _, ok := keyIDs[ainfo.Owner] + if ok { + return nil, nil, fmt.Errorf("rootkey account has already been declared, cannot be assigned 80: %s", ainfo.Owner) + } + + vact, err := makeAccountActor(ctx, cst, av, ainfo.Owner, template.VerifregRootKey.Balance) + if err != nil { + return nil, nil, fmt.Errorf("setup verifreg rootkey account state: %w", err) + } + if err = state.SetActor(ctx, builtin.RootVerifierAddress, vact); err != nil { + return nil, nil, fmt.Errorf("set verifreg rootkey account actor: %w", err) + } + case TMultisig: + if err = createMultisigAccount(ctx, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs, av); err != nil { + return nil, nil, fmt.Errorf("failed to set up verified registry signer: %w", err) + } + default: + return nil, nil, fmt.Errorf("unknown account type for verifreg rootkey: %w", err) + } + + // Setup the first verifier as ID-address 81 + // TODO: remove this + skBytes, err := sigs.Generate(crypto.SigTypeBLS) + if err != nil { + return nil, nil, fmt.Errorf("creating random verifier secret key: %w", err) + } + + verifierPk, err := sigs.ToPublic(crypto.SigTypeBLS, skBytes) + if err != nil { + return nil, nil, fmt.Errorf("creating random verifier public key: %w", err) + } + 
+ verifierAd, err := address.NewBLSAddress(verifierPk) + if err != nil { + return nil, nil, fmt.Errorf("creating random verifier address: %w", err) + } + + verifierId, err := address.NewIDAddress(81) // nolint + if err != nil { + return nil, nil, err + } + + verifierAct, err := makeAccountActor(ctx, cst, av, verifierAd, big.Zero()) + if err != nil { + return nil, nil, fmt.Errorf("setup first verifier state: %w", err) + } + + if err = state.SetActor(ctx, verifierId, verifierAct); err != nil { + return nil, nil, fmt.Errorf("set first verifier actor: %w", err) + } + + totalFilAllocated := big.Zero() + + err = state.ForEach(func(addr address.Address, act *types.Actor) error { + if act.Balance.Nil() { + panic(fmt.Sprintf("actor %s (%s) has nil balance", addr, builtin.ActorNameByCode(act.Code))) + } + totalFilAllocated = big.Add(totalFilAllocated, act.Balance) + return nil + }) + if err != nil { + return nil, nil, fmt.Errorf("summing account balances in state tree: %w", err) + } + + totalFil := big.Mul(big.NewInt(int64(constants.FilBase)), big.NewInt(int64(constants.FilecoinPrecision))) + remainingFil := big.Sub(totalFil, totalFilAllocated) + if remainingFil.Sign() < 0 { + return nil, nil, fmt.Errorf("somehow overallocated filecoin (allocated = %s)", types.FIL(totalFilAllocated)) + } + + template.RemainderAccount.Balance = remainingFil + + switch template.RemainderAccount.Type { + case TAccount: + var ainfo AccountMeta + if err := json.Unmarshal(template.RemainderAccount.Meta, &ainfo); err != nil { + return nil, nil, fmt.Errorf("unmarshaling account meta: %w", err) + } + + _, ok := keyIDs[ainfo.Owner] + if ok { + return nil, nil, fmt.Errorf("remainder account has already been declared, cannot be assigned 90: %s", ainfo.Owner) + } + + keyIDs[ainfo.Owner] = builtin.ReserveAddress + err = createAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs, av) + if err != nil { + return nil, nil, fmt.Errorf("creating remainder acct: %w", err) + } + + case TMultisig: + if 
err = createMultisigAccount(ctx, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs, av); err != nil { + return nil, nil, fmt.Errorf("failed to set up remainder: %w", err) + } + default: + return nil, nil, fmt.Errorf("unknown account type for remainder: %w", err) + } + + return state, keyIDs, nil +} + +func makeAccountActor(ctx context.Context, cst cbor.IpldStore, av actorstypes.Version, addr address.Address, bal types.BigInt) (*types.Actor, error) { + ast, err := account.MakeState(adt.WrapStore(ctx, cst), av, addr) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, ast.GetState()) + if err != nil { + return nil, err + } + + actcid, found := actors.GetActorCodeID(av, actors.AccountKey) + if !found { + return nil, fmt.Errorf("failed to get account actor code ID for actors version %d", av) + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: bal, + } + + return act, nil +} + +func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *tree.State, info Actor, keyIDs map[address.Address]address.Address, av actorstypes.Version) error { + var ainfo AccountMeta + if err := json.Unmarshal(info.Meta, &ainfo); err != nil { + return fmt.Errorf("unmarshaling account meta: %w", err) + } + + aa, err := makeAccountActor(ctx, cst, av, ainfo.Owner, info.Balance) + if err != nil { + return err + } + + ida, ok := keyIDs[ainfo.Owner] + if !ok { + return fmt.Errorf("no registered ID for account actor: %s", ainfo.Owner) + } + + err = state.SetActor(ctx, ida, aa) + if err != nil { + return fmt.Errorf("setting account from actmap: %w", err) + } + return nil +} + +func createMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *tree.State, ida address.Address, info Actor, keyIDs map[address.Address]address.Address, av actorstypes.Version) error { + if info.Type != TMultisig { + return fmt.Errorf("can only call createMultisigAccount with multisig Actor info") + } + var ainfo MultisigMeta + if err := 
json.Unmarshal(info.Meta, &ainfo); err != nil { + return fmt.Errorf("unmarshaling account meta: %w", err) + } + + var signers []address.Address + + for _, e := range ainfo.Signers { + idAddress, ok := keyIDs[e] + if !ok { + return fmt.Errorf("no registered key ID for signer: %s", e) + } + + // Check if actor already exists + _, bFind, err := state.GetActor(ctx, e) + if err == nil && bFind { + signers = append(signers, idAddress) + continue + } + + aa, err := makeAccountActor(ctx, cst, av, e, big.Zero()) + if err != nil { + return err + } + + if err = state.SetActor(ctx, idAddress, aa); err != nil { + return fmt.Errorf("setting account from actmap: %w", err) + } + signers = append(signers, idAddress) + } + + mst, err := multisig.MakeState(adt.WrapStore(ctx, cst), av, signers, uint64(ainfo.Threshold), abi.ChainEpoch(ainfo.VestingStart), abi.ChainEpoch(ainfo.VestingDuration), info.Balance) + if err != nil { + return err + } + + statecid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return err + } + + actcid, found := actors.GetActorCodeID(av, actors.MultisigKey) + if !found { + return fmt.Errorf("failed to get multisig actor code ID for actors version %d", av) + } + + err = state.SetActor(ctx, ida, &types.Actor{ + Code: actcid, + Balance: info.Balance, + Head: statecid, + }) + if err != nil { + return fmt.Errorf("setting account from actmap: %w", err) + } + + return nil +} + +func VerifyPreSealedData(ctx context.Context, cs *chain.Store, stateroot cid.Cid, template Template, keyIDs map[address.Address]address.Address, nv network.Version, para *config.ForkUpgradeConfig) (cid.Cid, error) { + verifNeeds := make(map[address.Address]abi.PaddedPieceSize) + var sum abi.PaddedPieceSize + + faultChecker := consensusfault.NewFaultChecker(cs, fork.NewMockFork()) + syscalls := vmsupport.NewSyscalls(faultChecker, impl.ProofVerifier) + + csc := func(context.Context, abi.ChainEpoch, tree.Tree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + + gasPriceSchedule := 
gas.NewPricesSchedule(para) + vmopt := vm.VmOption{ + CircSupplyCalculator: csc, + NetworkVersion: nv, + Rnd: &fakeRand{}, + BaseFee: big.NewInt(0), + Epoch: 0, + PRoot: stateroot, + Bsstore: cs.Blockstore(), + SysCallsImpl: mkFakedSigSyscalls(syscalls), + GasPriceSchedule: gasPriceSchedule, + } + + vm, err := fvm.NewVM(ctx, vmopt) + if err != nil { + return cid.Undef, fmt.Errorf("failed to create vm: %w", err) + } + + for mi, m := range template.Miners { + for si, s := range m.Sectors { + if s.Deal.Provider != m.ID { + return cid.Undef, fmt.Errorf("sector %d in miner %d in template had mismatch in provider and miner ID: %s != %s", si, mi, s.Deal.Provider, m.ID) + } + + amt := s.Deal.PieceSize + verifNeeds[keyIDs[s.Deal.Client]] += amt + sum += amt + } + } + + verifregRoot, err := address.NewIDAddress(80) + if err != nil { + return cid.Undef, err + } + + verifier, err := address.NewIDAddress(81) + if err != nil { + return cid.Undef, err + } + + // Note: This is brittle, if the methodNum / param changes, it could break things + _, err = doExecValue(ctx, vm, verifreg.Address, verifregRoot, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg0.AddVerifierParams{ + Address: verifier, + Allowance: abi.NewStoragePower(int64(sum)), // eh, close enough + + })) + if err != nil { + return cid.Undef, fmt.Errorf("failed to create verifier: %w", err) + } + + for c, amt := range verifNeeds { + // Note: This is brittle, if the methodNum / param changes, it could break things + _, err := doExecValue(ctx, vm, verifreg.Address, verifier, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg0.AddVerifiedClientParams{ + Address: c, + Allowance: abi.NewStoragePower(int64(amt)), + })) + if err != nil { + return cid.Undef, fmt.Errorf("failed to add verified client: %w", err) + } + } + + st, err := vm.Flush(ctx) + if err != nil { + return cid.Cid{}, fmt.Errorf("vm flush: %w", err) + } + + return st, nil +} + +func 
MakeGenesisBlock(ctx context.Context, rep repo.Repo, bs bstore.Blockstore, template Template, para *config.ForkUpgradeConfig) (*GenesisBootstrap, error) { + st, keyIDs, err := MakeInitialStateTree(ctx, bs, template) + if err != nil { + return nil, fmt.Errorf("make initial state tree failed: %w", err) + } + + stateroot, err := st.Flush(ctx) + if err != nil { + return nil, fmt.Errorf("flush state tree failed: %w", err) + } + + // temp chainstore + cs := chain.NewStore(rep.ChainDatastore(), bs, cid.Undef, chain.NewMockCirculatingSupplyCalculator()) + + // Verify PreSealed Data + stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs, template.NetworkVersion, para) + if err != nil { + return nil, fmt.Errorf("failed to verify presealed data: %w", err) + } + + stateroot, err = SetupStorageMiners(ctx, cs, stateroot, template.Miners, template.NetworkVersion, para) + if err != nil { + return nil, fmt.Errorf("setup miners failed: %w", err) + } + + store := adt.WrapStore(ctx, cbor.NewCborStore(bs)) + emptyroot, err := adt0.MakeEmptyArray(store).Root() + if err != nil { + return nil, fmt.Errorf("amt build failed: %w", err) + } + + mm := &types.MessageRoot{ + BlsRoot: emptyroot, + SecpkRoot: emptyroot, + } + mmb, err := mm.ToStorageBlock() + if err != nil { + return nil, fmt.Errorf("serializing msgmeta failed: %w", err) + } + if err := bs.Put(ctx, mmb); err != nil { + return nil, fmt.Errorf("putting msgmeta block to blockstore: %w", err) + } + + log.Infof("Empty Genesis root: %s", emptyroot) + + tickBuf := make([]byte, 32) + _, _ = rand.Read(tickBuf) + genesisticket := &types.Ticket{ + VRFProof: tickBuf, + } + + filecoinGenesisCid, err := cid.Decode("bafyreiaqpwbbyjo4a42saasj36kkrpv4tsherf2e7bvezkert2a7dhonoi") + if err != nil { + return nil, fmt.Errorf("failed to decode filecoin genesis block CID: %w", err) + } + + if !expectedCid().Equals(filecoinGenesisCid) { + return nil, fmt.Errorf("expectedCid != filecoinGenesisCid") + } + + gblk, err := 
getGenesisBlock() + if err != nil { + return nil, fmt.Errorf("failed to construct filecoin genesis block: %w", err) + } + + if !filecoinGenesisCid.Equals(gblk.Cid()) { + return nil, fmt.Errorf("filecoinGenesisCid != gblk.Cid") + } + + if err := bs.Put(ctx, gblk); err != nil { + return nil, fmt.Errorf("failed writing filecoin genesis block to blockstore: %w", err) + } + + b := &types.BlockHeader{ + Miner: system.Address, + Ticket: genesisticket, + Parents: types.NewTipSetKey(filecoinGenesisCid).Cids(), + Height: 0, + ParentWeight: types.NewInt(0), + ParentStateRoot: stateroot, + Messages: mmb.Cid(), + ParentMessageReceipts: emptyroot, + BLSAggregate: nil, + BlockSig: nil, + Timestamp: template.Timestamp, + ElectionProof: new(types.ElectionProof), + BeaconEntries: []types.BeaconEntry{ + { + Round: 0, + Data: make([]byte, 32), + }, + }, + ParentBaseFee: abi.NewTokenAmount(constants.InitialBaseFee), + } + + sb, err := b.ToStorageBlock() + if err != nil { + return nil, fmt.Errorf("serializing block header failed: %w", err) + } + + if err := bs.Put(ctx, sb); err != nil { + return nil, fmt.Errorf("putting header to blockstore: %w", err) + } + + return &GenesisBootstrap{ + Genesis: b, + }, nil +} diff --git a/pkg/gen/genesis/miners.go b/pkg/gen/genesis/miners.go new file mode 100644 index 0000000000..0616ee7a91 --- /dev/null +++ b/pkg/gen/genesis/miners.go @@ -0,0 +1,675 @@ +package genesis + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/rand" + + cborutil "github.com/filecoin-project/go-cbor-util" + + reward2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/reward" + + power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power" + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward" + + smoothing9 "github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + 
"github.com/filecoin-project/venus/venus-shared/actors/policy" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/network" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin/power" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/reward" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/consensusfault" + crypto2 "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/fvm" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/impl" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + "github.com/filecoin-project/venus/pkg/vmsupport" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func 
MinerAddress(genesisIndex uint64) address.Address { + maddr, err := address.NewIDAddress(MinerStart + genesisIndex) + if err != nil { + panic(err) + } + + return maddr +} + +type fakedSigSyscalls struct { + vmcontext.SyscallsImpl +} + +func (fss *fakedSigSyscalls) VerifySignature(ctx context.Context, view vmcontext.SyscallsStateView, signature crypto.Signature, signer address.Address, plaintext []byte) error { + return nil +} + +func mkFakedSigSyscalls(sys vmcontext.SyscallsImpl) vmcontext.SyscallsImpl { + return &fakedSigSyscalls{ + sys, + } +} + +// Note: Much of this is brittle, if the methodNum / param / return changes, it will break things +func SetupStorageMiners(ctx context.Context, cs *chain.Store, sroot cid.Cid, miners []Miner, nv network.Version, para *config.ForkUpgradeConfig) (cid.Cid, error) { + cst := cbor.NewCborStore(cs.Blockstore()) + av, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return cid.Undef, fmt.Errorf("get actor version: %w", err) + } + + csc := func(context.Context, abi.ChainEpoch, tree.Tree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + + faultChecker := consensusfault.NewFaultChecker(cs, fork.NewMockFork()) + syscalls := vmsupport.NewSyscalls(faultChecker, impl.ProofVerifier) + gasPirceSchedule := gas.NewPricesSchedule(para) + + newVM := func(base cid.Cid) (vm.Interface, error) { + vmopt := vm.VmOption{ + CircSupplyCalculator: csc, + Rnd: &fakeRand{}, + BaseFee: big.NewInt(0), + Epoch: 0, + PRoot: base, + NetworkVersion: nv, + Bsstore: cs.Blockstore(), + SysCallsImpl: mkFakedSigSyscalls(syscalls), + GasPriceSchedule: gasPirceSchedule, + } + + return fvm.NewVM(ctx, vmopt) + } + + genesisVM, err := newVM(sroot) + if err != nil { + return cid.Undef, fmt.Errorf("failed to create vm: %w", err) + } + + if len(miners) == 0 { + return cid.Undef, errors.New("no genesis miners") + } + + minerInfos := make([]struct { + maddr address.Address + + presealExp abi.ChainEpoch + + dealIDs []abi.DealID + sectorWeight 
[]abi.StoragePower + }, len(miners)) + + maxPeriods := policy.GetMaxSectorExpirationExtension() / minertypes.WPoStProvingPeriod + rawPow, qaPow := big.NewInt(0), big.NewInt(0) + for i, m := range miners { + // Create miner through power actor + i := i + m := m + + spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv) + if err != nil { + return cid.Undef, err + } + + { + constructorParams := &power0.CreateMinerParams{ + Owner: m.Worker, + Worker: m.Worker, + Peer: []byte(m.PeerID), + SealProofType: spt, + } + + params := mustEnc(constructorParams) + rval, err := doExecValue(ctx, genesisVM, power.Address, m.Owner, m.PowerBalance, power.Methods.CreateMiner, params) + if err != nil { + return cid.Undef, fmt.Errorf("failed to create genesis miner: %w", err) + } + + var ma power0.CreateMinerReturn + if err := ma.UnmarshalCBOR(bytes.NewReader(rval)); err != nil { + return cid.Undef, fmt.Errorf("unmarshaling CreateMinerReturn: %w", err) + } + + expma := MinerAddress(uint64(i)) + if ma.IDAddress != expma { + return cid.Undef, fmt.Errorf("miner assigned wrong address: %s != %s", ma.IDAddress, expma) + } + minerInfos[i].maddr = ma.IDAddress + + nh, err := genesisVM.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing vm: %w", err) + } + + nst, err := tree.LoadState(ctx, cst, nh) + if err != nil { + return cid.Undef, fmt.Errorf("loading new state tree: %w", err) + } + + mact, find, err := nst.GetActor(ctx, minerInfos[i].maddr) + if err != nil { + return cid.Undef, fmt.Errorf("getting newly created miner actor: %w", err) + } + + if !find { + return cid.Undef, errors.New("actor not found") + } + + mst, err := miner.Load(adt.WrapStore(ctx, cst), mact) + if err != nil { + return cid.Undef, fmt.Errorf("getting newly created miner state: %w", err) + } + + pps, err := mst.GetProvingPeriodStart() + if err != nil { + return cid.Undef, fmt.Errorf("getting newly created miner proving period start: %w", err) + } + + minerInfos[i].presealExp = 
(maxPeriods-1)*miner0.WPoStProvingPeriod + pps - 1 + } + + // Add market funds + + if m.MarketBalance.GreaterThan(big.Zero()) { + params := mustEnc(&minerInfos[i].maddr) + _, err := doExecValue(ctx, genesisVM, market.Address, m.Worker, m.MarketBalance, market.Methods.AddBalance, params) + if err != nil { + return cid.Undef, fmt.Errorf("failed to create genesis miner (add balance): %w", err) + } + } + + // Publish preseal deals, and calculate the QAPower + + { + publish := func(params *types.PublishStorageDealsParams) error { + fmt.Printf("publishing %d storage deals on miner %s with worker %s\n", len(params.Deals), params.Deals[0].Proposal.Provider, m.Worker) + + ret, err := doExecValue(ctx, genesisVM, market.Address, m.Worker, big.Zero(), builtin0.MethodsMarket.PublishStorageDeals, mustEnc(params)) + if err != nil { + return fmt.Errorf("failed to create genesis miner (publish deals): %w", err) + } + retval, err := market.DecodePublishStorageDealsReturn(ret, nv) + if err != nil { + return fmt.Errorf("failed to create genesis miner (decoding published deals): %w", err) + } + ids, err := retval.DealIDs() + if err != nil { + return fmt.Errorf("failed to create genesis miner (getting published dealIDs): %w", err) + } + + if len(ids) != len(params.Deals) { + return fmt.Errorf("failed to create genesis miner (at least one deal was invalid on publication") + } + + minerInfos[i].dealIDs = append(minerInfos[i].dealIDs, ids...) 
+ return nil + } + + params := &types.PublishStorageDealsParams{} + for _, preseal := range m.Sectors { + preseal.Deal.VerifiedDeal = true + preseal.Deal.EndEpoch = minerInfos[i].presealExp + p := types.ClientDealProposal{ + Proposal: preseal.Deal, + ClientSignature: crypto.Signature{Type: crypto.SigTypeBLS}, + } + + if av >= actorstypes.Version8 { + buf, err := cborutil.Dump(&preseal.Deal) + if err != nil { + return cid.Undef, fmt.Errorf("failed to marshal proposal: %w", err) + } + var sig *crypto.Signature + err = preseal.DealClientKey.UsePrivateKey(func(privateKey []byte) error { + var err error + sig, err = crypto2.Sign(buf, privateKey, preseal.DealClientKey.SigType) + return err + }) + if err != nil { + return cid.Undef, fmt.Errorf("failed to sign proposal: %w", err) + } + + p.ClientSignature = *sig + } + + params.Deals = append(params.Deals, p) + + if len(params.Deals) == cbg.MaxLength { + if err := publish(params); err != nil { + return cid.Undef, err + } + + params = &types.PublishStorageDealsParams{} + } + + rawPow = big.Add(rawPow, big.NewInt(int64(m.SectorSize))) + sectorWeight := builtin.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, big.Zero(), types.DealWeight(&preseal.Deal)) + minerInfos[i].sectorWeight = append(minerInfos[i].sectorWeight, sectorWeight) + qaPow = big.Add(qaPow, sectorWeight) + } + + if len(params.Deals) > 0 { + if err := publish(params); err != nil { + return cid.Undef, err + } + } + } + } + + { + nh, err := genesisVM.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing vm: %w", err) + } + if err != nil { + return cid.Undef, fmt.Errorf("flushing vm: %w", err) + } + + nst, err := tree.LoadState(ctx, cst, nh) + if err != nil { + return cid.Undef, fmt.Errorf("loading new state tree: %w", err) + } + + pact, find, err := nst.GetActor(ctx, power.Address) + if err != nil { + return cid.Undef, fmt.Errorf("getting power actor: %w", err) + } + + if !find { + return cid.Undef, errors.New("power actor not exist") + } + 
+ pst, err := power.Load(adt.WrapStore(ctx, cst), pact) + if err != nil { + return cid.Undef, fmt.Errorf("getting power state: %w", err) + } + + if err = pst.SetTotalQualityAdjPower(qaPow); err != nil { + return cid.Undef, fmt.Errorf("setting TotalQualityAdjPower in power state: %w", err) + } + + if err = pst.SetTotalRawBytePower(rawPow); err != nil { + return cid.Undef, fmt.Errorf("setting TotalRawBytePower in power state: %w", err) + } + + if err = pst.SetThisEpochQualityAdjPower(qaPow); err != nil { + return cid.Undef, fmt.Errorf("setting ThisEpochQualityAdjPower in power state: %w", err) + } + + if err = pst.SetThisEpochRawBytePower(rawPow); err != nil { + return cid.Undef, fmt.Errorf("setting ThisEpochRawBytePower in power state: %w", err) + } + + pcid, err := cst.Put(ctx, pst.GetState()) + if err != nil { + return cid.Undef, fmt.Errorf("putting power state: %w", err) + } + + pact.Head = pcid + + if err = nst.SetActor(ctx, power.Address, pact); err != nil { + return cid.Undef, fmt.Errorf("setting power state: %w", err) + } + + ver, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return cid.Undef, fmt.Errorf("get actor version: %w", err) + } + + rewact, err := SetupRewardActor(ctx, cs.Blockstore(), big.Zero(), ver) + if err != nil { + return cid.Undef, fmt.Errorf("setup reward actor: %w", err) + } + + if err = nst.SetActor(ctx, reward.Address, rewact); err != nil { + return cid.Undef, fmt.Errorf("set reward actor: %w", err) + } + + nh, err = nst.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing state tree: %w", err) + } + + genesisVM, err = newVM(nh) + if err != nil { + return cid.Undef, fmt.Errorf("creating new vm: %w", err) + } + } + + for i, m := range miners { + // Commit sectors + { + for pi, preseal := range m.Sectors { + params := &minertypes.SectorPreCommitInfo{ + SealProof: preseal.ProofType, + SectorNumber: preseal.SectorID, + SealedCID: preseal.CommR, + SealRandEpoch: -1, + DealIDs: 
[]abi.DealID{minerInfos[i].dealIDs[pi]}, + Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! + } + + sectorWeight := minerInfos[i].sectorWeight[pi] + + // we've added fake power for this sector above, remove it now + + nh, err := genesisVM.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing vm: %w", err) + } + + nst, err := tree.LoadState(ctx, cst, nh) + if err != nil { + return cid.Undef, fmt.Errorf("loading new state tree: %w", err) + } + + pact, find, err := nst.GetActor(ctx, power.Address) + if err != nil { + return cid.Undef, fmt.Errorf("getting power actor: %w", err) + } + + if !find { + return cid.Undef, errors.New("power actor not exist") + } + + pst, err := power.Load(adt.WrapStore(ctx, cst), pact) + if err != nil { + return cid.Undef, fmt.Errorf("getting power state: %w", err) + } + + pc, err := pst.TotalPower() + if err != nil { + return cid.Undef, fmt.Errorf("getting total power: %w", err) + } + + if err = pst.SetTotalRawBytePower(types.BigSub(pc.RawBytePower, types.NewInt(uint64(m.SectorSize)))); err != nil { + return cid.Undef, fmt.Errorf("setting TotalRawBytePower in power state: %w", err) + } + + if err = pst.SetTotalQualityAdjPower(types.BigSub(pc.QualityAdjPower, sectorWeight)); err != nil { + return cid.Undef, fmt.Errorf("setting TotalQualityAdjPower in power state: %w", err) + } + + pcid, err := cst.Put(ctx, pst.GetState()) + if err != nil { + return cid.Undef, fmt.Errorf("putting power state: %w", err) + } + + pact.Head = pcid + + if err = nst.SetActor(ctx, power.Address, pact); err != nil { + return cid.Undef, fmt.Errorf("setting power state: %w", err) + } + + nh, err = nst.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing state tree: %w", err) + } + + genesisVM, err = newVM(nh) + if err != nil { + return cid.Undef, fmt.Errorf("creating new vm: %w", err) + } + + baselinePower, rewardSmoothed, err := currentEpochBlockReward(ctx, genesisVM, minerInfos[i].maddr, av) + if err != nil { + 
return cid.Undef, fmt.Errorf("getting current epoch reward: %w", err) + } + + tpow, err := currentTotalPower(ctx, genesisVM, minerInfos[i].maddr) + if err != nil { + return cid.Undef, fmt.Errorf("getting current total power: %w", err) + } + + pcd := types.PreCommitDepositForPower(smoothing9.FilterEstimate(rewardSmoothed), smoothing9.FilterEstimate(*tpow.QualityAdjPowerSmoothed), types.QAPowerMax(m.SectorSize)) + + pledge := types.InitialPledgeForPower( + sectorWeight, + baselinePower, + smoothing9.FilterEstimate(rewardSmoothed), + smoothing9.FilterEstimate(*tpow.QualityAdjPowerSmoothed), + big.Zero(), + ) + + pledge = big.Add(pcd, pledge) + + fmt.Println(types.FIL(pledge)) + _, err = doExecValue(ctx, genesisVM, minerInfos[i].maddr, m.Worker, pledge, builtintypes.MethodsMiner.PreCommitSector, mustEnc(params)) + if err != nil { + return cid.Undef, fmt.Errorf("failed to confirm presealed sectors: %w", err) + } + + // Commit one-by-one, otherwise pledge math tends to explode + var paramBytes []byte + + if av >= actorstypes.Version6 { + // TODO: fixup + confirmParams := &builtin6.ConfirmSectorProofsParams{ + Sectors: []abi.SectorNumber{preseal.SectorID}, + } + + paramBytes = mustEnc(confirmParams) + } else { + confirmParams := &builtin0.ConfirmSectorProofsParams{ + Sectors: []abi.SectorNumber{preseal.SectorID}, + } + + paramBytes = mustEnc(confirmParams) + } + + _, err = doExecValue(ctx, genesisVM, minerInfos[i].maddr, power.Address, big.Zero(), builtintypes.MethodsMiner.ConfirmSectorProofsValid, paramBytes) + if err != nil { + return cid.Undef, fmt.Errorf("failed to confirm presealed sectors: %w", err) + } + + if av > actorstypes.Version2 { + // post v2, we need to explicitly Claim this power since ConfirmSectorProofsValid doesn't do it anymore + claimParams := &power4.UpdateClaimedPowerParams{ + RawByteDelta: types.NewInt(uint64(m.SectorSize)), + QualityAdjustedDelta: sectorWeight, + } + + _, err = doExecValue(ctx, genesisVM, power.Address, minerInfos[i].maddr, 
big.Zero(), power.Methods.UpdateClaimedPower, mustEnc(claimParams)) + if err != nil { + return cid.Undef, fmt.Errorf("failed to confirm presealed sectors: %w", err) + } + + nh, err := genesisVM.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing vm: %w", err) + } + + nst, err := tree.LoadState(ctx, cst, nh) + if err != nil { + return cid.Undef, fmt.Errorf("loading new state tree: %w", err) + } + + mact, find, err := nst.GetActor(ctx, minerInfos[i].maddr) + if err != nil { + return cid.Undef, fmt.Errorf("getting miner actor: %w", err) + } + + if !find { + return cid.Undef, errors.New("actor not found") + } + + mst, err := miner.Load(adt.WrapStore(ctx, cst), mact) + if err != nil { + return cid.Undef, fmt.Errorf("getting miner state: %w", err) + } + + if err = mst.EraseAllUnproven(); err != nil { + return cid.Undef, fmt.Errorf("failed to erase unproven sectors: %w", err) + } + + mcid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return cid.Undef, fmt.Errorf("putting miner state: %w", err) + } + + mact.Head = mcid + + if err = nst.SetActor(ctx, minerInfos[i].maddr, mact); err != nil { + return cid.Undef, fmt.Errorf("setting miner state: %w", err) + } + + nh, err = nst.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing state tree: %w", err) + } + + genesisVM, err = newVM(nh) + if err != nil { + return cid.Undef, fmt.Errorf("creating new vm: %w", err) + } + } + } + } + } + + // Sanity-check total network power + nh, err := genesisVM.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing vm: %w", err) + } + + nst, err := tree.LoadState(ctx, cst, nh) + if err != nil { + return cid.Undef, fmt.Errorf("loading new state tree: %w", err) + } + + pact, find, err := nst.GetActor(ctx, power.Address) + if err != nil { + return cid.Undef, fmt.Errorf("getting power actor: %w", err) + } + if !find { + return cid.Undef, errors.New("actor not found") + } + + pst, err := power.Load(adt.WrapStore(ctx, cst), pact) + if err != 
nil { + return cid.Undef, fmt.Errorf("getting power state: %w", err) + } + + pc, err := pst.TotalPower() + if err != nil { + return cid.Undef, fmt.Errorf("getting total power: %w", err) + } + + if !pc.RawBytePower.Equals(rawPow) { + return cid.Undef, fmt.Errorf("TotalRawBytePower (%s) doesn't match previously calculated rawPow (%s)", pc.RawBytePower, rawPow) + } + + if !pc.QualityAdjPower.Equals(qaPow) { + return cid.Undef, fmt.Errorf("QualityAdjPower (%s) doesn't match previously calculated qaPow (%s)", pc.QualityAdjPower, qaPow) + } + + // TODO: Should we re-ConstructState for the reward actor using rawPow as currRealizedPower here? + + c, err := genesisVM.Flush(ctx) + if err != nil { + return cid.Undef, fmt.Errorf("flushing vm: %w", err) + } + return c, nil +} + +// TODO: copied from actors test harness, deduplicate or remove from here +type fakeRand struct{} + +func (fr *fakeRand) ChainGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint + return out, nil +} + +func (fr *fakeRand) ChainGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint + return out, nil +} + +func currentTotalPower(ctx context.Context, vmi vm.Interface, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) { + pwret, err := doExecValue(ctx, vmi, power.Address, maddr, big.Zero(), builtin0.MethodsPower.CurrentTotalPower, nil) + if err != nil { + return nil, err + } + var pwr power0.CurrentTotalPowerReturn + if err := pwr.UnmarshalCBOR(bytes.NewReader(pwret)); err != nil { + return nil, err + } + + return &pwr, nil +} + +func currentEpochBlockReward(ctx context.Context, vm 
vm.Interface, maddr address.Address, av actorstypes.Version) (abi.StoragePower, builtin.FilterEstimate, error) { + rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), reward.Methods.ThisEpochReward, nil) + if err != nil { + return big.Zero(), builtin.FilterEstimate{}, err + } + + // TODO: This hack should move to reward actor wrapper + switch av { + case actorstypes.Version0: + var epochReward reward0.ThisEpochRewardReturn + + if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil { + return big.Zero(), builtin.FilterEstimate{}, err + } + + return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(*epochReward.ThisEpochRewardSmoothed), nil + case actorstypes.Version2: + var epochReward reward2.ThisEpochRewardReturn + + if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil { + return big.Zero(), builtin.FilterEstimate{}, err + } + + return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(epochReward.ThisEpochRewardSmoothed), nil + } + + var epochReward reward4.ThisEpochRewardReturn + + if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil { + return big.Zero(), builtin.FilterEstimate{}, err + } + + return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(epochReward.ThisEpochRewardSmoothed), nil +} diff --git a/pkg/gen/genesis/types.go b/pkg/gen/genesis/types.go new file mode 100644 index 0000000000..c59dafde95 --- /dev/null +++ b/pkg/gen/genesis/types.go @@ -0,0 +1,98 @@ +package genesis + +import ( + "encoding/json" + + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" +) + +type ActorType string + +const ( + TAccount ActorType = "account" + TMultisig 
ActorType = "multisig" +) + +type PreSeal struct { + CommR cid.Cid + CommD cid.Cid + SectorID abi.SectorNumber + Deal types.DealProposal + DealClientKey *crypto.KeyInfo + ProofType abi.RegisteredSealProof +} + +type Key struct { + wallet.KeyInfo + + PublicKey []byte + Address address.Address +} + +type Miner struct { + ID address.Address + Owner address.Address + Worker address.Address + PeerID peer.ID //nolint:golint + + MarketBalance abi.TokenAmount + PowerBalance abi.TokenAmount + + SectorSize abi.SectorSize + + Sectors []*PreSeal +} + +type AccountMeta struct { + Owner address.Address // bls / secpk +} + +func (am *AccountMeta) ActorMeta() json.RawMessage { + out, err := json.Marshal(am) + if err != nil { + panic(err) + } + return out +} + +type MultisigMeta struct { + Signers []address.Address + Threshold int + VestingDuration int + VestingStart int +} + +func (mm *MultisigMeta) ActorMeta() json.RawMessage { + out, err := json.Marshal(mm) + if err != nil { + panic(err) + } + return out +} + +type Actor struct { + Type ActorType + Balance abi.TokenAmount + + Meta json.RawMessage +} + +type Template struct { + NetworkVersion network.Version + Accounts []Actor + Miners []Miner + + NetworkName string + Timestamp uint64 `json:",omitempty"` + + VerifregRootKey Actor + RemainderAccount Actor +} diff --git a/pkg/gen/genesis/util.go b/pkg/gen/genesis/util.go new file mode 100644 index 0000000000..73dec9faf3 --- /dev/null +++ b/pkg/gen/genesis/util.go @@ -0,0 +1,44 @@ +package genesis + +import ( + "context" + "fmt" + + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func mustEnc(i cbg.CBORMarshaler) []byte { + enc, err := actors.SerializeParams(i) + if err != nil { + panic(err) // ok + } + return enc +} + +func 
doExecValue(ctx context.Context, vmi vm.Interface, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) { + ret, err := vmi.ApplyImplicitMessage(context.TODO(), &types.Message{ + To: to, + From: from, + Method: method, + Params: params, + GasLimit: 1_000_000_000_000_000, + Value: value, + Nonce: 0, + }) + if err != nil { + return nil, fmt.Errorf("doExec apply message failed: %w", err) + } + + if ret.Receipt.ExitCode != 0 { + return nil, fmt.Errorf("failed to call method: %s", ret.Receipt.String()) + } + + return ret.Receipt.Return, nil +} diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go new file mode 100644 index 0000000000..928136d4e0 --- /dev/null +++ b/pkg/genesis/genesis.go @@ -0,0 +1,190 @@ +package genesis + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + offline "github.com/ipfs/go-ipfs-exchange-offline" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + "github.com/ipfs/go-merkledag" + "github.com/ipld/go-car" + "github.com/mitchellh/go-homedir" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/gen" + genesis2 "github.com/filecoin-project/venus/pkg/gen/genesis" + "github.com/filecoin-project/venus/pkg/repo" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/venus/fixtures/assets" + "github.com/filecoin-project/venus/fixtures/networks" +) + +var glog = logging.Logger("genesis") + +// InitFunc is the signature for function that is used to create a genesis block. 
+type InitFunc func(cst cbor.IpldStore, bs blockstoreutil.Blockstore) (*types.BlockHeader, error) + +// Ticket is the ticket to place in the genesis block header (which can't be derived from a prior ticket), +// used in the evaluation of the messages in the genesis block, +// and *also* the ticket value used when computing the genesis state (the parent state of the genesis block). +var Ticket = types.Ticket{ + VRFProof: []byte{ + 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, + 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, + }, +} + +// MakeGenesis return a func to construct a genesis block +func MakeGenesis(ctx context.Context, rep repo.Repo, outFile, genesisTemplate string, para *config.ForkUpgradeConfig) InitFunc { + return func(_ cbor.IpldStore, bs blockstoreutil.Blockstore) (*types.BlockHeader, error) { + glog.Warn("Generating new random genesis block, note that this SHOULD NOT happen unless you are setting up new network") + genesisTemplate, err := homedir.Expand(genesisTemplate) + if err != nil { + return nil, err + } + + fdata, err := os.ReadFile(genesisTemplate) + if err != nil { + return nil, fmt.Errorf("reading preseals json: %w", err) + } + + var template genesis2.Template + if err := json.Unmarshal(fdata, &template); err != nil { + return nil, err + } + + if template.Timestamp == 0 { + template.Timestamp = uint64(constants.Clock.Now().Unix()) + } + + // TODO potentially replace this cached blockstore by a CBOR cache. 
+ cbs, err := blockstoreutil.CachedBlockstore(ctx, bs, blockstoreutil.DefaultCacheOpts()) + if err != nil { + return nil, err + } + + b, err := genesis2.MakeGenesisBlock(context.TODO(), rep, cbs, template, para) + if err != nil { + return nil, fmt.Errorf("make genesis block: %w", err) + } + + fmt.Printf("GENESIS MINER ADDRESS: t0%d\n", genesis2.MinerStart) + + f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) + if err != nil { + return nil, err + } + + offl := offline.Exchange(cbs) + blkserv := blockservice.New(cbs, offl) + dserv := merkledag.NewDAGService(blkserv) + + if err := car.WriteCarWithWalker(context.TODO(), dserv, []cid.Cid{b.Genesis.Cid()}, f, gen.CarWalkFunc); err != nil { + return nil, err + } + + glog.Infof("WRITING GENESIS FILE AT %s", f.Name()) + + if err := f.Close(); err != nil { + return nil, err + } + + return b.Genesis, nil + } +} + +func LoadGenesis(ctx context.Context, rep repo.Repo, sourceName string, network string) (InitFunc, error) { + var ( + source io.ReadCloser + err error + ) + + if sourceName == "" { + networkType, err := networks.GetNetworkFromName(network) + if err != nil { + return nil, err + } + + bs, err := assets.GetGenesis(networkType) + if err != nil { + return nil, err + } + source = io.NopCloser(bytes.NewReader(bs)) + } else { + source, err = openGenesisSource(sourceName) + if err != nil { + return nil, err + } + } + + defer func() { _ = source.Close() }() + + genesisBlk, err := extractGenesisBlock(ctx, source, rep) + if err != nil { + return nil, err + } + + gif := func(cst cbor.IpldStore, bs blockstoreutil.Blockstore) (*types.BlockHeader, error) { + return genesisBlk, err + } + + return gif, nil +} + +func extractGenesisBlock(ctx context.Context, source io.ReadCloser, rep repo.Repo) (*types.BlockHeader, error) { + bs := rep.Datastore() + ch, err := car.LoadCar(ctx, bs, source) + if err != nil { + return nil, err + } + + // need to check if we are being handed a car file with a single genesis block 
or an entire chain. + bsBlk, err := bs.Get(ctx, ch.Roots[0]) + if err != nil { + return nil, err + } + cur, err := types.DecodeBlock(bsBlk.RawData()) + if err != nil { + return nil, err + } + + return cur, nil +} + +func openGenesisSource(sourceName string) (io.ReadCloser, error) { + sourceURL, err := url.Parse(sourceName) + if err != nil { + return nil, fmt.Errorf("invalid filepath or URL for genesis file: %s", sourceURL) + } + var source io.ReadCloser + if sourceURL.Scheme == "http" || sourceURL.Scheme == "https" { + // NOTE: This code is temporary. It allows downloading a genesis block via HTTP(S) to be able to join a + // recently deployed staging devnet. + response, err := http.Get(sourceName) + if err != nil { + return nil, err + } + source = response.Body + } else if sourceURL.Scheme != "" { + return nil, fmt.Errorf("unsupported protocol for genesis file: %s", sourceURL.Scheme) + } else { + file, err := os.Open(sourceName) + if err != nil { + return nil, err + } + source = file + } + return source, nil +} diff --git a/pkg/genesis/init.go b/pkg/genesis/init.go new file mode 100644 index 0000000000..0a09f4fe00 --- /dev/null +++ b/pkg/genesis/init.go @@ -0,0 +1,58 @@ +package genesis + +import ( + "context" + "encoding/json" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/repo" +) + +// Init initializes a DefaultSyncer in the given repo. +func Init(ctx context.Context, r repo.Repo, bs blockstoreutil.Blockstore, cst cbor.IpldStore, gen InitFunc) (*chain.Store, error) { + // TODO the following should be wrapped in the chain.Store or a sub + // interface. + // Generate the genesis tipset. 
+ genesis, err := gen(cst, bs) + if err != nil { + return nil, err + } + genTipSet, err := types.NewTipSet([]*types.BlockHeader{genesis}) + if err != nil { + return nil, errors.Wrap(err, "failed to generate genesis block") + } + // todo give fork params + + chainStore := chain.NewStore(r.ChainDatastore(), bs, genesis.Cid(), chain.NewCirculatingSupplyCalculator(bs, genesis.Cid(), config.DefaultForkUpgradeParam)) + + // Persist the genesis tipset to the repo. + genTsas := &chain.TipSetMetadata{ + TipSet: genTipSet, + TipSetStateRoot: genesis.ParentStateRoot, + TipSetReceipts: genesis.ParentMessageReceipts, + } + if err = chainStore.PutTipSetMetadata(ctx, genTsas); err != nil { + return nil, errors.Wrap(err, "failed to put genesis block in chain store") + } + if err = chainStore.SetHead(ctx, genTipSet); err != nil { + return nil, errors.Wrap(err, "failed to persist genesis block in chain store") + } + // Persist the genesis cid to the repo. + val, err := json.Marshal(genesis.Cid()) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal genesis cid") + } + + if err = r.ChainDatastore().Put(ctx, chain.GenesisKey, val); err != nil { + return nil, errors.Wrap(err, "failed to persist genesis cid") + } + + return chainStore, nil +} diff --git a/internal/pkg/journal/journal.go b/pkg/journal/journal.go similarity index 100% rename from internal/pkg/journal/journal.go rename to pkg/journal/journal.go diff --git a/internal/pkg/journal/journal_test.go b/pkg/journal/journal_test.go similarity index 83% rename from internal/pkg/journal/journal_test.go rename to pkg/journal/journal_test.go index ce0bf743dd..94ec6d8650 100644 --- a/internal/pkg/journal/journal_test.go +++ b/pkg/journal/journal_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/clock" + tf 
"github.com/filecoin-project/venus/pkg/testhelpers/testflags" ) func TestSimpleInMemoryJournal(t *testing.T) { @@ -28,10 +28,10 @@ func TestSimpleInMemoryJournal(t *testing.T) { obj := struct { Name string Arg int - }{"bob", + }{ + "bob", 42, } topicJ.Write("event3", "object", obj, "name", "bob", "age", 42) assert.Equal(t, 3, len(memoryWriter.journal.topics["testing"])) - } diff --git a/internal/pkg/journal/noop.go b/pkg/journal/noop.go similarity index 100% rename from internal/pkg/journal/noop.go rename to pkg/journal/noop.go diff --git a/pkg/journal/testing.go b/pkg/journal/testing.go new file mode 100644 index 0000000000..15ce39a29a --- /dev/null +++ b/pkg/journal/testing.go @@ -0,0 +1,62 @@ +package journal + +import ( + "sync" + "testing" + "time" + + "github.com/filecoin-project/venus/pkg/clock" +) + +// NewInMemoryJournal returns a journal backed by an in-memory map. +func NewInMemoryJournal(t *testing.T, clk clock.Clock) Journal { + return &MemoryJournal{ + t: t, + clock: clk, + topics: make(map[string][]entry), + } +} + +// MemoryJournal represents a journal held in memory. +type MemoryJournal struct { + t *testing.T + clock clock.Clock + topicsMu sync.Mutex + topics map[string][]entry +} + +// Topic returns a Writer with the provided `topic`. +func (mj *MemoryJournal) Topic(topic string) Writer { + mr := &MemoryWriter{ + topic: topic, + journal: mj, + } + return mr +} + +type entry struct { + time time.Time + event string + kvs []interface{} +} + +// MemoryWriter writes journal entires in memory. +type MemoryWriter struct { + topic string + journal *MemoryJournal +} + +// Write records an operation and its metadata to a Journal accepting variadic key-value +// pairs. 
+func (mw *MemoryWriter) Write(event string, kvs ...interface{}) { + if len(kvs)%2 != 0 { + mw.journal.t.Fatalf("journal write call has odd number of key values pairs: %d event: %s topic: %s", len(kvs), event, mw.topic) + } + mw.journal.topicsMu.Lock() + mw.journal.topics[mw.topic] = append(mw.journal.topics[mw.topic], entry{ + event: event, + time: mw.journal.clock.Now(), + kvs: kvs, + }) + mw.journal.topicsMu.Unlock() +} diff --git a/internal/pkg/journal/zap.go b/pkg/journal/zap.go similarity index 93% rename from internal/pkg/journal/zap.go rename to pkg/journal/zap.go index bbc40f6c16..34221b2785 100644 --- a/internal/pkg/journal/zap.go +++ b/pkg/journal/zap.go @@ -45,7 +45,7 @@ type ZapWriter struct { topic string } -// Record records an operation and its metadata to a Journal accepting variadic key-value +// Write records an operation and its metadata to a Journal accepting variadic key-value // pairs. func (zw *ZapWriter) Write(event string, kvs ...interface{}) { zw.logger.Infow(event, kvs...) diff --git a/pkg/market/cbor_gen.go b/pkg/market/cbor_gen.go new file mode 100644 index 0000000000..9c9ef1a94c --- /dev/null +++ b/pkg/market/cbor_gen.go @@ -0,0 +1,124 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package market + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufFundedAddressState = []byte{131} + +func (t *FundedAddressState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufFundedAddressState); err != nil { + return err + } + + // t.Addr (address.Address) (struct) + if err := t.Addr.MarshalCBOR(cw); err != nil { + return err + } + + // t.AmtReserved (big.Int) (struct) + if err := t.AmtReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.MsgCid (cid.Cid) (struct) + + if t.MsgCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.MsgCid); err != nil { + return xerrors.Errorf("failed to write cid field t.MsgCid: %w", err) + } + } + + return nil +} + +func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) (err error) { + *t = FundedAddressState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Addr (address.Address) (struct) + + { + + if err := t.Addr.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Addr: %w", err) + } + + } + // t.AmtReserved (big.Int) (struct) + + { + + if err := t.AmtReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.AmtReserved: %w", err) + } + + } + // t.MsgCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if 
err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.MsgCid: %w", err) + } + + t.MsgCid = &c + } + + } + return nil +} diff --git a/pkg/market/fmgr.go b/pkg/market/fmgr.go new file mode 100644 index 0000000000..3e80033828 --- /dev/null +++ b/pkg/market/fmgr.go @@ -0,0 +1,48 @@ +package market + +import ( + "context" + + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" +) + +// fundManagerAPI is the specific methods called by the FundManager +// (used by the tests) +type fundManager interface { + MpoolPushMessage(context.Context, *types.Message, *types.MessageSendSpec) (*types.SignedMessage, error) + StateMarketBalance(context.Context, address.Address, types.TipSetKey) (types.MarketBalance, error) + StateWaitMsg(ctx context.Context, c cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) +} + +type fmgr struct { + MPoolAPI v1api.IMessagePool + ChainInfoAPI v1api.IChainInfo + MinerStateAPI v1api.IMinerState +} + +func newFundmanager(p *FundManagerParams) fundManager { + fmAPI := &fmgr{ + MPoolAPI: p.MP, + ChainInfoAPI: p.CI, + MinerStateAPI: p.MS, + } + + return fmAPI +} + +func (o *fmgr) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) (*types.SignedMessage, error) { + return o.MPoolAPI.MpoolPushMessage(ctx, msg, spec) +} + +func (o *fmgr) StateMarketBalance(ctx context.Context, address address.Address, tsk types.TipSetKey) (types.MarketBalance, error) { + return o.MinerStateAPI.StateMarketBalance(ctx, address, tsk) +} + +func (o *fmgr) StateWaitMsg(ctx context.Context, c cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) { + return 
o.ChainInfoAPI.StateWaitMsg(ctx, c, confidence, limit, allowReplaced) +} diff --git a/pkg/market/fundmanager.go b/pkg/market/fundmanager.go new file mode 100644 index 0000000000..ffdd7783bf --- /dev/null +++ b/pkg/market/fundmanager.go @@ -0,0 +1,713 @@ +package market + +import ( + "context" + "fmt" + "sync" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/venus-shared/actors" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log" +) + +var log = logging.Logger("market_adapter") + +type FundManagerParams struct { + MP v1api.IMessagePool + CI v1api.IChainInfo + MS v1api.IMinerState + DS repo.Datastore +} + +// FundManager keeps track of funds in a set of addresses +type FundManager struct { + ctx context.Context + shutdown context.CancelFunc + api fundManager + str *Store + + lk sync.Mutex + fundedAddrs map[address.Address]*fundedAddress +} + +func NewFundManager(p *FundManagerParams) *FundManager { + fmgrapi := newFundmanager(p) + ctx, cancel := context.WithCancel(context.Background()) + return &FundManager{ + ctx: ctx, + shutdown: cancel, + api: fmgrapi, + str: &Store{p.DS}, + fundedAddrs: make(map[address.Address]*fundedAddress), + } +} + +// newFundManager is used by the tests +func newFundManager(api fundManager, ds datastore.Batching) *FundManager { + ctx, cancel := context.WithCancel(context.Background()) + return &FundManager{ + ctx: ctx, + shutdown: cancel, + api: api, + str: newStore(ds), + fundedAddrs: make(map[address.Address]*fundedAddress), + } +} + +func (fm *FundManager) Stop() { + fm.shutdown() +} 
+ +func (fm *FundManager) Start(ctx context.Context) error { + fm.lk.Lock() + defer fm.lk.Unlock() + + // TODO: + // To save memory: + // - in State() only load addresses with in-progress messages + // - load the others just-in-time from getFundedAddress + // - delete(fm.fundedAddrs, addr) when the queue has been processed + return fm.str.forEach(ctx, func(state *FundedAddressState) { + fa := newFundedAddress(fm, state.Addr) + fa.state = state + fm.fundedAddrs[fa.state.Addr] = fa + fa.start(ctx) + }) +} + +// Creates a fundedAddress if it doesn't already exist, and returns it +func (fm *FundManager) getFundedAddress(addr address.Address) *fundedAddress { + fm.lk.Lock() + defer fm.lk.Unlock() + + fa, ok := fm.fundedAddrs[addr] + if !ok { + fa = newFundedAddress(fm, addr) + fm.fundedAddrs[addr] = fa + } + return fa +} + +// Reserve adds amt to `reserved`. If there are not enough available funds for +// the address, submits a message on chain to top up available funds. +// Returns the cid of the message that was submitted on chain, or cid.Undef if +// the required funds were already available. +func (fm *FundManager) Reserve(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return fm.getFundedAddress(addr).reserve(ctx, wallet, amt) +} + +// Subtract from `reserved`. +func (fm *FundManager) Release(addr address.Address, amt abi.TokenAmount) error { + return fm.getFundedAddress(addr).release(amt) +} + +// Withdraw unreserved funds. Only succeeds if there are enough unreserved +// funds for the address. +// Returns the cid of the message that was submitted on chain. 
+func (fm *FundManager) Withdraw(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return fm.getFundedAddress(addr).withdraw(ctx, wallet, amt) +} + +// GetReserved returns the amount that is currently reserved for the address +func (fm *FundManager) GetReserved(addr address.Address) abi.TokenAmount { + return fm.getFundedAddress(addr).getReserved() +} + +// FundedAddressState keeps track of the state of an address with funds in the +// datastore +type FundedAddressState struct { + Addr address.Address + // AmtReserved is the amount that must be kept in the address (cannot be + // withdrawn) + AmtReserved abi.TokenAmount + // MsgCid is the cid of an in-progress on-chain message + MsgCid *cid.Cid +} + +// fundedAddress keeps track of the state and request queues for a +// particular address +type fundedAddress struct { + ctx context.Context + env *fundManagerEnvironment + str *Store + + lk sync.RWMutex + state *FundedAddressState + + // Note: These request queues are ephemeral, they are not saved to store + reservations []*fundRequest + releases []*fundRequest + withdrawals []*fundRequest + + // Used by the tests + onProcessStartListener func() bool +} + +func newFundedAddress(fm *FundManager, addr address.Address) *fundedAddress { + return &fundedAddress{ + ctx: fm.ctx, + env: &fundManagerEnvironment{api: fm.api}, + str: fm.str, + state: &FundedAddressState{ + Addr: addr, + AmtReserved: abi.NewTokenAmount(0), + }, + } +} + +// If there is an in-progress on-chain message, don't submit any more messages +// on chain until it completes +func (a *fundedAddress) start(ctx context.Context) { + a.lk.Lock() + defer a.lk.Unlock() + + if a.state.MsgCid != nil { + a.debugf("restart: wait for %s", a.state.MsgCid) + a.startWaitForResults(ctx, *a.state.MsgCid) + } +} + +func (a *fundedAddress) getReserved() abi.TokenAmount { + a.lk.RLock() + defer a.lk.RUnlock() + + return a.state.AmtReserved +} + +func (a *fundedAddress) reserve(ctx 
context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return a.requestAndWait(ctx, wallet, amt, &a.reservations) +} + +func (a *fundedAddress) release(amt abi.TokenAmount) error { + _, err := a.requestAndWait(context.Background(), address.Undef, amt, &a.releases) + return err +} + +func (a *fundedAddress) withdraw(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return a.requestAndWait(ctx, wallet, amt, &a.withdrawals) +} + +func (a *fundedAddress) requestAndWait(ctx context.Context, wallet address.Address, amt abi.TokenAmount, reqs *[]*fundRequest) (cid.Cid, error) { + // Create a request and add it to the request queue + req := newFundRequest(ctx, wallet, amt) + + a.lk.Lock() + *reqs = append(*reqs, req) + a.lk.Unlock() + + // Process the queue + go a.process(ctx) + + // Wait for the results + select { + case <-ctx.Done(): + return cid.Undef, ctx.Err() + case r := <-req.Result: + return r.msgCid, r.err + } +} + +// Used by the tests +func (a *fundedAddress) onProcessStart(fn func() bool) { + a.lk.Lock() + defer a.lk.Unlock() + + a.onProcessStartListener = fn +} + +// Process queued requests +func (a *fundedAddress) process(ctx context.Context) { + a.lk.Lock() + defer a.lk.Unlock() + + // Used by the tests + if a.onProcessStartListener != nil { + done := a.onProcessStartListener() + if !done { + return + } + a.onProcessStartListener = nil + } + + // Check if we're still waiting for the response to a message + if a.state.MsgCid != nil { + return + } + + // Check if there's anything to do + haveReservations := len(a.reservations) > 0 || len(a.releases) > 0 + haveWithdrawals := len(a.withdrawals) > 0 + if !haveReservations && !haveWithdrawals { + return + } + + // Process reservations / releases + if haveReservations { + res, err := a.processReservations(a.reservations, a.releases) + if err == nil { + a.applyStateChange(ctx, res.msgCid, res.amtReserved) + } + a.reservations = 
filterOutProcessedReqs(a.reservations) + a.releases = filterOutProcessedReqs(a.releases) + } + + // If there was no message sent on chain by adding reservations, and all + // reservations have completed processing, process withdrawals + if haveWithdrawals && a.state.MsgCid == nil && len(a.reservations) == 0 { + withdrawalCid, err := a.processWithdrawals(a.withdrawals) + if err == nil && withdrawalCid != cid.Undef { + a.applyStateChange(ctx, &withdrawalCid, types.EmptyInt) + } + a.withdrawals = filterOutProcessedReqs(a.withdrawals) + } + + // If a message was sent on-chain + if a.state.MsgCid != nil { + // Start waiting for results of message (async) + a.startWaitForResults(ctx, *a.state.MsgCid) + } + + // Process any remaining queued requests + go a.process(ctx) +} + +// Filter out completed requests +func filterOutProcessedReqs(reqs []*fundRequest) []*fundRequest { + filtered := make([]*fundRequest, 0, len(reqs)) + for _, req := range reqs { + if !req.Completed() { + filtered = append(filtered, req) + } + } + return filtered +} + +// Apply the results of processing queues and save to the datastore +func (a *fundedAddress) applyStateChange(ctx context.Context, msgCid *cid.Cid, amtReserved abi.TokenAmount) { + a.state.MsgCid = msgCid + if !amtReserved.Nil() { + a.state.AmtReserved = amtReserved + } + a.saveState(ctx) +} + +// Clear the pending message cid so that a new message can be sent +func (a *fundedAddress) clearWaitState(ctx context.Context) { + a.state.MsgCid = nil + a.saveState(ctx) +} + +// Save state to datastore +func (a *fundedAddress) saveState(ctx context.Context) { + // Not much we can do if saving to the datastore fails, just log + err := a.str.save(ctx, a.state) + if err != nil { + log.Errorf("saving state to store for addr %s: %w", a.state.Addr, err) + } +} + +// The result of processing the reservation / release queues +type processResult struct { + // Requests that completed without adding funds + covered []*fundRequest + // Requests that added 
funds + added []*fundRequest + + // The new reserved amount + amtReserved abi.TokenAmount + // The message cid, if a message was submitted on-chain + msgCid *cid.Cid +} + +// process reservations and releases, and return the resulting changes to state +func (a *fundedAddress) processReservations(reservations []*fundRequest, releases []*fundRequest) (pr *processResult, prerr error) { + // When the function returns + defer func() { + // If there's an error, mark all requests as errored + if prerr != nil { + for _, req := range append(reservations, releases...) { + req.Complete(cid.Undef, prerr) + } + return + } + + // Complete all release requests + for _, req := range releases { + req.Complete(cid.Undef, nil) + } + + // Complete all requests that were covered by released amounts + for _, req := range pr.covered { + req.Complete(cid.Undef, nil) + } + + // If a message was sent + if pr.msgCid != nil { + // Complete all add funds requests + for _, req := range pr.added { + req.Complete(*pr.msgCid, nil) + } + } + }() + + // Split reservations into those that are covered by released amounts, + // and those to add to the reserved amount. + // Note that we process requests from the same wallet in batches. So some + // requests may not be included in covered if they don't match the first + // covered request's wallet. These will be processed on a subsequent + // invocation of processReservations. 
+ toCancel, toAdd, reservedDelta := splitReservations(reservations, releases) + + // Apply the reserved delta to the reserved amount + reserved := big.Add(a.state.AmtReserved, reservedDelta) + if reserved.LessThan(abi.NewTokenAmount(0)) { + reserved = abi.NewTokenAmount(0) + } + res := &processResult{ + amtReserved: reserved, + covered: toCancel, + } + + // Work out the amount to add to the balance + amtToAdd := abi.NewTokenAmount(0) + if len(toAdd) > 0 && reserved.GreaterThan(abi.NewTokenAmount(0)) { + // Get available funds for address + avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr) + if err != nil { + return res, err + } + + // amount to add = new reserved amount - available + amtToAdd = big.Sub(reserved, avail) + a.debugf("reserved %d - avail %d = to add %d", reserved, avail, amtToAdd) + } + + // If there's nothing to add to the balance, bail out + if amtToAdd.LessThanEqual(abi.NewTokenAmount(0)) { + res.covered = append(res.covered, toAdd...) + return res, nil + } + + // Add funds to address + a.debugf("add funds %d", amtToAdd) + addFundsCid, err := a.env.AddFunds(a.ctx, toAdd[0].Wallet, a.state.Addr, amtToAdd) + if err != nil { + return res, err + } + + // Mark reservation requests as complete + res.added = toAdd + + // Save the message CID to state + res.msgCid = &addFundsCid + return res, nil +} + +// Split reservations into those that are under the total release amount +// (covered) and those that exceed it (to add). +// Note that we process requests from the same wallet in batches. So some +// requests may not be included in covered if they don't match the first +// covered request's wallet. 
+func splitReservations(reservations []*fundRequest, releases []*fundRequest) ([]*fundRequest, []*fundRequest, abi.TokenAmount) { + toCancel := make([]*fundRequest, 0, len(reservations)) + toAdd := make([]*fundRequest, 0, len(reservations)) + toAddAmt := abi.NewTokenAmount(0) + + // Sum release amounts + releaseAmt := abi.NewTokenAmount(0) + for _, req := range releases { + releaseAmt = big.Add(releaseAmt, req.Amount()) + } + + // We only want to combine requests that come from the same wallet + batchWallet := address.Undef + for _, req := range reservations { + amt := req.Amount() + + // If the amount to add to the reserve is cancelled out by a release + if amt.LessThanEqual(releaseAmt) { + // Cancel the request and update the release total + releaseAmt = big.Sub(releaseAmt, amt) + toCancel = append(toCancel, req) + continue + } + + // The amount to add is greater that the release total so we want + // to send an add funds request + + // The first time the wallet will be undefined + if batchWallet == address.Undef { + batchWallet = req.Wallet + } + // If this request's wallet is the same as the batch wallet, + // the requests will be combined + if batchWallet == req.Wallet { + delta := big.Sub(amt, releaseAmt) + toAddAmt = big.Add(toAddAmt, delta) + releaseAmt = abi.NewTokenAmount(0) + toAdd = append(toAdd, req) + } + } + + // The change in the reserved amount is "amount to add" - "amount to release" + reservedDelta := big.Sub(toAddAmt, releaseAmt) + + return toCancel, toAdd, reservedDelta +} + +// process withdrawal queue +func (a *fundedAddress) processWithdrawals(withdrawals []*fundRequest) (msgCid cid.Cid, prerr error) { + // If there's an error, mark all withdrawal requests as errored + defer func() { + if prerr != nil { + for _, req := range withdrawals { + req.Complete(cid.Undef, prerr) + } + } + }() + + // Get the net available balance + avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr) + if err != nil { + return cid.Undef, err + } + + netAvail := 
big.Sub(avail, a.state.AmtReserved) + + // Fit as many withdrawals as possible into the available balance, and fail + // the rest + withdrawalAmt := abi.NewTokenAmount(0) + allowedAmt := abi.NewTokenAmount(0) + allowed := make([]*fundRequest, 0, len(withdrawals)) + var batchWallet address.Address + for _, req := range withdrawals { + amt := req.Amount() + if amt.IsZero() { + // If the context for the request was cancelled, bail out + req.Complete(cid.Undef, err) + continue + } + + // If the amount would exceed the available amount, complete the + // request with an error + newWithdrawalAmt := big.Add(withdrawalAmt, amt) + if newWithdrawalAmt.GreaterThan(netAvail) { + msg := fmt.Sprintf("insufficient funds for withdrawal of %s: ", types.FIL(amt)) + msg += fmt.Sprintf("net available (%s) = available (%s) - reserved (%s)", + types.FIL(big.Sub(netAvail, withdrawalAmt)), types.FIL(avail), types.FIL(a.state.AmtReserved)) + if !withdrawalAmt.IsZero() { + msg += fmt.Sprintf(" - queued withdrawals (%s)", types.FIL(withdrawalAmt)) + } + err := fmt.Errorf(msg) + a.debugf("%s", err) + req.Complete(cid.Undef, err) + continue + } + + // If this is the first allowed withdrawal request in this batch, save + // its wallet address + if batchWallet == address.Undef { + batchWallet = req.Wallet + } + // If the request wallet doesn't match the batch wallet, bail out + // (the withdrawal will be processed after the current batch has + // completed) + if req.Wallet != batchWallet { + continue + } + + // Include this withdrawal request in the batch + withdrawalAmt = newWithdrawalAmt + a.debugf("withdraw %d", amt) + allowed = append(allowed, req) + allowedAmt = big.Add(allowedAmt, amt) + } + + // Check if there is anything to withdraw. 
+ // Note that if the context for a request is cancelled, + // req.Amount() returns zero + if allowedAmt.Equals(abi.NewTokenAmount(0)) { + // Mark allowed requests as complete + for _, req := range allowed { + req.Complete(cid.Undef, nil) + } + return cid.Undef, nil + } + + // Withdraw funds + a.debugf("withdraw funds %d", allowedAmt) + withdrawFundsCid, err := a.env.WithdrawFunds(a.ctx, allowed[0].Wallet, a.state.Addr, allowedAmt) + if err != nil { + return cid.Undef, err + } + + // Mark allowed requests as complete + for _, req := range allowed { + req.Complete(withdrawFundsCid, nil) + } + + // Save the message CID to state + return withdrawFundsCid, nil +} + +// asynchonously wait for results of message +func (a *fundedAddress) startWaitForResults(ctx context.Context, msgCid cid.Cid) { + go func() { + err := a.env.WaitMsg(a.ctx, msgCid) + if err != nil { + // We don't really care about the results here, we're just waiting + // so as to only process one on-chain message at a time + log.Errorf("waiting for results of message %s for addr %s: %w", msgCid, a.state.Addr, err) + } + + a.lk.Lock() + a.debugf("complete wait") + a.clearWaitState(ctx) + a.lk.Unlock() + + a.process(ctx) + }() +} + +func (a *fundedAddress) debugf(args ...interface{}) { + fmtStr := args[0].(string) + args = args[1:] + log.Debugf(a.state.Addr.String()+": "+fmtStr, args...) 
+} + +// The result of a fund request +type reqResult struct { + msgCid cid.Cid + err error +} + +// A request to change funds +type fundRequest struct { + ctx context.Context + amt abi.TokenAmount + completed chan struct{} + Wallet address.Address + Result chan reqResult +} + +func newFundRequest(ctx context.Context, wallet address.Address, amt abi.TokenAmount) *fundRequest { + return &fundRequest{ + ctx: ctx, + amt: amt, + Wallet: wallet, + Result: make(chan reqResult), + completed: make(chan struct{}), + } +} + +// Amount returns zero if the context has expired +func (frp *fundRequest) Amount() abi.TokenAmount { + if frp.ctx.Err() != nil { + return abi.NewTokenAmount(0) + } + return frp.amt +} + +// Complete is called with the message CID when the funds request has been +// started or with the error if there was an error +func (frp *fundRequest) Complete(msgCid cid.Cid, err error) { + select { + case <-frp.completed: + case <-frp.ctx.Done(): + case frp.Result <- reqResult{msgCid: msgCid, err: err}: + } + close(frp.completed) +} + +// Completed indicates if Complete has already been called +func (frp *fundRequest) Completed() bool { + select { + case <-frp.completed: + return true + default: + return false + } +} + +// fundManagerEnvironment simplifies some API calls +type fundManagerEnvironment struct { + api fundManager +} + +func (env *fundManagerEnvironment) AvailableFunds(ctx context.Context, addr address.Address) (abi.TokenAmount, error) { + bal, err := env.api.StateMarketBalance(ctx, addr, types.EmptyTSK) + if err != nil { + return abi.NewTokenAmount(0), err + } + + return big.Sub(bal.Escrow, bal.Locked), nil +} + +func (env *fundManagerEnvironment) AddFunds( + ctx context.Context, + wallet address.Address, + addr address.Address, + amt abi.TokenAmount, +) (cid.Cid, error) { + params, err := actors.SerializeParams(&addr) + if err != nil { + return cid.Undef, err + } + + smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{ + To: 
builtin.StorageMarketActorAddr, + From: wallet, + Value: amt, + Method: builtin.MethodsMarket.AddBalance, + Params: params, + }, nil) + + if aerr != nil { + return cid.Undef, aerr + } + return smsg.Cid(), nil +} + +func (env *fundManagerEnvironment) WithdrawFunds( + ctx context.Context, + wallet address.Address, + addr address.Address, + amt abi.TokenAmount, +) (cid.Cid, error) { + params, err := actors.SerializeParams(&types.MarketWithdrawBalanceParams{ + ProviderOrClientAddress: addr, + Amount: amt, + }) + if err != nil { + return cid.Undef, fmt.Errorf("serializing params: %w", err) + } + + smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{ + To: builtin.StorageMarketActorAddr, + From: wallet, + Value: big.NewInt(0), + Method: builtin.MethodsMarket.WithdrawBalance, + Params: params, + }, nil) + + if aerr != nil { + return cid.Undef, aerr + } + return smsg.Cid(), nil +} + +func (env *fundManagerEnvironment) WaitMsg(ctx context.Context, c cid.Cid) error { + _, err := env.api.StateWaitMsg(ctx, c, constants.MessageConfidence, constants.LookbackNoLimit, true) + return err +} diff --git a/pkg/market/fundmanager_test.go b/pkg/market/fundmanager_test.go new file mode 100644 index 0000000000..8bd0d504d2 --- /dev/null +++ b/pkg/market/fundmanager_test.go @@ -0,0 +1,849 @@ +// stm: #unit +package market + +import ( + "bytes" + "context" + "sync" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + + tutils "github.com/filecoin-project/specs-actors/v6/support/testing" + + "github.com/filecoin-project/venus/pkg/config" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ 
"github.com/filecoin-project/venus/pkg/crypto/secp" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// TestFundManagerBasic verifies that the basic fund manager operations work +func TestFundManagerBasic(t *testing.T) { + tf.UnitTest(t) + s := setup(t) + defer s.fm.Stop() + + // Reserve 10 + // balance: 0 -> 10 + // reserved: 0 -> 10 + amt := abi.NewTokenAmount(10) + // stm: @MARKET_FUND_MANAGER_RESERVE_001 + sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + // stm: @MARKET_FUND_MANAGER_GET_RESERVED_001 + amount := s.fm.GetReserved(s.acctAddr) + require.Equal(t, amount, amt) + + msg := s.mockAPI.getSentMessage(sentinel) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + s.mockAPI.completeMsg(sentinel) + + // Reserve 7 + // balance: 10 -> 17 + // reserved: 10 -> 17 + amt = abi.NewTokenAmount(7) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg = s.mockAPI.getSentMessage(sentinel) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + s.mockAPI.completeMsg(sentinel) + + // Release 5 + // balance: 17 + // reserved: 17 -> 12 + amt = abi.NewTokenAmount(5) + // stm: @MARKET_FUND_MANAGER_RELEASE_001 + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + + // Withdraw 2 + // balance: 17 -> 15 + // reserved: 12 + amt = abi.NewTokenAmount(2) + // stm: @MARKET_FUND_MANAGER_WITHDRAW_001 + sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg = s.mockAPI.getSentMessage(sentinel) + checkWithdrawMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + s.mockAPI.completeMsg(sentinel) + + // Reserve 3 + // balance: 15 + // reserved: 12 -> 15 + // Note: reserved (15) is <= balance (15) so should not send on-chain + // message + msgCount := s.mockAPI.messageCount() + amt 
= abi.NewTokenAmount(3) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + require.Equal(t, msgCount, s.mockAPI.messageCount()) + require.Equal(t, sentinel, cid.Undef) + + // Reserve 1 + // balance: 15 -> 16 + // reserved: 15 -> 16 + // Note: reserved (16) is above balance (15) so *should* send on-chain + // message to top up balance + amt = abi.NewTokenAmount(1) + topUp := abi.NewTokenAmount(1) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + s.mockAPI.completeMsg(sentinel) + msg = s.mockAPI.getSentMessage(sentinel) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, topUp) + + // Withdraw 1 + // balance: 16 + // reserved: 16 + // Note: Expect failure because there is no available balance to withdraw: + // balance - reserved = 16 - 16 = 0 + amt = abi.NewTokenAmount(1) + _, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + require.Error(t, err) + +} + +// TestFundManagerParallel verifies that operations can be run in parallel +func TestFundManagerParallel(t *testing.T) { + tf.UnitTest(t) + s := setup(t) + defer s.fm.Stop() + + // Reserve 10 + amt := abi.NewTokenAmount(10) + sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + // Wait until all the subsequent requests are queued up + queueReady := make(chan struct{}) + fa := s.fm.getFundedAddress(s.acctAddr) + fa.onProcessStart(func() bool { + if len(fa.withdrawals) == 1 && len(fa.reservations) == 2 && len(fa.releases) == 1 { + close(queueReady) + return true + } + return false + }) + + // Withdraw 5 (should not run until after reserves / releases) + withdrawReady := make(chan error) + go func() { + amt = abi.NewTokenAmount(5) + _, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + withdrawReady <- err + }() + + reserveSentinels := make(chan cid.Cid) + + // Reserve 3 + go func() { + amt := abi.NewTokenAmount(3) + sentinelReserve3, err := 
s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + reserveSentinels <- sentinelReserve3 + }() + + // Reserve 5 + go func() { + amt := abi.NewTokenAmount(5) + sentinelReserve5, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + reserveSentinels <- sentinelReserve5 + }() + + // Release 2 + go func() { + amt := abi.NewTokenAmount(2) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + }() + + // Everything is queued up + <-queueReady + + // Complete the "Reserve 10" message + s.mockAPI.completeMsg(sentinelReserve10) + msg := s.mockAPI.getSentMessage(sentinelReserve10) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(10)) + + // The other requests should now be combined and be submitted on-chain as + // a single message + rs1 := <-reserveSentinels + rs2 := <-reserveSentinels + require.Equal(t, rs1, rs2) + + // Withdraw should not have been called yet, because reserve / release + // requests run first + select { + case <-withdrawReady: + require.Fail(t, "Withdraw should run after reserve / release") + default: + } + + // Complete the message + s.mockAPI.completeMsg(rs1) + msg = s.mockAPI.getSentMessage(rs1) + + // "Reserve 3" +3 + // "Reserve 5" +5 + // "Release 2" -2 + // Result: 6 + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(6)) + + // Expect withdraw to fail because not enough available funds + err = <-withdrawReady + require.Error(t, err) +} + +// TestFundManagerReserveByWallet verifies that reserve requests are grouped by wallet +func TestFundManagerReserveByWallet(t *testing.T) { + tf.UnitTest(t) + s := setup(t) + defer s.fm.Stop() + + ctx := context.Background() + walletAddrA, err := s.wllt.NewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + walletAddrB, err := s.wllt.NewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + + // Wait until all the reservation requests are queued up + walletAQueuedUp := make(chan 
struct{}) + queueReady := make(chan struct{}) + fa := s.fm.getFundedAddress(s.acctAddr) + fa.onProcessStart(func() bool { + if len(fa.reservations) == 1 { + close(walletAQueuedUp) + } + if len(fa.reservations) == 3 { + close(queueReady) + return true + } + return false + }) + + type reserveResult struct { + ws cid.Cid + err error + } + results := make(chan *reserveResult) + + amtA1 := abi.NewTokenAmount(1) + go func() { + // Wallet A: Reserve 1 + sentinelA1, err := s.fm.Reserve(s.ctx, walletAddrA, s.acctAddr, amtA1) + results <- &reserveResult{ + ws: sentinelA1, + err: err, + } + }() + + amtB1 := abi.NewTokenAmount(2) + amtB2 := abi.NewTokenAmount(3) + go func() { + // Wait for reservation for wallet A to be queued up + <-walletAQueuedUp + + // Wallet B: Reserve 2 + go func() { + sentinelB1, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB1) + results <- &reserveResult{ + ws: sentinelB1, + err: err, + } + }() + + // Wallet B: Reserve 3 + sentinelB2, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB2) + results <- &reserveResult{ + ws: sentinelB2, + err: err, + } + }() + + // All reservation requests are queued up + <-queueReady + + resA := <-results + sentinelA1 := resA.ws + + // Should send to wallet A + msg := s.mockAPI.getSentMessage(sentinelA1) + checkAddMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1) + + // Complete wallet A message + s.mockAPI.completeMsg(sentinelA1) + + resB1 := <-results + resB2 := <-results + require.NoError(t, resB1.err) + require.NoError(t, resB2.err) + sentinelB1 := resB1.ws + sentinelB2 := resB2.ws + + // Should send different message to wallet B + require.NotEqual(t, sentinelA1, sentinelB1) + // Should be single message combining amount 1 and 2 + require.Equal(t, sentinelB1, sentinelB2) + msg = s.mockAPI.getSentMessage(sentinelB1) + checkAddMessageFields(t, msg, walletAddrB, s.acctAddr, big.Add(amtB1, amtB2)) +} + +// TestFundManagerWithdrawal verifies that as many withdraw operations as +// possible are processed 
+func TestFundManagerWithdrawalLimit(t *testing.T) { + tf.UnitTest(t) + s := setup(t) + defer s.fm.Stop() + + // Reserve 10 + amt := abi.NewTokenAmount(10) + sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + // Complete the "Reserve 10" message + s.mockAPI.completeMsg(sentinelReserve10) + + // Release 10 + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + + // Queue up withdraw requests + queueReady := make(chan struct{}) + fa := s.fm.getFundedAddress(s.acctAddr) + withdrawalReqTotal := 3 + withdrawalReqEnqueued := 0 + withdrawalReqQueue := make(chan func(), withdrawalReqTotal) + fa.onProcessStart(func() bool { + // If a new withdrawal request was enqueued + if len(fa.withdrawals) > withdrawalReqEnqueued { + withdrawalReqEnqueued++ + + // Pop the next request and run it + select { + case fn := <-withdrawalReqQueue: + go fn() + default: + } + } + // Once all the requests have arrived, we're ready to process the queue + if withdrawalReqEnqueued == withdrawalReqTotal { + close(queueReady) + return true + } + return false + }) + + type withdrawResult struct { + reqIndex int + ws cid.Cid + err error + } + withdrawRes := make(chan *withdrawResult) + + // Queue up three "Withdraw 5" requests + enqueuedCount := 0 + for i := 0; i < withdrawalReqTotal; i++ { + withdrawalReqQueue <- func() { + idx := enqueuedCount + enqueuedCount++ + + amt := abi.NewTokenAmount(5) + ws, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + withdrawRes <- &withdrawResult{reqIndex: idx, ws: ws, err: err} + } + } + // Start the first request + fn := <-withdrawalReqQueue + go fn() + + // All withdrawal requests are queued up and ready to be processed + <-queueReady + + // Organize results in request order + results := make([]*withdrawResult, withdrawalReqTotal) + for i := 0; i < 3; i++ { + res := <-withdrawRes + results[res.reqIndex] = res + } + + // Available 10 + // Withdraw 5 + // Expect Success + require.NoError(t, 
results[0].err) + // Available 5 + // Withdraw 5 + // Expect Success + require.NoError(t, results[1].err) + // Available 0 + // Withdraw 5 + // Expect FAIL + require.Error(t, results[2].err) + + // Expect withdrawal requests that fit under reserved amount to be combined + // into a single message on-chain + require.Equal(t, results[0].ws, results[1].ws) +} + +// TestFundManagerWithdrawByWallet verifies that withdraw requests are grouped by wallet +func TestFundManagerWithdrawByWallet(t *testing.T) { + tf.UnitTest(t) + s := setup(t) + defer s.fm.Stop() + + ctx := context.Background() + walletAddrA, err := s.wllt.NewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + walletAddrB, err := s.wllt.NewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + + // Reserve 10 + reserveAmt := abi.NewTokenAmount(10) + sentinelReserve, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, reserveAmt) + require.NoError(t, err) + s.mockAPI.completeMsg(sentinelReserve) + + time.Sleep(10 * time.Millisecond) + + // Release 10 + err = s.fm.Release(s.acctAddr, reserveAmt) + require.NoError(t, err) + + type withdrawResult struct { + ws cid.Cid + err error + } + results := make(chan *withdrawResult) + + // Wait until withdrawals are queued up + walletAQueuedUp := make(chan struct{}) + queueReady := make(chan struct{}) + withdrawalCount := 0 + fa := s.fm.getFundedAddress(s.acctAddr) + fa.onProcessStart(func() bool { + if len(fa.withdrawals) == withdrawalCount { + return false + } + withdrawalCount = len(fa.withdrawals) + + if withdrawalCount == 1 { + close(walletAQueuedUp) + } else if withdrawalCount == 3 { + close(queueReady) + return true + } + return false + }) + + amtA1 := abi.NewTokenAmount(1) + go func() { + // Wallet A: Withdraw 1 + sentinelA1, err := s.fm.Withdraw(s.ctx, walletAddrA, s.acctAddr, amtA1) + results <- &withdrawResult{ + ws: sentinelA1, + err: err, + } + }() + + amtB1 := abi.NewTokenAmount(2) + amtB2 := abi.NewTokenAmount(3) + go func() { + // Wait until 
withdraw for wallet A is queued up + <-walletAQueuedUp + + // Wallet B: Withdraw 2 + go func() { + sentinelB1, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB1) + results <- &withdrawResult{ + ws: sentinelB1, + err: err, + } + }() + + // Wallet B: Withdraw 3 + sentinelB2, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB2) + results <- &withdrawResult{ + ws: sentinelB2, + err: err, + } + }() + + // Withdrawals are queued up + <-queueReady + + // Should withdraw from wallet A first + resA1 := <-results + sentinelA1 := resA1.ws + msg := s.mockAPI.getSentMessage(sentinelA1) + checkWithdrawMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1) + + // Complete wallet A message + s.mockAPI.completeMsg(sentinelA1) + + resB1 := <-results + resB2 := <-results + require.NoError(t, resB1.err) + require.NoError(t, resB2.err) + sentinelB1 := resB1.ws + sentinelB2 := resB2.ws + + // Should send different message for wallet B from wallet A + require.NotEqual(t, sentinelA1, sentinelB1) + // Should be single message combining amount 1 and 2 + require.Equal(t, sentinelB1, sentinelB2) + msg = s.mockAPI.getSentMessage(sentinelB1) + checkWithdrawMessageFields(t, msg, walletAddrB, s.acctAddr, big.Add(amtB1, amtB2)) +} + +// TestFundManagerRestart verifies that waiting for incomplete requests resumes +// on restart +func TestFundManagerRestart(t *testing.T) { + tf.UnitTest(t) + s := setup(t) + defer s.fm.Stop() + + ctx := context.Background() + + acctAddr2 := tutils.NewActorAddr(t, "addr2") + + // Address 1: Reserve 10 + amt := abi.NewTokenAmount(10) + sentinelAddr1, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg := s.mockAPI.getSentMessage(sentinelAddr1) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + // Address 2: Reserve 7 + amt2 := abi.NewTokenAmount(7) + sentinelAddr2Res7, err := s.fm.Reserve(s.ctx, s.walletAddr, acctAddr2, amt2) + require.NoError(t, err) + + msg2 := 
s.mockAPI.getSentMessage(sentinelAddr2Res7) + checkAddMessageFields(t, msg2, s.walletAddr, acctAddr2, amt2) + + // Complete "Address 1: Reserve 10" + s.mockAPI.completeMsg(sentinelAddr1) + + // Give the completed state a moment to be stored before restart + time.Sleep(time.Millisecond * 10) + + // Restart + mockAPIAfter := s.mockAPI + fmAfter := newFundManager(mockAPIAfter, s.ds) + // stm: @MARKET_FUND_MANAGER_START_001 + err = fmAfter.Start(ctx) + require.NoError(t, err) + + amt3 := abi.NewTokenAmount(9) + reserveSentinel := make(chan cid.Cid) + go func() { + // Address 2: Reserve 9 + sentinel3, err := fmAfter.Reserve(s.ctx, s.walletAddr, acctAddr2, amt3) + require.NoError(t, err) + reserveSentinel <- sentinel3 + }() + + // Expect no message to be sent, because still waiting for previous + // message "Address 2: Reserve 7" to complete on-chain + select { + case <-reserveSentinel: + require.Fail(t, "Expected no message to be sent") + case <-time.After(10 * time.Millisecond): + } + + // Complete "Address 2: Reserve 7" + mockAPIAfter.completeMsg(sentinelAddr2Res7) + + // Expect waiting message to now be sent + sentinel3 := <-reserveSentinel + msg3 := mockAPIAfter.getSentMessage(sentinel3) + checkAddMessageFields(t, msg3, s.walletAddr, acctAddr2, amt3) +} + +// TestFundManagerReleaseAfterPublish verifies that release is successful in +// the following scenario: +// 1. Deal A adds 5 to addr1: reserved 0 -> 5 available 0 -> 5 +// 2. Deal B adds 7 to addr1: reserved 5 -> 12 available 5 -> 12 +// 3. Deal B completes, reducing addr1 by 7: reserved 12 available 12 -> 5 +// 4. 
Deal A releases 5 from addr1: reserved 12 -> 7 available 5 +func TestFundManagerReleaseAfterPublish(t *testing.T) { + tf.UnitTest(t) + s := setup(t) + defer s.fm.Stop() + + // Deal A: Reserve 5 + // balance: 0 -> 5 + // reserved: 0 -> 5 + amt := abi.NewTokenAmount(5) + sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + s.mockAPI.completeMsg(sentinel) + + // Deal B: Reserve 7 + // balance: 5 -> 12 + // reserved: 5 -> 12 + amt = abi.NewTokenAmount(7) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + s.mockAPI.completeMsg(sentinel) + + // Deal B: Publish (removes Deal B amount from balance) + // balance: 12 -> 5 + // reserved: 12 + amt = abi.NewTokenAmount(7) + s.mockAPI.publish(s.acctAddr, amt) + + // Deal A: Release 5 + // balance: 5 + // reserved: 12 -> 7 + amt = abi.NewTokenAmount(5) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + + // Deal B: Release 7 + // balance: 5 + // reserved: 12 -> 7 + amt = abi.NewTokenAmount(5) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) +} + +type scaffold struct { + ctx context.Context + ds *ds_sync.MutexDatastore + wllt *wallet.Wallet + walletAddr address.Address + acctAddr address.Address + mockAPI *mockFundManagerAPI + fm *FundManager +} + +func setup(t *testing.T) *scaffold { + ctx := context.Background() + t.Log("create a backend") + ds := datastore.NewMapDatastore() + fs, err := wallet.NewDSBackend(ctx, ds, config.TestPassphraseConfig(), wallet.TestPassword) + assert.NoError(t, err) + t.Log("create a wallet with a single backend") + wllt := wallet.New(fs) + walletAddr, err := wllt.NewAddress(ctx, address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + acctAddr := tutils.NewActorAddr(t, "addr") + mockAPI := newMockFundManagerAPI(walletAddr) + dstore := ds_sync.MutexWrap(ds) + fm := newFundManager(mockAPI, dstore) + return &scaffold{ + ctx: ctx, + ds: dstore, + wllt: wllt, + walletAddr: walletAddr, + 
acctAddr: acctAddr, + mockAPI: mockAPI, + fm: fm, + } +} + +func checkAddMessageFields(t *testing.T, msg *types.Message, from address.Address, to address.Address, amt abi.TokenAmount) { + require.Equal(t, from, msg.From) + require.Equal(t, market.Address, msg.To) + require.Equal(t, amt, msg.Value) + + var paramsTo address.Address + err := paramsTo.UnmarshalCBOR(bytes.NewReader(msg.Params)) + require.NoError(t, err) + require.Equal(t, to, paramsTo) +} + +func checkWithdrawMessageFields(t *testing.T, msg *types.Message, from address.Address, addr address.Address, amt abi.TokenAmount) { + require.Equal(t, from, msg.From) + require.Equal(t, market.Address, msg.To) + require.Equal(t, abi.NewTokenAmount(0), msg.Value) + + var params types.MarketWithdrawBalanceParams + err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)) + require.NoError(t, err) + require.Equal(t, addr, params.ProviderOrClientAddress) + require.Equal(t, amt, params.Amount) +} + +type sentMsg struct { + msg *types.SignedMessage + ready chan struct{} +} + +type mockFundManagerAPI struct { + wallet address.Address + + lk sync.Mutex + escrow map[address.Address]abi.TokenAmount + sentMsgs map[cid.Cid]*sentMsg + completedMsgs map[cid.Cid]struct{} + waitingFor map[cid.Cid]chan struct{} +} + +func newMockFundManagerAPI(wallet address.Address) *mockFundManagerAPI { + return &mockFundManagerAPI{ + wallet: wallet, + escrow: make(map[address.Address]abi.TokenAmount), + sentMsgs: make(map[cid.Cid]*sentMsg), + completedMsgs: make(map[cid.Cid]struct{}), + waitingFor: make(map[cid.Cid]chan struct{}), + } +} + +func (mapi *mockFundManagerAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) (*types.SignedMessage, error) { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + smsg := &types.SignedMessage{Message: *msg} + smsgCid := smsg.Cid() + mapi.sentMsgs[smsgCid] = &sentMsg{msg: smsg, ready: make(chan struct{})} + + return smsg, nil +} + +func (mapi *mockFundManagerAPI) 
getSentMessage(c cid.Cid) *types.Message { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + for i := 0; i < 1000; i++ { + if pending, ok := mapi.sentMsgs[c]; ok { + return &pending.msg.Message + } + time.Sleep(time.Millisecond) + } + panic("expected message to be sent") +} + +func (mapi *mockFundManagerAPI) messageCount() int { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + return len(mapi.sentMsgs) +} + +func (mapi *mockFundManagerAPI) completeMsg(msgCid cid.Cid) { + mapi.lk.Lock() + + pmsg, ok := mapi.sentMsgs[msgCid] + if ok { + if pmsg.msg.Message.Method == market.Methods.AddBalance { + var escrowAcct address.Address + err := escrowAcct.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params)) + if err != nil { + panic(err) + } + + escrow := mapi.getEscrow(escrowAcct) + before := escrow + escrow = big.Add(escrow, pmsg.msg.Message.Value) + mapi.escrow[escrowAcct] = escrow + log.Debugf("%s: escrow %d -> %d", escrowAcct, before, escrow) + } else { + var params types.MarketWithdrawBalanceParams + err := params.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params)) + if err != nil { + panic(err) + } + escrowAcct := params.ProviderOrClientAddress + + escrow := mapi.getEscrow(escrowAcct) + before := escrow + escrow = big.Sub(escrow, params.Amount) + mapi.escrow[escrowAcct] = escrow + log.Debugf("%s: escrow %d -> %d", escrowAcct, before, escrow) + } + } + + mapi.completedMsgs[msgCid] = struct{}{} + + ready, ok := mapi.waitingFor[msgCid] + + mapi.lk.Unlock() + + if ok { + close(ready) + } +} + +func (mapi *mockFundManagerAPI) StateMarketBalance(ctx context.Context, address address.Address, tsk types.TipSetKey) (types.MarketBalance, error) { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + return types.MarketBalance{ + Locked: abi.NewTokenAmount(0), + Escrow: mapi.getEscrow(address), + }, nil +} + +func (mapi *mockFundManagerAPI) getEscrow(a address.Address) abi.TokenAmount { + escrow := mapi.escrow[a] + if escrow.Nil() { + return abi.NewTokenAmount(0) + } + return escrow +} + 
+func (mapi *mockFundManagerAPI) publish(addr address.Address, amt abi.TokenAmount) { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + escrow := mapi.escrow[addr] + if escrow.Nil() { + return + } + escrow = big.Sub(escrow, amt) + if escrow.LessThan(abi.NewTokenAmount(0)) { + escrow = abi.NewTokenAmount(0) + } + mapi.escrow[addr] = escrow +} + +func (mapi *mockFundManagerAPI) StateWaitMsg(ctx context.Context, c cid.Cid, confidence uint64, limit abi.ChainEpoch, allwoReplaced bool) (*types.MsgLookup, error) { + res := &types.MsgLookup{ + Message: c, + Receipt: types.MessageReceipt{ + ExitCode: 0, + Return: nil, + }, + } + ready := make(chan struct{}) + + mapi.lk.Lock() + _, ok := mapi.completedMsgs[c] + if !ok { + mapi.waitingFor[c] = ready + } + mapi.lk.Unlock() + + if !ok { + select { + case <-ctx.Done(): + case <-ready: + } + } + return res, nil +} diff --git a/pkg/market/store.go b/pkg/market/store.go new file mode 100644 index 0000000000..16a37c60e0 --- /dev/null +++ b/pkg/market/store.go @@ -0,0 +1,93 @@ +package market + +import ( + "bytes" + "context" + + "github.com/filecoin-project/venus/pkg/repo" + + cborrpc "github.com/filecoin-project/go-cbor-util" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + dsq "github.com/ipfs/go-datastore/query" + + "github.com/filecoin-project/go-address" +) + +const dsKeyAddr = "Addr" + +type Store struct { + ds datastore.Batching +} + +// for test +func newStore(ds repo.Datastore) *Store { + ds = namespace.Wrap(ds, datastore.NewKey("/fundmgr/")) + return &Store{ + ds: ds, + } +} + +// save the state to the datastore +func (ps *Store) save(ctx context.Context, state *FundedAddressState) error { + k := dskeyForAddr(state.Addr) + + b, err := cborrpc.Dump(state) + if err != nil { + return err + } + + return ps.ds.Put(ctx, k, b) +} + +// get the state for the given address +// nolint +func (ps *Store) get(ctx context.Context, addr address.Address) (*FundedAddressState, error) { + k := dskeyForAddr(addr) + + 
data, err := ps.ds.Get(ctx, k) + if err != nil { + return nil, err + } + + var state FundedAddressState + err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state) + if err != nil { + return nil, err + } + return &state, nil +} + +// forEach calls iter with each address in the datastore +func (ps *Store) forEach(ctx context.Context, iter func(*FundedAddressState)) error { + res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyAddr}) + if err != nil { + return err + } + defer res.Close() //nolint:errcheck + + for { + res, ok := res.NextSync() + if !ok { + break + } + + if res.Error != nil { + return err + } + + var stored FundedAddressState + if err := stored.UnmarshalCBOR(bytes.NewReader(res.Value)); err != nil { + return err + } + + iter(&stored) + } + + return nil +} + +// The datastore key used to identify the address state +func dskeyForAddr(addr address.Address) datastore.Key { + return datastore.KeyWithNamespaces([]string{dsKeyAddr, addr.String()}) +} diff --git a/pkg/messagepool/block_proba.go b/pkg/messagepool/block_proba.go new file mode 100644 index 0000000000..89d7fb4c4b --- /dev/null +++ b/pkg/messagepool/block_proba.go @@ -0,0 +1,106 @@ +package messagepool + +import ( + "math" + "sync" +) + +var ( + noWinnersProbCache []float64 + noWinnersProbOnce sync.Once +) + +func noWinnersProb() []float64 { + noWinnersProbOnce.Do(func() { + poissPdf := func(x float64) float64 { + const Mu = 5 + lg, _ := math.Lgamma(x + 1) + result := math.Exp((math.Log(Mu) * x) - lg - Mu) + return result + } + + out := make([]float64, 0, MaxBlocks) + for i := 0; i < MaxBlocks; i++ { + out = append(out, poissPdf(float64(i))) + } + noWinnersProbCache = out + }) + return noWinnersProbCache +} + +var ( + noWinnersProbAssumingCache []float64 + noWinnersProbAssumingOnce sync.Once +) + +func noWinnersProbAssumingMoreThanOne() []float64 { + noWinnersProbAssumingOnce.Do(func() { + cond := math.Log(-1 + math.Exp(5)) + poissPdf := func(x float64) float64 { + const Mu = 5 + lg, _ := 
math.Lgamma(x + 1) + result := math.Exp((math.Log(Mu) * x) - lg - cond) + return result + } + + out := make([]float64, 0, MaxBlocks) + for i := 0; i < MaxBlocks; i++ { + out = append(out, poissPdf(float64(i+1))) + } + noWinnersProbAssumingCache = out + }) + return noWinnersProbAssumingCache +} + +func binomialCoefficient(n, k float64) float64 { + if k > n { + return math.NaN() + } + r := 1.0 + for d := 1.0; d <= k; d++ { + r *= n + r /= d + n-- + } + return r +} + +func (mp *MessagePool) blockProbabilities(tq float64) []float64 { + noWinners := noWinnersProbAssumingMoreThanOne() + + p := 1 - tq + binoPdf := func(x, trials float64) float64 { + // based on https://github.com/atgjack/prob + if x > trials { + return 0 + } + if p == 0 { + if x == 0 { + return 1.0 + } + return 0.0 + } + if p == 1 { + if x == trials { + return 1.0 + } + return 0.0 + } + coef := binomialCoefficient(trials, x) + pow := math.Pow(p, x) * math.Pow(1-p, trials-x) + if math.IsInf(coef, 0) { + return 0 + } + return coef * pow + } + + out := make([]float64, 0, MaxBlocks) + for place := 0; place < MaxBlocks; place++ { + var pPlace float64 + for otherWinners, pCase := range noWinners { + pPlace += pCase * binoPdf(float64(place), float64(otherWinners)) + } + out = append(out, pPlace) + } + return out +} diff --git a/pkg/messagepool/block_proba_test.go b/pkg/messagepool/block_proba_test.go new file mode 100644 index 0000000000..1e0bac14b8 --- /dev/null +++ b/pkg/messagepool/block_proba_test.go @@ -0,0 +1,48 @@ +package messagepool + +import ( + "math" + "math/rand" + "testing" + "time" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestBlockProbability(t *testing.T) { + tf.UnitTest(t) + + mp := &MessagePool{} + bp := mp.blockProbabilities(1 - 0.15) + t.Logf("%+v\n", bp) + for i := 0; i < len(bp)-1; i++ { + if bp[i] < bp[i+1] { + t.Fatalf("expected decreasing block probabilities for this quality: %d %f %f", + i, bp[i], bp[i+1]) + } + } +} + +func TestWinnerProba(t 
*testing.T) { + tf.UnitTest(t) + + rand.Seed(time.Now().UnixNano()) + const N = 1000000 + winnerProba := noWinnersProb() + sum := 0 + for i := 0; i < N; i++ { + minersRand := rand.Float64() + j := 0 + for ; j < MaxBlocks; j++ { + minersRand -= winnerProba[j] + if minersRand < 0 { + break + } + } + sum += j + } + + if avg := float64(sum) / N; math.Abs(avg-5) > 0.01 { + t.Fatalf("avg too far off: %f", avg) + } +} diff --git a/pkg/messagepool/check.go b/pkg/messagepool/check.go new file mode 100644 index 0000000000..9ed1c35899 --- /dev/null +++ b/pkg/messagepool/check.go @@ -0,0 +1,429 @@ +package messagepool + +import ( + "context" + "fmt" + stdbig "math/big" + "sort" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var baseFeeUpperBoundFactor = types.NewInt(10) + +// CheckMessages performs a set of logic checks for a list of messages, prior to submitting it to the mpool +func (mp *MessagePool) CheckMessages(ctx context.Context, protos []*types.MessagePrototype) ([][]types.MessageCheckStatus, error) { + flex := make([]bool, len(protos)) + msgs := make([]*types.Message, len(protos)) + for i, p := range protos { + flex[i] = !p.ValidNonce + msgs[i] = &p.Message + } + return mp.checkMessages(ctx, msgs, false, flex) +} + +// CheckPendingMessages performs a set of logical sets for all messages pending from a given actor +func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Address) ([][]types.MessageCheckStatus, error) { + var msgs []*types.Message + mp.lk.Lock() + mset, ok := mp.pending[from] + if ok { + for _, sm := range mset.msgs { + msgs = append(msgs, &sm.Message) + } + } + mp.lk.Unlock() + + if len(msgs) == 0 { + return nil, nil + } + + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Nonce < msgs[j].Nonce + }) + + return mp.checkMessages(ctx, msgs, true, nil) +} + +// 
CheckReplaceMessages performs a set of logical checks for related messages while performing a +// replacement. +func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*types.Message) ([][]types.MessageCheckStatus, error) { + msgMap := make(map[address.Address]map[uint64]*types.Message) + count := 0 + + mp.lk.Lock() + for _, m := range replace { + mmap, ok := msgMap[m.From] + if !ok { + mmap = make(map[uint64]*types.Message) + msgMap[m.From] = mmap + mset, ok := mp.pending[m.From] + if ok { + count += len(mset.msgs) + for _, sm := range mset.msgs { + mmap[sm.Message.Nonce] = &sm.Message + } + } else { + count++ + } + } + mmap[m.Nonce] = m + } + mp.lk.Unlock() + + msgs := make([]*types.Message, 0, count) + start := 0 + for _, mmap := range msgMap { + end := start + len(mmap) + + for _, m := range mmap { + msgs = append(msgs, m) + } + + sort.Slice(msgs[start:end], func(i, j int) bool { + return msgs[start+i].Nonce < msgs[start+j].Nonce + }) + + start = end + } + + return mp.checkMessages(ctx, msgs, true, nil) +} + +// flexibleNonces should be either nil or of len(msgs), it signifies that message at given index +// has non-determied nonce at this point +func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, interned bool, flexibleNonces []bool) (result [][]types.MessageCheckStatus, err error) { + if mp.api.IsLite() { + return nil, nil + } + mp.curTSLk.Lock() + curTS := mp.curTS + mp.curTSLk.Unlock() + + epoch := curTS.Height() + 1 + + var baseFee big.Int + if len(curTS.Blocks()) > 0 { + baseFee = curTS.Blocks()[0].ParentBaseFee + } else { + baseFee, err = mp.api.ChainComputeBaseFee(context.Background(), curTS) + if err != nil { + return nil, fmt.Errorf("error computing basefee: %w", err) + } + } + + baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) + baseFeeUpperBound := types.BigMul(baseFee, baseFeeUpperBoundFactor) + + type actorState struct { + nextNonce uint64 + requiredFunds *stdbig.Int + } + 
+ state := make(map[address.Address]*actorState) + balances := make(map[address.Address]big.Int) + + result = make([][]types.MessageCheckStatus, len(msgs)) + + for i, m := range msgs { + // pre-check: actor nonce + check := types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageGetStateNonce, + }, + } + + st, ok := state[m.From] + if !ok { + mp.lk.Lock() + mset, ok := mp.pending[m.From] + if ok && !interned { + st = &actorState{nextNonce: mset.nextNonce, requiredFunds: mset.requiredFunds} + for _, m := range mset.msgs { + st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.Message.Value.Int) + } + state[m.From] = st + mp.lk.Unlock() + + check.OK = true + check.Hint = map[string]interface{}{ + "nonce": st.nextNonce, + } + } else { + mp.lk.Unlock() + + stateNonce, err := mp.getStateNonce(ctx, m.From, curTS) + if err != nil { + check.OK = false + check.Err = fmt.Sprintf("error retrieving state nonce: %s", err.Error()) + } else { + check.OK = true + check.Hint = map[string]interface{}{ + "nonce": stateNonce, + } + } + + st = &actorState{nextNonce: stateNonce, requiredFunds: new(stdbig.Int)} + state[m.From] = st + } + } else { + check.OK = true + } + + result[i] = append(result[i], check) + if !check.OK { + continue + } + + // pre-check: actor balance + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageGetStateBalance, + }, + } + + balance, ok := balances[m.From] + if !ok { + balance, err = mp.getStateBalance(ctx, m.From, curTS) + if err != nil { + check.OK = false + check.Err = fmt.Sprintf("error retrieving state balance: %s", err) + } else { + check.OK = true + check.Hint = map[string]interface{}{ + "balance": balance, + } + } + + balances[m.From] = balance + } else { + check.OK = true + check.Hint = map[string]interface{}{ + "balance": balance, + } + } + + result[i] = append(result[i], check) + if !check.OK { + continue + } + + // 1. 
Serialization + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageSerialize, + }, + } + + bytes, err := m.Serialize() + if err != nil { + check.OK = false + check.Err = err.Error() + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 2. Message size + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageSize, + }, + } + + if len(bytes) > MaxMessageSize-128 { // 128 bytes to account for signature size + check.OK = false + check.Err = "message too big" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 3. Syntactic validation + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageValidity, + }, + } + + nv := mp.sm.GetNetworkVersion(ctx, epoch) + if err := m.ValidForBlockInclusion(0, nv); err != nil { + check.OK = false + check.Err = fmt.Sprintf("syntactically invalid message: %s", err.Error()) + } else { + check.OK = true + } + + result[i] = append(result[i], check) + if !check.OK { + // skip remaining checks if it is a syntatically invalid message + continue + } + + // gas checks + + // 4. Min Gas + minGas := gas.NewPricesSchedule(mp.forkParams).PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) + + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageMinGas, + Hint: map[string]interface{}{ + "minGas": minGas, + }, + }, + } + + if m.GasLimit < minGas.Total() { + check.OK = false + check.Err = "GasLimit less than epoch minimum gas" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 5. 
Min Base Fee + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageMinBaseFee, + }, + } + + if m.GasFeeCap.LessThan(minimumBaseFee) { + check.OK = false + check.Err = "GasFeeCap less than minimum base fee" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + if !check.OK { + goto checkState + } + + // 6. Base Fee + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageBaseFee, + Hint: map[string]interface{}{ + "baseFee": baseFee, + }, + }, + } + + if m.GasFeeCap.LessThan(baseFee) { + check.OK = false + check.Err = "GasFeeCap less than current base fee" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 7. Base Fee lower bound + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageBaseFeeLowerBound, + Hint: map[string]interface{}{ + "baseFeeLowerBound": baseFeeLowerBound, + "baseFee": baseFee, + }, + }, + } + + if m.GasFeeCap.LessThan(baseFeeLowerBound) { + check.OK = false + check.Err = "GasFeeCap less than base fee lower bound for inclusion in next 20 epochs" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 8. Base Fee upper bound + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageBaseFeeUpperBound, + Hint: map[string]interface{}{ + "baseFeeUpperBound": baseFeeUpperBound, + "baseFee": baseFee, + }, + }, + } + + if m.GasFeeCap.LessThan(baseFeeUpperBound) { + check.OK = true // on purpose, the checks is more of a warning + check.Err = "GasFeeCap less than base fee upper bound for inclusion in next 20 epochs" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // stateful checks + checkState: + // 9. 
Message Nonce + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageNonce, + Hint: map[string]interface{}{ + "nextNonce": st.nextNonce, + }, + }, + } + + if (flexibleNonces == nil || !flexibleNonces[i]) && st.nextNonce != m.Nonce { + check.OK = false + check.Err = fmt.Sprintf("message nonce doesn't match next nonce (%d)", st.nextNonce) + } else { + check.OK = true + st.nextNonce++ + } + + result[i] = append(result[i], check) + + // check required funds -vs- balance + st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.RequiredFunds().Int) + st.requiredFunds.Add(st.requiredFunds, m.Value.Int) + + // 10. Balance + check = types.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: types.CheckStatus{ + Code: types.CheckStatusMessageBalance, + Hint: map[string]interface{}{ + "requiredFunds": big.Int{Int: stdbig.NewInt(0).Set(st.requiredFunds)}, + }, + }, + } + + if balance.Int.Cmp(st.requiredFunds) < 0 { + check.OK = false + check.Err = "insufficient balance" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + } + + return result, nil +} diff --git a/pkg/messagepool/config.go b/pkg/messagepool/config.go new file mode 100644 index 0000000000..a54e313404 --- /dev/null +++ b/pkg/messagepool/config.go @@ -0,0 +1,109 @@ +package messagepool + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/ipfs/go-datastore" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/pkg/repo" +) + +var ( + ReplaceByFeeRatioDefault = 1.25 + MemPoolSizeLimitHiDefault = 30000 + MemPoolSizeLimitLoDefault = 20000 + PruneCooldownDefault = time.Minute + GasLimitOverestimation = 1.25 + + ConfigKey = datastore.NewKey("/mpool/config") +) + +type MpoolConfig struct { + PriorityAddrs []address.Address + SizeLimitHigh int + SizeLimitLow int + ReplaceByFeeRatio float64 + PruneCooldown time.Duration + GasLimitOverestimation float64 +} + +func (mc *MpoolConfig) Clone() 
*MpoolConfig { + r := new(MpoolConfig) + *r = *mc + return r +} + +func loadConfig(ctx context.Context, ds repo.Datastore) (*MpoolConfig, error) { + haveCfg, err := ds.Has(ctx, ConfigKey) + if err != nil { + return nil, err + } + + if !haveCfg { + return DefaultConfig(), nil + } + + cfgBytes, err := ds.Get(ctx, ConfigKey) + if err != nil { + return nil, err + } + cfg := new(MpoolConfig) + err = json.Unmarshal(cfgBytes, cfg) + return cfg, err +} + +func saveConfig(ctx context.Context, cfg *MpoolConfig, ds repo.Datastore) error { + cfgBytes, err := json.Marshal(cfg) + if err != nil { + return err + } + return ds.Put(ctx, ConfigKey, cfgBytes) +} + +func (mp *MessagePool) GetConfig() *MpoolConfig { + mp.cfgLk.Lock() + defer mp.cfgLk.Unlock() + return mp.cfg.Clone() +} + +func validateConfg(cfg *MpoolConfig) error { + if cfg.ReplaceByFeeRatio < ReplaceByFeeRatioDefault { + return fmt.Errorf("'ReplaceByFeeRatio' is less than required %f < %f", + cfg.ReplaceByFeeRatio, ReplaceByFeeRatioDefault) + } + if cfg.GasLimitOverestimation < 1 { + return fmt.Errorf("'GasLimitOverestimation' cannot be less than 1") + } + return nil +} + +func (mp *MessagePool) SetConfig(ctx context.Context, cfg *MpoolConfig) error { + if err := validateConfg(cfg); err != nil { + return err + } + cfg = cfg.Clone() + + mp.cfgLk.Lock() + mp.cfg = cfg + err := saveConfig(ctx, cfg, mp.ds) + if err != nil { + log.Warnf("error persisting mpool config: %s", err) + } + mp.cfgLk.Unlock() + + return nil +} + +func DefaultConfig() *MpoolConfig { + return &MpoolConfig{ + SizeLimitHigh: MemPoolSizeLimitHiDefault, + SizeLimitLow: MemPoolSizeLimitLoDefault, + ReplaceByFeeRatio: ReplaceByFeeRatioDefault, + PruneCooldown: PruneCooldownDefault, + GasLimitOverestimation: GasLimitOverestimation, + } +} diff --git a/pkg/messagepool/gas.go b/pkg/messagepool/gas.go new file mode 100644 index 0000000000..68f2f3641a --- /dev/null +++ b/pkg/messagepool/gas.go @@ -0,0 +1,439 @@ +package messagepool + +import ( + "context" + 
"errors" + "fmt" + "math" + stdbig "math/big" + "math/rand" + "sort" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + lru "github.com/hashicorp/golang-lru" + + builtin2 "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" +) + +const MinGasPremium = 100e3 + +// const MaxSpendOnFeeDenom = 100 + +type GasPriceCache struct { + c *lru.TwoQueueCache +} + +type GasMeta struct { + Price big.Int + Limit int64 +} + +func NewGasPriceCache() *GasPriceCache { + // 50 because we usually won't access more than 40 + c, err := lru.New2Q(50) + if err != nil { + // err only if parameter is bad + panic(err) + } + + return &GasPriceCache{ + c: c, + } +} + +func (g *GasPriceCache) GetTSGasStats(ctx context.Context, provider Provider, ts *types.TipSet) ([]GasMeta, error) { + i, has := g.c.Get(ts.Key()) + if has { + return i.([]GasMeta), nil + } + + var prices []GasMeta + msgs, err := provider.MessagesForTipset(ctx, ts) + if err != nil { + return nil, fmt.Errorf("loading messages: %w", err) + } + for _, msg := range msgs { + prices = append(prices, GasMeta{ + Price: msg.VMMessage().GasPremium, + Limit: msg.VMMessage().GasLimit, + }) + } + + g.c.Add(ts.Key(), prices) + + return prices, nil +} + +func (mp *MessagePool) GasEstimateFeeCap( + ctx context.Context, + msg *types.Message, + maxqueueblks int64, + tsk types.TipSetKey, +) (big.Int, error) { + ts, err := mp.api.ChainHead(ctx) + if err != nil { + return types.NewGasFeeCap(0), err + } + + parentBaseFee := ts.Blocks()[0].ParentBaseFee + increaseFactor := math.Pow(1.+1./float64(constants.BaseFeeMaxChangeDenom), 
float64(maxqueueblks)) + + feeInFuture := types.BigMul(parentBaseFee, types.NewInt(uint64(increaseFactor*(1<<8)))) + out := types.BigDiv(feeInFuture, types.NewInt(1<<8)) + + if msg.GasPremium != types.EmptyInt { + out = types.BigAdd(out, msg.GasPremium) + } + + return out, nil +} + +// finds 55th percntile instead of median to put negative pressure on gas price +func medianGasPremium(prices []GasMeta, blocks int) abi.TokenAmount { + sort.Slice(prices, func(i, j int) bool { + // sort desc by price + return prices[i].Price.GreaterThan(prices[j].Price) + }) + + at := constants.BlockGasTarget * int64(blocks) / 2 + at += constants.BlockGasTarget * int64(blocks) / (2 * 20) // move 5% further + prev1, prev2 := big.Zero(), big.Zero() + for _, price := range prices { + prev1, prev2 = price.Price, prev1 + at -= price.Limit + if at < 0 { + break + } + } + + premium := prev1 + if prev2.Sign() != 0 { + premium = big.Div(big.Add(prev1, prev2), big.NewInt(2)) + } + + return premium +} + +func (mp *MessagePool) GasEstimateGasPremium( + ctx context.Context, + nblocksincl uint64, + sender address.Address, + gaslimit int64, + _ types.TipSetKey, + cache *GasPriceCache, +) (big.Int, error) { + if nblocksincl == 0 { + nblocksincl = 1 + } + + var prices []GasMeta + var blocks int + + ts, err := mp.api.ChainHead(ctx) + if err != nil { + return big.Int{}, err + } + + for i := uint64(0); i < nblocksincl*2; i++ { + if ts.Height() == 0 { + break // genesis + } + + pts, err := mp.api.LoadTipSet(ctx, ts.Parents()) + if err != nil { + return types.BigInt{}, err + } + + blocks += len(pts.Blocks()) + meta, err := cache.GetTSGasStats(ctx, mp.api, pts) + if err != nil { + return types.BigInt{}, err + } + prices = append(prices, meta...) 
+ + ts = pts + } + + premium := medianGasPremium(prices, blocks) + + if big.Cmp(premium, big.NewInt(MinGasPremium)) < 0 { + switch nblocksincl { + case 1: + premium = big.NewInt(2 * MinGasPremium) + case 2: + premium = big.NewInt(1.5 * MinGasPremium) + default: + premium = big.NewInt(MinGasPremium) + } + } + + // add some noise to normalize behaviour of message selection + const precision = 32 + // mean 1, stddev 0.005 => 95% within +-1% + noise := 1 + rand.NormFloat64()*0.005 + premium = types.BigMul(premium, types.NewInt(uint64(noise*(1<> 10 + + // Special case for PaymentChannel collect, which is deleting actor + // We ignore errors in this special case since they CAN occur, + // and we just want to detect existing payment channel actors + _, st, err := mp.sm.ParentState(ctx, ts) + if err == nil { + act, found, err := st.GetActor(ctx, msg.To) + if err == nil && found && builtin.IsPaymentChannelActor(act.Code) && msgIn.Method == builtin2.MethodsPaych.Collect { + // add the refunded gas for DestroyActor back into the gas used + ret += 76e3 + } + } + + return ret, nil +} + +func (mp *MessagePool) GasEstimateMessageGas(ctx context.Context, estimateMessage *types.EstimateMessage, _ types.TipSetKey) (*types.Message, error) { + if estimateMessage == nil || estimateMessage.Msg == nil { + return nil, fmt.Errorf("estimate message is nil") + } + log.Debugf("call GasEstimateMessageGas %v, send spec: %v", estimateMessage.Msg, estimateMessage.Spec) + if estimateMessage.Msg.GasLimit == 0 { + gasLimit, err := mp.GasEstimateGasLimit(ctx, estimateMessage.Msg, types.TipSetKey{}) + if err != nil { + return nil, fmt.Errorf("estimating gas used: %w", err) + } + gasLimitOverestimation := mp.GetConfig().GasLimitOverestimation + if estimateMessage.Spec != nil && estimateMessage.Spec.GasOverEstimation > 0 { + gasLimitOverestimation = estimateMessage.Spec.GasOverEstimation + } + estimateMessage.Msg.GasLimit = int64(float64(gasLimit) * gasLimitOverestimation) + } + + if 
estimateMessage.Msg.GasPremium == types.EmptyInt || types.BigCmp(estimateMessage.Msg.GasPremium, types.NewInt(0)) == 0 { + gasPremium, err := mp.GasEstimateGasPremium(ctx, 10, estimateMessage.Msg.From, estimateMessage.Msg.GasLimit, types.TipSetKey{}, mp.PriceCache) + if err != nil { + return nil, fmt.Errorf("estimating gas price: %w", err) + } + if estimateMessage.Spec != nil && estimateMessage.Spec.GasOverPremium > 0 { + olgGasPremium := gasPremium + newGasPremium, _ := new(stdbig.Float).Mul(new(stdbig.Float).SetInt(stdbig.NewInt(gasPremium.Int64())), stdbig.NewFloat(estimateMessage.Spec.GasOverPremium)).Int(nil) + gasPremium = big.NewFromGo(newGasPremium) + log.Debugf("call GasEstimateMessageGas old premium %v, new premium %v, premium ration %f", olgGasPremium, newGasPremium, estimateMessage.Spec.GasOverPremium) + } + estimateMessage.Msg.GasPremium = gasPremium + } + + if estimateMessage.Msg.GasFeeCap == types.EmptyInt || types.BigCmp(estimateMessage.Msg.GasFeeCap, types.NewInt(0)) == 0 { + feeCap, err := mp.GasEstimateFeeCap(ctx, estimateMessage.Msg, 20, types.EmptyTSK) + if err != nil { + return nil, fmt.Errorf("estimating fee cap: %w", err) + } + estimateMessage.Msg.GasFeeCap = feeCap + } + + CapGasFee(mp.GetMaxFee, estimateMessage.Msg, estimateMessage.Spec) + + return estimateMessage.Msg, nil +} + +func (mp *MessagePool) GasBatchEstimateMessageGas(ctx context.Context, estimateMessages []*types.EstimateMessage, fromNonce uint64, tsk types.TipSetKey) ([]*types.EstimateResult, error) { + if len(estimateMessages) == 0 { + return nil, errors.New("estimate messages are empty") + } + + // ChainTipSet will determine if tsk is empty + currTS, err := mp.api.ChainTipSet(ctx, tsk) + if err != nil { + return nil, fmt.Errorf("getting tipset: %w", err) + } + + fromA, err := mp.sm.ResolveToKeyAddress(ctx, estimateMessages[0].Msg.From, currTS) + if err != nil { + return nil, fmt.Errorf("getting key address: %w", err) + } + + pending, ts := mp.PendingFor(ctx, fromA) + 
priorMsgs := make([]types.ChainMsg, 0, len(pending)) + for _, m := range pending { + priorMsgs = append(priorMsgs, m) + } + + var estimateResults []*types.EstimateResult + for _, estimateMessage := range estimateMessages { + estimateMsg := estimateMessage.Msg + estimateMsg.Nonce = fromNonce + + log.Debugf("call GasBatchEstimateMessageGas msg %v, spec %v", estimateMsg, estimateMessage.Spec) + + if estimateMsg.GasLimit == 0 { + gasUsed, err := mp.evalMessageGasLimit(ctx, estimateMsg, priorMsgs, ts) + if err != nil { + estimateMsg.Nonce = 0 + estimateResults = append(estimateResults, &types.EstimateResult{ + Msg: estimateMsg, + Err: fmt.Sprintf("estimating gas limit: %v", err), + }) + continue + } + estimateMsg.GasLimit = int64(float64(gasUsed) * estimateMessage.Spec.GasOverEstimation) + } + + if estimateMsg.GasPremium == types.EmptyInt || types.BigCmp(estimateMsg.GasPremium, types.NewInt(0)) == 0 { + gasPremium, err := mp.GasEstimateGasPremium(ctx, 10, estimateMsg.From, estimateMsg.GasLimit, types.TipSetKey{}, mp.PriceCache) + if err != nil { + estimateMsg.Nonce = 0 + estimateResults = append(estimateResults, &types.EstimateResult{ + Msg: estimateMsg, + Err: fmt.Sprintf("estimating gas premium: %v", err), + }) + continue + } + if estimateMessage.Spec != nil && estimateMessage.Spec.GasOverPremium > 0 { + olgGasPremium := gasPremium + newGasPremium, _ := new(stdbig.Float).Mul(new(stdbig.Float).SetInt(stdbig.NewInt(gasPremium.Int64())), stdbig.NewFloat(estimateMessage.Spec.GasOverPremium)).Int(nil) + gasPremium = big.NewFromGo(newGasPremium) + log.Debugf("call GasBatchEstimateMessageGas old premium %v, new premium %v, premium ration %f", olgGasPremium, newGasPremium, estimateMessage.Spec.GasOverPremium) + } + estimateMsg.GasPremium = gasPremium + } + + if estimateMsg.GasFeeCap == types.EmptyInt || types.BigCmp(estimateMsg.GasFeeCap, types.NewInt(0)) == 0 { + feeCap, err := mp.GasEstimateFeeCap(ctx, estimateMsg, 20, types.EmptyTSK) + if err != nil { + estimateMsg.Nonce = 
0 + estimateResults = append(estimateResults, &types.EstimateResult{ + Msg: estimateMsg, + Err: fmt.Sprintf("estimating fee cap: %v", err), + }) + continue + } + estimateMsg.GasFeeCap = feeCap + } + + CapGasFee(mp.GetMaxFee, estimateMsg, estimateMessage.Spec) + + estimateResults = append(estimateResults, &types.EstimateResult{ + Msg: estimateMsg, + }) + priorMsgs = append(priorMsgs, estimateMsg) + fromNonce++ + } + return estimateResults, nil +} diff --git a/pkg/messagepool/gasguess/guessgas.go b/pkg/messagepool/gasguess/guessgas.go new file mode 100644 index 0000000000..338dd35003 --- /dev/null +++ b/pkg/messagepool/gasguess/guessgas.go @@ -0,0 +1,101 @@ +package gasguess + +import ( + "context" + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" +) + +type ActorLookup func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) + +const ( + failedGasGuessRatio = 0.5 + failedGasGuessMax = 25_000_000 +) + +const ( + MinGas = 1298450 + MaxGas = 1600271356 +) + +type CostKey struct { + Code cid.Cid + M abi.MethodNum +} + +var Costs = map[CostKey]int64{ + {builtin0.InitActorCodeID, 2}: 8916753, + {builtin0.StorageMarketActorCodeID, 2}: 6955002, + {builtin0.StorageMarketActorCodeID, 4}: 245436108, + {builtin0.StorageMinerActorCodeID, 4}: 2315133, + {builtin0.StorageMinerActorCodeID, 5}: 1600271356, + {builtin0.StorageMinerActorCodeID, 6}: 22864493, + {builtin0.StorageMinerActorCodeID, 7}: 142002419, + {builtin0.StorageMinerActorCodeID, 10}: 23008274, + {builtin0.StorageMinerActorCodeID, 11}: 19303178, + {builtin0.StorageMinerActorCodeID, 14}: 566356835, + {builtin0.StorageMinerActorCodeID, 16}: 5325185, + 
{builtin0.StorageMinerActorCodeID, 18}: 2328637, + {builtin0.StoragePowerActorCodeID, 2}: 23600956, + // TODO: Just reuse v0 values for now, this isn't actually used + {builtin2.InitActorCodeID, 2}: 8916753, + {builtin2.StorageMarketActorCodeID, 2}: 6955002, + {builtin2.StorageMarketActorCodeID, 4}: 245436108, + {builtin2.StorageMinerActorCodeID, 4}: 2315133, + {builtin2.StorageMinerActorCodeID, 5}: 1600271356, + {builtin2.StorageMinerActorCodeID, 6}: 22864493, + {builtin2.StorageMinerActorCodeID, 7}: 142002419, + {builtin2.StorageMinerActorCodeID, 10}: 23008274, + {builtin2.StorageMinerActorCodeID, 11}: 19303178, + {builtin2.StorageMinerActorCodeID, 14}: 566356835, + {builtin2.StorageMinerActorCodeID, 16}: 5325185, + {builtin2.StorageMinerActorCodeID, 18}: 2328637, + {builtin2.StoragePowerActorCodeID, 2}: 23600956, +} + +func failedGuess(msg *types.SignedMessage) int64 { + guess := int64(float64(msg.Message.GasLimit) * failedGasGuessRatio) + if guess > failedGasGuessMax { + guess = failedGasGuessMax + } + return guess +} + +func GuessGasUsed(ctx context.Context, tsk types.TipSetKey, msg *types.SignedMessage, al ActorLookup) (int64, error) { + // MethodSend is the same in all versions. + if msg.Message.Method == builtin.MethodSend { + switch msg.Message.From.Protocol() { + case address.BLS: + return 1298450, nil + case address.SECP256K1: + return 1385999, nil + default: + // who knows? 
+ return 1298450, nil + } + } + + to, err := al(ctx, msg.Message.To, tsk) + if err != nil { + return failedGuess(msg), fmt.Errorf("could not lookup actor: %v", err) + } + + guess, ok := Costs[CostKey{to.Code, msg.Message.Method}] + if !ok { + return failedGuess(msg), fmt.Errorf("unknown code-method combo") + } + if guess > msg.Message.GasLimit { + guess = msg.Message.GasLimit + } + return guess, nil +} diff --git a/pkg/messagepool/journal/env.go b/pkg/messagepool/journal/env.go new file mode 100644 index 0000000000..af9e4bb297 --- /dev/null +++ b/pkg/messagepool/journal/env.go @@ -0,0 +1,19 @@ +package journal + +import ( + "os" +) + +// envJournalDisabledEvents is the environment variable through which disabled +// journal events can be customized. +const envDisabledEvents = "VENUS_JOURNAL_DISABLED_EVENTS" + +func EnvDisabledEvents() DisabledEvents { + if env, ok := os.LookupEnv(envDisabledEvents); ok { + if ret, err := ParseDisabledEvents(env); err == nil { + return ret + } + } + // fallback if env variable is not set, or if it failed to parse. + return DefaultDisabledEvents +} diff --git a/pkg/messagepool/journal/fs.go b/pkg/messagepool/journal/fs.go new file mode 100644 index 0000000000..da1dc84c96 --- /dev/null +++ b/pkg/messagepool/journal/fs.go @@ -0,0 +1,138 @@ +package journal + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/repo" +) + +const RFC3339nocolon = "2006-01-02T150405Z0700" + +// fsJournal is a basic journal backed by files on a filesystem. +type fsJournal struct { + EventTypeRegistry + + dir string + sizeLimit int64 + + fi *os.File + fSize int64 + + incoming chan *Event + + closing chan struct{} + closed chan struct{} +} + +// OpenFSJournal constructs a rolling filesystem journal, with a default +// per-file size limit of 1GiB. 
+func OpenFSJournal(lr repo.Repo, disabled DisabledEvents) (Journal, error) { + path, err := lr.Path() + if err != nil { + return nil, err + } + dir := filepath.Join(path, "journal") + if err := os.MkdirAll(dir, 0o755); err != nil { + return nil, fmt.Errorf("failed to mk directory %s for file journal: %w", dir, err) + } + + f := &fsJournal{ + EventTypeRegistry: NewEventTypeRegistry(disabled), + dir: dir, + sizeLimit: 1 << 30, + incoming: make(chan *Event, 32), + closing: make(chan struct{}), + closed: make(chan struct{}), + } + + if err := f.rollJournalFile(); err != nil { + return nil, err + } + + go f.runLoop() + + return f, nil +} + +func (f *fsJournal) RecordEvent(evtType EventType, supplier func() interface{}) { + defer func() { + if r := recover(); r != nil { + log.Warnf("recovered from panic while recording journal event; type=%s, err=%v", evtType, r) + } + }() + + if !evtType.Enabled() { + return + } + + je := &Event{ + EventType: evtType, + Timestamp: constants.Clock.Now(), + Data: supplier(), + } + select { + case f.incoming <- je: + case <-f.closing: + log.Warnw("journal closed but tried to log event", "event", je) + } +} + +func (f *fsJournal) Close() error { + close(f.closing) + <-f.closed + return nil +} + +func (f *fsJournal) putEvent(evt *Event) error { + b, err := json.Marshal(evt) + if err != nil { + return err + } + n, err := f.fi.Write(append(b, '\n')) + if err != nil { + return err + } + + f.fSize += int64(n) + + if f.fSize >= f.sizeLimit { + _ = f.rollJournalFile() + } + + return nil +} + +func (f *fsJournal) rollJournalFile() error { + if f.fi != nil { + _ = f.fi.Close() + } + + nfi, err := os.Create(filepath.Join(f.dir, fmt.Sprintf("lotus-journal-%s.ndjson", constants.Clock.Now().Format(RFC3339nocolon)))) + if err != nil { + return fmt.Errorf("failed to open journal file: %w", err) + } + + f.fi = nfi + f.fSize = 0 + return nil +} + +func (f *fsJournal) runLoop() { + defer close(f.closed) + + for { + select { + case je := <-f.incoming: + if 
err := f.putEvent(je); err != nil { + log.Errorw("failed to write out journal event", "event", je, "err", err) + } + case <-f.closing: + _ = f.fi.Close() + return + } + } +} diff --git a/pkg/messagepool/journal/nil.go b/pkg/messagepool/journal/nil.go new file mode 100644 index 0000000000..fa72fa373a --- /dev/null +++ b/pkg/messagepool/journal/nil.go @@ -0,0 +1,16 @@ +package journal + +type nilJournal struct{} + +// nilj is a singleton nil journal. +var nilj Journal = &nilJournal{} + +func NilJournal() Journal { + return nilj +} + +func (n *nilJournal) RegisterEventType(_, _ string) EventType { return EventType{} } + +func (n *nilJournal) RecordEvent(_ EventType, _ func() interface{}) {} + +func (n *nilJournal) Close() error { return nil } diff --git a/pkg/messagepool/journal/registry.go b/pkg/messagepool/journal/registry.go new file mode 100644 index 0000000000..e72b53a2e7 --- /dev/null +++ b/pkg/messagepool/journal/registry.go @@ -0,0 +1,56 @@ +package journal + +import "sync" + +// EventTypeRegistry is a component that constructs tracked EventType tokens, +// for usage with a Journal. +type EventTypeRegistry interface { + // RegisterEventType introduces a new event type to a journal, and + // returns an EventType token that components can later use to check whether + // journalling for that type is enabled/suppressed, and to tag journal + // entries appropriately. + RegisterEventType(system, event string) EventType +} + +// eventTypeRegistry is an embeddable mixin that takes care of tracking disabled +// event types, and returning initialized/safe EventTypes when requested. +type eventTypeRegistry struct { + sync.Mutex + + m map[string]EventType +} + +var _ EventTypeRegistry = (*eventTypeRegistry)(nil) + +func NewEventTypeRegistry(disabled DisabledEvents) EventTypeRegistry { + ret := &eventTypeRegistry{ + m: make(map[string]EventType, len(disabled)+32), // + extra capacity. 
+ } + + for _, et := range disabled { + et.enabled, et.safe = false, true + ret.m[et.System+":"+et.Event] = et + } + + return ret +} + +func (d *eventTypeRegistry) RegisterEventType(system, event string) EventType { + d.Lock() + defer d.Unlock() + + key := system + ":" + event + if et, ok := d.m[key]; ok { + return et + } + + et := EventType{ + System: system, + Event: event, + enabled: true, + safe: true, + } + + d.m[key] = et + return et +} diff --git a/pkg/messagepool/journal/registry_test.go b/pkg/messagepool/journal/registry_test.go new file mode 100644 index 0000000000..27eca14f3b --- /dev/null +++ b/pkg/messagepool/journal/registry_test.go @@ -0,0 +1,55 @@ +package journal + +import ( + "testing" + + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestDisabledEvents(t *testing.T) { + tf.UnitTest(t) + + req := require.New(t) + + test := func(dis DisabledEvents) func(*testing.T) { + return func(t *testing.T) { + registry := NewEventTypeRegistry(dis) + + reg1 := registry.RegisterEventType("system1", "disabled1") + reg2 := registry.RegisterEventType("system1", "disabled2") + + req.False(reg1.Enabled()) + req.False(reg2.Enabled()) + req.True(reg1.safe) + req.True(reg2.safe) + + reg3 := registry.RegisterEventType("system3", "enabled3") + req.True(reg3.Enabled()) + req.True(reg3.safe) + } + } + + t.Run("direct", test(DisabledEvents{ + EventType{System: "system1", Event: "disabled1"}, + EventType{System: "system1", Event: "disabled2"}, + })) + + dis, err := ParseDisabledEvents("system1:disabled1,system1:disabled2") + req.NoError(err) + + t.Run("parsed", test(dis)) + + dis, err = ParseDisabledEvents(" system1:disabled1 , system1:disabled2 ") + req.NoError(err) + + t.Run("parsed_spaces", test(dis)) +} + +func TestParseDisableEvents(t *testing.T) { + tf.UnitTest(t) + + _, err := ParseDisabledEvents("system1:disabled1:failed,system1:disabled2") + require.Error(t, err) +} diff --git 
a/pkg/messagepool/journal/types.go b/pkg/messagepool/journal/types.go new file mode 100644 index 0000000000..88122aaeef --- /dev/null +++ b/pkg/messagepool/journal/types.go @@ -0,0 +1,100 @@ +package journal + +import ( + "fmt" + "strings" + "time" + + logging "github.com/ipfs/go-log" +) + +var log = logging.Logger("journal") + +// DefaultDisabledEvents lists the journal events disabled by +// default, usually because they are considered noisy. +var DefaultDisabledEvents = DisabledEvents{ + EventType{System: "mpool", Event: "add"}, + EventType{System: "mpool", Event: "remove"}, +} + +// DisabledEvents is the set of event types whose journaling is suppressed. +type DisabledEvents []EventType + +// ParseDisabledEvents parses a string of the form: "system1:event1,system1:event2[,...]" +// into a DisabledEvents object, returning an error if the string failed to parse. +// +// It sanitizes strings via strings.TrimSpace. +func ParseDisabledEvents(s string) (DisabledEvents, error) { + s = strings.TrimSpace(s) // sanitize + evts := strings.Split(s, ",") + ret := make(DisabledEvents, 0, len(evts)) + for _, evt := range evts { + evt = strings.TrimSpace(evt) // sanitize + s := strings.Split(evt, ":") + if len(s) != 2 { + return nil, fmt.Errorf("invalid event type: %s", s) + } + ret = append(ret, EventType{System: s[0], Event: s[1]}) + } + return ret, nil +} + +// EventType represents the signature of an event. +type EventType struct { + System string + Event string + + // enabled stores whether this event type is enabled. + enabled bool + + // safe is a sentinel marker that's set to true if this EventType was + // constructed correctly (via Journal#RegisterEventType). + safe bool +} + +func (et EventType) String() string { + return et.System + ":" + et.Event +} + +// Enabled returns whether this event type is enabled in the journaling +// subsystem. 
Users are advised to check this before actually attempting to +// add a journal entry, as it helps bypass object construction for events that +// would be discarded anyway. +// +// All event types are enabled by default, and specific event types can only +// be disabled at Journal construction time. +func (et EventType) Enabled() bool { + return et.safe && et.enabled +} + +// Journal represents an audit trail of system actions. +// +// Every entry is tagged with a timestamp, a system name, and an event name. +// The supplied data can be any type, as long as it is JSON serializable, +// including structs, map[string]interface{}, or primitive types. +// +// For cleanliness and type safety, we recommend to use typed events. See the +// *Evt struct types in this package for more info. +type Journal interface { + EventTypeRegistry + + // RecordEvent records this event to the journal, if and only if the + // EventType is enabled. If so, it calls the supplier function to obtain + // the payload to record. + // + // Implementations MUST recover from panics raised by the supplier function. + RecordEvent(evtType EventType, supplier func() interface{}) + + // Close closes this journal for further writing. + Close() error +} + +// Event represents a journal entry. +// +// See godocs on Journal for more information. 
+type Event struct { + EventType + + Timestamp time.Time + Data interface{} +} diff --git a/pkg/messagepool/locker.go b/pkg/messagepool/locker.go new file mode 100644 index 0000000000..b8b49e802b --- /dev/null +++ b/pkg/messagepool/locker.go @@ -0,0 +1,44 @@ +package messagepool + +import ( + "context" + "sync" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" +) + +type MpoolLocker struct { + m map[address.Address]chan struct{} + lk sync.Mutex +} + +func NewMpoolLocker() *MpoolLocker { + return &MpoolLocker{ + lk: sync.Mutex{}, + } +} + +func (ml *MpoolLocker) TakeLock(ctx context.Context, a address.Address) (func(), error) { + ml.lk.Lock() + if ml.m == nil { + ml.m = make(map[address.Address]chan struct{}) + } + lk, ok := ml.m[a] + if !ok { + lk = make(chan struct{}, 1) + ml.m[a] = lk + } + ml.lk.Unlock() + + select { + case lk <- struct{}{}: + case <-ctx.Done(): + return nil, ctx.Err() + } + return func() { + <-lk + }, nil +} + +type DefaultMaxFeeFunc func() (abi.TokenAmount, error) diff --git a/pkg/messagepool/messagepool.go b/pkg/messagepool/messagepool.go new file mode 100644 index 0000000000..ca143643ef --- /dev/null +++ b/pkg/messagepool/messagepool.go @@ -0,0 +1,1640 @@ +package messagepool + +import ( + "bytes" + "context" + "errors" + "fmt" + "math" + stdbig "math/big" + "os" + "sort" + "sync" + "time" + + lps "github.com/filecoin-project/pubsub" + "github.com/hashicorp/go-multierror" + lru "github.com/hashicorp/golang-lru" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/ipfs/go-datastore/query" + logging "github.com/ipfs/go-log/v2" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/minio/blake2b-simd" + "github.com/raulk/clock" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + + 
"github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + crypto2 "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/messagepool/journal" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/statemanger" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var log = logging.Logger("messagepool") + +var futureDebug = false + +var ( + rbfNumBig = big.NewInt(int64((ReplaceByFeeRatioDefault - 1) * RbfDenom)) + rbfDenomBig = big.NewInt(RbfDenom) +) + +const RbfDenom = 256 + +var RepublishInterval time.Duration + +func setRepublishInterval(propagationDelaySecs uint64) { + republishInterval := time.Duration(10*constants.MainNetBlockDelaySecs+propagationDelaySecs) * time.Second + + // if the republish interval is too short compared to the pubsub timecache, adjust it + minInterval := pubsub.TimeCacheDuration + time.Duration(propagationDelaySecs)*time.Second + if republishInterval < minInterval { + republishInterval = minInterval + } + RepublishInterval = republishInterval +} + +var ( + minimumBaseFee = big.NewInt(int64(constants.MinimumBaseFee)) + baseFeeLowerBoundFactor = big.NewInt(10) + baseFeeLowerBoundFactorConservative = big.NewInt(100) +) + +var ( + MaxActorPendingMessages = 1000 + MaxUntrustedActorPendingMessages = 10 +) + +var MaxNonceGap = uint64(4) + +const MaxMessageSize = 64 << 10 // 64KiB + +var ( + ErrMessageTooBig = errors.New("message too big") + + ErrMessageValueTooHigh = errors.New("cannot send more filecoin than will ever exist") + + ErrNonceTooLow = errors.New("message nonce too low") + + ErrGasFeeCapTooLow = errors.New("gas fee cap too low") + + ErrNotEnoughFunds = errors.New("not enough funds to execute transaction") + + ErrInvalidToAddr = errors.New("message had invalid to address") + + ErrSoftValidationFailure = 
errors.New("validation failure") + ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium") + ErrTooManyPendingMessages = errors.New("too many pending messages for actor") + ErrNonceGap = errors.New("unfulfilled nonce gap") +) + +const ( + localMsgsDs = "/mpool/local" + + localUpdates = "update" +) + +// Journal event types. +const ( + evtTypeMpoolAdd = iota + evtTypeMpoolRemove + evtTypeMpoolRepub +) + +// MessagePoolEvt is the journal entry for message pool events. +type MessagePoolEvt struct { // nolint + Action string + Messages []MessagePoolEvtMessage + Error error `json:",omitempty"` +} + +type MessagePoolEvtMessage struct { // nolint + types.Message + + CID cid.Cid +} + +type MessagePool struct { + lk sync.Mutex + + sm *statemanger.Stmgr + ds repo.Datastore + + addSema chan struct{} + + closer chan struct{} + + repubTk *clock.Ticker + repubTrigger chan struct{} + + republished map[cid.Cid]struct{} + + // do NOT access this map directly, use isLocal, setLocal, and forEachLocal respectively + localAddrs map[address.Address]struct{} + + // do NOT access this map directly, use getPendingMset, setPendingMset, deletePendingMset, forEachPending, and clearPending respectively + pending map[address.Address]*msgSet + + keyCache map[address.Address]address.Address + + curTSLk sync.Mutex // DO NOT LOCK INSIDE lk + curTS *types.TipSet + + cfgLk sync.Mutex + cfg *MpoolConfig + + api Provider + + minGasPrice big.Int + + currentSize int + + // pruneTrigger is a channel used to trigger a mempool pruning + pruneTrigger chan struct{} + + // pruneCooldown is a channel used to allow a cooldown time between prunes + pruneCooldown chan struct{} + + blsSigCache *lru.TwoQueueCache + + changes *lps.PubSub + + localMsgs datastore.Datastore + + netName string + + sigValCache *lru.TwoQueueCache + + evtTypes [3]journal.EventType + journal journal.Journal + + forkParams *config.ForkUpgradeConfig + gasPriceSchedule *gas.PricesSchedule + + GetMaxFee DefaultMaxFeeFunc + 
PriceCache *GasPriceCache +} + +func newDefaultMaxFeeFunc(maxFee types.FIL) DefaultMaxFeeFunc { + return func() (out abi.TokenAmount, err error) { + out = abi.TokenAmount{Int: maxFee.Int} + return + } +} + +type msgSet struct { + msgs map[uint64]*types.SignedMessage + nextNonce uint64 + requiredFunds *stdbig.Int +} + +func newMsgSet(nonce uint64) *msgSet { + return &msgSet{ + msgs: make(map[uint64]*types.SignedMessage), + nextNonce: nonce, + requiredFunds: stdbig.NewInt(0), + } +} + +func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount { + minPrice := big.Add(curPrem, big.Div(big.Mul(curPrem, rbfNumBig), rbfDenomBig)) + return big.Add(minPrice, big.NewInt(1)) +} + +func CapGasFee(mff DefaultMaxFeeFunc, msg *types.Message, sendSepc *types.MessageSendSpec) { + var maxFee abi.TokenAmount + if sendSepc != nil { + maxFee = sendSepc.MaxFee + } + if maxFee.Int == nil || maxFee.Equals(big.Zero()) { + mf, err := mff() + if err != nil { + log.Errorf("failed to get default max gas fee: %+v", err) + mf = big.Zero() + } + maxFee = mf + } + + gl := types.NewInt(uint64(msg.GasLimit)) + totalFee := types.BigMul(msg.GasFeeCap, gl) + + if totalFee.LessThanEqual(maxFee) { + msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) + return + } + + msg.GasFeeCap = big.Div(maxFee, gl) + msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap +} + +func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted bool) (bool, error) { + nextNonce := ms.nextNonce + nonceGap := false + + maxNonceGap := MaxNonceGap + maxActorPendingMessages := MaxActorPendingMessages + if untrusted { + maxNonceGap = 0 + maxActorPendingMessages = MaxUntrustedActorPendingMessages + } + + switch { + case m.Message.Nonce == nextNonce: + nextNonce++ + // advance if we are filling a gap + for _, fillGap := ms.msgs[nextNonce]; fillGap; _, fillGap = ms.msgs[nextNonce] { + nextNonce++ + } + + case strict && m.Message.Nonce > nextNonce+maxNonceGap: + return false, 
fmt.Errorf("message nonce has too big a gap from expected nonce (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap) + + case m.Message.Nonce > nextNonce: + nonceGap = true + } + + exms, has := ms.msgs[m.Message.Nonce] + if has { + // refuse RBF if we have a gap + if strict && nonceGap { + return false, fmt.Errorf("rejecting replace by fee because of nonce gap (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap) + } + + mc := m.Cid() + exmsc := exms.Cid() + if mc != exmsc { + // check if RBF passes + minPrice := ComputeMinRBF(exms.Message.GasPremium) + if big.Cmp(m.Message.GasPremium, minPrice) >= 0 { + log.Debugw("add with RBF", "oldpremium", exms.Message.GasPremium, + "newpremium", m.Message.GasPremium, "addr", m.Message.From, "nonce", m.Message.Nonce) + } else { + log.Debugf("add with duplicate nonce. message from %s with nonce %d already in mpool,"+ + " increase GasPremium to %s from %s to trigger replace by fee: %s", + m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium, + ErrRBFTooLowPremium) + return false, fmt.Errorf("message from %s with nonce %d already in mpool,"+ + " increase GasPremium to %s from %s to trigger replace by fee: %w", + m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium, + ErrRBFTooLowPremium) + } + } else { + return false, fmt.Errorf("message from %s with nonce %d already in mpool: %w", + m.Message.From, m.Message.Nonce, ErrSoftValidationFailure) + } + + ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.RequiredFunds().Int) + // ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.Value.Int) + } + + if !has && strict && len(ms.msgs) >= maxActorPendingMessages { + log.Errorf("too many pending messages from actor %s", m.Message.From) + return false, ErrTooManyPendingMessages + } + + if strict && nonceGap { + log.Debugf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)", + m.Message.From, m.Message.Nonce, nextNonce) + } + + ms.nextNonce = nextNonce + 
ms.msgs[m.Message.Nonce] = m + ms.requiredFunds.Add(ms.requiredFunds, m.Message.RequiredFunds().Int) + // ms.requiredFunds.Add(ms.requiredFunds, m.Message.Value.Int) + + return !has, nil +} + +func (ms *msgSet) rm(nonce uint64, applied bool) { + m, has := ms.msgs[nonce] + if !has { + if applied && nonce >= ms.nextNonce { + // we removed a message we did not know about because it was applied + // we need to adjust the nonce and check if we filled a gap + ms.nextNonce = nonce + 1 + for _, fillGap := ms.msgs[ms.nextNonce]; fillGap; _, fillGap = ms.msgs[ms.nextNonce] { + ms.nextNonce++ + } + } + return + } + + ms.requiredFunds.Sub(ms.requiredFunds, m.Message.RequiredFunds().Int) + // ms.requiredFunds.Sub(ms.requiredFunds, m.Message.Value.Int) + delete(ms.msgs, nonce) + + // adjust next nonce + if applied { + // we removed a (known) message because it was applied in a tipset + // we can't possibly have filled a gap in this case + if nonce >= ms.nextNonce { + ms.nextNonce = nonce + 1 + } + return + } + + // we removed a message because it was pruned + // we have to adjust the nonce if it creates a gap or rewinds state + if nonce < ms.nextNonce { + ms.nextNonce = nonce + } +} + +func (ms *msgSet) getRequiredFunds(nonce uint64) big.Int { + requiredFunds := new(stdbig.Int).Set(ms.requiredFunds) + + m, has := ms.msgs[nonce] + if has { + requiredFunds.Sub(requiredFunds, m.Message.RequiredFunds().Int) + // requiredFunds.Sub(requiredFunds, m.Message.Value.Int) + } + + return big.Int{Int: requiredFunds} +} + +func (ms *msgSet) toSlice() []*types.SignedMessage { + set := make([]*types.SignedMessage, 0, len(ms.msgs)) + + for _, m := range ms.msgs { + set = append(set, m) + } + + sort.Slice(set, func(i, j int) bool { + return set[i].Message.Nonce < set[j].Message.Nonce + }) + + return set +} + +func New(ctx context.Context, + api Provider, + sm *statemanger.Stmgr, + ds repo.Datastore, + networkParams *config.NetworkParamsConfig, + mpoolCfg *config.MessagePoolConfig, + netName 
string, + j journal.Journal, +) (*MessagePool, error) { + cache, _ := lru.New2Q(constants.BlsSignatureCacheSize) + verifcache, _ := lru.New2Q(constants.VerifSigCacheSize) + + cfg, err := loadConfig(ctx, ds) + if err != nil { + return nil, fmt.Errorf("error loading mpool config: %v", err) + } + + if j == nil { + j = journal.NilJournal() + } + + setRepublishInterval(networkParams.PropagationDelaySecs) + + mp := &MessagePool{ + ds: ds, + addSema: make(chan struct{}, 1), + closer: make(chan struct{}), + repubTk: constants.Clock.Ticker(RepublishInterval), + repubTrigger: make(chan struct{}, 1), + localAddrs: make(map[address.Address]struct{}), + pending: make(map[address.Address]*msgSet), + keyCache: make(map[address.Address]address.Address), + minGasPrice: big.NewInt(0), + pruneTrigger: make(chan struct{}, 1), + pruneCooldown: make(chan struct{}, 1), + blsSigCache: cache, + sigValCache: verifcache, + changes: lps.New(50), + localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)), + api: api, + sm: sm, + netName: netName, + cfg: cfg, + evtTypes: [...]journal.EventType{ + evtTypeMpoolAdd: j.RegisterEventType("mpool", "add"), + evtTypeMpoolRemove: j.RegisterEventType("mpool", "remove"), + evtTypeMpoolRepub: j.RegisterEventType("mpool", "repub"), + }, + journal: j, + forkParams: networkParams.ForkUpgradeParam, + gasPriceSchedule: gas.NewPricesSchedule(networkParams.ForkUpgradeParam), + GetMaxFee: newDefaultMaxFeeFunc(mpoolCfg.MaxFee), + PriceCache: NewGasPriceCache(), + } + + // enable initial prunes + mp.pruneCooldown <- struct{}{} + + ctx, cancel := context.WithCancel(context.TODO()) + + // load the current tipset and subscribe to head changes _before_ loading local messages + mp.curTS = api.SubscribeHeadChanges(ctx, func(rev, app []*types.TipSet) error { + err := mp.HeadChange(ctx, rev, app) + if err != nil { + log.Errorf("mpool head notif handler error: %+v", err) + } + return err + }) + + mp.curTSLk.Lock() + mp.lk.Lock() + + go func() { + defer cancel() + err := 
mp.loadLocal(ctx) + + mp.lk.Unlock() + mp.curTSLk.Unlock() + + if err != nil { + log.Errorf("loading local messages: %+v", err) + } + + log.Info("mpool ready") + + mp.runLoop(ctx) + }() + + return mp, nil +} + +func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) { + // check the cache + a, f := mp.keyCache[addr] + if f { + return a, nil + } + + // resolve the address + ka, err := mp.api.StateAccountKeyAtFinality(ctx, addr, mp.curTS) + if err != nil { + return address.Undef, err + } + + // place both entries in the cache (may both be key addresses, which is fine) + mp.keyCache[addr] = ka + mp.keyCache[ka] = ka + + return ka, nil +} + +func (mp *MessagePool) getPendingMset(ctx context.Context, addr address.Address) (*msgSet, bool, error) { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return nil, false, err + } + + ms, f := mp.pending[ra] + + return ms, f, nil +} + +func (mp *MessagePool) setPendingMset(ctx context.Context, addr address.Address, ms *msgSet) error { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return err + } + + mp.pending[ra] = ms + + return nil +} + +// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have +func (mp *MessagePool) forEachPending(f func(address.Address, *msgSet)) { + for la, ms := range mp.pending { + f(la, ms) + } +} + +func (mp *MessagePool) deletePendingMset(ctx context.Context, addr address.Address) error { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return err + } + + delete(mp.pending, ra) + + return nil +} + +// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have +func (mp *MessagePool) clearPending() { + mp.pending = make(map[address.Address]*msgSet) +} + +func (mp *MessagePool) isLocal(ctx context.Context, addr address.Address) (bool, error) { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return false, err + } + + _, f := 
mp.localAddrs[ra] + + return f, nil +} + +func (mp *MessagePool) setLocal(ctx context.Context, addr address.Address) error { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return err + } + + mp.localAddrs[ra] = struct{}{} + + return nil +} + +// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have +func (mp *MessagePool) forEachLocal(ctx context.Context, f func(context.Context, address.Address)) { + for la := range mp.localAddrs { + f(ctx, la) + } +} + +func (mp *MessagePool) DeleteByAdress(address address.Address) error { + mp.lk.Lock() + defer mp.lk.Unlock() + + if mp.pending != nil { + delete(mp.pending, address) + } + return nil +} + +func (mp *MessagePool) PublishMsgForWallet(ctx context.Context, addr address.Address) error { + now := time.Now() + defer func() { + diff := time.Since(now).Seconds() + if diff > 1 { + log.Infof("publish msg wallet spent time:%f", diff) + } + }() + mp.curTSLk.Lock() + defer mp.curTSLk.Unlock() + + mp.lk.Lock() + defer mp.lk.Unlock() + + out := make([]*types.SignedMessage, 0) + for a := range mp.pending { + if a.String() == addr.String() { + out = append(out, mp.pendingFor(ctx, a)...) 
+ break + } + } + + log.Infof("mpool has [%v] msg for [%s], will republish ...", len(out), addr.String()) + + // start to broadcast message through libp2p + for _, msg := range out { + msgb, err := msg.Serialize() + if err != nil { + log.Errorf("could not serialize: %s", err) + continue + } + + err = mp.api.PubSubPublish(ctx, types.MessageTopic(mp.netName), msgb) + if err != nil { + log.Errorf("could not publish: %s", err) + continue + } + } + + return nil +} + +func (mp *MessagePool) PublishMsg(ctx context.Context, smsg *types.SignedMessage) error { + msgb, err := smsg.Serialize() + if err != nil { + return fmt.Errorf("could not serialize: %s", err) + } + + err = mp.api.PubSubPublish(ctx, types.MessageTopic(mp.netName), msgb) + if err != nil { + return fmt.Errorf("could not publish: %s", err) + } + return nil +} + +func (mp *MessagePool) Close() error { + close(mp.closer) + return mp.journal.Close() +} + +func (mp *MessagePool) Prune() { + // this magic incantation of triggering prune thrice is here to make the Prune method + // synchronous: + // so, its a single slot buffered channel. 
The first send fills the channel, + // the second send goes through when the pruning starts, + // and the third send goes through (and noops) after the pruning finishes + // and goes through the loop again + mp.pruneTrigger <- struct{}{} + mp.pruneTrigger <- struct{}{} + mp.pruneTrigger <- struct{}{} +} + +func (mp *MessagePool) runLoop(ctx context.Context) { + for { + select { + case <-mp.repubTk.C: + if err := mp.republishPendingMessages(ctx); err != nil { + log.Errorf("error while republishing messages: %s", err) + } + case <-mp.repubTrigger: + if err := mp.republishPendingMessages(ctx); err != nil { + log.Errorf("error while republishing messages: %s", err) + } + + case <-mp.pruneTrigger: + if err := mp.pruneExcessMessages(); err != nil { + log.Errorf("failed to prune excess messages from mempool: %s", err) + } + + case <-mp.closer: + mp.repubTk.Stop() + return + } + } +} + +func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) error { + if err := mp.setLocal(ctx, m.Message.From); err != nil { + return err + } + + buf := new(bytes.Buffer) + err := m.MarshalCBOR(buf) + if err != nil { + return fmt.Errorf("error serializing message: %v", err) + } + msgb := buf.Bytes() + + c := m.Cid() + if err := mp.localMsgs.Put(ctx, datastore.NewKey(string(c.Bytes())), msgb); err != nil { + return fmt.Errorf("persisting local message: %v", err) + } + + return nil +} + +// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusion +// and whether the message has enough funds to be included in the next 20 blocks. +// If the message is not valid for block inclusion, it returns an error. +// For local messages, if the message can be included in the next 20 blocks, it returns true to +// signal that it should be immediately published. 
If the message cannot be included in the next 20 +// blocks, it returns false so that the message doesn't immediately get published (and ignored by our +// peers); instead it will be published through the republish loop, once the base fee has fallen +// sufficiently. +// For non local messages, if the message cannot be included in the next 20 blocks it returns +// a (soft) validation error. +func (mp *MessagePool) verifyMsgBeforeAdd(ctx context.Context, m *types.SignedMessage, curTS *types.TipSet, local bool) (bool, error) { + epoch := curTS.Height() + 1 + minGas := mp.gasPriceSchedule.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) + + if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), mp.api.StateNetworkVersion(ctx, epoch)); err != nil { + return false, fmt.Errorf("message will not be included in a block: %v", err) + } + + // this checks if the GasFeeCap is sufficiently high for inclusion in the next 20 blocks + // if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely + // on republish to push it through later, if the baseFee has fallen. + // this is a defensive check that stops minimum baseFee spam attacks from overloading validation + // queues. + // Note that for local messages, we always add them so that they can be accepted and republished + // automatically. 
+ publish := local + + var baseFee big.Int + if len(curTS.Blocks()) > 0 { + baseFee = curTS.Blocks()[0].ParentBaseFee + } else { + var err error + baseFee, err = mp.api.ChainComputeBaseFee(context.TODO(), curTS) + if err != nil { + return false, fmt.Errorf("computing basefee: %v", err) + } + } + + baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactorConservative) + if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) { + if local { + log.Warnf("local message will not be immediately published because GasFeeCap doesn't meet the lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s)", + m.Message.GasFeeCap, baseFeeLowerBound) + publish = false + } else { + return false, fmt.Errorf("GasFeeCap doesn't meet base fee lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s): %w", + m.Message.GasFeeCap, baseFeeLowerBound, ErrSoftValidationFailure) + } + } + + return publish, nil +} + +func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) { + err := mp.checkMessage(ctx, m) + if err != nil { + return cid.Undef, err + } + + // serialize push access to reduce lock contention + mp.addSema <- struct{}{} + defer func() { + <-mp.addSema + }() + + mp.curTSLk.Lock() + publish, err := mp.addTS(ctx, m, mp.curTS, true, false) + if err != nil { + mp.curTSLk.Unlock() + return cid.Undef, err + } + mp.curTSLk.Unlock() + + if publish { + buf := new(bytes.Buffer) + err := m.MarshalCBOR(buf) + if err != nil { + return cid.Undef, fmt.Errorf("error serializing message: %v", err) + } + + err = mp.api.PubSubPublish(ctx, types.MessageTopic(mp.netName), buf.Bytes()) + if err != nil { + return cid.Undef, fmt.Errorf("error publishing message: %v", err) + } + } + + return m.Cid(), nil +} + +func (mp *MessagePool) checkMessage(ctx context.Context, m *types.SignedMessage) error { + // big messages are bad, anti DOS + if m.ChainLength() > MaxMessageSize { + return fmt.Errorf("mpool message 
too large (%dB): %w", m.ChainLength(), ErrMessageTooBig) + } + + // Perform syntactic validation, minGas=0 as we check the actual mingas before we add it + if err := m.Message.ValidForBlockInclusion(0, mp.api.StateNetworkVersion(ctx, mp.curTS.Height())); err != nil { + return fmt.Errorf("message not valid for block inclusion: %v", err) + } + + if m.Message.To == address.Undef { + return ErrInvalidToAddr + } + + if !m.Message.Value.LessThan(types.TotalFilecoinInt) { + return ErrMessageValueTooHigh + } + + if m.Message.GasFeeCap.LessThan(minimumBaseFee) { + return ErrGasFeeCapTooLow + } + + if err := mp.VerifyMsgSig(m); err != nil { + log.Warnf("signature verification failed: %s", err) + return err + } + + return nil +} + +func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error { + err := mp.checkMessage(ctx, m) + if err != nil { + return err + } + + // serialize push access to reduce lock contention + mp.addSema <- struct{}{} + defer func() { + <-mp.addSema + }() + + mp.curTSLk.Lock() + defer mp.curTSLk.Unlock() + + _, err = mp.addTS(ctx, m, mp.curTS, false, false) + return err +} + +func sigCacheKey(m *types.SignedMessage) (string, error) { + switch m.Signature.Type { + case crypto.SigTypeBLS: + if len(m.Signature.Data) != crypto2.BLSSignatureBytes { + return "", fmt.Errorf("bls signature incorrectly sized") + } + + hashCache := blake2b.Sum256(append(m.Cid().Bytes(), m.Signature.Data...)) + return string(hashCache[:]), nil + case crypto.SigTypeSecp256k1: + return string(m.Cid().Bytes()), nil + default: + return "", fmt.Errorf("unrecognized signature type: %d", m.Signature.Type) + } +} + +func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error { + sck, err := sigCacheKey(m) + if err != nil { + return err + } + + _, ok := mp.sigValCache.Get(sck) + if ok { + // already validated, great + return nil + } + + c := m.Message.Cid() + if err := crypto2.Verify(&m.Signature, m.Message.From, c.Bytes()); err != nil { + return err + } + + 
mp.sigValCache.Add(sck, struct{}{}) + + return nil +} + +func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage, curTS *types.TipSet) error { + balance, err := mp.getStateBalance(ctx, m.Message.From, curTS) + if err != nil { + return fmt.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure) + } + + requiredFunds := m.Message.RequiredFunds() + if big.Cmp(balance, requiredFunds) < 0 { + return fmt.Errorf("not enough funds (required: %s, balance: %s): %v", types.FIL(requiredFunds), types.FIL(balance), ErrNotEnoughFunds) + } + + // add Value for soft failure check + // requiredFunds = types.BigAdd(requiredFunds, m.Message.Value) + + mset, ok, err := mp.getPendingMset(ctx, m.Message.From) + if err != nil { + log.Debugf("mpoolcheckbalance failed to get pending mset: %s", err) + return err + } + + if ok { + requiredFunds = types.BigAdd(requiredFunds, mset.getRequiredFunds(m.Message.Nonce)) + } + + if big.Cmp(balance, requiredFunds) < 0 { + // Note: we fail here for ErrSoftValidationFailure to signal a soft failure because we might + // be out of sync. 
+ return fmt.Errorf("not enough funds including pending messages (required: %s, balance: %s): %w", types.FIL(requiredFunds), types.FIL(balance), ErrSoftValidationFailure) + } + + return nil +} + +func (mp *MessagePool) addTS(ctx context.Context, m *types.SignedMessage, curTS *types.TipSet, local, untrusted bool) (bool, error) { + snonce, err := mp.getStateNonce(ctx, m.Message.From, curTS) + if err != nil { + return false, fmt.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure) + } + + if snonce > m.Message.Nonce { + return false, fmt.Errorf("minimum expected nonce is %d: %v", snonce, ErrNonceTooLow) + } + + mp.lk.Lock() + defer mp.lk.Unlock() + + publish, err := mp.verifyMsgBeforeAdd(ctx, m, curTS, local) + if err != nil { + return false, err + } + + if err := mp.checkBalance(ctx, m, curTS); err != nil { + return false, err + } + + err = mp.addLocked(ctx, m, !local, untrusted) + if err != nil { + return false, err + } + + if local { + err = mp.addLocal(ctx, m) + if err != nil { + return false, fmt.Errorf("error persisting local message: %v", err) + } + } + + return publish, nil +} + +func (mp *MessagePool) addLoaded(ctx context.Context, m *types.SignedMessage) error { + err := mp.checkMessage(ctx, m) + if err != nil { + return err + } + + curTS := mp.curTS + + if curTS == nil { + return fmt.Errorf("current tipset not loaded") + } + + snonce, err := mp.getStateNonce(ctx, m.Message.From, curTS) + if err != nil { + return fmt.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure) + } + + if snonce > m.Message.Nonce { + return fmt.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow) + } + + _, err = mp.verifyMsgBeforeAdd(ctx, m, curTS, true) + if err != nil { + return err + } + + if err := mp.checkBalance(ctx, m, curTS); err != nil { + return err + } + + return mp.addLocked(ctx, m, false, false) +} + +func (mp *MessagePool) addSkipChecks(ctx context.Context, m *types.SignedMessage) error { + 
mp.lk.Lock() + defer mp.lk.Unlock() + + return mp.addLocked(ctx, m, false, false) +} + +func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, strict, untrusted bool) error { + log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce) + if m.Signature.Type == crypto.SigTypeBLS { + mp.blsSigCache.Add(m.Cid(), m.Signature) + } + + if _, err := mp.api.PutMessage(ctx, m); err != nil { + log.Warnf("mpooladd sm.PutMessage failed: %s", err) + return err + } + + if _, err := mp.api.PutMessage(ctx, &m.Message); err != nil { + log.Warnf("mpooladd sm.PutMessage failed: %s", err) + return err + } + + // Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work + mset, ok, err := mp.getPendingMset(ctx, m.Message.From) + if err != nil { + log.Debug(err) + return err + } + + if !ok { + nonce, err := mp.getStateNonce(ctx, m.Message.From, mp.curTS) + if err != nil { + return fmt.Errorf("failed to get initial actor nonce: %w", err) + } + + mset = newMsgSet(nonce) + if err = mp.setPendingMset(ctx, m.Message.From, mset); err != nil { + return fmt.Errorf("failed to set pending mset: %w", err) + } + } + + incr, err := mset.add(m, mp, strict, untrusted) + if err != nil { + log.Debug(err) + return err + } + + if incr { + mp.currentSize++ + if mp.currentSize > mp.cfg.SizeLimitHigh { + // send signal to prune messages if it hasnt already been sent + select { + case mp.pruneTrigger <- struct{}{}: + default: + } + } + } + + mp.changes.Pub(types.MpoolUpdate{ + Type: types.MpoolAdd, + Message: m, + }, localUpdates) + + mp.journal.RecordEvent(mp.evtTypes[evtTypeMpoolAdd], func() interface{} { + mc := m.Cid() + return MessagePoolEvt{ + Action: "add", + Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: mc}}, + } + }) + + return nil +} + +func (mp *MessagePool) GetNonce(ctx context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) { + mp.curTSLk.Lock() + defer mp.curTSLk.Unlock() + + mp.lk.Lock() + defer 
mp.lk.Unlock() + + return mp.getNonceLocked(ctx, addr, mp.curTS) +} + +// GetActor should not be used. It is only here to satisfy interface mess caused by lite node handling +func (mp *MessagePool) GetActor(ctx context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) { + mp.curTSLk.Lock() + defer mp.curTSLk.Unlock() + return mp.api.GetActorAfter(ctx, addr, mp.curTS) +} + +func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTS *types.TipSet) (uint64, error) { + stateNonce, err := mp.getStateNonce(ctx, addr, curTS) // sanity check + if err != nil { + return 0, err + } + + mset, ok, err := mp.getPendingMset(ctx, addr) + if err != nil { + log.Debugf("mpoolgetnonce failed to get mset: %s", err) + return 0, err + } + + if ok { + if stateNonce > mset.nextNonce { + log.Errorf("state nonce was larger than mset.nextNonce (%d > %d)", stateNonce, mset.nextNonce) + + return stateNonce, nil + } + + return mset.nextNonce, nil + } + + return stateNonce, nil +} + +func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, curTS *types.TipSet) (uint64, error) { + act, err := mp.api.GetActorAfter(ctx, addr, curTS) + if err != nil { + return 0, err + } + + return act.Nonce, nil +} + +func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (big.Int, error) { + act, err := mp.api.GetActorAfter(ctx, addr, ts) + if err != nil { + return big.Zero(), err + } + + return act.Balance, nil +} + +// this method is provided for the gateway to push messages. 
+// differences from Push: +// - strict checks are enabled +// - extra strict add checks are used when adding the messages to the msgSet +// that means: no nonce gaps, at most 10 pending messages for the actor +func (mp *MessagePool) PushUntrusted(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) { + err := mp.checkMessage(ctx, m) + if err != nil { + return cid.Undef, err + } + + // serialize push access to reduce lock contention + mp.addSema <- struct{}{} + defer func() { + <-mp.addSema + }() + + mp.curTSLk.Lock() + publish, err := mp.addTS(ctx, m, mp.curTS, false, true) + if err != nil { + mp.curTSLk.Unlock() + return cid.Undef, err + } + mp.curTSLk.Unlock() + + if publish { + buf := new(bytes.Buffer) + err := m.MarshalCBOR(buf) + if err != nil { + return cid.Undef, fmt.Errorf("error serializing message: %v", err) + } + + err = mp.api.PubSubPublish(ctx, types.MessageTopic(mp.netName), buf.Bytes()) + if err != nil { + return cid.Undef, fmt.Errorf("error publishing message: %v", err) + } + } + + return m.Cid(), nil +} + +func (mp *MessagePool) Remove(ctx context.Context, from address.Address, nonce uint64, applied bool) { + mp.lk.Lock() + defer mp.lk.Unlock() + + mp.remove(ctx, from, nonce, applied) +} + +func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce uint64, applied bool) { + mset, ok, err := mp.getPendingMset(ctx, from) + if err != nil { + log.Debugf("mpoolremove failed to get mset: %s", err) + return + } + + if !ok { + return + } + + if m, ok := mset.msgs[nonce]; ok { + mp.changes.Pub(types.MpoolUpdate{ + Type: types.MpoolRemove, + Message: m, + }, localUpdates) + + mp.journal.RecordEvent(mp.evtTypes[evtTypeMpoolRemove], func() interface{} { + return MessagePoolEvt{ + Action: "remove", + Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}}, + } + }) + + mp.currentSize-- + } + + // NB: This deletes any message with the given nonce. 
This makes sense + // as two messages with the same sender cannot have the same nonce + mset.rm(nonce, applied) + + if len(mset.msgs) == 0 { + if err = mp.deletePendingMset(ctx, from); err != nil { + log.Debugf("mpoolremove failed to delete mset: %s", err) + return + } + } +} + +func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) { + mp.curTSLk.Lock() + defer mp.curTSLk.Unlock() + + mp.lk.Lock() + defer mp.lk.Unlock() + + return mp.allPending(ctx) +} + +func (mp *MessagePool) allPending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) { + out := make([]*types.SignedMessage, 0) + mp.forEachPending(func(a address.Address, mset *msgSet) { + out = append(out, mset.toSlice()...) + }) + + return out, mp.curTS +} + +func (mp *MessagePool) PendingFor(ctx context.Context, a address.Address) ([]*types.SignedMessage, *types.TipSet) { + mp.curTSLk.Lock() + defer mp.curTSLk.Unlock() + + mp.lk.Lock() + defer mp.lk.Unlock() + return mp.pendingFor(ctx, a), mp.curTS +} + +func (mp *MessagePool) pendingFor(ctx context.Context, a address.Address) []*types.SignedMessage { + mset, ok, err := mp.getPendingMset(ctx, a) + if err != nil { + log.Debugf("mpoolpendingfor failed to get mset: %s", err) + return nil + } + + if mset == nil || !ok || len(mset.msgs) == 0 { + return nil + } + + return mset.toSlice() +} + +func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, apply []*types.TipSet) error { + mp.curTSLk.Lock() + defer mp.curTSLk.Unlock() + + repubTrigger := false + rmsgs := make(map[address.Address]map[uint64]*types.SignedMessage) + add := func(m *types.SignedMessage) { + s, ok := rmsgs[m.Message.From] + if !ok { + s = make(map[uint64]*types.SignedMessage) + rmsgs[m.Message.From] = s + } + s[m.Message.Nonce] = m + } + rm := func(from address.Address, nonce uint64) { + s, ok := rmsgs[from] + if !ok { + mp.Remove(ctx, from, nonce, true) + return + } + + if _, ok := s[nonce]; ok { + delete(s, nonce) + return + } 
+ + mp.Remove(ctx, from, nonce, true) + } + + maybeRepub := func(cid cid.Cid) { + if !repubTrigger { + mp.lk.Lock() + _, republished := mp.republished[cid] + mp.lk.Unlock() + if republished { + repubTrigger = true + } + } + } + + var merr error + + for _, ts := range revert { + tsKey := ts.Parents() + pts, err := mp.api.LoadTipSet(ctx, tsKey) + if err != nil { + log.Errorf("error loading reverted tipset parent: %s", err) + merr = multierror.Append(merr, err) + continue + } + + mp.curTS = pts + + msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks()) + if err != nil { + log.Errorf("error retrieving messages for reverted block: %s", err) + merr = multierror.Append(merr, err) + continue + } + + for _, msg := range msgs { + add(msg) + } + } + + for _, ts := range apply { + mp.curTS = ts + + for _, b := range ts.Blocks() { + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) + if err != nil { + xerr := fmt.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %v", b.Cid(), b.Height, b.Messages, err) + log.Errorf("error retrieving messages for block: %s", xerr) + merr = multierror.Append(merr, xerr) + continue + } + + for _, msg := range smsgs { + rm(msg.Message.From, msg.Message.Nonce) + maybeRepub(msg.Cid()) + } + + for _, msg := range bmsgs { + rm(msg.From, msg.Nonce) + maybeRepub(msg.Cid()) + } + } + } + + if repubTrigger { + select { + case mp.repubTrigger <- struct{}{}: + default: + } + } + + for _, s := range rmsgs { + for _, msg := range s { + if err := mp.addSkipChecks(ctx, msg); err != nil { + log.Errorf("Failed to readd message from reorg to mpool: %s", err) + } + } + } + + if len(revert) > 0 && futureDebug { + mp.lk.Lock() + msgs, ts := mp.allPending(ctx) + mp.lk.Unlock() + + buckets := map[address.Address]*statBucket{} + + for _, v := range msgs { + bkt, ok := buckets[v.Message.From] + if !ok { + bkt = &statBucket{ + msgs: map[uint64]*types.SignedMessage{}, + } + buckets[v.Message.From] = bkt + } + + bkt.msgs[v.Message.Nonce] = v + } + 
+ for a, bkt := range buckets { + // TODO that might not be correct with GatActorAfter but it is only debug code + act, err := mp.api.GetActorAfter(ctx, a, ts) + if err != nil { + log.Debugf("%s, err: %s\n", a, err) + continue + } + + var cmsg *types.SignedMessage + var ok bool + + cur := act.Nonce + for { + cmsg, ok = bkt.msgs[cur] + if !ok { + break + } + cur++ + } + + ff := uint64(math.MaxUint64) + for k := range bkt.msgs { + if k > cur && k < ff { + ff = k + } + } + + if ff != math.MaxUint64 { + m := bkt.msgs[ff] + mc := m.Cid() + + // cmsg can be nil if no messages from the current nonce are in the mpool + ccid := "nil" + if cmsg != nil { + ccid = cmsg.Cid().String() + } + + log.Debugw("Nonce gap", + "actor", a, + "future_cid", mc, + "future_nonce", ff, + "current_cid", ccid, + "current_nonce", cur, + "revert_tipset", revert[0].Key(), + "new_head", ts.Key(), + ) + } + } + } + + return merr +} + +func (mp *MessagePool) runHeadChange(ctx context.Context, from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error { + add := func(m *types.SignedMessage) { + s, ok := rmsgs[m.Message.From] + if !ok { + s = make(map[uint64]*types.SignedMessage) + rmsgs[m.Message.From] = s + } + s[m.Message.Nonce] = m + } + rm := func(from address.Address, nonce uint64) { + s, ok := rmsgs[from] + if !ok { + return + } + + if _, ok := s[nonce]; ok { + delete(s, nonce) + return + } + } + + revert, apply, err := chain.ReorgOps(mp.api.LoadTipSet, from, to) + if err != nil { + return fmt.Errorf("failed to compute reorg ops for mpool pending messages: %v", err) + } + + var merr error + + for _, ts := range revert { + msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks()) + if err != nil { + log.Errorf("error retrieving messages for reverted block: %s", err) + merr = multierror.Append(merr, err) + continue + } + + for _, msg := range msgs { + add(msg) + } + } + + for _, ts := range apply { + for _, b := range ts.Blocks() { + bmsgs, smsgs, err := 
mp.api.MessagesForBlock(context.TODO(), b) + if err != nil { + xerr := fmt.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %v", b.Cid(), b.Height, b.Messages, err) + log.Errorf("error retrieving messages for block: %s", xerr) + merr = multierror.Append(merr, xerr) + continue + } + + for _, msg := range smsgs { + rm(msg.Message.From, msg.Message.Nonce) + } + + for _, msg := range bmsgs { + rm(msg.From, msg.Nonce) + } + } + } + + return merr +} + +type statBucket struct { + msgs map[uint64]*types.SignedMessage +} + +func (mp *MessagePool) MessagesForBlocks(ctx context.Context, blks []*types.BlockHeader) ([]*types.SignedMessage, error) { + out := make([]*types.SignedMessage, 0) + + for _, b := range blks { + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) + if err != nil { + return nil, fmt.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %v", b.Cid(), b.Height, b.Messages, err) + } + out = append(out, smsgs...) + + for _, msg := range bmsgs { + smsg := mp.RecoverSig(msg) + if smsg != nil { + out = append(out, smsg) + } else { + log.Debugf("could not recover signature for bls message %s", msg.Cid()) + } + } + } + + return out, nil +} + +func (mp *MessagePool) RecoverSig(msg *types.Message) *types.SignedMessage { + val, ok := mp.blsSigCache.Get(msg.Cid()) + if !ok { + return nil + } + sig, ok := val.(crypto.Signature) + if !ok { + log.Errorf("value in signature cache was not a signature (got %T)", val) + return nil + } + + return &types.SignedMessage{ + Message: *msg, + Signature: sig, + } +} + +func (mp *MessagePool) Updates(ctx context.Context) (<-chan types.MpoolUpdate, error) { + out := make(chan types.MpoolUpdate, 20) + sub := mp.changes.Sub(localUpdates) + + go func() { + defer mp.changes.Unsub(sub) + defer close(out) + + for { + select { + case u := <-sub: + select { + case out <- u.(types.MpoolUpdate): + case <-ctx.Done(): + return + case <-mp.closer: + return + } + case <-ctx.Done(): + return + case 
<-mp.closer: + return + } + } + }() + + return out, nil +} + +func (mp *MessagePool) loadLocal(ctx context.Context) error { + if val := os.Getenv("VENUS_DISABLE_LOCAL_MESSAGE"); val != "" { + log.Warnf("receive environment to disable local local message") + return nil + } + + res, err := mp.localMsgs.Query(ctx, query.Query{}) + if err != nil { + return fmt.Errorf("query local messages: %v", err) + } + + for r := range res.Next() { + if r.Error != nil { + return fmt.Errorf("r.Error: %v", r.Error) + } + + var sm types.SignedMessage + if err := sm.UnmarshalCBOR(bytes.NewReader(r.Value)); err != nil { + return fmt.Errorf("unmarshaling local message: %v", err) + } + + if err := mp.addLoaded(ctx, &sm); err != nil { + if errors.Is(err, ErrNonceTooLow) { + continue // todo: drop the message from local cache (if above certain confidence threshold) + } + + log.Errorf("adding local message: %+v", err) + } + + if err = mp.setLocal(ctx, sm.Message.From); err != nil { + log.Debugf("mpoolloadLocal errored: %s", err) + return err + } + } + + return nil +} + +func (mp *MessagePool) Clear(ctx context.Context, local bool) { + mp.lk.Lock() + defer mp.lk.Unlock() + + // remove everything if local is true, including removing local messages from + // the datastore + if local { + mp.forEachLocal(ctx, func(ctx context.Context, la address.Address) { + mset, ok, err := mp.getPendingMset(ctx, la) + if err != nil { + log.Warnf("errored while getting pending mset: %w", err) + return + } + + if ok { + for _, m := range mset.msgs { + err := mp.localMsgs.Delete(ctx, datastore.NewKey(string(m.Cid().Bytes()))) + if err != nil { + log.Warnf("error deleting local message: %s", err) + } + } + } + }) + + mp.clearPending() + mp.republished = nil + + return + } + + mp.forEachPending(func(a address.Address, ms *msgSet) { + isLocal, err := mp.isLocal(ctx, a) + if err != nil { + log.Warnf("errored while determining isLocal: %w", err) + return + } + + if isLocal { + return + } + + if err = 
mp.deletePendingMset(ctx, a); err != nil { + log.Warnf("errored while deleting mset: %w", err) + return + } + }) +} + +func getBaseFeeLowerBound(baseFee, factor big.Int) big.Int { + baseFeeLowerBound := big.Div(baseFee, factor) + if big.Cmp(baseFeeLowerBound, minimumBaseFee) < 0 { + baseFeeLowerBound = minimumBaseFee + } + + return baseFeeLowerBound +} diff --git a/pkg/messagepool/messagepool_test.go b/pkg/messagepool/messagepool_test.go new file mode 100644 index 0000000000..0447806960 --- /dev/null +++ b/pkg/messagepool/messagepool_test.go @@ -0,0 +1,937 @@ +// stm: #unit +package messagepool + +import ( + "context" + "fmt" + "sort" + "testing" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/statemanger" + + _ "github.com/filecoin-project/venus/pkg/crypto/secp" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + tbig "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/assert" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/messagepool/gasguess" + "github.com/filecoin-project/venus/pkg/repo" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func init() { + _ = logging.SetLogLevel("*", "INFO") +} + +type testMpoolAPI struct { + cb func(rev, app []*types.TipSet) error + + bmsgs map[cid.Cid][]*types.SignedMessage + statenonce map[address.Address]uint64 + balance map[address.Address]tbig.Int + + tipsets 
[]*types.TipSet + + published int + + baseFee tbig.Int +} + +func mkAddress(i uint64) address.Address { + a, err := address.NewIDAddress(i) + if err != nil { + panic(err) + } + return a +} + +func mkMessage(from, to address.Address, nonce uint64, w *wallet.Wallet) *types.SignedMessage { + msg := &types.Message{ + To: to, + From: from, + Value: tbig.NewInt(1), + Nonce: nonce, + GasLimit: 1000000, + GasFeeCap: tbig.NewInt(100), + GasPremium: tbig.NewInt(1), + } + + c := msg.Cid() + sig, err := w.WalletSign(context.Background(), from, c.Bytes(), types.MsgMeta{}) + if err != nil { + panic(err) + } + return &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } +} + +func mkBlock(parents *types.TipSet, weightInc int64, ticketNonce uint64) *types.BlockHeader { + addr := mkAddress(123561) + + c, err := cid.Decode("bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i") + if err != nil { + panic(err) + } + + pstateRoot := c + if parents != nil { + pstateRoot = parents.Blocks()[0].ParentStateRoot + } + + var height abi.ChainEpoch + var tsKey types.TipSetKey + weight := tbig.NewInt(weightInc) + var timestamp uint64 + if parents != nil { + height = parents.Height() + height = height + 1 + timestamp = parents.MinTimestamp() + constants.MainNetBlockDelaySecs + weight = tbig.Add(parents.Blocks()[0].ParentWeight, weight) + tsKey = parents.Key() + } + + return &types.BlockHeader{ + Miner: addr, + ElectionProof: &types.ElectionProof{ + VRFProof: []byte(fmt.Sprintf("====%d=====", ticketNonce)), + }, + Ticket: &types.Ticket{ + VRFProof: []byte(fmt.Sprintf("====%d=====", ticketNonce)), + }, + Parents: tsKey.Cids(), + ParentMessageReceipts: c, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, + ParentWeight: weight, + Messages: c, + Height: height, + Timestamp: timestamp, + ParentStateRoot: pstateRoot, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! 
im a signature")}, + ParentBaseFee: tbig.NewInt(int64(constants.MinimumBaseFee)), + } +} + +func mkTipSet(blks ...*types.BlockHeader) *types.TipSet { + ts, err := types.NewTipSet(blks) + if err != nil { + panic(err) + } + return ts +} + +func newTestMpoolAPI() *testMpoolAPI { + tma := &testMpoolAPI{ + bmsgs: make(map[cid.Cid][]*types.SignedMessage), + statenonce: make(map[address.Address]uint64), + balance: make(map[address.Address]tbig.Int), + baseFee: tbig.NewInt(100), + } + genesis := mkBlock(nil, 1, 1) + tma.tipsets = append(tma.tipsets, mkTipSet(genesis)) + return tma +} + +func (tma *testMpoolAPI) nextBlock() *types.BlockHeader { + newBlk := mkBlock(tma.tipsets[len(tma.tipsets)-1], 1, 1) + tma.tipsets = append(tma.tipsets, mkTipSet(newBlk)) + return newBlk +} + +func (tma *testMpoolAPI) nextBlockWithHeight(height uint64) *types.BlockHeader { + newBlk := mkBlock(tma.tipsets[len(tma.tipsets)-1], 1, 1) + newBlk.Height = abi.ChainEpoch(height) + tma.tipsets = append(tma.tipsets, mkTipSet(newBlk)) + return newBlk +} + +func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) { + t.Helper() + if err := tma.cb(nil, []*types.TipSet{mkTipSet(b)}); err != nil { + t.Fatal(err) + } +} + +func (tma *testMpoolAPI) revertBlock(t *testing.T, b *types.BlockHeader) { + t.Helper() + if err := tma.cb([]*types.TipSet{mkTipSet(b)}, nil); err != nil { + t.Fatal(err) + } +} + +func (tma *testMpoolAPI) setStateNonce(addr address.Address, v uint64) { + tma.statenonce[addr] = v +} + +func (tma *testMpoolAPI) setBalance(addr address.Address, v uint64) { + tma.balance[addr] = types.FromFil(v) +} + +func (tma *testMpoolAPI) setBalanceRaw(addr address.Address, v tbig.Int) { + tma.balance[addr] = v +} + +func (tma *testMpoolAPI) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) { + tma.bmsgs[h.Cid()] = msgs +} + +func (tma *testMpoolAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { + return &types.TipSet{}, nil +} + +func (tma *testMpoolAPI) 
ChainTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + return &types.TipSet{}, nil +} + +func (tma *testMpoolAPI) SubscribeHeadChanges(ctx context.Context, cb func(rev, app []*types.TipSet) error) *types.TipSet { + tma.cb = cb + return tma.tipsets[0] +} + +func (tma *testMpoolAPI) PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) { + return cid.Undef, nil +} + +func (tma *testMpoolAPI) IsLite() bool { + return false +} + +func (tma *testMpoolAPI) PubSubPublish(context.Context, string, []byte) error { + tma.published++ + return nil +} + +func (tma *testMpoolAPI) GetActorAfter(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, error) { + // regression check for load bug + if ts == nil { + panic("GetActorAfter called with nil tipset") + } + + balance, ok := tma.balance[addr] + if !ok { + balance = tbig.NewInt(1000e6) + tma.balance[addr] = balance + } + + msgs := make([]*types.SignedMessage, 0) + for _, b := range ts.Blocks() { + for _, m := range tma.bmsgs[b.Cid()] { + if m.Message.From == addr { + msgs = append(msgs, m) + } + } + } + + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Message.Nonce < msgs[j].Message.Nonce + }) + + nonce := tma.statenonce[addr] + + for _, m := range msgs { + if m.Message.Nonce != nonce { + break + } + nonce++ + } + + return &types.Actor{ + Code: builtin2.StorageMarketActorCodeID, + Nonce: nonce, + Balance: balance, + }, nil +} + +func (tma *testMpoolAPI) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 { + return address.Undef, fmt.Errorf("given address was not a key addr") + } + return addr, nil +} + +func (tma *testMpoolAPI) StateNetworkVersion(ctx context.Context, h abi.ChainEpoch) network.Version { + return constants.TestNetworkVersion +} + +func (tma *testMpoolAPI) StateAccountKey(ctx context.Context, addr 
address.Address, ts *types.TipSet) (address.Address, error) { + if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 { + return address.Undef, fmt.Errorf("given address was not a key addr") + } + return addr, nil +} + +func (tma *testMpoolAPI) MessagesForBlock(ctx context.Context, h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + return nil, tma.bmsgs[h.Cid()], nil +} + +func (tma *testMpoolAPI) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + if len(ts.Blocks()) != 1 { + panic("cant deal with multiblock tipsets in this test") + } + + bm, sm, err := tma.MessagesForBlock(ctx, ts.Blocks()[0]) + if err != nil { + return nil, err + } + + var out []types.ChainMsg + for _, m := range bm { + out = append(out, m) + } + + for _, m := range sm { + out = append(out, m) + } + + return out, nil +} + +func (tma *testMpoolAPI) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + for _, ts := range tma.tipsets { + if tsk.Equals(ts.Key()) { + return ts, nil + } + } + + return nil, fmt.Errorf("tipset not found") +} + +func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (tbig.Int, error) { + return tma.baseFee, nil +} + +func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) { + tf.UnitTest(t) + + t.Helper() + n, err := mp.GetNonce(context.Background(), addr, types.EmptyTSK) + if err != nil { + t.Fatal(err) + } + + if n != val { + t.Fatalf("expected nonce of %d, got %d", val, n) + } +} + +func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) { + tf.UnitTest(t) + + t.Helper() + if err := mp.Add(context.TODO(), msg); err != nil { + t.Fatal(err) + } +} + +func newWalletAndMpool(t *testing.T, tma *testMpoolAPI) (*wallet.Wallet, *MessagePool) { + ds := datastore.NewMapDatastore() + + builder := chain.NewBuilder(t, address.Undef) + eval := builder.FakeStateEvaluator() + stmgr := 
statemanger.NewStateManger(builder.Store(), eval, nil, fork.NewMockFork(), nil, nil) + + mp, err := New(context.Background(), tma, stmgr, ds, config.NewDefaultConfig().NetworkParams, config.DefaultMessagePoolParam, "mptest", nil) + if err != nil { + t.Fatal(err) + } + + return newWallet(t), mp +} + +func newWallet(t *testing.T) *wallet.Wallet { + r := repo.NewInMemoryRepo() + backend, err := wallet.NewDSBackend(context.Background(), r.WalletDatastore(), r.Config().Wallet.PassphraseConfig, wallet.TestPassword) + assert.NoError(t, err) + + return wallet.New(backend) +} + +func TestMessagePool(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + tma := newTestMpoolAPI() + + w, mp := newWalletAndMpool(t, tma) + // stm: @MESSAGEPOOL_POOL_CLOSE_001 + defer mp.Close() // nolint + + a := tma.nextBlock() + + sender, err := w.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + target := mkAddress(1001) + + var msgs []*types.SignedMessage + for i := 0; i < 5; i++ { + msgs = append(msgs, mkMessage(sender, target, uint64(i), w)) + } + + tma.setStateNonce(sender, 0) + assertNonce(t, mp, sender, 0) + // stm: @MESSAGEPOOL_POOL_ADD_001 + mustAdd(t, mp, msgs[0]) + assertNonce(t, mp, sender, 1) + mustAdd(t, mp, msgs[1]) + assertNonce(t, mp, sender, 2) + + tma.setBlockMessages(a, msgs[0], msgs[1]) + + // stm: @MESSAGEPOOL_POOL_GET_MESSAGES_FOR_BLOCKS_001 + blockMsgs, err := mp.MessagesForBlocks(ctx, []*types.BlockHeader{a}) + assert.NoError(t, err) + assert.Equal(t, len(blockMsgs), 2) + tma.applyBlock(t, a) + + assertNonce(t, mp, sender, 2) + { // test verify message signature + // stm: @MESSAGEPOOL_POOL_VERIFY_MSG_SIG_001 + assert.NoError(t, mp.VerifyMsgSig(msgs[2])) + } + { // test publish message + mustAdd(t, mp, msgs[2]) + assertNonce(t, mp, sender, msgs[2].Message.Nonce+1) + pendingMsgs, _ := mp.PendingFor(ctx, sender) + assert.Equal(t, len(pendingMsgs), 1) + // stm: @MESSAGEPOOL_POOL_PUBLISH_FOR_WALLET + assert.NoError(t, 
mp.PublishMsgForWallet(ctx, sender)) + //// stm: @MESSAGEPOOL_POOL_PUBLISH_001 + assert.NoError(t, mp.PublishMsg(ctx, msgs[2])) + assertNonce(t, mp, sender, msgs[2].Message.Nonce+1) + } + { // test delete by address + // delete pending message with sender is message.From + // stm: @MESSAGEPOOL_POOL_DELETE_BY_ADDRESS_001 + assert.NoError(t, mp.DeleteByAdress(sender)) + // since message.From is deleted, the pending messages should be 0 + pendingMsgs, _ := mp.PendingFor(ctx, sender) + assert.Equal(t, len(pendingMsgs), 0) + } + { // test remove message + mustAdd(t, mp, msgs[2]) + pendingMsgs, _ := mp.PendingFor(ctx, sender) + assert.Equal(t, len(pendingMsgs), 1) + // stm: @MESSAGEPOOL_POOL_REMOVE_001 + mp.Remove(ctx, sender, msgs[2].Message.Nonce, false) + pendingMsgs, _ = mp.PendingFor(ctx, sender) + assert.Equal(t, len(pendingMsgs), 0) + } + + { // test push untrusted. + // stm: @MESSAGEPOOL_POOL_PUSH_UNTRUSTED + msgCID, err := mp.PushUntrusted(ctx, msgs[2]) + assert.NoError(t, err) + assert.Equal(t, msgCID, msgs[2].Cid()) + + assertNonce(t, mp, sender, msgs[2].Message.Nonce+1) + + // stm: @MESSAGEPOOL_POOL_GET_PENDING_001 + pendingMsgs01, _ := mp.Pending(ctx) + assert.Equal(t, len(pendingMsgs01), 1) + + // stm: @MESSAGEPOOL_POOL_GET_PENDING_FOR_ADDRESS_001 + pendingMsgs02, _ := mp.PendingFor(ctx, sender) + assert.Equal(t, len(pendingMsgs02), 1) + + assert.Equal(t, pendingMsgs01, pendingMsgs02) + } + { // test check messages + mustAdd(t, mp, msgs[3]) + // stm: @MESSAGEPOOL_POOL_CHECK_PENDING_MESSAGES_001 + _, err := mp.CheckPendingMessages(ctx, sender) + assert.NoError(t, err) + // stm: @MESSAGEPOOL_POOL_CHECK_MESSAGES_001 + _, err = mp.CheckMessages(ctx, []*types.MessagePrototype{ + {ValidNonce: true, Message: msgs[3].Message}, + }) + assert.NoError(t, err) + // stm:@MESSAGEPOOL_POOL_RECOVER_SIG_001 + assert.Nil(t, mp.RecoverSig(&msgs[4].Message)) + } + +} + +func TestCheckMessageBig(t *testing.T) { + tma := newTestMpoolAPI() + + w, mp := newWalletAndMpool(t, tma) + 
from, err := w.NewAddress(context.Background(), address.SECP256K1) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + to := mkAddress(1001) + + { + msg := &types.Message{ + To: to, + From: from, + Value: types.NewInt(1), + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(100), + GasPremium: types.NewInt(1), + Params: make([]byte, 41<<10), // 41KiB payload + } + + sig, err := w.WalletSign(context.Background(), from, msg.Cid().Bytes(), types.MsgMeta{}) + if err != nil { + panic(err) + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + mustAdd(t, mp, sm) + } + + { + msg := &types.Message{ + To: to, + From: from, + Value: types.NewInt(1), + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(100), + GasPremium: types.NewInt(1), + Params: make([]byte, 64<<10), // 64KiB payload + } + + sig, err := w.WalletSign(context.Background(), from, msg.Cid().Bytes(), types.MsgMeta{}) + if err != nil { + panic(err) + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + err = mp.Add(context.TODO(), sm) + assert.ErrorIs(t, err, ErrMessageTooBig) + } +} + +func TestMessagePoolMessagesInEachBlock(t *testing.T) { + tf.UnitTest(t) + + tma := newTestMpoolAPI() + + w, mp := newWalletAndMpool(t, tma) + + a := tma.nextBlock() + + sender, err := w.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + target := mkAddress(1001) + + var msgs []*types.SignedMessage + for i := 0; i < 5; i++ { + m := mkMessage(sender, target, uint64(i), w) + msgs = append(msgs, m) + mustAdd(t, mp, m) + } + + tma.setStateNonce(sender, 0) + + tma.setBlockMessages(a, msgs[0], msgs[1]) + tma.applyBlock(t, a) + tsa := mkTipSet(a) + + _, _ = mp.Pending(context.TODO()) + + // stm: @MESSAGEPOOL_POOL_SELECT_MESSAGES_001 + selm, _ := mp.SelectMessages(context.Background(), tsa, 1) + if len(selm) == 0 { + t.Fatal("should have returned the rest of the messages") + } +} + +func TestRevertMessages(t *testing.T) { + 
tf.UnitTest(t) + + futureDebug = true + defer func() { + futureDebug = false + }() + + tma := newTestMpoolAPI() + + w, mp := newWalletAndMpool(t, tma) + + a := tma.nextBlock() + b := tma.nextBlock() + + sender, err := w.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + target := mkAddress(1001) + + var msgs []*types.SignedMessage + for i := 0; i < 5; i++ { + msgs = append(msgs, mkMessage(sender, target, uint64(i), w)) + } + + tma.setBlockMessages(a, msgs[0]) + tma.setBlockMessages(b, msgs[1], msgs[2], msgs[3]) + + mustAdd(t, mp, msgs[0]) + mustAdd(t, mp, msgs[1]) + mustAdd(t, mp, msgs[2]) + mustAdd(t, mp, msgs[3]) + + tma.setStateNonce(sender, 0) + tma.applyBlock(t, a) + assertNonce(t, mp, sender, 4) + + tma.setStateNonce(sender, 1) + tma.applyBlock(t, b) + assertNonce(t, mp, sender, 4) + tma.setStateNonce(sender, 0) + tma.revertBlock(t, b) + + assertNonce(t, mp, sender, 4) + + p, _ := mp.Pending(context.TODO()) + fmt.Printf("%+v\n", p) + if len(p) != 3 { + t.Fatal("expected three messages in mempool") + } +} + +func TestPruningSimple(t *testing.T) { + tf.UnitTest(t) + + oldMaxNonceGap := MaxNonceGap + MaxNonceGap = 1000 + defer func() { + MaxNonceGap = oldMaxNonceGap + }() + + tma := newTestMpoolAPI() + + w, mp := newWalletAndMpool(t, tma) + + a := tma.nextBlock() + tma.applyBlock(t, a) + + sender, err := w.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + tma.setBalance(sender, 1) // in FIL + target := mkAddress(1001) + + for i := 0; i < 5; i++ { + smsg := mkMessage(sender, target, uint64(i), w) + if err := mp.Add(context.TODO(), smsg); err != nil { + t.Fatal(err) + } + } + + for i := 10; i < 50; i++ { + smsg := mkMessage(sender, target, uint64(i), w) + if err := mp.Add(context.TODO(), smsg); err != nil { + t.Fatal(err) + } + } + + mp.cfg.SizeLimitHigh = 40 + mp.cfg.SizeLimitLow = 10 + + // stm: @MESSAGEPOOL_POOL_PRUNE_001 + mp.Prune() + + msgs, _ := mp.Pending(context.TODO()) + if 
len(msgs) != 5 { + t.Fatal("expected only 5 messages in pool, got: ", len(msgs)) + } +} + +func TestLoadLocal(t *testing.T) { + tf.UnitTest(t) + + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, nil, ds, config.NewDefaultConfig().NetworkParams, config.DefaultMessagePoolParam, "mptest", nil) + if err != nil { + t.Fatal(err) + } + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + msgs := make(map[cid.Cid]struct{}) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + // stm: @MESSAGEPOOL_POOL_PUSH_001 + c, err := mp.Push(context.TODO(), m) + if err != nil { + t.Fatal(err) + } + msgs[c] = struct{}{} + } + err = mp.Close() + if err != nil { + t.Fatal(err) + } + + mp, err = New(context.Background(), tma, nil, ds, config.NewDefaultConfig().NetworkParams, config.DefaultMessagePoolParam, "mptest", nil) + if err != nil { + t.Fatal(err) + } + + pmsgs, _ := mp.Pending(context.TODO()) + if len(msgs) != len(pmsgs) { + t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs)) + } + + for _, m := range pmsgs { + c := m.Cid() + _, ok := msgs[c] + if !ok { + t.Fatal("unknown message") + } + + delete(msgs, c) + } + + if len(msgs) > 0 { + t.Fatalf("not all messages were laoded; missing %d messages", len(msgs)) + } +} + +func TestClearAll(t *testing.T) { + tf.UnitTest(t) + + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, nil, ds, config.NewDefaultConfig().NetworkParams, config.DefaultMessagePoolParam, "mptest", nil) + if err != nil { + 
t.Fatal(err) + } + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + _, err := mp.Push(context.TODO(), m) + if err != nil { + t.Fatal(err) + } + } + + for i := 0; i < 10; i++ { + m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) + mustAdd(t, mp, m) + } + + // stm: @MESSAGEPOOL_POOL_CLEAR_001 + mp.Clear(context.Background(), true) + + pending, _ := mp.Pending(context.TODO()) + if len(pending) > 0 { + t.Fatalf("cleared the mpool, but got %d pending messages", len(pending)) + } +} + +func TestClearNonLocal(t *testing.T) { + tf.UnitTest(t) + + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, nil, ds, config.NewDefaultConfig().NetworkParams, config.DefaultMessagePoolParam, "mptest", nil) + if err != nil { + t.Fatal(err) + } + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + _, err := mp.Push(context.TODO(), m) + if err != nil { + t.Fatal(err) + } + } + + for i := 0; i < 10; i++ { + m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, 
uint64(i+1)) + mustAdd(t, mp, m) + } + + mp.Clear(context.Background(), false) + + pending, _ := mp.Pending(context.TODO()) + if len(pending) != 10 { + t.Fatalf("expected 10 pending messages, but got %d instead", len(pending)) + } + + for _, m := range pending { + if m.Message.From != a1 { + t.Fatalf("expected message from %s but got one from %s instead", a1, m.Message.From) + } + } +} + +func TestUpdates(t *testing.T) { + tf.UnitTest(t) + + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, nil, ds, config.NewDefaultConfig().NetworkParams, config.DefaultMessagePoolParam, "mptest", nil) + if err != nil { + t.Fatal(err) + } + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + ch, err := mp.Updates(ctx) + if err != nil { + t.Fatal(err) + } + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + _, err := mp.Push(context.TODO(), m) + if err != nil { + t.Fatal(err) + } + + _, ok := <-ch + if !ok { + t.Fatal("expected update, but got a closed channel instead") + } + } + + err = mp.Close() + if err != nil { + t.Fatal(err) + } + + _, ok := <-ch + if ok { + t.Fatal("expected closed channel, but got an update instead") + } +} + +func TestCapGasFee(t *testing.T) { + t.Run("use default maxfee", func(t *testing.T) { + msg := &types.Message{ + GasLimit: 100_000_000, + GasFeeCap: abi.NewTokenAmount(100_000_000), + GasPremium: abi.NewTokenAmount(100_000), + } + CapGasFee(func() (abi.TokenAmount, error) { + return 
abi.NewTokenAmount(100_000_000_000), nil + }, msg, nil) + assert.Equal(t, msg.GasFeeCap.Int64(), int64(1000)) + assert.Equal(t, msg.GasPremium.Int.Int64(), int64(1000)) + }) + + t.Run("use spec maxfee", func(t *testing.T) { + msg := &types.Message{ + GasLimit: 100_000_000, + GasFeeCap: abi.NewTokenAmount(100_000_000), + GasPremium: abi.NewTokenAmount(100_000), + } + CapGasFee(nil, msg, &types.MessageSendSpec{MaxFee: abi.NewTokenAmount(100_000_000_000)}) + assert.Equal(t, msg.GasFeeCap.Int64(), int64(1000)) + assert.Equal(t, msg.GasPremium.Int.Int64(), int64(1000)) + }) + + t.Run("use smaller feecap value when fee is enough", func(t *testing.T) { + msg := &types.Message{ + GasLimit: 100_000_000, + GasFeeCap: abi.NewTokenAmount(100_000), + GasPremium: abi.NewTokenAmount(100_000_000), + } + CapGasFee(nil, msg, &types.MessageSendSpec{MaxFee: abi.NewTokenAmount(100_000_000_000_000)}) + assert.Equal(t, msg.GasFeeCap.Int64(), int64(100_000)) + assert.Equal(t, msg.GasPremium.Int.Int64(), int64(100_000)) + }) +} diff --git a/pkg/messagepool/messagesigner.go b/pkg/messagepool/messagesigner.go new file mode 100644 index 0000000000..282089eacd --- /dev/null +++ b/pkg/messagepool/messagesigner.go @@ -0,0 +1,158 @@ +package messagepool + +import ( + "bytes" + "context" + "errors" + "fmt" + "sync" + + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" +) + +const dsKeyActorNonce = "ActorNextNonce" + +type MpoolNonceAPI interface { + GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error) + GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) +} + +// MessageSigner keeps track of nonces per address, and increments the nonce +// when signing a message +type MessageSigner struct { + wallet wallet.WalletIntersection + lk 
sync.Mutex + mpool MpoolNonceAPI + ds datastore.Batching +} + +func NewMessageSigner(wallet wallet.WalletIntersection, mpool MpoolNonceAPI, ds datastore.Batching) *MessageSigner { + ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/")) + return &MessageSigner{ + wallet: wallet, + mpool: mpool, + ds: ds, + } +} + +// SignMessage increments the nonce for the message From address, and signs +// the message +func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb func(*types.SignedMessage) error) (*types.SignedMessage, error) { + ms.lk.Lock() + defer ms.lk.Unlock() + + // Get the next message nonce + nonce, err := ms.nextNonce(ctx, msg.From) + if err != nil { + return nil, fmt.Errorf("failed to create nonce: %w", err) + } + + // Sign the message with the nonce + msg.Nonce = nonce + + mb, err := msg.ToStorageBlock() + if err != nil { + return nil, fmt.Errorf("serializing message: %w", err) + } + + sig, err := ms.wallet.WalletSign(ctx, msg.From, mb.Cid().Bytes(), types.MsgMeta{ + Type: types.MTChainMsg, + Extra: mb.RawData(), + }) + if err != nil { + return nil, fmt.Errorf("failed to sign message: %w", err) + } + + // Callback with the signed message + smsg := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + err = cb(smsg) + if err != nil { + return nil, err + } + + // If the callback executed successfully, write the nonce to the datastore + if err := ms.saveNonce(ctx, msg.From, nonce); err != nil { + return nil, fmt.Errorf("failed to save nonce: %w", err) + } + + return smsg, nil +} + +// nextNonce gets the next nonce for the given address. +// If there is no nonce in the datastore, gets the nonce from the message pool. +func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (uint64, error) { + // Nonces used to be created by the mempool and we need to support nodes + // that have mempool nonces, so first check the mempool for a nonce for + // this address. 
Note that the mempool returns the actor state's nonce + // by default. + nonce, err := ms.mpool.GetNonce(ctx, addr, types.EmptyTSK) + if err != nil { + return 0, fmt.Errorf("failed to get nonce from mempool: %w", err) + } + + // Get the next nonce for this address from the datastore + addrNonceKey := ms.dstoreKey(addr) + dsNonceBytes, err := ms.ds.Get(ctx, addrNonceKey) + + switch { + case errors.Is(err, datastore.ErrNotFound): + // If a nonce for this address hasn't yet been created in the + // datastore, just use the nonce from the mempool + return nonce, nil + + case err != nil: + return 0, fmt.Errorf("failed to get nonce from datastore: %w", err) + + default: + // There is a nonce in the datastore, so unmarshall it + maj, dsNonce, err := cbg.CborReadHeader(bytes.NewReader(dsNonceBytes)) + if err != nil { + return 0, fmt.Errorf("failed to parse nonce from datastore: %w", err) + } + if maj != cbg.MajUnsignedInt { + return 0, fmt.Errorf("bad cbor type parsing nonce from datastore") + } + + // The message pool nonce should be <= than the datastore nonce + if nonce <= dsNonce { + nonce = dsNonce + } else { + log.Warnf("mempool nonce was larger than datastore nonce (%d > %d)", nonce, dsNonce) + } + + return nonce, nil + } +} + +// saveNonce increments the nonce for this address and writes it to the +// datastore +func (ms *MessageSigner) saveNonce(ctx context.Context, addr address.Address, nonce uint64) error { + // Increment the nonce + nonce++ + + // Write the nonce to the datastore + addrNonceKey := ms.dstoreKey(addr) + buf := bytes.Buffer{} + _, err := buf.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, nonce)) + if err != nil { + return fmt.Errorf("failed to marshall nonce: %w", err) + } + err = ms.ds.Put(ctx, addrNonceKey, buf.Bytes()) + if err != nil { + return fmt.Errorf("failed to write nonce to datastore: %w", err) + } + return nil +} + +func (ms *MessageSigner) dstoreKey(addr address.Address) datastore.Key { + return 
datastore.KeyWithNamespaces([]string{dsKeyActorNonce, addr.String()}) +} diff --git a/pkg/messagepool/messagesigner_test.go b/pkg/messagepool/messagesigner_test.go new file mode 100644 index 0000000000..18ef423112 --- /dev/null +++ b/pkg/messagepool/messagesigner_test.go @@ -0,0 +1,216 @@ +// stm: #unit +package messagepool + +import ( + "context" + "fmt" + "sync" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ "github.com/filecoin-project/venus/pkg/crypto/secp" + "github.com/filecoin-project/venus/pkg/repo" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type mockMpool struct { + lk sync.RWMutex + nonces map[address.Address]uint64 +} + +var _ MpoolNonceAPI = (*mockMpool)(nil) + +func newMockMpool() *mockMpool { + return &mockMpool{nonces: make(map[address.Address]uint64)} +} + +func (mp *mockMpool) setNonce(addr address.Address, nonce uint64) { + mp.lk.Lock() + defer mp.lk.Unlock() + + mp.nonces[addr] = nonce +} + +func (mp *mockMpool) GetNonce(_ context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) { + mp.lk.RLock() + defer mp.lk.RUnlock() + + return mp.nonces[addr], nil +} + +func (mp *mockMpool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) { + panic("don't use it") +} + +func TestMessageSignerSignMessage(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + r := repo.NewInMemoryRepo() + backend, err := wallet.NewDSBackend(ctx, r.WalletDatastore(), r.Config().Wallet.PassphraseConfig, wallet.TestPassword) + assert.NoError(t, err) + + w := wallet.New(backend) + + from1, err := w.NewAddress(ctx, address.SECP256K1) + require.NoError(t, 
err) + from2, err := w.NewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + to1, err := w.NewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + to2, err := w.NewAddress(ctx, address.SECP256K1) + require.NoError(t, err) + + type msgSpec struct { + msg *types.Message + mpoolNonce [1]uint64 + expNonce uint64 + cbErr error + } + tests := []struct { + name string + msgs []msgSpec + }{{ + // No nonce yet in datastore + name: "no nonce yet", + msgs: []msgSpec{{ + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 0, + }}, + }, { + // Get nonce value of zero from mpool + name: "mpool nonce zero", + msgs: []msgSpec{{ + msg: &types.Message{ + To: to1, + From: from1, + }, + mpoolNonce: [1]uint64{0}, + expNonce: 0, + }}, + }, { + // Get non-zero nonce value from mpool + name: "mpool nonce set", + msgs: []msgSpec{{ + msg: &types.Message{ + To: to1, + From: from1, + }, + mpoolNonce: [1]uint64{5}, + expNonce: 5, + }, { + msg: &types.Message{ + To: to1, + From: from1, + }, + // Should adjust datastore nonce because mpool nonce is higher + mpoolNonce: [1]uint64{10}, + expNonce: 10, + }}, + }, { + // Nonce should increment independently for each address + name: "nonce increments per address", + msgs: []msgSpec{{ + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 0, + }, { + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 1, + }, { + msg: &types.Message{ + To: to2, + From: from2, + }, + mpoolNonce: [1]uint64{5}, + expNonce: 5, + }, { + msg: &types.Message{ + To: to2, + From: from2, + }, + expNonce: 6, + }, { + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 2, + }}, + }, { + name: "recover from callback error", + msgs: []msgSpec{{ + // No nonce yet in datastore + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 0, + }, { + // Increment nonce + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 1, + }, { + // Callback returns error + msg: &types.Message{ + To: to1, + From: from1, 
+ }, + cbErr: fmt.Errorf("err"), + }, { + // Callback successful, should increment nonce in datastore + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 2, + }}, + }} + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + mpool := newMockMpool() + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + // stm: @MESSAGEPOOL_SIGNER_NEW_SIGNER_001 + ms := NewMessageSigner(w, mpool, ds) + + for _, m := range tt.msgs { + if len(m.mpoolNonce) == 1 { + mpool.setNonce(m.msg.From, m.mpoolNonce[0]) + } + merr := m.cbErr + // stm: @MESSAGEPOOL_SIGNER_SIGN_MESSAGE_001 + smsg, err := ms.SignMessage(ctx, m.msg, func(message *types.SignedMessage) error { + return merr + }) + + if m.cbErr != nil { + require.Error(t, err) + require.Nil(t, smsg) + } else { + require.NoError(t, err) + require.Equal(t, m.expNonce, smsg.Message.Nonce) + } + } + }) + } +} diff --git a/pkg/messagepool/provider.go b/pkg/messagepool/provider.go new file mode 100644 index 0000000000..514609f6aa --- /dev/null +++ b/pkg/messagepool/provider.go @@ -0,0 +1,167 @@ +package messagepool + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + tbig "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/statemanger" + "github.com/filecoin-project/venus/venus-shared/actors/policy" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + pubsub "github.com/libp2p/go-libp2p-pubsub" +) + +var ( + HeadChangeCoalesceMinDelay = 2 * time.Second + HeadChangeCoalesceMaxDelay = 6 * time.Second + HeadChangeCoalesceMergeInterval = time.Second +) + +type Provider interface { + ChainHead(ctx context.Context) (*types.TipSet, error) + ChainTipSet(context.Context, types.TipSetKey) 
(*types.TipSet, error) + SubscribeHeadChanges(context.Context, func(rev, app []*types.TipSet) error) *types.TipSet + PutMessage(context.Context, types.ChainMsg) (cid.Cid, error) + PubSubPublish(context.Context, string, []byte) error + GetActorAfter(context.Context, address.Address, *types.TipSet) (*types.Actor, error) + StateAccountKeyAtFinality(context.Context, address.Address, *types.TipSet) (address.Address, error) + StateNetworkVersion(context.Context, abi.ChainEpoch) network.Version + StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error) + MessagesForBlock(context.Context, *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) + MessagesForTipset(context.Context, *types.TipSet) ([]types.ChainMsg, error) + LoadTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (tbig.Int, error) + IsLite() bool +} + +type mpoolProvider struct { + stmgr *statemanger.Stmgr + sm *chain.Store + cms *chain.MessageStore + config *config.NetworkParamsConfig + ps *pubsub.PubSub + + lite MpoolNonceAPI +} + +var _ Provider = (*mpoolProvider)(nil) + +func NewProvider(sm *statemanger.Stmgr, cs *chain.Store, cms *chain.MessageStore, cfg *config.NetworkParamsConfig, ps *pubsub.PubSub) Provider { + return &mpoolProvider{ + stmgr: sm, + sm: cs, + cms: cms, + config: cfg, + ps: ps, + } +} + +func NewProviderLite(sm *chain.Store, ps *pubsub.PubSub, noncer MpoolNonceAPI) Provider { + return &mpoolProvider{sm: sm, ps: ps, lite: noncer} +} + +func (mpp *mpoolProvider) IsLite() bool { + return mpp.lite != nil +} + +func (mpp *mpoolProvider) SubscribeHeadChanges(ctx context.Context, cb func(rev, app []*types.TipSet) error) *types.TipSet { + mpp.sm.SubscribeHeadChanges( + chain.WrapHeadChangeCoalescer( + cb, + HeadChangeCoalesceMinDelay, + HeadChangeCoalesceMaxDelay, + HeadChangeCoalesceMergeInterval, + )) + return mpp.sm.GetHead() +} + +func (mpp *mpoolProvider) 
ChainHead(context.Context) (*types.TipSet, error) { + return mpp.sm.GetHead(), nil +} + +func (mpp *mpoolProvider) ChainTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + return mpp.sm.GetTipSet(ctx, key) +} + +func (mpp *mpoolProvider) PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) { + return mpp.sm.PutMessage(ctx, m) +} + +func (mpp *mpoolProvider) PubSubPublish(ctx context.Context, k string, v []byte) error { + return mpp.ps.Publish(k, v) // nolint +} + +func (mpp *mpoolProvider) GetActorAfter(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, error) { + if mpp.IsLite() { + n, err := mpp.lite.GetNonce(ctx, addr, ts.Key()) + if err != nil { + return nil, fmt.Errorf("getting nonce over lite: %w", err) + } + a, err := mpp.lite.GetActor(ctx, addr, ts.Key()) + if err != nil { + return nil, fmt.Errorf("getting actor over lite: %w", err) + } + a.Nonce = n + return a, nil + } + + st, err := mpp.stmgr.TipsetState(ctx, ts) + if err != nil { + return nil, fmt.Errorf("computing tipset state for GetActor: %v", err) + } + + act, found, err := st.GetActor(ctx, addr) + if !found { + err = errors.New("actor not found") + } + + return act, err +} + +func (mpp *mpoolProvider) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + var err error + if ts.Height() > policy.ChainFinality { + ts, err = mpp.sm.GetTipSetByHeight(ctx, ts, ts.Height()-policy.ChainFinality, true) + if err != nil { + return address.Undef, fmt.Errorf("failed to load lookback tipset: %w", err) + } + } + return mpp.stmgr.ResolveToKeyAddress(ctx, addr, ts) +} + +func (mpp *mpoolProvider) StateNetworkVersion(ctx context.Context, height abi.ChainEpoch) network.Version { + return mpp.stmgr.GetNetworkVersion(ctx, height) +} + +func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + return 
mpp.stmgr.ResolveToKeyAddress(ctx, addr, ts) +} + +func (mpp *mpoolProvider) MessagesForBlock(ctx context.Context, h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + secpMsgs, blsMsgs, err := mpp.cms.LoadMetaMessages(context.TODO(), h.Messages) + return blsMsgs, secpMsgs, err +} + +func (mpp *mpoolProvider) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + return mpp.cms.MessagesForTipset(ts) +} + +func (mpp *mpoolProvider) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + return mpp.sm.GetTipSet(ctx, tsk) +} + +func (mpp *mpoolProvider) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (tbig.Int, error) { + baseFee, err := mpp.cms.ComputeBaseFee(ctx, ts, mpp.config.ForkUpgradeParam) + if err != nil { + return tbig.NewInt(0), fmt.Errorf("computing base fee at %s: %v", ts, err) + } + return baseFee, nil +} diff --git a/pkg/messagepool/pruning.go b/pkg/messagepool/pruning.go new file mode 100644 index 0000000000..0960f97ac5 --- /dev/null +++ b/pkg/messagepool/pruning.go @@ -0,0 +1,119 @@ +package messagepool + +import ( + "context" + "errors" + "fmt" + "sort" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +func (mp *MessagePool) pruneExcessMessages() error { + mp.curTSLk.Lock() + ts := mp.curTS + mp.curTSLk.Unlock() + + mp.lk.Lock() + defer mp.lk.Unlock() + + if mp.currentSize < mp.cfg.SizeLimitHigh { + return nil + } + + select { + case <-mp.pruneCooldown: + err := mp.pruneMessages(context.TODO(), ts) + go func() { + time.Sleep(mp.cfg.PruneCooldown) + mp.pruneCooldown <- struct{}{} + }() + return err + default: + return errors.New("cannot prune before cooldown") + } +} + +func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) error { + start := time.Now() + defer func() { + log.Infof("message pruning took %s", time.Since(start)) + }() + + baseFee, err 
:= mp.api.ChainComputeBaseFee(ctx, ts) + if err != nil { + return fmt.Errorf("computing basefee: %v", err) + } + baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) + + pending, _ := mp.getPendingMessages(ctx, ts, ts) + + // protected actors -- not pruned + protected := make(map[address.Address]struct{}) + + // we never prune priority addresses + for _, actor := range mp.cfg.PriorityAddrs { + pk, err := mp.resolveToKey(ctx, actor) + if err != nil { + log.Debugf("pruneMessages failed to resolve priority address: %s", err) + } + + protected[pk] = struct{}{} + } + + // we also never prune locally published messages + mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) { + protected[actor] = struct{}{} + }) + + // Collect all messages to track which ones to remove and create chains for block inclusion + pruneMsgs := make(map[cid.Cid]*types.SignedMessage, mp.currentSize) + keepCount := 0 + + var chains []*msgChain + for actor, mset := range pending { + // we never prune protected actors + _, keep := protected[actor] + if keep { + keepCount += len(mset) + continue + } + + // not a protected actor, track the messages and create chains + for _, m := range mset { + pruneMsgs[m.Message.Cid()] = m + } + actorChains := mp.createMessageChains(ctx, actor, mset, baseFeeLowerBound, ts) + chains = append(chains, actorChains...) 
+ } + + // Sort the chains + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + // Keep messages (remove them from pruneMsgs) from chains while we are under the low water mark + loWaterMark := mp.cfg.SizeLimitLow +keepLoop: + for _, chain := range chains { + for _, m := range chain.msgs { + if keepCount < loWaterMark { + delete(pruneMsgs, m.Message.Cid()) + keepCount++ + } else { + break keepLoop + } + } + } + + // and remove all messages that are still in pruneMsgs after processing the chains + log.Infof("Pruning %d messages", len(pruneMsgs)) + for _, m := range pruneMsgs { + mp.remove(ctx, m.Message.From, m.Message.Nonce, false) + } + + return nil +} diff --git a/pkg/messagepool/repub.go b/pkg/messagepool/repub.go new file mode 100644 index 0000000000..901dc073e0 --- /dev/null +++ b/pkg/messagepool/repub.go @@ -0,0 +1,181 @@ +package messagepool + +import ( + "bytes" + "context" + "fmt" + "sort" + "time" + + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/messagepool/gasguess" + "github.com/filecoin-project/venus/venus-shared/types" +) + +const repubMsgLimit = 30 + +var RepublishBatchDelay = 100 * time.Millisecond + +func (mp *MessagePool) republishPendingMessages(ctx context.Context) error { + mp.curTSLk.Lock() + ts := mp.curTS + + baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) + if err != nil { + mp.curTSLk.Unlock() + return fmt.Errorf("computing basefee: %v", err) + } + baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) + + pending := make(map[address.Address]map[uint64]*types.SignedMessage) + mp.lk.Lock() + mp.republished = nil // clear this to avoid races triggering an early republish + for actor := range mp.localAddrs { + mset, ok := mp.pending[actor] + if !ok { + continue + } + if len(mset.msgs) == 0 { + continue + } + // we need to copy this while holding the lock to 
avoid races with concurrent modification + pend := make(map[uint64]*types.SignedMessage, len(mset.msgs)) + for nonce, m := range mset.msgs { + pend[nonce] = m + } + pending[actor] = pend + } + mp.lk.Unlock() + mp.curTSLk.Unlock() + + if len(pending) == 0 { + return nil + } + + var chains []*msgChain + for actor, mset := range pending { + // We use the baseFee lower bound for createChange so that we optimistically include + // chains that might become profitable in the next 20 blocks. + // We still check the lowerBound condition for individual messages so that we don't send + // messages that will be rejected by the mpool spam protector, so this is safe to do. + next := mp.createMessageChains(ctx, actor, mset, baseFeeLowerBound, ts) + chains = append(chains, next...) + } + + if len(chains) == 0 { + return nil + } + + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + gasLimit := int64(constants.BlockGasLimit) + minGas := int64(gasguess.MinGas) + var msgs []*types.SignedMessage + +LOOP: + for i := 0; i < len(chains); { + chain := chains[i] + + // we can exceed this if we have picked (some) longer chain already + if len(msgs) > repubMsgLimit { + break + } + + // there is not enough gas for any message + if gasLimit <= minGas { + break + } + + // has the chain been invalidated? + if !chain.valid { + i++ + continue + } + + // does it fit in a block? + if chain.gasLimit <= gasLimit { + // check the baseFee lower bound -- only republish messages that can be included in the chain + // within the next 20 blocks. 
+ for _, m := range chain.msgs { + if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) { + chain.Invalidate() + continue LOOP + } + gasLimit -= m.Message.GasLimit + msgs = append(msgs, m) + } + + // we processed the whole chain, advance + i++ + continue + } + + // we can't fit the current chain but there is gas to spare + // trim it and push it down + chain.Trim(gasLimit, repubMsgLimit, mp, baseFee) + for j := i; j < len(chains)-1; j++ { + if chains[j].Before(chains[j+1]) { + break + } + chains[j], chains[j+1] = chains[j+1], chains[j] + } + } + + count := 0 + if len(msgs) > repubMsgLimit { + msgs = msgs[:repubMsgLimit] + } + + log.Infof("republishing %d messages", len(msgs)) + for _, m := range msgs { + buf := new(bytes.Buffer) + err := m.MarshalCBOR(buf) + if err != nil { + return fmt.Errorf("cannot serialize message: %v", err) + } + + err = mp.api.PubSubPublish(ctx, types.MessageTopic(mp.netName), buf.Bytes()) + if err != nil { + return fmt.Errorf("cannot publish: %v", err) + } + + count++ + + if count < len(msgs) { + // this delay is here to encourage the pubsub subsystem to process the messages serially + // and avoid creating nonce gaps because of concurrent validation. 
+ time.Sleep(RepublishBatchDelay) + } + } + + if len(msgs) > 0 { + mp.journal.RecordEvent(mp.evtTypes[evtTypeMpoolRepub], func() interface{} { + msgsEv := make([]MessagePoolEvtMessage, 0, len(msgs)) + for _, m := range msgs { + msgsEv = append(msgsEv, MessagePoolEvtMessage{Message: m.Message, CID: m.Cid()}) + } + return MessagePoolEvt{ + Action: "repub", + Messages: msgsEv, + } + }) + } + + // track most recently republished messages + republished := make(map[cid.Cid]struct{}) + for _, m := range msgs[:count] { + republished[m.Cid()] = struct{}{} + } + + mp.lk.Lock() + // update the republished set so that we can trigger early republish from head changes + mp.republished = republished + mp.lk.Unlock() + + return nil +} diff --git a/pkg/messagepool/repub_test.go b/pkg/messagepool/repub_test.go new file mode 100644 index 0000000000..3a6b4d5b4a --- /dev/null +++ b/pkg/messagepool/repub_test.go @@ -0,0 +1,70 @@ +package messagepool + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-address" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/ipfs/go-datastore" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/messagepool/gasguess" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestRepubMessages(t *testing.T) { + tf.UnitTest(t) + + oldRepublishBatchDelay := RepublishBatchDelay + RepublishBatchDelay = time.Microsecond + defer func() { + RepublishBatchDelay = oldRepublishBatchDelay + }() + + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, nil, ds, config.NewDefaultConfig().NetworkParams, config.DefaultMessagePoolParam, "mptest", nil) + if err != nil { + t.Fatal(err) + } + + // the actors + ctx := context.Background() + w1 := newWallet(t) + a1, err := w1.NewAddress(ctx, address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(ctx, 
address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + _, err := mp.Push(context.TODO(), m) + if err != nil { + t.Fatal(err) + } + } + + if tma.published != 10 { + t.Fatalf("expected to have published 10 messages, but got %d instead", tma.published) + } + + mp.repubTrigger <- struct{}{} + time.Sleep(100 * time.Millisecond) + + if tma.published != 20 { + t.Fatalf("expected to have published 20 messages, but got %d instead", tma.published) + } +} diff --git a/pkg/messagepool/selection.go b/pkg/messagepool/selection.go new file mode 100644 index 0000000000..024b0e55e8 --- /dev/null +++ b/pkg/messagepool/selection.go @@ -0,0 +1,1070 @@ +package messagepool + +import ( + "context" + "fmt" + "math/big" + "math/rand" + "sort" + "time" + + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + tbig "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/messagepool/gasguess" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var bigBlockGasLimit = big.NewInt(constants.BlockGasLimit) + +const MaxBlocks = 15 + +type msgChain struct { + msgs []*types.SignedMessage + gasReward *big.Int + gasLimit int64 + gasPerf float64 + effPerf float64 + bp float64 + parentOffset float64 + valid bool + merged bool + next *msgChain + prev *msgChain + sigType crypto.SigType +} + +func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { + mp.curTSLk.Lock() + defer mp.curTSLk.Unlock() + + mp.lk.Lock() + defer mp.lk.Unlock() + + // Load messages for the target tipset; if it is the same as the current tipset in the mpool 
+ // then this is just the pending messages + pending, err := mp.getPendingMessages(ctx, mp.curTS, ts) + if err != nil { + return nil, err + } + // if the ticket quality is high enough that the first block has higher probability + // than any other block, then we don't bother with optimal selection because the + // first block will always have higher effective performance + var sm *selectedMessages + if tq > 0.84 { + sm, err = mp.selectMessagesGreedy(ctx, mp.curTS, ts, pending) + } else { + sm, err = mp.selectMessagesOptimal(ctx, mp.curTS, ts, tq, pending) + } + + if err != nil { + return nil, err + } + + if sm == nil { + return nil, nil + } + + // one last sanity check + if len(sm.msgs) > constants.BlockMessageLimit { + log.Errorf("message selection chose too many messages %d > %d", len(sm.msgs), constants.BlockMessageLimit) + sm.msgs = sm.msgs[:constants.BlockMessageLimit] + } + + return sm.msgs, nil +} + +type selectedMessages struct { + msgs []*types.SignedMessage + gasLimit int64 + secpLimit int + blsLimit int +} + +// returns false if chain can't be added due to block constraints +func (sm *selectedMessages) tryToAdd(mc *msgChain) bool { + l := len(mc.msgs) + + if constants.BlockMessageLimit < l+len(sm.msgs) || sm.gasLimit < mc.gasLimit { + return false + } + + if mc.sigType == crypto.SigTypeBLS { + if sm.blsLimit < l { + return false + } + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.blsLimit -= l + sm.gasLimit -= mc.gasLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + if sm.secpLimit < l { + return false + } + + sm.msgs = append(sm.msgs, mc.msgs...) 
+ sm.secpLimit -= l + sm.gasLimit -= mc.gasLimit + } + + // don't add the weird sigType msg, but otherwise proceed + return true +} + +// returns false if messages can't be added due to block constraints +// will trim / invalidate chain as appropriate +func (sm *selectedMessages) tryToAddWithDeps(mc *msgChain, mp *MessagePool, baseFee types.BigInt) bool { + // compute the dependencies that must be merged and the gas limit including deps + chainGasLimit := mc.gasLimit + chainMsgLimit := len(mc.msgs) + depGasLimit := int64(0) + depMsgLimit := 0 + smMsgLimit := 0 + + if mc.sigType == crypto.SigTypeBLS { + smMsgLimit = sm.blsLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + smMsgLimit = sm.secpLimit + } else { + return false + } + + if smMsgLimit > constants.BlockMessageLimit-len(sm.msgs) { + smMsgLimit = constants.BlockMessageLimit - len(sm.msgs) + } + + var chainDeps []*msgChain + for curChain := mc.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { + chainDeps = append(chainDeps, curChain) + chainGasLimit += curChain.gasLimit + chainMsgLimit += len(curChain.msgs) + depGasLimit += curChain.gasLimit + depMsgLimit += len(curChain.msgs) + } + + // the chain doesn't fit as-is, so trim / invalidate it and return false + if chainGasLimit > sm.gasLimit || chainMsgLimit > smMsgLimit { + + // it doesn't all fit; now we have to take into account the dependent chains before + // making a decision about trimming or invalidating. + // if the dependencies exceed the block limits, then we must invalidate the chain + // as it can never be included. + // Otherwise we can just trim and continue + if depGasLimit > sm.gasLimit || depMsgLimit >= smMsgLimit { + mc.Invalidate() + } else { + // dependencies fit, just trim it + mc.Trim(sm.gasLimit-depGasLimit, smMsgLimit-depMsgLimit, mp, baseFee) + } + + return false + } + + // the chain fits! 
include it together with all dependencies + for i := len(chainDeps) - 1; i >= 0; i-- { + curChain := chainDeps[i] + curChain.merged = true + sm.msgs = append(sm.msgs, curChain.msgs...) + } + + mc.merged = true + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.gasLimit -= chainGasLimit + + if mc.sigType == crypto.SigTypeBLS { + sm.blsLimit -= chainMsgLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + sm.secpLimit -= chainMsgLimit + } + + return true +} + +func (sm *selectedMessages) trimChain(mc *msgChain, mp *MessagePool, baseFee types.BigInt) { + msgLimit := constants.BlockMessageLimit - len(sm.msgs) + if mc.sigType == crypto.SigTypeBLS { + if msgLimit > sm.blsLimit { + msgLimit = sm.blsLimit + } + } else if mc.sigType == crypto.SigTypeSecp256k1 { + if msgLimit > sm.secpLimit { + msgLimit = sm.secpLimit + } + } + + if mc.gasLimit > sm.gasLimit || len(mc.msgs) > msgLimit { + mc.Trim(sm.gasLimit, msgLimit, mp, baseFee) + } +} + +func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTS, ts *types.TipSet, tq float64, pending map[address.Address]map[uint64]*types.SignedMessage) (*selectedMessages, error) { + start := time.Now() + + baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) + if err != nil { + return nil, fmt.Errorf("computing basefee: %w", err) + } + + if len(pending) == 0 { + return nil, nil + } + + // defer only here so if we have no pending messages we don't spam + defer func() { + log.Infow("message selection done", "took", time.Since(start)) + }() + + // 0b. Select all priority messages that fit in the block + minGas := int64(gasguess.MinGas) + result := mp.selectPriorityMessages(ctx, pending, baseFee, ts) + + // have we filled the block? + if result.gasLimit < minGas || len(result.msgs) >= constants.BlockMessageLimit { + return result, nil + } + + // 1. 
Create a list of dependent message chains with maximal gas reward per limit consumed + startChains := time.Now() + var chains []*msgChain + for actor, mset := range pending { + next := mp.createMessageChains(ctx, actor, mset, baseFee, ts) + chains = append(chains, next...) + } + if dt := time.Since(startChains); dt > time.Millisecond { + log.Infow("create message chains done", "took", dt) + } + + // 2. Sort the chains + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + if len(chains) != 0 && chains[0].gasPerf < 0 { + log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf) + return result, nil + } + + // 3. Partition chains into blocks (without trimming) + // we use the full blockGasLimit (as opposed to the residual gas limit from the + // priority message selection) as we have to account for what other block providers are doing + nextChain := 0 + partitions := make([][]*msgChain, MaxBlocks) + for i := 0; i < MaxBlocks && nextChain < len(chains); i++ { + gasLimit := int64(constants.BlockGasLimit) + msgLimit := constants.BlockMessageLimit + for nextChain < len(chains) { + chain := chains[nextChain] + nextChain++ + partitions[i] = append(partitions[i], chain) + gasLimit -= chain.gasLimit + msgLimit -= len(chain.msgs) + if gasLimit < minGas || msgLimit <= 0 { + break + } + } + + } + + // 4. Compute effective performance for each chain, based on the partition they fall into + // The effective performance is the gasPerf of the chain * block probability + blockProb := mp.blockProbabilities(tq) + effChains := 0 + for i := 0; i < MaxBlocks; i++ { + for _, chain := range partitions[i] { + chain.SetEffectivePerf(blockProb[i]) + } + effChains += len(partitions[i]) + } + + // nullify the effective performance of chains that don't fit in any partition + for _, chain := range chains[effChains:] { + chain.SetNullEffectivePerf() + } + + // 5. 
Resort the chains based on effective performance + sort.Slice(chains, func(i, j int) bool { + return chains[i].BeforeEffective(chains[j]) + }) + + // 6. Merge the head chains to produce the list of messages selected for inclusion + // subject to the residual block limits + // When a chain is merged in, all its previous dependent chains *must* also be + // merged in or we'll have a broken block + startMerge := time.Now() + last := len(chains) + for i, chain := range chains { + // did we run out of performing chains? + if chain.gasPerf < 0 { + break + } + + // has it already been merged? + if chain.merged { + continue + } + + if result.tryToAddWithDeps(chain, mp, baseFee) { + // adjust the effective performance for all subsequent chains + if next := chain.next; next != nil && next.effPerf > 0 { + next.effPerf += next.parentOffset + for next = next.next; next != nil && next.effPerf > 0; next = next.next { + next.setEffPerf() + } + } + + // re-sort to account for already merged chains and effective performance adjustments + // the sort *must* be stable or we end up getting negative gasPerfs pushed up. + sort.SliceStable(chains[i+1:], func(i, j int) bool { + return chains[i].BeforeEffective(chains[j]) + }) + + continue + } + + // we can't fit this chain and its dependencies because of block limits -- we are + // at the edge + last = i + break + } + if dt := time.Since(startMerge); dt > time.Millisecond { + log.Infow("merge message chains done", "took", dt) + } + + // 7. We have reached the edge of what can fit wholesale; if we still hae available + // gasLimit to pack some more chains, then trim the last chain and push it down. + // Trimming invalidates subsequent dependent chains so that they can't be selected + // as their dependency cannot be (fully) included. 
+ // We do this in a loop because the blocker might have been inordinately large and + // we might have to do it multiple times to satisfy tail packing + startTail := time.Now() +tailLoop: + for result.gasLimit >= minGas && last < len(chains) { + + if !chains[last].valid { + last++ + continue tailLoop + } + + // trim if necessary + result.trimChain(chains[last], mp, baseFee) + + // push down if it hasn't been invalidated + if chains[last].valid { + for i := last; i < len(chains)-1; i++ { + if chains[i].BeforeEffective(chains[i+1]) { + break + } + chains[i], chains[i+1] = chains[i+1], chains[i] + } + } + + // select the next (valid and fitting) chain and its dependencies for inclusion + for _, chain := range chains[last:] { + // has the chain been invalidated? + if !chain.valid { + continue + } + + // has it already been merged? + if chain.merged { + continue + } + + // if gasPerf < 0 we have no more profitable chains + if chain.gasPerf < 0 { + break tailLoop + } + + if result.tryToAddWithDeps(chain, mp, baseFee) { + continue + } + + continue tailLoop + } + + // the merge loop ended after processing all the chains and we we probably have still + // gas to spare; end the loop. + break + } + if dt := time.Since(startTail); dt > time.Millisecond { + log.Infow("pack tail chains done", "took", dt) + } + + // if we have room to spare, pick some random (non-negative) chains to fill the block + // we pick randomly so that we minimize the probability of duplication among all block producers + if result.gasLimit >= minGas && len(result.msgs) <= constants.BlockMessageLimit { + preRandomLength := len(result.msgs) + + startRandom := time.Now() + shuffleChains(chains) + + for _, chain := range chains { + // have we filled the block + if result.gasLimit < minGas || len(result.msgs) >= constants.BlockMessageLimit { + break + } + + // has it been merged or invalidated? + if chain.merged || !chain.valid { + continue + } + + // is it negative? 
+ if chain.gasPerf < 0 { + continue + } + + if result.tryToAddWithDeps(chain, mp, baseFee) { + continue + } + + if chain.valid { + // chain got trimmed on the previous call to tryToAddWithDeps, can now be included + result.tryToAddWithDeps(chain, mp, baseFee) + continue + } + } + + if dt := time.Since(startRandom); dt > time.Millisecond { + log.Infow("pack random tail chains done", "took", dt) + } + + if len(result.msgs) != preRandomLength { + log.Warnf("optimal selection failed to pack a block; picked %d messages with random selection", + len(result.msgs)-preRandomLength) + } + } + + return result, nil +} + +func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTS, ts *types.TipSet, pending map[address.Address]map[uint64]*types.SignedMessage) (*selectedMessages, error) { + start := time.Now() + + baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) + if err != nil { + return nil, fmt.Errorf("computing basefee: %w", err) + } + + if len(pending) == 0 { + return nil, nil + } + + // defer only here so if we have no pending messages we don't spam + defer func() { + log.Infow("message selection done", "took", time.Since(start)) + }() + + // 0b. Select all priority messages that fit in the block + minGas := int64(gasguess.MinGas) + result := mp.selectPriorityMessages(ctx, pending, baseFee, ts) + + // have we filled the block? + if result.gasLimit < minGas || len(result.msgs) > constants.BlockMessageLimit { + return result, nil + } + + // 1. Create a list of dependent message chains with maximal gas reward per limit consumed + startChains := time.Now() + var chains []*msgChain + for actor, mset := range pending { + next := mp.createMessageChains(ctx, actor, mset, baseFee, ts) + chains = append(chains, next...) + } + if dt := time.Since(startChains); dt > time.Millisecond { + log.Infow("create message chains done", "took", dt) + } + + // 2. 
Sort the chains + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + if len(chains) != 0 && chains[0].gasPerf < 0 { + log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf) + return result, nil + } + + // 3. Merge the head chains to produce the list of messages selected for inclusion, subject to + // the block gas and message limits. + startMerge := time.Now() + last := len(chains) + for i, chain := range chains { + // did we run out of performing chains? + if chain.gasPerf < 0 { + break + } + + // does it fit in the block? + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going + continue + } + + // we can't fit this chain because of block limits -- we are at the edge + last = i + break + } + if dt := time.Since(startMerge); dt > time.Millisecond { + log.Infow("merge message chains done", "took", dt) + } + + // 4. We have reached the edge of what we can fit wholesale; if we still have available gasLimit + // to pack some more chains, then trim the last chain and push it down. + // Trimming invalidates subsequent dependent chains so that they can't be selected as their + // dependency cannot be (fully) included. + // We do this in a loop because the blocker might have been inordinately large and we might + // have to do it multiple times to satisfy tail packing. + startTail := time.Now() +tailLoop: + for result.gasLimit >= minGas && last < len(chains) { + // trim + result.trimChain(chains[last], mp, baseFee) + + // push down if it hasn't been invalidated + if chains[last].valid { + for i := last; i < len(chains)-1; i++ { + if chains[i].Before(chains[i+1]) { + break + } + chains[i], chains[i+1] = chains[i+1], chains[i] + } + } + + // select the next (valid and fitting) chain for inclusion + for i, chain := range chains[last:] { + // has the chain been invalidated? 
+ if !chain.valid { + continue + } + + // if gasPerf < 0 we have no more profitable chains + if chain.gasPerf < 0 { + break tailLoop + } + + // does it fit in the bock? + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going + continue + } + + // this chain needs to be trimmed + last += i + continue tailLoop + } + + // the merge loop ended after processing all the chains and we probably still have + // gas to spare; end the loop + break + } + if dt := time.Since(startTail); dt > time.Millisecond { + log.Infow("pack tail chains done", "took", dt) + } + + return result, nil +} + +func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *selectedMessages { + start := time.Now() + defer func() { + if dt := time.Since(start); dt > time.Millisecond { + log.Infow("select priority messages done", "took", dt) + } + }() + mpCfg := mp.cfg + result := &selectedMessages{ + msgs: make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow), + gasLimit: int64(constants.BlockGasLimit), + blsLimit: cbg.MaxLength, + secpLimit: cbg.MaxLength, + } + minGas := int64(gasguess.MinGas) + + // 1. Get priority actor chains + var chains []*msgChain + priority := mpCfg.PriorityAddrs + for _, actor := range priority { + pk, err := mp.resolveToKey(ctx, actor) + if err != nil { + log.Debugf("mpooladdlocal failed to resolve sender: %s", err) + return result + } + + mset, ok := pending[pk] + if ok { + // remove actor from pending set as we are already processed these messages + delete(pending, pk) + // create chains for the priority actor + next := mp.createMessageChains(ctx, actor, mset, baseFee, ts) + chains = append(chains, next...) + } + } + if len(chains) == 0 { + return result + } + + // 2. 
Sort the chains + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + if len(chains) != 0 && chains[0].gasPerf < 0 { + log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf) + return result + } + + // 3. Merge chains until the block limit, as long as they have non-negative gas performance + last := len(chains) + for i, chain := range chains { + if chain.gasPerf < 0 { + break + } + + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going + continue + } + + // we can't fit this chain because of block gasLimit -- we are at the edge + last = i + break + } + +tailLoop: + for result.gasLimit >= minGas && last < len(chains) { + // trim, discarding negative performing messages + + result.trimChain(chains[last], mp, baseFee) + + // push down if it hasn't been invalidated + if chains[last].valid { + for i := last; i < len(chains)-1; i++ { + if chains[i].Before(chains[i+1]) { + break + } + chains[i], chains[i+1] = chains[i+1], chains[i] + } + } + + // select the next (valid and fitting) chain for inclusion + for i, chain := range chains[last:] { + // has the chain been invalidated + if !chain.valid { + continue + } + + // if gasPerf < 0 we have no more profitable chains + if chain.gasPerf < 0 { + break tailLoop + } + + // does it fit in the bock? 
+ if result.tryToAdd(chain) { + // there was room, we added the chain, keep going + continue + } + + // this chain needs to be trimmed + last += i + continue tailLoop + } + + // the merge loop ended after processing all the chains and we probably still have gas to spare; + // end the loop + break + } + + return result +} + +func (mp *MessagePool) getPendingMessages(ctx context.Context, curTS, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) { + start := time.Now() + + result := make(map[address.Address]map[uint64]*types.SignedMessage) + defer func() { + if dt := time.Since(start); dt > time.Millisecond { + log.Infow("get pending messages done", "took", dt) + } + }() + + // are we in sync? + inSync := false + if curTS.Height() == ts.Height() && curTS.Equals(ts) { + inSync = true + } + + mp.forEachPending(func(a address.Address, mset *msgSet) { + if inSync { + // no need to copy the map + result[a] = mset.msgs + } else { + // we need to copy the map to avoid clobbering it as we load more messages + msetCopy := make(map[uint64]*types.SignedMessage, len(mset.msgs)) + for nonce, m := range mset.msgs { + msetCopy[nonce] = m + } + result[a] = msetCopy + + } + }) + + // we are in sync, that's the happy path + if inSync { + return result, nil + } + + if err := mp.runHeadChange(ctx, curTS, ts, result); err != nil { + return nil, fmt.Errorf("failed to process difference between mpool head and given head: %w", err) + } + + return result, nil +} + +func (*MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt) *big.Int { + maxPremium := types.BigSub(msg.Message.GasFeeCap, baseFee) + + if types.BigCmp(maxPremium, msg.Message.GasPremium) > 0 { + maxPremium = msg.Message.GasPremium + } + + gasReward := tbig.Mul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit))) + if gasReward.Sign() == -1 { + // penalty multiplier + gasReward = tbig.Mul(gasReward, types.NewInt(3)) + } + return gasReward.Int +} + +func (*MessagePool) 
getGasPerf(gasReward *big.Int, gasLimit int64) float64 { + // gasPerf = gasReward * constants.BlockGasLimit / gasLimit + a := new(big.Rat).SetInt(new(big.Int).Mul(gasReward, bigBlockGasLimit)) + b := big.NewRat(1, gasLimit) + c := new(big.Rat).Mul(a, b) + r, _ := c.Float64() + return r +} + +func (mp *MessagePool) createMessageChains(ctx context.Context, actor address.Address, mset map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) []*msgChain { + // collect all messages + msgs := make([]*types.SignedMessage, 0, len(mset)) + for _, m := range mset { + msgs = append(msgs, m) + } + + // sort by nonce + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Message.Nonce < msgs[j].Message.Nonce + }) + + // sanity checks: + // - there can be no gaps in nonces, starting from the current actor nonce + // if there is a gap, drop messages after the gap, we can't include them + // - all messages must have minimum gas and the total gas for the candidate messages + // cannot exceed the block limit; drop all messages that exceed the limit + // - the total gasReward cannot exceed the actor's balance; drop all messages that exceed + // the balance + a, err := mp.api.GetActorAfter(ctx, actor, ts) + if err != nil { + log.Errorf("failed to load actor state, not building chain for %s: %v", actor, err) + return nil + } + + curNonce := a.Nonce + balance := a.Balance.Int + gasLimit := int64(0) + skip := 0 + i := 0 + rewards := make([]*big.Int, 0, len(msgs)) + for i = 0; i < len(msgs); i++ { + m := msgs[i] + + if m.Message.Nonce < curNonce { + log.Warnf("encountered message from actor %s with nonce (%d) less than the current nonce (%d)", + actor, m.Message.Nonce, curNonce) + skip++ + continue + } + + if m.Message.Nonce != curNonce { + break + } + curNonce++ + + minGas := mp.gasPriceSchedule.PricelistByEpoch(ts.Height()).OnChainMessage(m.ChainLength()).Total() + if m.Message.GasLimit < minGas { + break + } + + gasLimit += m.Message.GasLimit + if gasLimit > 
constants.BlockGasLimit { + break + } + + required := m.Message.RequiredFunds().Int + if balance.Cmp(required) < 0 { + break + } + + balance = new(big.Int).Sub(balance, required) + + value := m.Message.Value.Int + balance = new(big.Int).Sub(balance, value) + + gasReward := mp.getGasReward(m, baseFee) + rewards = append(rewards, gasReward) + } + + // check we have a sane set of messages to construct the chains + if i > skip { + msgs = msgs[skip:i] + } else { + return nil + } + + // if we have more messages from this sender than can fit in a block, drop the extra ones + if len(msgs) > constants.BlockMessageLimit { + msgs = msgs[:constants.BlockMessageLimit] + } + + // ok, now we can construct the chains using the messages we have + // invariant: each chain has a bigger gasPerf than the next -- otherwise they can be merged + // and increase the gasPerf of the first chain + // We do this in two passes: + // - in the first pass we create chains that aggregate messages with non-decreasing gasPerf + // - in the second pass we merge chains to maintain the invariant. 
+ var chains []*msgChain + var curChain *msgChain + + newChain := func(m *types.SignedMessage, i int) *msgChain { + chain := new(msgChain) + chain.msgs = []*types.SignedMessage{m} + chain.gasReward = rewards[i] + chain.gasLimit = m.Message.GasLimit + chain.gasPerf = mp.getGasPerf(chain.gasReward, chain.gasLimit) + chain.valid = true + chain.sigType = m.Signature.Type + return chain + } + + // create the individual chains + for i, m := range msgs { + if curChain == nil { + curChain = newChain(m, i) + continue + } + + gasReward := new(big.Int).Add(curChain.gasReward, rewards[i]) + gasLimit := curChain.gasLimit + m.Message.GasLimit + gasPerf := mp.getGasPerf(gasReward, gasLimit) + + // try to add the message to the current chain -- if it decreases the gasPerf, or then make a + // new chain + if gasPerf < curChain.gasPerf { + chains = append(chains, curChain) + curChain = newChain(m, i) + } else { + curChain.msgs = append(curChain.msgs, m) + curChain.gasReward = gasReward + curChain.gasLimit = gasLimit + curChain.gasPerf = gasPerf + } + } + chains = append(chains, curChain) + + // merge chains to maintain the invariant + for { + merged := 0 + + for i := len(chains) - 1; i > 0; i-- { + if chains[i].gasPerf >= chains[i-1].gasPerf { + chains[i-1].msgs = append(chains[i-1].msgs, chains[i].msgs...) 
+ chains[i-1].gasReward = new(big.Int).Add(chains[i-1].gasReward, chains[i].gasReward) + chains[i-1].gasLimit += chains[i].gasLimit + chains[i-1].gasPerf = mp.getGasPerf(chains[i-1].gasReward, chains[i-1].gasLimit) + chains[i].valid = false + merged++ + } + } + + if merged == 0 { + break + } + + // drop invalidated chains + newChains := make([]*msgChain, 0, len(chains)-merged) + for _, c := range chains { + if c.valid { + newChains = append(newChains, c) + } + } + chains = newChains + } + + // link dependent chains + for i := 0; i < len(chains)-1; i++ { + chains[i].next = chains[i+1] + } + + for i := len(chains) - 1; i > 0; i-- { + chains[i].prev = chains[i-1] + } + + return chains +} + +func (mc *msgChain) Before(other *msgChain) bool { + return mc.gasPerf > other.gasPerf || + (mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0) +} + +func (mc *msgChain) Trim(gasLimit int64, msgLimit int, mp *MessagePool, baseFee types.BigInt) { + i := len(mc.msgs) - 1 + for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0 || i >= msgLimit) { + gasReward := mp.getGasReward(mc.msgs[i], baseFee) + mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward) + mc.gasLimit -= mc.msgs[i].Message.GasLimit + if mc.gasLimit > 0 { + mc.gasPerf = mp.getGasPerf(mc.gasReward, mc.gasLimit) + if mc.bp != 0 { + mc.setEffPerf() + } + } else { + mc.gasPerf = 0 + mc.effPerf = 0 + } + i-- + } + + if i < 0 { + mc.msgs = nil + mc.valid = false + } else { + mc.msgs = mc.msgs[:i+1] + } + + // TODO: if the trim above is a no-op, this (may) needlessly invalidates the next chain + if mc.next != nil { + mc.next.Invalidate() + mc.next = nil + } +} + +func (mc *msgChain) Invalidate() { + mc.valid = false + mc.msgs = nil + if mc.next != nil { + mc.next.Invalidate() + mc.next = nil + } +} + +func (mc *msgChain) SetEffectivePerf(bp float64) { + mc.bp = bp + mc.setEffPerf() +} + +func (mc *msgChain) setEffPerf() { + effPerf := mc.gasPerf * mc.bp + if effPerf > 0 && mc.prev != nil { + 
effPerfWithParent := (effPerf*float64(mc.gasLimit) + mc.prev.effPerf*float64(mc.prev.gasLimit)) / float64(mc.gasLimit+mc.prev.gasLimit) + mc.parentOffset = effPerf - effPerfWithParent + effPerf = effPerfWithParent + } + mc.effPerf = effPerf +} + +func (mc *msgChain) SetNullEffectivePerf() { + if mc.gasPerf < 0 { + mc.effPerf = mc.gasPerf + } else { + mc.effPerf = 0 + } +} + +func (mc *msgChain) BeforeEffective(other *msgChain) bool { + // move merged chains to the front so we can discard them earlier + return (mc.merged && !other.merged) || + (mc.gasPerf >= 0 && other.gasPerf < 0) || + mc.effPerf > other.effPerf || + (mc.effPerf == other.effPerf && mc.gasPerf > other.gasPerf) || + (mc.effPerf == other.effPerf && mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0) +} + +func shuffleChains(lst []*msgChain) { + for i := range lst { + j := rand.Intn(i + 1) + lst[i], lst[j] = lst[j], lst[i] + } +} + +func deleteSelectedMessages(pending map[address.Address]map[uint64]*types.SignedMessage, msgs []*types.SignedMessage) map[address.Address]map[uint64]*types.SignedMessage { + // messages from the same wallet cannot be scattered in multiple blocks in a cycle, eg b1{nonce: 20~30}, b2{nonce: 31~40} + for _, msg := range msgs { + delete(pending, msg.Message.From) + } + + return pending +} + +// select the message multiple times and try not to repeat it each time +func (mp *MessagePool) MultipleSelectMessages(ctx context.Context, ts *types.TipSet, tqs []float64) (msgss [][]*types.SignedMessage, err error) { + mp.curTSLk.Lock() + defer mp.curTSLk.Unlock() + + mp.lk.Lock() + defer mp.lk.Unlock() + + // Load messages for the target tipset; if it is the same as the current tipset in the mpool + // then this is just the pending messages + pending, err := mp.getPendingMessages(ctx, mp.curTS, ts) + if err != nil { + return nil, err + } + + msgss = make([][]*types.SignedMessage, len(tqs)) + + for idx, tq := range tqs { + if len(pending) == 0 { + break + } + + var selMsg 
*selectedMessages + if tq > 0.84 { + selMsg, err = mp.multiSelectMessagesGreedy(ctx, mp.curTS, ts, tq, pending) + } else { + selMsg, err = mp.multiSelectMessagesOptimal(ctx, mp.curTS, ts, tq, pending) + } + + if err != nil { + return nil, err + } + + msgss[idx] = selMsg.msgs + + // delete the selected message from pending + pending = deleteSelectedMessages(pending, selMsg.msgs) + } + + // if no message is selected for a block, msgss[0] is filled by default + for i := 1; i < len(msgss); i++ { + if len(msgss[i]) == 0 { + msgss[i] = msgss[0] + } + } + + return msgss, nil +} + +func (mp *MessagePool) multiSelectMessagesGreedy(ctx context.Context, curTS, ts *types.TipSet, tq float64, pending map[address.Address]map[uint64]*types.SignedMessage) (*selectedMessages, error) { + return mp.selectMessagesGreedy(ctx, curTS, ts, pending) +} + +func (mp *MessagePool) multiSelectMessagesOptimal(ctx context.Context, curTS, ts *types.TipSet, tq float64, pending map[address.Address]map[uint64]*types.SignedMessage) (*selectedMessages, error) { + return mp.selectMessagesOptimal(ctx, curTS, ts, tq, pending) +} diff --git a/pkg/messagepool/selection_test.go b/pkg/messagepool/selection_test.go new file mode 100644 index 0000000000..80bdcb7137 --- /dev/null +++ b/pkg/messagepool/selection_test.go @@ -0,0 +1,1606 @@ +package messagepool + +import ( + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "os" + "sort" + "testing" + + "github.com/stretchr/testify/require" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + tbig "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/constants" + _ 
"github.com/filecoin-project/venus/pkg/crypto/secp" + "github.com/filecoin-project/venus/pkg/messagepool/gasguess" + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/filecoin-project/venus/venus-shared/types" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func init() { + // bump this for the selection tests + MaxActorPendingMessages = 1000000 +} + +const UpgradeBreezeHeight = 41280 + +func makeTestMessage(w *wallet.Wallet, from, to address.Address, nonce uint64, gasLimit int64, gasPrice uint64) *types.SignedMessage { + msg := &types.Message{ + From: from, + To: to, + Method: 2, + Value: types.FromFil(0), + Nonce: nonce, + GasLimit: gasLimit, + GasFeeCap: tbig.NewInt(int64(100) + int64(gasPrice)), + GasPremium: tbig.NewInt(int64(gasPrice)), + } + + c := msg.Cid() + sig, err := w.WalletSign(context.Background(), from, c.Bytes(), types.MsgMeta{}) + if err != nil { + panic(err) + } + return &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } +} + +func makeTestMpool() (*MessagePool, *testMpoolAPI) { + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + mp, err := New(context.Background(), tma, nil, ds, config.NewDefaultConfig().NetworkParams, config.DefaultMessagePoolParam, "test", nil) + if err != nil { + panic(err) + } + + return mp, tma +} + +func TestMessageChains(t *testing.T) { + tf.UnitTest(t) + + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + + // test chain aggregations + + // test1: 10 messages from a1 to a2, with increasing gasPerf; it should + // make a single chain 
with 10 messages given enough balance + mset := make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + mset[uint64(i)] = m + } + baseFee := tbig.NewInt(0) + + chains := mp.createMessageChains(context.Background(), a1, mset, baseFee, ts) + if len(chains) != 1 { + t.Fatal("expected a single chain") + } + if len(chains[0].msgs) != 10 { + t.Fatalf("expected 10 messages in the chain but got %d", len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test2 : 10 messages from a1 to a2, with decreasing gasPerf; it should + // make 10 chains with 1 message each + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(10-i)) + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(context.Background(), a1, mset, baseFee, ts) + if len(chains) != 10 { + t.Fatal("expected 10 chains") + } + for i, chain := range chains { + if len(chain.msgs) != 1 { + t.Fatalf("expected 1 message in chain %d but got %d", i, len(chain.msgs)) + } + } + for i, chain := range chains { + m := chain.msgs[0] + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test3a: 10 messages from a1 to a2, with gasPerf increasing in groups of 3; it should + // merge them in two chains, one with 9 messages and one with the last message + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3)) + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(context.Background(), a1, mset, baseFee, ts) + if len(chains) != 2 { + t.Fatal("expected 1 chain") + } + + if len(chains[0].msgs) != 9 { + t.Fatalf("expected 9 messages in the chain but got %d", len(chains[0].msgs)) + } + if 
len(chains[1].msgs) != 1 { + t.Fatalf("expected 1 messages in the chain but got %d", len(chains[1].msgs)) + } + nextNonce := 0 + for _, chain := range chains { + for _, m := range chain.msgs { + if m.Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + } + + // test3b: 10 messages from a1 to a2, with gasPerf decreasing in groups of 3 with a bias for the + // earlier chains; it should make 4 chains, the first 3 with 3 messages and the last with + // a single message + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + bias := (12 - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(context.Background(), a1, mset, baseFee, ts) + if len(chains) != 4 { + t.Fatal("expected 4 chains") + } + for i, chain := range chains { + expectedLen := 3 + if i > 2 { + expectedLen = 1 + } + if len(chain.msgs) != expectedLen { + t.Fatalf("expected %d message in chain %d but got %d", expectedLen, i, len(chain.msgs)) + } + } + nextNonce = 0 + for _, chain := range chains { + for _, m := range chain.msgs { + if m.Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + } + + // test chain breaks + + // test4: 10 messages with non-consecutive nonces; it should make a single chain with just + // the first message + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i*2), gasLimit, uint64(i+1)) + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(context.Background(), a1, mset, baseFee, ts) + if len(chains) != 1 { + t.Fatal("expected a single chain") + } + if len(chains[0].msgs) != 1 { + t.Fatalf("expected 1 message in the chain but got %d", len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { + if m.Message.Nonce != uint64(i) { + 
t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test5: 10 messages with increasing gasLimit, except for the 6th message which has less than + // the epoch gasLimit; it should create a single chain with the first 5 messages + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + var m *types.SignedMessage + if i != 5 { + m = makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + } else { + m = makeTestMessage(w1, a1, a2, uint64(i), 1, uint64(i+1)) + } + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(context.Background(), a1, mset, baseFee, ts) + if len(chains) != 1 { + t.Fatal("expected a single chain") + } + if len(chains[0].msgs) != 5 { + t.Fatalf("expected 5 message in the chain but got %d", len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test6: one more message than what can fit in a block according to gas limit, with increasing + // gasPerf; it should create a single chain with the max messages + maxMessages := int(constants.BlockGasLimit / gasLimit) + nMessages := maxMessages + 1 + + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < nMessages; i++ { + mset[uint64(i)] = makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + } + + chains = mp.createMessageChains(context.Background(), a1, mset, baseFee, ts) + if len(chains) != 1 { + t.Fatal("expected a single chain") + } + if len(chains[0].msgs) != maxMessages { + t.Fatalf("expected %d message in the chain but got %d", maxMessages, len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test5: insufficient balance for all messages + tma.setBalanceRaw(a1, tbig.NewInt(300*gasLimit+1)) + + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + mset[uint64(i)] = 
makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + } + + chains = mp.createMessageChains(context.Background(), a1, mset, baseFee, ts) + if len(chains) != 1 { + t.Fatalf("expected a single chain: got %d", len(chains)) + } + if len(chains[0].msgs) != 2 { + t.Fatalf("expected %d message in the chain but got %d", 2, len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } +} + +func TestMessageChainSkipping(t *testing.T) { + tf.UnitTest(t) + + // regression test for chain skip bug + + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + baseFee := tbig.NewInt(0) + + tma.setBalance(a1, 1) // in FIL + tma.setStateNonce(a1, 10) + + mset := make(map[uint64]*types.SignedMessage) + for i := 0; i < 20; i++ { + bias := (20 - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mset[uint64(i)] = m + } + + chains := mp.createMessageChains(context.Background(), a1, mset, baseFee, ts) + if len(chains) != 4 { + t.Fatalf("expected 4 chains, got %d", len(chains)) + } + for i, chain := range chains { + var expectedLen int + switch { + case i == 0: + expectedLen = 2 + case i > 2: + expectedLen = 2 + default: + expectedLen = 3 + } + if len(chain.msgs) != expectedLen { + t.Fatalf("expected %d message in chain %d but got %d", expectedLen, i, len(chain.msgs)) + } + } + nextNonce := 10 + for _, chain := range chains { + for _, m := range chain.msgs { + if m.Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d but got %d", nextNonce, 
m.Message.Nonce) + } + nextNonce++ + } + } +} + +func TestBasicMessageSelection(t *testing.T) { + tf.UnitTest(t) + + oldMaxNonceGap := MaxNonceGap + MaxNonceGap = 1000 + defer func() { + MaxNonceGap = oldMaxNonceGap + }() + + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // we create 10 messages from each actor to another, with the first actor paying higher + // gas prices than the second; we expect message selection to order his messages first + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+1)) + mustAdd(t, mp, m) + } + + for i := 0; i < 10; i++ { + m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + if len(msgs) != 20 { + t.Fatalf("exptected 20 messages, got %d", len(msgs)) + } + + nextNonce := 0 + for i := 0; i < 10; i++ { + if msgs[i].Message.From != a1 { + t.Fatalf("expected message from actor a1") + } + if msgs[i].Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d, got %d", msgs[i].Message.Nonce, nextNonce) + } + nextNonce++ + } + + nextNonce = 0 + for i := 10; i < 20; i++ { + if msgs[i].Message.From != a2 { + t.Fatalf("expected message from actor a2") + } + if msgs[i].Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d, got %d", msgs[i].Message.Nonce, nextNonce) + } + nextNonce++ + } + + // now we make a block with all the messages and advance 
the chain + block2 := tma.nextBlock() + tma.setBlockMessages(block2, msgs...) + tma.applyBlock(t, block2) + + // we should have no pending messages in the mpool + pend, _ := mp.Pending(context.TODO()) + if len(pend) != 0 { + t.Fatalf("expected no pending messages, but got %d", len(pend)) + } + + // create a block and advance the chain without applying to the mpool + msgs = nil + for i := 10; i < 20; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+1)) + msgs = append(msgs, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) + msgs = append(msgs, m) + } + block3 := tma.nextBlock() + tma.setBlockMessages(block3, msgs...) + ts3 := mkTipSet(block3) + + // now create another set of messages and add them to the mpool + for i := 20; i < 30; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+200)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) + mustAdd(t, mp, m) + } + + // select messages in the last tipset; this should include the missed messages as well as + // the last messages we added, with the first actor's messages first + // first we need to update the nonce on the tma + tma.setStateNonce(a1, 10) + tma.setStateNonce(a2, 10) + + msgs, err = mp.SelectMessages(context.Background(), ts3, 1.0) + if err != nil { + t.Fatal(err) + } + if len(msgs) != 20 { + t.Fatalf("expected 20 messages, got %d", len(msgs)) + } + + nextNonce = 20 + for i := 0; i < 10; i++ { + if msgs[i].Message.From != a1 { + t.Fatalf("expected message from actor a1") + } + if msgs[i].Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d, got %d", msgs[i].Message.Nonce, nextNonce) + } + nextNonce++ + } + + nextNonce = 20 + for i := 10; i < 20; i++ { + if msgs[i].Message.From != a2 { + t.Fatalf("expected message from actor a2") + } + if msgs[i].Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d, got %d", msgs[i].Message.Nonce, nextNonce) + } + nextNonce++ + } +} + +func 
TestMessageSelectionTrimmingGas(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // make many small chains for the two actors + nMessages := int((constants.BlockGasLimit / gasLimit) + 1) + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expected := int(constants.BlockGasLimit / gasLimit) + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + mGasLimit := int64(0) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + } + if mGasLimit > constants.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } +} + +func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + + // create a larger than selectable chain + for i := 0; i < constants.BlockMessageLimit; i++ { + m := makeTestMessage(w1, a1, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + } + + msgs, err := 
mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expected := cbg.MaxLength + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + mGasLimit := int64(0) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + } + if mGasLimit > constants.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } +} + +func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.BLS) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // create 2 larger than selectable chains + for i := 0; i < constants.BlockMessageLimit; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100) + mustAdd(t, mp, m) + // a2's messages are preferred + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + mGasLimit := int64(0) + counts := make(map[crypto.SigType]uint) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + counts[m.Signature.Type]++ + } + + if mGasLimit > constants.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + + expected := constants.BlockMessageLimit + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + if counts[crypto.SigTypeBLS] != cbg.MaxLength { + t.Fatalf("expected %d bls messages, but got %d", cbg.MaxLength, len(msgs)) + } +} + +func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) { + mp, tma := makeTestMpool() 
+ + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.BLS) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // create 2 almost max-length chains of equal value + i := 0 + for i = 0; i < cbg.MaxLength-1; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100) + mustAdd(t, mp, m) + // a2's messages are preferred + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + } + + // a1's 8192th message is worth more than a2's + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 1000) + mustAdd(t, mp, m) + + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + + i++ + + // a2's (unselectable) 8193rd message is worth SO MUCH + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000000) + mustAdd(t, mp, m) + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + mGasLimit := int64(0) + counts := make(map[crypto.SigType]uint) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + counts[m.Signature.Type]++ + } + + if mGasLimit > constants.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + + expected := constants.BlockMessageLimit + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + // we should have taken the secp chain + if counts[crypto.SigTypeSecp256k1] != cbg.MaxLength { + t.Fatalf("expected %d bls messages, but got %d", cbg.MaxLength, len(msgs)) + } +} + +func TestPriorityMessageSelection(t *testing.T) { + tf.UnitTest(t) + + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err 
!= nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + mp.cfg.PriorityAddrs = []address.Address{a1} + + nMessages := 10 + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + if len(msgs) != 20 { + t.Fatalf("expected 20 messages but got %d", len(msgs)) + } + + // messages from a1 must be first + nextNonce := uint64(0) + for i := 0; i < 10; i++ { + m := msgs[i] + if m.Message.From != a1 { + t.Fatal("expected messages from a1 before messages from a2") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + + nextNonce = 0 + for i := 10; i < 20; i++ { + m := msgs[i] + if m.Message.From != a2 { + t.Fatal("expected messages from a2 after messages from a1") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } +} + +func TestPriorityMessageSelection2(t *testing.T) { + tf.UnitTest(t) + + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + 
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + mp.cfg.PriorityAddrs = []address.Address{a1} + + nMessages := int(2 * constants.BlockGasLimit / gasLimit) + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := int(constants.BlockGasLimit / gasLimit) + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs)) + } + + // all messages must be from a1 + nextNonce := uint64(0) + for _, m := range msgs { + if m.Message.From != a1 { + t.Fatal("expected messages from a1 before messages from a2") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } +} + +func TestPriorityMessageSelection3(t *testing.T) { + tf.UnitTest(t) + + t.Skip("reenable after removing allow negative") + + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + mp.cfg.PriorityAddrs = []address.Address{a1} + + tma.baseFee = tbig.NewInt(1000) + nMessages := 10 + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, 
uint64(i), gasLimit, uint64(1000+i%3+bias)) + mustAdd(t, mp, m) + // messages from a2 have negative performance + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, 100) + mustAdd(t, mp, m) + } + + // test greedy selection + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := 10 + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs)) + } + + // all messages must be from a1 + nextNonce := uint64(0) + for _, m := range msgs { + if m.Message.From != a1 { + t.Fatal("expected messages from a1 before messages from a2") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + + // test optimal selection + msgs, err = mp.SelectMessages(context.Background(), ts, 0.1) + if err != nil { + t.Fatal(err) + } + + expectedMsgs = 10 + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs)) + } + + // all messages must be from a1 + nextNonce = uint64(0) + for _, m := range msgs { + if m.Message.From != a1 { + t.Fatal("expected messages from a1 before messages from a2") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } +} + +func TestOptimalMessageSelection1(t *testing.T) { + tf.UnitTest(t) + + // this test uses just a single actor sending messages with a low tq + // the chain depenent merging algorithm should pick messages from the actor + // from the start + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + gasLimit := 
gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + nMessages := int(10 * constants.BlockGasLimit / gasLimit) + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 0.25) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := int(constants.BlockGasLimit / gasLimit) + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs)) + } + + nextNonce := uint64(0) + for _, m := range msgs { + if m.Message.From != a1 { + t.Fatal("expected message from a1") + } + + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } +} + +func TestOptimalMessageSelection2(t *testing.T) { + tf.UnitTest(t) + + // this test uses two actors sending messages to each other, with the first + // actor paying (much) higher gas premium than the second. 
+ // We select with a low ticket quality; the chain depenent merging algorithm should pick + // messages from the second actor from the start + mp, tma := makeTestMpool() + + // the actors + w1 := newWallet(t) + a1, err := w1.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + w2 := newWallet(t) + a2, err := w2.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + nMessages := int(5 * constants.BlockGasLimit / gasLimit) + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(200000+i%3+bias)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(190000+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 0.1) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := int(constants.BlockGasLimit / gasLimit) + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs)) + } + + var nFrom1, nFrom2 int + var nextNonce1, nextNonce2 uint64 + for _, m := range msgs { + if m.Message.From == a1 { + if m.Message.Nonce != nextNonce1 { + t.Fatalf("expected nonce %d but got %d", nextNonce1, m.Message.Nonce) + } + nextNonce1++ + nFrom1++ + } else { + if m.Message.Nonce != nextNonce2 { + t.Fatalf("expected nonce %d but got %d", nextNonce2, m.Message.Nonce) + } + nextNonce2++ + nFrom2++ + } + } + + if nFrom1 > nFrom2 { + t.Fatalf("expected more messages from a2 than a1; nFrom1=%d nFrom2=%d", nFrom1, nFrom2) + } +} + +func TestOptimalMessageSelection3(t *testing.T) { + tf.UnitTest(t) + + // this test uses 10 actors sending a block of messages to each other, with the the first + // 
actors paying higher gas premium than the subsequent actors. + // We select with a low ticket quality; the chain depenent merging algorithm should pick + // messages from the median actor from the start + mp, tma := makeTestMpool() + + nActors := 10 + // the actors + var actors []address.Address + var wallets []*wallet.Wallet + + for i := 0; i < nActors; i++ { + w := newWallet(t) + + a, err := w.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + actors = append(actors, a) + wallets = append(wallets, w) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + + for _, a := range actors { + tma.setBalance(a, 1) // in FIL + } + + nMessages := int(constants.BlockGasLimit/gasLimit) + 1 + for i := 0; i < nMessages; i++ { + for j := 0; j < nActors; j++ { + premium := 500000 + 10000*(nActors-j) + (nMessages+2-i)/(30*nActors) + i%3 + m := makeTestMessage(wallets[j], actors[j], actors[j%nActors], uint64(i), gasLimit, uint64(premium)) + mustAdd(t, mp, m) + } + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 0.1) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := int(constants.BlockGasLimit / gasLimit) + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs)) + } + + whoIs := func(a address.Address) int { + for i, aa := range actors { + if a == aa { + return i + } + } + return -1 + } + + nonces := make([]uint64, nActors) + for _, m := range msgs { + who := whoIs(m.Message.From) + if who < 3 { + t.Fatalf("got message from %dth actor", who) + } + + nextNonce := nonces[who] + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nonces[who]++ + } +} + +func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium func() uint64) (float64, float64, float64) { + // in this test we 
use 300 actors and send 10 blocks of messages. + // actors send with an randomly distributed premium dictated by the getPremium function. + // a number of miners select with varying ticket quality and we compare the + // capacity and rewards of greedy selection -vs- optimal selection + mp, tma := makeTestMpool() + + nActors := 300 + // the actors + var actors []address.Address + var wallets []*wallet.Wallet + + for i := 0; i < nActors; i++ { + w := newWallet(t) + + a, err := w.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + actors = append(actors, a) + wallets = append(wallets, w) + } + + block := tma.nextBlock() + ts := mkTipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] + baseFee := tbig.NewInt(0) + + for _, a := range actors { + tma.setBalance(a, 1) // in FIL + } + + nMessages := int(10 * constants.BlockGasLimit / gasLimit) + t.Log("nMessages", nMessages) + nonces := make([]uint64, nActors) + for i := 0; i < nMessages; i++ { + from := rng.Intn(nActors) + to := rng.Intn(nActors) + nonce := nonces[from] + nonces[from]++ + premium := getPremium() + m := makeTestMessage(wallets[from], actors[from], actors[to], nonce, gasLimit, premium) + mustAdd(t, mp, m) + } + + logging.SetLogLevel("messagepool", "error") // nolint: errcheck + + pending, err := mp.getPendingMessages(context.TODO(), mp.curTS, ts) + require.NoError(t, err) + // 1. greedy selection + gm, err := mp.selectMessagesGreedy(context.Background(), ts, ts, pending) + if err != nil { + t.Fatal(err) + } + + greedyMsgs := gm.msgs + + totalGreedyCapacity := 0.0 + totalGreedyReward := 0.0 + totalOptimalCapacity := 0.0 + totalOptimalReward := 0.0 + totalBestTQReward := 0.0 + const runs = 1 + for i := 0; i < runs; i++ { + // 2. 
optimal selection + minersRand := rng.Float64() + winerProba := noWinnersProb() + i := 0 + for ; i < MaxBlocks && minersRand > 0; i++ { + minersRand -= winerProba[i] + } + nMiners := i - 1 + if nMiners < 1 { + nMiners = 1 + } + + optMsgs := make(map[cid.Cid]*types.SignedMessage) + bestTq := 0.0 + var bestMsgs []*types.SignedMessage + for j := 0; j < nMiners; j++ { + tq := rng.Float64() + msgs, err := mp.SelectMessages(context.Background(), ts, tq) + if err != nil { + t.Fatal(err) + } + if tq > bestTq { + bestMsgs = msgs + } + + for _, m := range msgs { + c := m.Cid() + optMsgs[c] = m + } + } + + totalGreedyCapacity += float64(len(greedyMsgs)) + totalOptimalCapacity += float64(len(optMsgs)) + boost := float64(len(optMsgs)) / float64(len(greedyMsgs)) + + t.Logf("nMiners: %d", nMiners) + t.Logf("greedy capacity %d, optimal capacity %d (x %.1f )", len(greedyMsgs), + len(optMsgs), boost) + if len(greedyMsgs) > len(optMsgs) { + t.Errorf("greedy capacity higher than optimal capacity; wtf") + } + + greedyReward := big.NewInt(0) + for _, m := range greedyMsgs { + greedyReward.Add(greedyReward, mp.getGasReward(m, baseFee)) + } + + optReward := big.NewInt(0) + for _, m := range optMsgs { + optReward.Add(optReward, mp.getGasReward(m, baseFee)) + } + + bestTqReward := big.NewInt(0) + for _, m := range bestMsgs { + bestTqReward.Add(bestTqReward, mp.getGasReward(m, baseFee)) + } + + totalBestTQReward += float64(bestTqReward.Uint64()) + + nMinersBig := big.NewInt(int64(nMiners)) + greedyAvgReward, _ := new(big.Rat).SetFrac(greedyReward, nMinersBig).Float64() + totalGreedyReward += greedyAvgReward + optimalAvgReward, _ := new(big.Rat).SetFrac(optReward, nMinersBig).Float64() + totalOptimalReward += optimalAvgReward + + boost = optimalAvgReward / greedyAvgReward + t.Logf("greedy reward: %.0f, optimal reward: %.0f (x %.1f )", greedyAvgReward, + optimalAvgReward, boost) + + } + + capacityBoost := totalOptimalCapacity / totalGreedyCapacity + rewardBoost := totalOptimalReward / 
totalGreedyReward + t.Logf("Average capacity boost: %f", capacityBoost) + t.Logf("Average reward boost: %f", rewardBoost) + t.Logf("Average best tq reward: %f", totalBestTQReward/runs/1e12) + + logging.SetLogLevel("messagepool", "info") // nolint: errcheck + + return capacityBoost, rewardBoost, totalBestTQReward / runs / 1e12 +} + +func makeExpPremiumDistribution(rng *rand.Rand) func() uint64 { + return func() uint64 { + premium := 20000*math.Exp(-3.*rng.Float64()) + 5000 + return uint64(premium) + } +} + +// nolint +func makeZipfPremiumDistribution(rng *rand.Rand) func() uint64 { + zipf := rand.NewZipf(rng, 1.001, 1, 40000) + return func() uint64 { + return zipf.Uint64() + 10000 + } +} + +func TestCompetitiveMessageSelectionExp(t *testing.T) { + tf.UnitTest(t) + + if testing.Short() { + t.Skip("skipping in short mode") + } + var capacityBoost, rewardBoost, tqReward float64 + seeds := []int64{1947, 1976, 2020, 2100, 10000, 143324, 432432, 131, 32, 45} + for _, seed := range seeds { + t.Log("running competitive message selection with Exponential premium distribution and seed", seed) + rng := rand.New(rand.NewSource(seed)) + cb, rb, tqR := testCompetitiveMessageSelection(t, rng, makeExpPremiumDistribution(rng)) + capacityBoost += cb + rewardBoost += rb + tqReward += tqR + } + + capacityBoost /= float64(len(seeds)) + rewardBoost /= float64(len(seeds)) + tqReward /= float64(len(seeds)) + t.Logf("Average capacity boost across all seeds: %f", capacityBoost) + t.Logf("Average reward boost across all seeds: %f", rewardBoost) + t.Logf("Average reward of best ticket across all seeds: %f", tqReward) +} + +func TestCompetitiveMessageSelectionZipf(t *testing.T) { + tf.UnitTest(t) + t.Skipf("'TestCompetitiveMessageSelectionZipf' cost such a long time, we have done enough tests for this module") + + var capacityBoost, rewardBoost, tqReward float64 + seeds := []int64{1947, 1976, 2020, 2100, 10000, 143324, 432432, 131, 32, 45} + for _, seed := range seeds { + t.Log("running 
competitive message selection with Zipf premium distribution and seed", seed) + rng := rand.New(rand.NewSource(seed)) + cb, rb, tqR := testCompetitiveMessageSelection(t, rng, makeZipfPremiumDistribution(rng)) + capacityBoost += cb + rewardBoost += rb + tqReward += tqR + } + + tqReward /= float64(len(seeds)) + capacityBoost /= float64(len(seeds)) + rewardBoost /= float64(len(seeds)) + t.Logf("Average capacity boost across all seeds: %f", capacityBoost) + t.Logf("Average reward boost across all seeds: %f", rewardBoost) + t.Logf("Average reward of best ticket across all seeds: %f", tqReward) +} + +func TestGasReward(t *testing.T) { + tf.UnitTest(t) + + tests := []struct { + Premium uint64 + FeeCap uint64 + BaseFee uint64 + GasReward int64 + }{ + {Premium: 100, FeeCap: 200, BaseFee: 100, GasReward: 100}, + {Premium: 100, FeeCap: 200, BaseFee: 210, GasReward: -10 * 3}, + {Premium: 200, FeeCap: 250, BaseFee: 210, GasReward: 40}, + {Premium: 200, FeeCap: 250, BaseFee: 2000, GasReward: -1750 * 3}, + } + + mp := new(MessagePool) + for _, test := range tests { + test := test + t.Run(fmt.Sprintf("%v", test), func(t *testing.T) { + msg := &types.SignedMessage{ + Message: types.Message{ + GasLimit: 10, + GasFeeCap: tbig.NewInt(int64(test.FeeCap)), + GasPremium: tbig.NewInt(int64(test.Premium)), + }, + } + rew := mp.getGasReward(msg, tbig.NewInt(int64(test.BaseFee))) + if rew.Cmp(big.NewInt(test.GasReward*10)) != 0 { + t.Errorf("bad reward: expected %d, got %s", test.GasReward*10, rew) + } + }) + } +} + +type SignedMessage struct { + Message types.Message + Signature crypto.Signature +} + +func TestRealWorldSelection(t *testing.T) { + tf.UnitTest(t) + + // load test-messages.json.gz and rewrite the messages so that + // 1) we map each real actor to a test actor so that we can sign the messages + // 2) adjust the nonces so that they start from 0 + file, err := os.Open("test-messages.json.gz") + if err != nil { + t.Fatal(err) + } + + gzr, err := gzip.NewReader(file) + if err != 
nil { + t.Fatal(err) + } + + dec := json.NewDecoder(gzr) + + var msgs []*types.SignedMessage + baseNonces := make(map[address.Address]uint64) + +readLoop: + for { + m := new(SignedMessage) + err := dec.Decode(m) + switch err { + case nil: + sm := types.SignedMessage{ + Message: m.Message, + Signature: m.Signature, + } + msgs = append(msgs, &sm) + + nonce, ok := baseNonces[m.Message.From] + if !ok || m.Message.Nonce < nonce { + baseNonces[m.Message.From] = m.Message.Nonce + } + + case io.EOF: + break readLoop + + default: + t.Fatal(err) + } + } + + actorMap := make(map[address.Address]address.Address) + actorWallets := make(map[address.Address]*wallet.Wallet) + ctx := context.Background() + + for _, m := range msgs { + baseNonce := baseNonces[m.Message.From] + + localActor, ok := actorMap[m.Message.From] + if !ok { + w := newWallet(t) + + a, err := w.NewAddress(context.Background(), address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + actorMap[m.Message.From] = a + actorWallets[a] = w + localActor = a + } + + w, ok := actorWallets[localActor] + if !ok { + t.Fatalf("failed to lookup wallet for actor %s", localActor) + } + + m.Message.From = localActor + m.Message.Nonce -= baseNonce + + c := m.Message.Cid() + sig, err := w.WalletSign(context.Background(), localActor, c.Bytes(), types.MsgMeta{}) + if err != nil { + t.Fatal(err) + } + + m.Signature = *sig + } + + mp, tma := makeTestMpool() + + block := tma.nextBlockWithHeight(UpgradeBreezeHeight + 10) + ts := mkTipSet(block) + tma.applyBlock(t, block) + + for _, a := range actorMap { + tma.setBalance(a, 1000000) + } + + tma.baseFee = tbig.NewInt(800_000_000) + + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Message.Nonce < msgs[j].Message.Nonce + }) + + // add the messages + for _, m := range msgs { + mustAdd(t, mp, m) + } + + // do message selection and check block packing + minGasLimit := int64(0.9 * float64(constants.BlockGasLimit)) + + // greedy first + selected, err := mp.SelectMessages(ctx, ts, 
1.0) + if err != nil { + t.Fatal(err) + } + + gasLimit := int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=1.0; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // high quality ticket + selected, err = mp.SelectMessages(ctx, ts, .8) + if err != nil { + t.Fatal(err) + } + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.8; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // mid quality ticket + selected, err = mp.SelectMessages(context.Background(), ts, .4) + if err != nil { + t.Fatal(err) + } + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.4; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // low quality ticket + selected, err = mp.SelectMessages(context.Background(), ts, .1) + if err != nil { + t.Fatal(err) + } + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.1; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // very low quality ticket + selected, err = mp.SelectMessages(context.Background(), ts, .01) + if err != nil { + t.Fatal(err) + } + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.01; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } +} diff --git a/pkg/messagepool/test-messages.json.gz b/pkg/messagepool/test-messages.json.gz new file mode 100644 index 0000000000..09481e1f88 Binary files /dev/null and b/pkg/messagepool/test-messages.json.gz differ diff --git a/internal/pkg/metrics/counter.go b/pkg/metrics/counter.go similarity index 100% rename from 
internal/pkg/metrics/counter.go rename to pkg/metrics/counter.go diff --git a/pkg/metrics/export.go b/pkg/metrics/export.go new file mode 100644 index 0000000000..cedf6fe0e9 --- /dev/null +++ b/pkg/metrics/export.go @@ -0,0 +1,93 @@ +package metrics + +import ( + "net/http" + "time" + + "contrib.go.opencensus.io/exporter/jaeger" + "contrib.go.opencensus.io/exporter/prometheus" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + prom "github.com/prometheus/client_golang/prometheus" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + + "github.com/filecoin-project/venus/pkg/config" +) + +// RegisterPrometheusEndpoint registers and serves prometheus metrics +func RegisterPrometheusEndpoint(cfg *config.MetricsConfig) error { + if !cfg.PrometheusEnabled { + return nil + } + + // validate config values and marshal to types + interval, err := time.ParseDuration(cfg.ReportInterval) + if err != nil { + log.Errorf("invalid metrics interval: %s", err) + return err + } + + promma, err := ma.NewMultiaddr(cfg.PrometheusEndpoint) + if err != nil { + return err + } + + _, promAddr, err := manet.DialArgs(promma) // nolint + if err != nil { + return err + } + + // setup prometheus + registry := prom.NewRegistry() + pe, err := prometheus.NewExporter(prometheus.Options{ + Namespace: "filecoin", + Registry: registry, + }) + if err != nil { + return err + } + + view.RegisterExporter(pe) + view.SetReportingPeriod(interval) + + go func() { + mux := http.NewServeMux() + mux.Handle("/metrics", pe) + if err := http.ListenAndServe(promAddr, mux); err != nil { + log.Errorf("failed to serve /metrics endpoint on %v", err) + } + }() + + return nil +} + +// RegisterJaeger registers the jaeger endpoint with opencensus and names the +// tracer `name`. 
+func RegisterJaeger(name string, cfg *config.TraceConfig) (*jaeger.Exporter, error) { + if !cfg.JaegerTracingEnabled { + return nil, nil + } + + if len(cfg.ServerName) != 0 { + name = cfg.ServerName + } + + je, err := jaeger.NewExporter(jaeger.Options{ + AgentEndpoint: cfg.JaegerEndpoint, + Process: jaeger.Process{ + ServiceName: name, + }, + }) + if err != nil { + return nil, err + } + + trace.RegisterExporter(je) + // trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(cfg.ProbabilitySampler)}) + + log.Infof("register tracing exporter:%s, service name:%s", cfg.JaegerEndpoint, name) + + return je, err +} diff --git a/internal/pkg/metrics/gauge.go b/pkg/metrics/gauge.go similarity index 100% rename from internal/pkg/metrics/gauge.go rename to pkg/metrics/gauge.go diff --git a/internal/pkg/metrics/heartbeat.go b/pkg/metrics/heartbeat.go similarity index 90% rename from internal/pkg/metrics/heartbeat.go rename to pkg/metrics/heartbeat.go index f200f2d98d..ba091ee408 100644 --- a/internal/pkg/metrics/heartbeat.go +++ b/pkg/metrics/heartbeat.go @@ -8,26 +8,25 @@ import ( "time" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/go-state-types/abi" logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" - net "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/peerstore" + "github.com/libp2p/go-libp2p/core/host" + net "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" ma "github.com/multiformats/go-multiaddr" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/ipfs/go-cid" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" + "github.com/filecoin-project/venus/pkg/config" + 
"github.com/filecoin-project/venus/venus-shared/types" ) // HeartbeatProtocol is the libp2p protocol used for the heartbeat service const ( HeartbeatProtocol = "fil/heartbeat/1.0.0" // Minutes to wait before logging connection failure at ERROR level - connectionFailureErrorLogPeriodMinutes = 10 * time.Minute + connectionFailureErrorLogPeriod = 10 * time.Minute ) var log = logging.Logger("metrics") @@ -46,7 +45,7 @@ type Heartbeat struct { // Syncing is `true` iff the node is currently syncing its chain with the network. // Syncing bool - // Address of this node's active miner. Can be empty - will return the zero address + // MinerAddress of this node's active miner. Can be empty - will return the zero address MinerAddress address.Address // CID of this chain's genesis block. @@ -60,7 +59,7 @@ type HeartbeatService struct { Config *config.HeartbeatConfig // A function that returns the heaviest tipset - HeadGetter func() (block.TipSet, error) + HeadGetter func() (types.TipSet, error) // A function that returns the miner's address MinerAddressGetter func() address.Address @@ -84,7 +83,7 @@ func defaultMinerAddressGetter() address.Address { } // NewHeartbeatService returns a HeartbeatService -func NewHeartbeatService(h host.Host, genesisCID cid.Cid, hbc *config.HeartbeatConfig, hg func() (block.TipSet, error), options ...HeartbeatServiceOption) *HeartbeatService { +func NewHeartbeatService(h host.Host, genesisCID cid.Cid, hbc *config.HeartbeatConfig, hg func() (types.TipSet, error), options ...HeartbeatServiceOption) *HeartbeatService { srv := &HeartbeatService{ Host: h, GenesisCID: genesisCID, @@ -149,7 +148,7 @@ func (hbs *HeartbeatService) Start(ctx context.Context) { failedAt = now erroredAt = failedAt // Start the timer on raising to ERROR level logfn = log.Warnf - } else if now.Sub(erroredAt) > connectionFailureErrorLogPeriodMinutes { + } else if now.Sub(erroredAt) > connectionFailureErrorLogPeriod { logfn = log.Errorf erroredAt = now // Reset the timer } @@ 
-207,10 +206,8 @@ func (hbs *HeartbeatService) Beat(ctx context.Context) Heartbeat { log.Errorf("unable to fetch chain head: %s", err) } tipset := ts.Key().String() - height, err := ts.Height() - if err != nil { - log.Warnf("heartbeat service failed to get chain height: %s", err) - } + height := ts.Height() + addr := hbs.MinerAddressGetter() return Heartbeat{ Head: tipset, @@ -242,7 +239,7 @@ func (hbs *HeartbeatService) Connect(ctx context.Context) error { // Decapsulate the /p2p/ part from the target // /ip4//p2p/ becomes /ip4/ targetPeerAddr, _ := ma.NewMultiaddr( - fmt.Sprintf("/p2p/%s", peer.Encode(peerid))) + fmt.Sprintf("/p2p/%s", peerid.String())) targetAddr := targetMaddr.Decapsulate(targetPeerAddr) hbs.Host.Peerstore().AddAddr(peerid, targetAddr, peerstore.PermanentAddrTTL) diff --git a/pkg/metrics/heartbeat_test.go b/pkg/metrics/heartbeat_test.go new file mode 100644 index 0000000000..ad205f0b31 --- /dev/null +++ b/pkg/metrics/heartbeat_test.go @@ -0,0 +1,206 @@ +package metrics_test + +import ( + "context" + "crypto/rand" + "encoding/json" + "fmt" + "testing" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + fbig "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/host" + net "github.com/libp2p/go-libp2p/core/network" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/metrics" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +var testCid cid.Cid + +func init() { + c, err := 
cid.Decode("Qmd52WKRSwrBK5gUaJKawryZQ5by6UbNB8KVW2Zy6JtbyW") + if err != nil { + panic(err) + } + testCid = c +} + +type endpoint struct { + Host host.Host + Address string +} + +func newEndpoint(t *testing.T, port int) endpoint { + priv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, rand.Reader) + if err != nil { + t.Fatal(err) + } + opts := []libp2p.Option{ + libp2p.DisableRelay(), + libp2p.ListenAddrStrings(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port)), + libp2p.Identity(priv), + } + + basicHost, err := libp2p.New(opts...) + if err != nil { + t.Fatal(err) + } + + // Build host multiaddress + hostAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ipfs/%s", basicHost.ID().Pretty())) + + // Now we can build a full multiaddress to reach this host + // by encapsulating both addresses: + addr := basicHost.Addrs()[0] + fullAddr := addr.Encapsulate(hostAddr) + + return endpoint{ + Host: basicHost, + Address: fullAddr.String(), + } +} + +func TestHeartbeatConnectSuccess(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + aggregator := newEndpoint(t, 0) + filecoin := newEndpoint(t, 0) + aggregator.Host.SetStreamHandler(metrics.HeartbeatProtocol, func(c net.Stream) { + }) + + hbs := metrics.NewHeartbeatService( + filecoin.Host, + testCid, + &config.HeartbeatConfig{ + BeatTarget: aggregator.Address, + BeatPeriod: "3s", + ReconnectPeriod: "10s", + Nickname: "BobHoblaw", + }, + func() (types.TipSet, error) { + tipSet := chain.NewBuilder(t, address.Undef).Genesis() + return *tipSet, nil + }, + ) + + assert.Equal(t, 1, len(aggregator.Host.Peerstore().Peers())) + assert.Contains(t, aggregator.Host.Peerstore().Peers(), aggregator.Host.ID()) + assert.NoError(t, hbs.Connect(ctx)) + assert.Equal(t, 2, len(aggregator.Host.Peerstore().Peers())) + assert.Contains(t, aggregator.Host.Peerstore().Peers(), aggregator.Host.ID()) + assert.Contains(t, aggregator.Host.Peerstore().Peers(), filecoin.Host.ID()) +} + +func TestHeartbeatConnectFailure(t *testing.T) { + 
tf.UnitTest(t) + + ctx := context.Background() + filecoin := newEndpoint(t, 60001) + + hbs := metrics.NewHeartbeatService( + filecoin.Host, + testCid, + &config.HeartbeatConfig{ + BeatTarget: "", + BeatPeriod: "3s", + ReconnectPeriod: "10s", + Nickname: "BobHoblaw", + }, + func() (types.TipSet, error) { + tipSet := chain.NewBuilder(t, address.Undef).Genesis() + return *tipSet, nil + }, + ) + assert.Error(t, hbs.Connect(ctx)) +} + +func TestHeartbeatRunSuccess(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + // we will use this to stop the run method after making assertions + runCtx, cancel := context.WithCancel(ctx) + + // port 0 to avoid conflicts + aggregator := newEndpoint(t, 0) + filecoin := newEndpoint(t, 0) + + // create a tipset, we will assert on it in the SetStreamHandler method + expHeight := abi.ChainEpoch(444) + expTS := mustMakeTipset(t, expHeight) + + addr, err := address.NewSecp256k1Address([]byte("miner address")) + require.NoError(t, err) + + // The handle method will run the assertions for the test + aggregator.Host.SetStreamHandler(metrics.HeartbeatProtocol, func(s net.Stream) { + defer func() { + require.NoError(t, s.Close()) + }() + + dec := json.NewDecoder(s) + var hb metrics.Heartbeat + require.NoError(t, dec.Decode(&hb)) + + assert.Equal(t, expTS.String(), hb.Head) + assert.Equal(t, abi.ChainEpoch(444), hb.Height) + assert.Equal(t, "BobHoblaw", hb.Nickname) + assert.Equal(t, addr, hb.MinerAddress) + cancel() + }) + + hbs := metrics.NewHeartbeatService( + filecoin.Host, + testCid, + &config.HeartbeatConfig{ + BeatTarget: aggregator.Address, + BeatPeriod: "1s", + ReconnectPeriod: "1s", + Nickname: "BobHoblaw", + }, + func() (types.TipSet, error) { + return *expTS, nil + }, + metrics.WithMinerAddressGetter(func() address.Address { + return addr + }), + ) + + require.NoError(t, hbs.Connect(ctx)) + + assert.NoError(t, hbs.Run(runCtx)) + assert.Error(t, runCtx.Err(), context.Canceled.Error()) +} + +func mustMakeTipset(t 
*testing.T, height abi.ChainEpoch) *types.TipSet { + ts, err := types.NewTipSet([]*types.BlockHeader{{ + Miner: testhelpers.NewForTestGetter()(), + Ticket: &types.Ticket{VRFProof: []byte{0}}, + Parents: types.TipSetKey{}.Cids(), + ParentWeight: fbig.Zero(), + Height: height, + ParentMessageReceipts: testhelpers.EmptyMessagesCID, + Messages: testhelpers.EmptyTxMetaCID, + ParentStateRoot: testhelpers.EmptyTxMetaCID, + }}) + if err != nil { + t.Fatal(err) + } + return ts +} diff --git a/internal/pkg/metrics/log_json_formatter.go b/pkg/metrics/log_json_formatter.go similarity index 97% rename from internal/pkg/metrics/log_json_formatter.go rename to pkg/metrics/log_json_formatter.go index abfa0c9ad2..fd2b0e4054 100644 --- a/internal/pkg/metrics/log_json_formatter.go +++ b/pkg/metrics/log_json_formatter.go @@ -12,8 +12,7 @@ import ( ) // JSONFormatter implements go-logging Formatter for JSON encoded logs -type JSONFormatter struct { -} +type JSONFormatter struct{} type logRecord struct { Timestamp time.Time `json:"timestamp"` diff --git a/internal/pkg/metrics/timer.go b/pkg/metrics/timer.go similarity index 99% rename from internal/pkg/metrics/timer.go rename to pkg/metrics/timer.go index 8b5fe2ae19..095392c203 100644 --- a/internal/pkg/metrics/timer.go +++ b/pkg/metrics/timer.go @@ -54,7 +54,6 @@ func (t *Float64Timer) Start(ctx context.Context) *Stopwatch { start: time.Now(), recorder: t.measureMs.M, } - } // Stopwatch contains a start time and a recorder, when stopped it record the diff --git a/internal/pkg/metrics/timer_test.go b/pkg/metrics/timer_test.go similarity index 94% rename from internal/pkg/metrics/timer_test.go rename to pkg/metrics/timer_test.go index 122f179bbd..a52c0698d0 100644 --- a/internal/pkg/metrics/timer_test.go +++ b/pkg/metrics/timer_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" 
"github.com/stretchr/testify/assert" "go.opencensus.io/stats/view" ) @@ -25,7 +25,6 @@ func TestTimerSimple(t *testing.T) { sw := testTimer.Start(ctx) sw.Stop(ctx) assert.NotEqual(t, 0, sw.start) - } func TestDuplicateTimersPanics(t *testing.T) { @@ -48,7 +47,6 @@ func TestDuplicateTimersPanics(t *testing.T) { sw := testTimer.Start(ctx) sw.Stop(ctx) assert.NotEqual(t, 0, sw.start) - } func TestMultipleTimers(t *testing.T) { @@ -67,5 +65,4 @@ func TestMultipleTimers(t *testing.T) { assert.NotEqual(t, 0, sw1.start) sw2.Stop(ctx2) assert.NotEqual(t, 0, sw2.start) - } diff --git a/internal/pkg/metrics/tracing/util.go b/pkg/metrics/tracing/util.go similarity index 100% rename from internal/pkg/metrics/tracing/util.go rename to pkg/metrics/tracing/util.go diff --git a/pkg/migration/migrate.go b/pkg/migration/migrate.go new file mode 100644 index 0000000000..6b1fe13d04 --- /dev/null +++ b/pkg/migration/migrate.go @@ -0,0 +1,367 @@ +package migration + +import ( + "encoding/json" + "fmt" + "math" + "os" + "path/filepath" + + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/fixtures/networks" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/venus-shared/types" + logging "github.com/ipfs/go-log/v2" +) + +var migrateLog = logging.Logger("data_migrate") + +type UpgradeFunc func(string) error + +type versionInfo struct { + version uint + upgrade UpgradeFunc +} + +var versionMap = []versionInfo{ + {version: 3, upgrade: Version3Upgrade}, + {version: 4, upgrade: Version4Upgrade}, + {version: 5, upgrade: Version5Upgrade}, + {version: 6, upgrade: Version6Upgrade}, + {version: 7, upgrade: Version7Upgrade}, + {version: 8, upgrade: Version8Upgrade}, + {version: 9, upgrade: Version9Upgrade}, + {version: 10, upgrade: Version10Upgrade}, +} + +// TryToMigrate used to migrate data(db,config,file,etc) in local repo +func TryToMigrate(repoPath string) error { + 
localVersion, err := repo.ReadVersion(repoPath) + if err != nil { + return err + } + + for _, up := range versionMap { + if up.version > localVersion { + err = up.upgrade(repoPath) + if err != nil { + return err + } + migrateLog.Infof("success to upgrade version %d to version %d", localVersion, up.version) + localVersion = up.version + } + } + + return nil +} + +// Version3Upgrade 3 for a config filed named apiAuthUrl +func Version3Upgrade(repoPath string) error { + fsrRepo, err := repo.OpenFSRepo(repoPath, 2) + if err != nil { + return err + } + + cfg := fsrRepo.Config() + + switch cfg.NetworkParams.NetworkType { + case types.NetworkMainnet: + fallthrough + case types.Network2k: + fallthrough + case types.NetworkCalibnet: + fallthrough + case types.NetworkNerpa: + fallthrough + case types.NetworkInterop: + cfg.API.VenusAuthURL = "" + } + + err = fsrRepo.ReplaceConfig(cfg) + if err != nil { + return err + } + err = fsrRepo.Close() + if err != nil { + return err + } + return repo.WriteVersion(repoPath, 3) +} + +func Version4Upgrade(repoPath string) (err error) { + var fsrRepo repo.Repo + if fsrRepo, err = repo.OpenFSRepo(repoPath, 3); err != nil { + return + } + cfg := fsrRepo.Config() + switch cfg.NetworkParams.NetworkType { + case types.NetworkMainnet: + cfg.NetworkParams.ForkUpgradeParam = config.DefaultForkUpgradeParam + case types.Network2k: + cfg.NetworkParams.ForkUpgradeParam = networks.Net2k().Network.ForkUpgradeParam + case types.NetworkCalibnet: + cfg.NetworkParams.ForkUpgradeParam = networks.Calibration().Network.ForkUpgradeParam + case types.NetworkForce: + cfg.NetworkParams.ForkUpgradeParam = networks.ForceNet().Network.ForkUpgradeParam + case types.NetworkButterfly: + cfg.NetworkParams.ForkUpgradeParam = networks.ButterflySnapNet().Network.ForkUpgradeParam + case types.NetworkInterop: + cfg.NetworkParams.ForkUpgradeParam = networks.InteropNet().Network.ForkUpgradeParam + default: + return fsrRepo.Close() + } + + if err = fsrRepo.ReplaceConfig(cfg); err 
!= nil { + return + } + + if err = fsrRepo.Close(); err != nil { + return + } + + return repo.WriteVersion(repoPath, 4) +} + +// Version5Upgrade +func Version5Upgrade(repoPath string) (err error) { + var fsrRepo repo.Repo + if fsrRepo, err = repo.OpenFSRepo(repoPath, 4); err != nil { + return + } + cfg := fsrRepo.Config() + switch cfg.NetworkParams.NetworkType { + case types.NetworkMainnet: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = 1231620 + case types.Network2k: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = -17 + case types.NetworkCalibnet: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = 450 + case types.NetworkForce: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = math.MaxInt32 + case types.NetworkInterop: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = -17 + default: + return fsrRepo.Close() + } + + if err = fsrRepo.ReplaceConfig(cfg); err != nil { + return + } + + if err = fsrRepo.Close(); err != nil { + return + } + + return repo.WriteVersion(repoPath, 5) +} + +// Version6Upgrade +func Version6Upgrade(repoPath string) (err error) { + var fsrRepo repo.Repo + if fsrRepo, err = repo.OpenFSRepo(repoPath, 5); err != nil { + return + } + cfg := fsrRepo.Config() + switch cfg.NetworkParams.NetworkType { + case types.NetworkMainnet: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = 1231620 + case types.Network2k: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = -17 + case types.NetworkCalibnet: + cfg.NetworkParams.GenesisNetworkVersion = 
network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = 450 + case types.NetworkForce: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = -17 + case types.NetworkInterop: + cfg.NetworkParams.GenesisNetworkVersion = network.Version14 + cfg.NetworkParams.ForkUpgradeParam.UpgradeChocolateHeight = -17 + default: + return fsrRepo.Close() + } + + if err = fsrRepo.ReplaceConfig(cfg); err != nil { + return + } + + if err = fsrRepo.Close(); err != nil { + return + } + + return repo.WriteVersion(repoPath, 6) +} + +// Version7Upgrade +func Version7Upgrade(repoPath string) (err error) { + var fsrRepo repo.Repo + if fsrRepo, err = repo.OpenFSRepo(repoPath, 6); err != nil { + return + } + cfg := fsrRepo.Config() + switch cfg.NetworkParams.NetworkType { + case types.NetworkMainnet: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeOhSnapHeight = 1594680 + case types.Network2k: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeOhSnapHeight = -18 + case types.NetworkCalibnet: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeOhSnapHeight = 480 + case types.NetworkButterfly: + cfg.NetworkParams.GenesisNetworkVersion = network.Version14 + cfg.NetworkParams.ForkUpgradeParam.UpgradeOhSnapHeight = -18 + case types.NetworkForce: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeOhSnapHeight = -18 + case types.NetworkInterop: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeOhSnapHeight = -18 + default: + return fsrRepo.Close() + } + + // In order to migrate maxfee + type MpoolCfg struct { + MaxFee float64 `json:"maxFee"` + } + type tempCfg struct { + Mpool *MpoolCfg `json:"mpool"` + } + data, err := 
os.ReadFile(filepath.Join(repoPath, "config.json")) + if err != nil { + migrateLog.Errorf("open config file failed: %v", err) + } else { + // If maxFee value is String(10 FIL), unmarshal failure is expected + // If maxFee value is Number(10000000000000000000), need convert to FIL(10 FIL) + tmpCfg := tempCfg{} + if err := json.Unmarshal(data, &tmpCfg); err == nil { + maxFee := types.MustParseFIL(fmt.Sprintf("%fattofil", tmpCfg.Mpool.MaxFee)) + cfg.Mpool.MaxFee = maxFee + migrateLog.Info("convert mpool.maxFee from %v to %s", tmpCfg.Mpool.MaxFee, maxFee.String()) + } + } + + if err = fsrRepo.ReplaceConfig(cfg); err != nil { + return + } + + if err = fsrRepo.Close(); err != nil { + return + } + + return repo.WriteVersion(repoPath, 7) +} + +// Version8Upgrade +func Version8Upgrade(repoPath string) (err error) { + var fsrRepo repo.Repo + if fsrRepo, err = repo.OpenFSRepo(repoPath, 7); err != nil { + return + } + cfg := fsrRepo.Config() + switch cfg.NetworkParams.NetworkType { + case types.NetworkMainnet: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSkyrHeight = 1960320 + case types.Network2k: + cfg.NetworkParams.GenesisNetworkVersion = network.Version16 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSkyrHeight = -19 + case types.NetworkCalibnet: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSkyrHeight = 510 + case types.NetworkForce: + cfg.NetworkParams.GenesisNetworkVersion = network.Version16 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSkyrHeight = -19 + case types.NetworkInterop: + cfg.NetworkParams.GenesisNetworkVersion = network.Version15 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSkyrHeight = -19 + case types.NetworkButterfly: + cfg.NetworkParams.GenesisNetworkVersion = network.Version15 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSkyrHeight = -19 + default: + return fsrRepo.Close() + } + + if err = fsrRepo.ReplaceConfig(cfg); err != nil { + return + 
} + + if err = fsrRepo.Close(); err != nil { + return + } + + return repo.WriteVersion(repoPath, 8) +} + +// Version9Upgrade +func Version9Upgrade(repoPath string) (err error) { + var fsrRepo repo.Repo + if fsrRepo, err = repo.OpenFSRepo(repoPath, 8); err != nil { + return + } + cfg := fsrRepo.Config() + switch cfg.NetworkParams.NetworkType { + case types.NetworkMainnet: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSharkHeight = 2383680 + cfg.NetworkParams.PropagationDelaySecs = 10 + case types.Network2k: + cfg.NetworkParams.GenesisNetworkVersion = network.Version16 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSharkHeight = 100 + cfg.NetworkParams.PropagationDelaySecs = 1 + case types.NetworkCalibnet: + cfg.NetworkParams.GenesisNetworkVersion = network.Version0 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSharkHeight = 16800 + cfg.NetworkParams.PropagationDelaySecs = 10 + case types.NetworkForce: + cfg.NetworkParams.GenesisNetworkVersion = network.Version16 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSharkHeight = 100 + cfg.NetworkParams.PropagationDelaySecs = 1 + case types.NetworkInterop: + cfg.NetworkParams.GenesisNetworkVersion = network.Version16 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSharkHeight = 99999999999999 + case types.NetworkButterfly: + cfg.NetworkParams.GenesisNetworkVersion = network.Version16 + cfg.NetworkParams.ForkUpgradeParam.UpgradeSharkHeight = 600 + default: + return fsrRepo.Close() + } + + if err = fsrRepo.ReplaceConfig(cfg); err != nil { + return + } + + if err = fsrRepo.Close(); err != nil { + return + } + + return repo.WriteVersion(repoPath, 9) +} + +// Version10Upgrade will ignore some fields in the parameters structure of the config.json file +func Version10Upgrade(repoPath string) (err error) { + var fsrRepo repo.Repo + if fsrRepo, err = repo.OpenFSRepo(repoPath, 9); err != nil { + return + } + cfg := fsrRepo.Config() + + if err = fsrRepo.ReplaceConfig(cfg); err != nil { + 
return + } + + if err = fsrRepo.Close(); err != nil { + return + } + + return repo.WriteVersion(repoPath, 10) +} diff --git a/pkg/migration/migrate_test.go b/pkg/migration/migrate_test.go new file mode 100644 index 0000000000..2f471f9864 --- /dev/null +++ b/pkg/migration/migrate_test.go @@ -0,0 +1,50 @@ +package migration + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/filecoin-project/venus/fixtures/networks" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/repo" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/stretchr/testify/assert" +) + +func TestMigration(t *testing.T) { + tf.UnitTest(t) + + cfgs := map[types.NetworkType]*config.NetworkParamsConfig{ + types.Network2k: &networks.Net2k().Network, + types.NetworkForce: &networks.ForceNet().Network, + types.NetworkInterop: &networks.InteropNet().Network, + types.NetworkButterfly: &networks.ButterflySnapNet().Network, + types.NetworkCalibnet: &networks.Calibration().Network, + types.NetworkMainnet: &networks.Mainnet().Network, + types.Integrationnet: &networks.IntegrationNet().Network, + } + + for nt, paramsCfg := range cfgs { + cfg := config.NewDefaultConfig() + cfg.NetworkParams.NetworkType = nt + repoPath := t.TempDir() + assert.Nil(t, os.RemoveAll(repoPath)) + t.Log(repoPath) + assert.Nil(t, repo.InitFSRepo(repoPath, 0, cfg)) + + assert.Nil(t, TryToMigrate(repoPath)) + fsRepo, err := repo.OpenFSRepo(repoPath, repo.LatestVersion) + assert.Nil(t, err) + newCfg := fsRepo.Config() + assert.Equal(t, paramsCfg.NetworkType, newCfg.NetworkParams.NetworkType) + assert.EqualValuesf(t, config.NewDefaultConfig().NetworkParams.ForkUpgradeParam, newCfg.NetworkParams.ForkUpgradeParam, fmt.Sprintf("current network type %d", paramsCfg.NetworkType)) + + cfgTmp, err := config.ReadFile(filepath.Join(repoPath, "config.json")) + assert.NoError(t, err) + assert.Equal(t, uint64(0), 
cfgTmp.NetworkParams.BlockDelay) + assert.Equal(t, paramsCfg.NetworkType, cfgTmp.NetworkParams.NetworkType) + } +} diff --git a/pkg/net/blocksub/validator.go b/pkg/net/blocksub/validator.go new file mode 100644 index 0000000000..be8edf5171 --- /dev/null +++ b/pkg/net/blocksub/validator.go @@ -0,0 +1,62 @@ +package blocksub + +import ( + "bytes" + "context" + + "github.com/ipfs/go-log/v2" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/pkg/metrics" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var ( + blockTopicLogger = log.Logger("net/block_validator") + mDecodeBlkFail = metrics.NewInt64Counter("net/pubsub_block_decode_failure", "Number of blocks that fail to decode seen on block pubsub channel") +) + +// BlockTopicValidator may be registered on go-libp2p-pubsub to validate blocksub messages. +type BlockTopicValidator struct { + validator pubsub.ValidatorEx + opts []pubsub.ValidatorOpt +} + +type BlockHeaderValidator interface { + ValidateBlockMsg(context.Context, *types.BlockMsg) pubsub.ValidationResult +} + +// NewBlockTopicValidator retruns a BlockTopicValidator using `bv` for message validation +func NewBlockTopicValidator(bv BlockHeaderValidator, opts ...pubsub.ValidatorOpt) *BlockTopicValidator { + return &BlockTopicValidator{ + opts: opts, + validator: func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { + var bm types.BlockMsg + err := bm.UnmarshalCBOR(bytes.NewReader(msg.GetData())) + if err != nil { + blockTopicLogger.Warnf("failed to decode blocksub payload from peer %s: %s", p.String(), err.Error()) + mDecodeBlkFail.Inc(ctx, 1) + return pubsub.ValidationIgnore + } + + validateResult := bv.ValidateBlockMsg(ctx, &bm) + if validateResult == pubsub.ValidationAccept { + msg.ValidatorData = bm + } + return validateResult + }, + } +} + +func (btv *BlockTopicValidator) Topic(network string) string { + return types.BlockTopic(network) +} 
+ +func (btv *BlockTopicValidator) Validator() pubsub.ValidatorEx { + return btv.validator +} + +func (btv *BlockTopicValidator) Opts() []pubsub.ValidatorOpt { + return btv.opts +} diff --git a/pkg/net/blocksub/validator_test.go b/pkg/net/blocksub/validator_test.go new file mode 100644 index 0000000000..cc2a7ac88b --- /dev/null +++ b/pkg/net/blocksub/validator_test.go @@ -0,0 +1,73 @@ +package blocksub_test + +import ( + "bytes" + "context" + "fmt" + "testing" + + "github.com/filecoin-project/go-address" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/net/blocksub" + th "github.com/filecoin-project/venus/pkg/testhelpers" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func TestBlockTopicValidator(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + mbv := th.NewStubBlockValidator() + tv := blocksub.NewBlockTopicValidator(mbv, nil) + builder := chain.NewBuilder(t, address.Undef) + pid1 := th.RequireIntPeerID(t, 1) + + goodBlk := builder.BuildOnBlock(ctx, nil, func(b *chain.BlockBuilder) {}) + badBlk := builder.BuildOnBlock(ctx, nil, func(b *chain.BlockBuilder) { + b.IncHeight(1) + }) + + mbv.StubSyntaxValidationForBlock(badBlk, fmt.Errorf("invalid block")) + + validator := tv.Validator() + + network := "gfctest" + assert.Equal(t, types.BlockTopic(network), tv.Topic(network)) + assert.True(t, validator(ctx, pid1, blkToPubSub(t, goodBlk)) == pubsub.ValidationAccept) + assert.False(t, validator(ctx, pid1, blkToPubSub(t, badBlk)) == pubsub.ValidationAccept) + assert.False(t, validator(ctx, pid1, nonBlkPubSubMsg()) == pubsub.ValidationAccept) +} + +// convert a types.BlockHeader to a pubsub message +func blkToPubSub(t *testing.T, blk *types.BlockHeader) 
*pubsub.Message { + bm := types.BlockMsg{ + Header: blk, + BlsMessages: nil, + SecpkMessages: nil, + } + buf := new(bytes.Buffer) + err := bm.MarshalCBOR(buf) + require.NoError(t, err) + + return &pubsub.Message{ + Message: &pubsubpb.Message{ + Data: buf.Bytes(), + }, + } +} + +// returns a pubsub message that will not decode to a types.BlockHeader +func nonBlkPubSubMsg() *pubsub.Message { + pbm := &pubsubpb.Message{ + Data: []byte("meow"), + } + return &pubsub.Message{ + Message: pbm, + } +} diff --git a/pkg/net/dht.go b/pkg/net/dht.go new file mode 100644 index 0000000000..a5165ac94a --- /dev/null +++ b/pkg/net/dht.go @@ -0,0 +1,12 @@ +package net + +import ( + "fmt" + + "github.com/libp2p/go-libp2p/core/protocol" +) + +// FilecoinDHT is creates a protocol for the filecoin DHT. +func FilecoinDHT(network string) protocol.ID { + return protocol.ID(fmt.Sprintf("/fil/kad/%s", network)) +} diff --git a/pkg/net/exchange/client.go b/pkg/net/exchange/client.go new file mode 100644 index 0000000000..69fc849e9b --- /dev/null +++ b/pkg/net/exchange/client.go @@ -0,0 +1,499 @@ +package exchange + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "math/rand" + "time" + + cborutil "github.com/filecoin-project/go-cbor-util" + logging "github.com/ipfs/go-log" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + + "go.opencensus.io/trace" + + "github.com/filecoin-project/venus/pkg/net/peermgr" + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var exchangeClientLogger = logging.Logger("exchange.client") + +// client implements exchange.Client, using the libp2p ChainExchange protocol +// as the fetching mechanism. +type client struct { + // Connection manager used to contact the server. 
+ // FIXME: We should have a reduced interface here, initialized + // just with our protocol ID, we shouldn't be able to open *any* + // connection. + host host.Host + + peerTracker *bsPeerTracker +} + +var _ Client = (*client)(nil) + +// NewClient creates a new libp2p-based exchange.Client that uses the libp2p +// ChainExhange protocol as the fetching mechanism. +func NewClient(host host.Host, pmgr peermgr.IPeerMgr) Client { + return &client{ + host: host, + peerTracker: newPeerTracker(host, pmgr), + } +} + +// Main logic of the client request service. The provided `Request` +// is sent to the `singlePeer` if one is indicated or to all available +// ones otherwise. The response is processed and validated according +// to the `Request` options. Either a `validatedResponse` is returned +// (which can be safely accessed), or an `error` that may represent +// either a response error status, a failed validation or an internal +// error. +// +// This is the internal single point of entry for all external-facing +// APIs, currently we have 3 very heterogeneous services exposed: +// * GetBlocks: Headers +// * GetFullTipSet: Headers | Messages +// * GetChainMessages: Messages +// This function handles all the different combinations of the available +// request options without disrupting external calls. In the future the +// consumers should be forced to use a more standardized service and +// adhere to a single API derived from this function. +func (c *client) doRequest( + ctx context.Context, + req *exchange.Request, + singlePeer []peer.ID, + // In the `GetChainMessages` case, we won't request the headers but we still + // need them to check the integrity of the `CompactedMessages` in the response + // so the tipset blocks need to be provided by the caller. + tipsets []*types.TipSet, +) (*validatedResponse, error) { + // Validate request. 
+ if req.Length == 0 { + return nil, fmt.Errorf("invalid request of length 0") + } + + if req.Length > exchange.MaxRequestLength { + return nil, fmt.Errorf("request length (%d) above maximum (%d)", + req.Length, exchange.MaxRequestLength) + } + + if req.Options == 0 { + return nil, fmt.Errorf("request with no options set") + } + + // Generate the list of peers to be queried, either the + // `singlePeer` indicated or all peers available (sorted + // by an internal peer tracker with some randomness injected). + var selectPeers []peer.ID + if singlePeer != nil { + selectPeers = append(selectPeers, singlePeer...) + } else { + selectPeers = c.getShuffledPeers() + } + if len(selectPeers) == 0 { + return nil, fmt.Errorf("no peers available") + } + + // Try the request for each peer in the list, + // return on the first successful response. + // FIXME: Doing this serially isn't great, but fetching in parallel + // may not be a good idea either. Think about this more. + globalTime := time.Now() + // Global time used to track what is the expected time we will need to get + // a response if a client fails us. + for _, peer := range selectPeers { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("context cancelled: %w", ctx.Err()) + default: + } + + // Send request, read response. + res, err := c.sendRequestToPeer(ctx, peer, req) + if err != nil { + if !errors.Is(err, network.ErrNoConn) { + exchangeClientLogger.Warnf("could not send request to peer %s: %s", + peer.String(), err) + } + continue + } + + // Process and validate response. + validRes, err := c.processResponse(req, res, tipsets) + if err != nil { + exchangeClientLogger.Warnf("processing peer %s response failed: %s", peer.String(), err) + continue + } + + c.peerTracker.logGlobalSuccess(time.Since(globalTime)) + c.host.ConnManager().TagPeer(peer, "bsync", SuccessPeerTagValue) + return validRes, nil + } + + return nil, fmt.Errorf("doRequest failed for all peers") +} + +// Process and validate response. 
Check the status, the integrity of the +// information returned, and that it matches the request. Extract the information +// into a `validatedResponse` for the external-facing APIs to select what they +// need. +// +// We are conflating in the single error returned both status and validation +// errors. Peer penalization should happen here then, before returning, so +// we can apply the correct penalties depending on the cause of the error. +// FIXME: Add the `peer` as argument once we implement penalties. +func (c *client) processResponse(req *exchange.Request, res *exchange.Response, tipsets []*types.TipSet) (*validatedResponse, error) { + err := res.StatusToError() + if err != nil { + return nil, fmt.Errorf("status error: %s", err) + } + + options := exchange.ParseOptions(req.Options) + if options.IsEmpty() { + // Safety check: this shouldn't have been sent, and even if it did + // it should have been caught by the peer in its error status. + return nil, fmt.Errorf("nothing was requested") + } + + // Verify that the chain segment returned is in the valid range. + // Note that the returned length might be less than requested. + resLength := len(res.Chain) + if resLength == 0 { + return nil, fmt.Errorf("got no chain in successful response") + } + if resLength > int(req.Length) { + return nil, fmt.Errorf("got longer response (%d) than requested (%d)", + resLength, req.Length) + } + if resLength < int(req.Length) && res.Status != exchange.Partial { + return nil, fmt.Errorf("got less than requested without a proper status: %d", res.Status) + } + + validRes := &validatedResponse{} + if options.IncludeHeaders { + // Check for valid block sets and extract them into `TipSet`s. 
+ validRes.tipsets = make([]*types.TipSet, resLength) + for i := 0; i < resLength; i++ { + if res.Chain[i] == nil { + return nil, fmt.Errorf("response with nil tipset in pos %d", i) + } + for blockIdx, block := range res.Chain[i].Blocks { + if block == nil { + return nil, fmt.Errorf("tipset with nil block in pos %d", blockIdx) + // FIXME: Maybe we should move this check to `NewTipSet`. + } + } + + validRes.tipsets[i], err = types.NewTipSet(res.Chain[i].Blocks) + if err != nil { + return nil, fmt.Errorf("invalid tipset blocks at height (head - %d): %w", i, err) + } + } + + // Check that the returned head matches the one requested + if !types.CidArrsEqual(validRes.tipsets[0].Key().Cids(), req.Head) { + return nil, fmt.Errorf("returned chain head does not match request") + } + + // Check `TipSet`s are connected (valid chain). + for i := 0; i < len(validRes.tipsets)-1; i++ { + if !validRes.tipsets[i].IsChildOf(validRes.tipsets[i+1]) { + return nil, fmt.Errorf("tipsets are not connected at height (head - %d)/(head - %d)", + i, i+1) + // FIXME: Maybe give more information here, like CIDs. + } + } + } + + if options.IncludeMessages { + validRes.messages = make([]*exchange.CompactedMessages, resLength) + for i := 0; i < resLength; i++ { + if res.Chain[i].Messages == nil { + return nil, fmt.Errorf("no messages included for tipset at height (head - %d)", i) + } + validRes.messages[i] = res.Chain[i].Messages + } + + if options.IncludeHeaders { + // If the headers were also returned check that the compression + // indexes are valid before `toFullTipSets()` is called by the + // consumer. + err := c.validateCompressedIndices(res.Chain) + if err != nil { + return nil, err + } + } else { + // If we didn't request the headers they should have been provided + // by the caller. 
+ if len(tipsets) < len(res.Chain) { + return nil, fmt.Errorf("not enought tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets)) + } + chain := make([]*exchange.BSTipSet, 0, resLength) + for i, resChain := range res.Chain { + next := &exchange.BSTipSet{ + Blocks: tipsets[i].Blocks(), + Messages: resChain.Messages, + } + chain = append(chain, next) + } + + err := c.validateCompressedIndices(chain) + if err != nil { + return nil, err + } + } + } + + return validRes, nil +} + +func (c *client) validateCompressedIndices(chain []*exchange.BSTipSet) error { + resLength := len(chain) + for tipsetIdx := 0; tipsetIdx < resLength; tipsetIdx++ { + msgs := chain[tipsetIdx].Messages + blocksNum := len(chain[tipsetIdx].Blocks) + + if len(msgs.BlsIncludes) != blocksNum { + return fmt.Errorf("BlsIncludes (%d) does not match number of blocks (%d)", + len(msgs.BlsIncludes), blocksNum) + } + + if len(msgs.SecpkIncludes) != blocksNum { + return fmt.Errorf("SecpkIncludes (%d) does not match number of blocks (%d)", + len(msgs.SecpkIncludes), blocksNum) + } + + for blockIdx := 0; blockIdx < blocksNum; blockIdx++ { + for _, mi := range msgs.BlsIncludes[blockIdx] { + if int(mi) >= len(msgs.Bls) { + return fmt.Errorf("index in BlsIncludes (%d) exceeds number of messages (%d)", + mi, len(msgs.Bls)) + } + } + + for _, mi := range msgs.SecpkIncludes[blockIdx] { + if int(mi) >= len(msgs.Secpk) { + return fmt.Errorf("index in SecpkIncludes (%d) exceeds number of messages (%d)", + mi, len(msgs.Secpk)) + } + } + } + } + + return nil +} + +// GetBlocks implements Client.GetBlocks(). Refer to the godocs there. 
+func (c *client) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) { + ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks") + defer span.End() + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("tipset", fmt.Sprint(tsk.Cids())), + trace.Int64Attribute("count", int64(count)), + ) + } + + req := &exchange.Request{ + Head: tsk.Cids(), + Length: uint64(count), + Options: exchange.Headers, + } + + validRes, err := c.doRequest(ctx, req, nil, nil) + if err != nil { + return nil, err + } + + return validRes.tipsets, nil +} + +// GetFullTipSet implements Client.GetFullTipSet(). Refer to the godocs there. +func (c *client) GetFullTipSet(ctx context.Context, peers []peer.ID, tsk types.TipSetKey) (*types.FullTipSet, error) { + // TODO: round robin through these peers on error + + req := &exchange.Request{ + Head: tsk.Cids(), + Length: 1, + Options: exchange.Headers | exchange.Messages, + } + + validRes, err := c.doRequest(ctx, req, peers, nil) + if err != nil { + return nil, err + } + + return validRes.toFullTipSets()[0], nil + // If `doRequest` didn't fail we are guaranteed to have at least + // *one* tipset here, so it's safe to index directly. +} + +// GetChainMessages implements Client.GetChainMessages(). Refer to the godocs there. 
+func (c *client) GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*exchange.CompactedMessages, error) { + head := tipsets[0] + length := uint64(len(tipsets)) + + ctx, span := trace.StartSpan(ctx, "GetChainMessages") + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("tipset", fmt.Sprint(head.Key().Cids())), + trace.Int64Attribute("count", int64(length)), + ) + } + defer span.End() + + req := &exchange.Request{ + Head: head.Key().Cids(), + Length: length, + Options: exchange.Messages, + } + + validRes, err := c.doRequest(ctx, req, nil, tipsets) + if err != nil { + return nil, err + } + + return validRes.messages, nil +} + +// Send a request to a peer. Write request in the stream and read the +// response back. We do not do any processing of the request/response +// here. +func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *exchange.Request) (_ *exchange.Response, err error) { + // Trace code. + ctx, span := trace.StartSpan(ctx, "sendRequestToPeer") + defer span.End() + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("peer", peer.Pretty()), + ) + } + defer func() { + if err != nil { + if span.IsRecordingEvents() { + span.SetStatus(trace.Status{ + Code: 5, + Message: err.Error(), + }) + } + } + }() + // -- TRACE -- + + supported, err := c.host.Peerstore().SupportsProtocols(peer, exchange.ChainExchangeProtocolID) + if err != nil { + c.RemovePeer(peer) + return nil, fmt.Errorf("failed to get protocols for peer: %w", err) + } + if len(supported) == 0 || supported[0] != exchange.ChainExchangeProtocolID { + c.RemovePeer(peer) + return nil, fmt.Errorf("peer %s does not support protocols %s", peer, []string{exchange.ChainExchangeProtocolID}) + } + + connectionStart := time.Now() + + // Open stream to peer. 
+ stream, err := c.host.NewStream( + network.WithNoDial(ctx, "should already have connection"), + peer, + exchange.ChainExchangeProtocolID) + if err != nil { + c.RemovePeer(peer) + return nil, fmt.Errorf("failed to open stream to peer: %w", err) + } + + defer func() { + // Note: this will become just stream.Close once we've completed the go-libp2p migration to + // go-libp2p-core 0.7.0 + go stream.Close() //nolint:errcheck + }() + + // Write request. + _ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline)) + if err := cborutil.WriteCborRPC(stream, req); err != nil { + _ = stream.SetWriteDeadline(time.Time{}) + c.peerTracker.logFailure(peer, time.Since(connectionStart), req.Length) + // FIXME: Should we also remove peer here? + return nil, err + } + _ = stream.SetWriteDeadline(time.Time{}) // clear deadline // FIXME: Needs + // its own API (https://github.com/libp2p/go-libp2p/core/issues/162). + + // Read response. + _ = stream.SetReadDeadline(time.Time{}) + + // TODO Note: this will remove once we've completed the go-libp2p migration to + // go-libp2p-core 0.7.0 + respBytes, err := io.ReadAll(bufio.NewReader(NewInct(stream, ReadResMinSpeed, ReadResDeadline))) + if err != nil { + return nil, err + } + + var res exchange.Response + err = cborutil.ReadCborRPC( + bytes.NewReader(respBytes), + // bufio.NewReader(NewInct(stream, ReadResMinSpeed, ReadResDeadline)), + &res) + if err != nil { + c.peerTracker.logFailure(peer, time.Since(connectionStart), req.Length) + return nil, fmt.Errorf("failed to read chainxchg response: %w", err) + } + + // FIXME: Move all this together at the top using a defer as done elsewhere. + // Maybe we need to declare `res` in the signature. 
+ if span.IsRecordingEvents() { + span.AddAttributes( + trace.Int64Attribute("resp_status", int64(res.Status)), + trace.StringAttribute("msg", res.ErrorMessage), + trace.Int64Attribute("chain_len", int64(len(res.Chain))), + ) + } + + c.peerTracker.logSuccess(peer, time.Since(connectionStart), uint64(len(res.Chain))) + // FIXME: We should really log a success only after we validate the response. + // It might be a bit hard to do. + return &res, nil +} + +// AddPeer implements Client.AddPeer(). Refer to the godocs there. +func (c *client) AddPeer(p peer.ID) { + c.peerTracker.addPeer(p) +} + +// RemovePeer implements Client.RemovePeer(). Refer to the godocs there. +func (c *client) RemovePeer(p peer.ID) { + c.peerTracker.removePeer(p) +} + +// getShuffledPeers returns a preference-sorted set of peers (by latency +// and failure counting), shuffling the first few peers so we don't always +// pick the same peer. +// FIXME: Consider merging with `shufflePrefix()s`. +func (c *client) getShuffledPeers() []peer.ID { + peers := c.peerTracker.prefSortedPeers() + shufflePrefix(peers) + return peers +} + +func shufflePrefix(peers []peer.ID) { + prefix := ShufflePeersPrefix + if len(peers) < prefix { + prefix = len(peers) + } + + buf := make([]peer.ID, prefix) + perm := rand.Perm(prefix) + for i, v := range perm { + buf[i] = peers[v] + } + + copy(peers, buf) +} diff --git a/pkg/net/exchange/doc.go b/pkg/net/exchange/doc.go new file mode 100644 index 0000000000..21abc38c4b --- /dev/null +++ b/pkg/net/exchange/doc.go @@ -0,0 +1,19 @@ +// Package exchange contains the ChainExchange server and client components. +// +// ChainExchange is the basic chain synchronization protocol of Filecoin. +// ChainExchange is an RPC-oriented protocol, with a single operation to +// request blocks for now. +// +// A request contains a start anchor block (referred to with a CID), and a +// amount of blocks requested beyond the anchor (including the anchor itself). 
+// +// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports +// two options at the moment: +// +// - include block contents +// - include block messages +// +// The response will include a status code, an optional message, and the +// response payload in case of success. The payload is a slice of serialized +// tipsets. +package exchange diff --git a/pkg/net/exchange/inct.go b/pkg/net/exchange/inct.go new file mode 100644 index 0000000000..099d8c0442 --- /dev/null +++ b/pkg/net/exchange/inct.go @@ -0,0 +1,68 @@ +package exchange + +import ( + "io" + "time" +) + +type ReaderDeadline interface { + Read([]byte) (int, error) + SetReadDeadline(time.Time) error +} + +type incrt struct { + rd ReaderDeadline + + waitPerByte time.Duration + wait time.Duration + maxWait time.Duration +} + +// New creates an Incremental Reader Timeout, with minimum sustained speed of +// minSpeed bytes per second and with maximum wait of maxWait +func NewInct(rd ReaderDeadline, minSpeed int64, maxWait time.Duration) io.Reader { + return &incrt{ + rd: rd, + waitPerByte: time.Second / time.Duration(minSpeed), + wait: maxWait, + maxWait: maxWait, + } +} + +type errNoWait struct{} + +func (err errNoWait) Error() string { + return "wait time exceeded" +} + +func (err errNoWait) Timeout() bool { + return true +} + +func (crt *incrt) Read(buf []byte) (int, error) { + start := time.Now() + if crt.wait == 0 { + return 0, errNoWait{} + } + + err := crt.rd.SetReadDeadline(start.Add(crt.wait)) + if err != nil { + log.Debugf("unable to set deadline: %+v", err) + } + + n, err := crt.rd.Read(buf) + + _ = crt.rd.SetReadDeadline(time.Time{}) + if err == nil { + dur := time.Since(start) + crt.wait -= dur + crt.wait += time.Duration(n) * crt.waitPerByte + if crt.wait < 0 { + crt.wait = 0 + } + if crt.wait > crt.maxWait { + crt.wait = crt.maxWait + } + } + return n, err +} diff --git a/pkg/net/exchange/interfaces.go b/pkg/net/exchange/interfaces.go new file mode 100644 index 
0000000000..8d19b06fc1 --- /dev/null +++ b/pkg/net/exchange/interfaces.go @@ -0,0 +1,43 @@ +package exchange + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// Server is the responder side of the ChainExchange protocol. It accepts +// requests from clients and services them by returning the requested +// chain data. +type Server interface { + Register() +} + +// Client is the requesting side of the ChainExchange protocol. It acts as +// a proxy for other components to request chain data from peers. It is chiefly +// used by the Syncer. +type Client interface { + // GetBlocks fetches block headers from the network, from the provided + // tipset *backwards*, returning as many tipsets as the count parameter, + // or less. + GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) + + // GetChainMessages fetches messages from the network, starting from the first provided tipset + // and returning messages from as many tipsets as requested or less. + GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*exchange.CompactedMessages, error) + + // GetFullTipSet fetches a full tipset from a given peer. If successful, + // the fetched object contains block headers and all messages in full form. + GetFullTipSet(ctx context.Context, peer []peer.ID, tsk types.TipSetKey) (*types.FullTipSet, error) + + // AddPeer adds a peer to the pool of peers that the Client requests + // data from. + AddPeer(peer peer.ID) + + // RemovePeer removes a peer from the pool of peers that the Client + // requests data from. 
+ RemovePeer(peer peer.ID) +} diff --git a/pkg/net/exchange/peer_tracker.go b/pkg/net/exchange/peer_tracker.go new file mode 100644 index 0000000000..2d37ac19c6 --- /dev/null +++ b/pkg/net/exchange/peer_tracker.go @@ -0,0 +1,183 @@ +package exchange + +// FIXME: This needs to be reviewed. + +import ( + "sort" + "sync" + "time" + + host "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/pkg/net/peermgr" +) + +type peerStats struct { + successes int + failures int + firstSeen time.Time + averageTime time.Duration +} + +type bsPeerTracker struct { + lk sync.Mutex + + peers map[peer.ID]*peerStats + avgGlobalTime time.Duration + + pmgr peermgr.IPeerMgr +} + +func newPeerTracker(h host.Host, pmgr peermgr.IPeerMgr) *bsPeerTracker { + bsPt := &bsPeerTracker{ + peers: make(map[peer.ID]*peerStats), + pmgr: pmgr, + } + + sub, err := h.EventBus().Subscribe(new(peermgr.FilPeerEvt)) + if err != nil { + panic(err) + } + + go func() { + for evt := range sub.Out() { + pEvt := evt.(peermgr.FilPeerEvt) + switch pEvt.Type { + case peermgr.AddFilPeerEvt: + bsPt.addPeer(pEvt.ID) + case peermgr.RemoveFilPeerEvt: + bsPt.removePeer(pEvt.ID) + } + } + }() + return bsPt +} + +func (bpt *bsPeerTracker) addPeer(p peer.ID) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + if _, ok := bpt.peers[p]; ok { + return + } + bpt.peers[p] = &peerStats{ + firstSeen: time.Now(), + } +} + +const ( + // newPeerMul is how much better than average is the new peer assumed to be + // less than one to encourouge trying new peers + newPeerMul = 0.9 +) + +func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID { + // TODO: this could probably be cached, but as long as its not too many peers, fine for now + bpt.lk.Lock() + defer bpt.lk.Unlock() + out := make([]peer.ID, 0, len(bpt.peers)) + for p := range bpt.peers { + out = append(out, p) + } + + // sort by 'expected cost' of requesting data from that peer + // additionally handle edge cases where not 
enough data is available + sort.Slice(out, func(i, j int) bool { + pi := bpt.peers[out[i]] + pj := bpt.peers[out[j]] + + var costI, costJ float64 + + getPeerInitLat := func(p peer.ID) float64 { + return float64(bpt.avgGlobalTime) * newPeerMul + } + + if pi.successes+pi.failures > 0 { + failRateI := float64(pi.failures) / float64(pi.failures+pi.successes) + costI = float64(pi.averageTime) + failRateI*float64(bpt.avgGlobalTime) + } else { + costI = getPeerInitLat(out[i]) + } + + if pj.successes+pj.failures > 0 { + failRateJ := float64(pj.failures) / float64(pj.failures+pj.successes) + costJ = float64(pj.averageTime) + failRateJ*float64(bpt.avgGlobalTime) + } else { + costJ = getPeerInitLat(out[j]) + } + + return costI < costJ + }) + + return out +} + +const ( + // xInvAlpha = (N+1)/2 + + localInvAlpha = 10 // 86% of the value is the last 19 + globalInvAlpha = 25 // 86% of the value is the last 49 +) + +func (bpt *bsPeerTracker) logGlobalSuccess(dur time.Duration) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + + if bpt.avgGlobalTime == 0 { + bpt.avgGlobalTime = dur + return + } + delta := (dur - bpt.avgGlobalTime) / globalInvAlpha + bpt.avgGlobalTime += delta +} + +func logTime(pi *peerStats, dur time.Duration) { + if pi.averageTime == 0 { + pi.averageTime = dur + return + } + delta := (dur - pi.averageTime) / localInvAlpha + pi.averageTime += delta +} + +func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration, reqSize uint64) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + + var pi *peerStats + var ok bool + if pi, ok = bpt.peers[p]; !ok { + log.Debugf("log success called on peer not in tracker", "peerid", p.String()) + return + } + + pi.successes++ + if reqSize == 0 { + reqSize = 1 + } + logTime(pi, dur/time.Duration(reqSize)) +} + +func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration, reqSize uint64) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + + var pi *peerStats + var ok bool + if pi, ok = bpt.peers[p]; !ok { + log.Warn("log failure called on peer 
not in tracker", "peerid", p.String()) + return + } + + pi.failures++ + if reqSize == 0 { + reqSize = 1 + } + logTime(pi, dur/time.Duration(reqSize)) +} + +func (bpt *bsPeerTracker) removePeer(p peer.ID) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + delete(bpt.peers, p) +} diff --git a/pkg/net/exchange/protocol.go b/pkg/net/exchange/protocol.go new file mode 100644 index 0000000000..e29777ee09 --- /dev/null +++ b/pkg/net/exchange/protocol.go @@ -0,0 +1,71 @@ +package exchange + +import ( + "time" + + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" + "github.com/filecoin-project/venus/venus-shared/types" + logging "github.com/ipfs/go-log" +) + +var log = logging.Logger("exchange") + +const ( + // Extracted constants from the code. + // FIXME: Should be reviewed and confirmed. + SuccessPeerTagValue = 25 + WriteReqDeadline = 5 * time.Second + ReadResDeadline = WriteReqDeadline + ReadResMinSpeed = 50 << 10 + ShufflePeersPrefix = 16 + WriteResDeadline = 60 * time.Second +) + +// `Request` processed and validated to query the tipsets needed. +type validatedRequest struct { + head types.TipSetKey + length uint64 + options *exchange.Options +} + +// Response that has been validated according to the protocol +// and can be safely accessed. +type validatedResponse struct { + tipsets []*types.TipSet + // List of all messages per tipset (grouped by tipset, + // not by block, hence a single index like `tipsets`). + messages []*exchange.CompactedMessages +} + +// Decompress messages and form full tipsets with them. The headers +// need to have been requested as well. +func (res *validatedResponse) toFullTipSets() []*types.FullTipSet { + if len(res.tipsets) == 0 || len(res.tipsets) != len(res.messages) { + // This decompression can only be done if both headers and + // messages are returned in the response. (The second check + // is already implied by the guarantees of `validatedResponse`, + // added here just for completeness.) 
+ return nil + } + ftsList := make([]*types.FullTipSet, len(res.tipsets)) + for tipsetIdx := range res.tipsets { + fts := &types.FullTipSet{} // FIXME: We should use the `NewFullTipSet` API. + msgs := res.messages[tipsetIdx] + for blockIdx, b := range res.tipsets[tipsetIdx].Blocks() { + fb := &types.FullBlock{ + Header: b, + } + + for _, mi := range msgs.BlsIncludes[blockIdx] { + fb.BLSMessages = append(fb.BLSMessages, msgs.Bls[mi]) + } + for _, mi := range msgs.SecpkIncludes[blockIdx] { + fb.SECPMessages = append(fb.SECPMessages, msgs.Secpk[mi]) + } + + fts.Blocks = append(fts.Blocks, fb) + } + ftsList[tipsetIdx] = fts + } + return ftsList +} diff --git a/pkg/net/exchange/server.go b/pkg/net/exchange/server.go new file mode 100644 index 0000000000..ea704fe643 --- /dev/null +++ b/pkg/net/exchange/server.go @@ -0,0 +1,271 @@ +package exchange + +import ( + "bufio" + "context" + "fmt" + "time" + + cborutil "github.com/filecoin-project/go-cbor-util" + logging "github.com/ipfs/go-log" + + "go.opencensus.io/trace" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/host" + inet "github.com/libp2p/go-libp2p/core/network" + + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var exchangeServerLog = logging.Logger("exchange.server") + +type chainReader interface { + GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +type messageStore interface { + ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) + + LoadUnsignedMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.Message, error) + LoadSignedMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.SignedMessage, error) +} + +// server implements exchange.Server. It services requests for the +// libp2p ChainExchange protocol. 
+type server struct { + cr chainReader + mr messageStore + h host.Host +} + +var _ Server = (*server)(nil) + +// NewServer creates a new libp2p-based exchange.Server. It services requests +// for the libp2p ChainExchange protocol. +func NewServer(cr chainReader, mr messageStore, h host.Host) Server { + return &server{ + cr: cr, + mr: mr, + h: h, + } +} + +func (s *server) Register() { + s.h.SetStreamHandler(exchange.ChainExchangeProtocolID, s.handleStream) // new +} + +// HandleStream implements Server.HandleStream. Refer to the godocs there. +func (s *server) handleStream(stream inet.Stream) { + ctx, span := trace.StartSpan(context.Background(), "chainxchg.HandleStream") + defer span.End() + + // Note: this will become just stream.Close once we've completed the go-libp2p migration to + // go-libp2p-core 0.7.0 + defer stream.Close() //nolint:errcheck + + var req exchange.Request + if err := cborutil.ReadCborRPC(bufio.NewReader(stream), &req); err != nil { + exchangeServerLog.Warnf("failed to read block sync request: %s", err) + return + } + fmt.Println(stream.Conn().RemotePeer()) + exchangeServerLog.Infow("block sync request", "start", req.Head, "len", req.Length) + + resp, err := s.processRequest(ctx, &req) + if err != nil { + exchangeServerLog.Warn("failed to process request: ", err) + return + } + + _ = stream.SetDeadline(time.Now().Add(WriteResDeadline)) + if err := cborutil.WriteCborRPC(stream, resp); err != nil { + _ = stream.SetDeadline(time.Time{}) + exchangeServerLog.Warnw("failed to write back response for handle stream", + "err", err, "peer", stream.Conn().RemotePeer()) + return + } + _ = stream.SetDeadline(time.Time{}) +} + +// Validate and service the request. We return either a protocol +// response or an internal error. 
+func (s *server) processRequest(ctx context.Context, req *exchange.Request) (*exchange.Response, error) { + validReq, errResponse := validateRequest(ctx, req) + if errResponse != nil { + // The request did not pass validation, return the response + // indicating it. + return errResponse, nil + } + + return s.serviceRequest(ctx, validReq) +} + +// Validate request. We either return a `validatedRequest`, or an error +// `Response` indicating why we can't process it. We do not return any +// internal errors here, we just signal protocol ones. +func validateRequest(ctx context.Context, req *exchange.Request) (*validatedRequest, *exchange.Response) { + _, span := trace.StartSpan(ctx, "chainxchg.ValidateRequest") + defer span.End() + + validReq := validatedRequest{} + + validReq.options = exchange.ParseOptions(req.Options) + if validReq.options.IsEmpty() { + return nil, &exchange.Response{ + Status: exchange.BadRequest, + ErrorMessage: "no options set", + } + } + + validReq.length = req.Length + if validReq.length > exchange.MaxRequestLength { + return nil, &exchange.Response{ + Status: exchange.BadRequest, + ErrorMessage: fmt.Sprintf("request length over maximum allowed (%d)", + exchange.MaxRequestLength), + } + } + if validReq.length == 0 { + return nil, &exchange.Response{ + Status: exchange.BadRequest, + ErrorMessage: "invalid request length of zero", + } + } + + if len(req.Head) == 0 { + return nil, &exchange.Response{ + Status: exchange.BadRequest, + ErrorMessage: "no cids in request", + } + } + validReq.head = types.NewTipSetKey(req.Head...) + + // FIXME: Add as a defer at the start. 
+ span.AddAttributes( + trace.BoolAttribute("blocks", validReq.options.IncludeHeaders), + trace.BoolAttribute("messages", validReq.options.IncludeMessages), + trace.Int64Attribute("reqlen", int64(validReq.length)), + ) + + return &validReq, nil +} + +func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*exchange.Response, error) { + _, span := trace.StartSpan(ctx, "chainxchg.ServiceRequest") + defer span.End() + + chain, err := collectChainSegment(ctx, s.cr, s.mr, req) + if err != nil { + exchangeServerLog.Warn("block sync request: collectChainSegment failed: ", err) + return &exchange.Response{ + Status: exchange.InternalError, + ErrorMessage: err.Error(), + }, nil + } + + status := exchange.Ok + if len(chain) < int(req.length) { + status = exchange.Partial + } + + return &exchange.Response{ + Chain: chain, + Status: status, + }, nil +} + +func collectChainSegment(ctx context.Context, cr chainReader, mr messageStore, req *validatedRequest) ([]*exchange.BSTipSet, error) { + var bstips []*exchange.BSTipSet + + cur := req.head + for { + var bst exchange.BSTipSet + ts, err := cr.GetTipSet(ctx, cur) + if err != nil { + return nil, fmt.Errorf("failed loading tipset %s: %w", cur, err) + } + + if req.options.IncludeHeaders { + bst.Blocks = ts.Blocks() + } + + if req.options.IncludeMessages { + bmsgs, bmincl, smsgs, smincl, err := GatherMessages(ctx, cr, mr, ts) + if err != nil { + return nil, fmt.Errorf("gather messages failed: %w", err) + } + + // FIXME: Pass the response to `gatherMessages()` and set all this there. + bst.Messages = &exchange.CompactedMessages{} + bst.Messages.Bls = bmsgs + bst.Messages.BlsIncludes = bmincl + bst.Messages.Secpk = smsgs + bst.Messages.SecpkIncludes = smincl + } + + bstips = append(bstips, &bst) + + // If we collected the length requested or if we reached the + // start (genesis), then stop. 
+ if uint64(len(bstips)) >= req.length || ts.Height() == 0 { + return bstips, nil + } + + cur = ts.Parents() + } +} + +func GatherMessages(ctx context.Context, cr chainReader, mr messageStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { + blsmsgmap := make(map[cid.Cid]uint64) + secpkmsgmap := make(map[cid.Cid]uint64) + var secpkincl, blsincl [][]uint64 + + var blscids, secpkcids []cid.Cid + for _, block := range ts.Blocks() { + bc, sc, err := mr.ReadMsgMetaCids(context.TODO(), block.Messages) + if err != nil { + return nil, nil, nil, nil, err + } + + // FIXME: DRY. Use `chain.Message` interface. + bmi := make([]uint64, 0, len(bc)) + for _, m := range bc { + i, ok := blsmsgmap[m] + if !ok { + i = uint64(len(blscids)) + blscids = append(blscids, m) + blsmsgmap[m] = i + } + + bmi = append(bmi, i) + } + blsincl = append(blsincl, bmi) + + smi := make([]uint64, 0, len(sc)) + for _, m := range sc { + i, ok := secpkmsgmap[m] + if !ok { + i = uint64(len(secpkcids)) + secpkcids = append(secpkcids, m) + secpkmsgmap[m] = i + } + + smi = append(smi, i) + } + secpkincl = append(secpkincl, smi) + } + + blsmsgs, err := mr.LoadUnsignedMessagesFromCids(ctx, blscids) + if err != nil { + return nil, nil, nil, nil, err + } + + secpkmsgs, err := mr.LoadSignedMessagesFromCids(ctx, secpkcids) + if err != nil { + return nil, nil, nil, nil, err + } + + return blsmsgs, blsincl, secpkmsgs, secpkincl, nil +} diff --git a/pkg/net/gossipsub.go b/pkg/net/gossipsub.go new file mode 100644 index 0000000000..54ee97df95 --- /dev/null +++ b/pkg/net/gossipsub.go @@ -0,0 +1,335 @@ +package net + +import ( + "context" + "net" + "time" + + "github.com/filecoin-project/go-state-types/abi" + logging "github.com/ipfs/go-log/v2" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + blake2b "github.com/minio/blake2b-simd" + + 
"github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var gossipsubLog = logging.Logger("gossipsub") + +func init() { + // configure larger overlay parameters + pubsub.GossipSubD = 8 + pubsub.GossipSubDscore = 6 + pubsub.GossipSubDout = 3 + pubsub.GossipSubDlo = 6 + pubsub.GossipSubDhi = 12 + pubsub.GossipSubDlazy = 12 + pubsub.GossipSubDirectConnectInitialDelay = 30 * time.Second + pubsub.GossipSubIWantFollowupTime = 5 * time.Second + pubsub.GossipSubHistoryLength = 10 + pubsub.GossipSubGossipFactor = 0.1 +} + +const ( + GossipScoreThreshold = -500 + PublishScoreThreshold = -1000 + GraylistScoreThreshold = -2500 + AcceptPXScoreThreshold = 1000 + OpportunisticGraftScoreThreshold = 3.5 +) + +func NewGossipSub(ctx context.Context, + h host.Host, + sk *ScoreKeeper, + networkName string, + drandSchedule map[abi.ChainEpoch]config.DrandEnum, + bootNodes []peer.AddrInfo, +) (*pubsub.PubSub, error) { + bootstrappers := make(map[peer.ID]struct{}) + for _, info := range bootNodes { + bootstrappers[info.ID] = struct{}{} + } + + blockTopic := types.BlockTopic(networkName) + msgTopic := types.MessageTopic(networkName) + indexerIngestTopic := types.IndexerIngestTopic(networkName) + + topicParams := map[string]*pubsub.TopicScoreParams{ + blockTopic: { + // expected 10 blocks/min + TopicWeight: 0.1, // max cap is 50, max mesh penalty is -10, single invalid message is -100 + + // 1 tick per second, maxes at 1 after 1 hour + TimeInMeshWeight: 0.00027, // ~1/3600 + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 1, + + // deliveries decay after 1 hour, cap at 100 blocks + FirstMessageDeliveriesWeight: 5, // max value is 500 + FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour), + FirstMessageDeliveriesCap: 100, // 100 blocks in an hour + + // Mesh Delivery Failure is currently turned off for blocks + // This is on purpose as + // - the traffic is very low for meaningful distribution of incoming edges. 
+ // - the reaction time needs to be very slow -- in the order of 10 min at least + // so we might as well let opportunistic grafting repair the mesh on its own + // pace. + // - the network is too small, so large asymmetries can be expected between mesh + // edges. + // We should revisit this once the network grows. + // + // // tracks deliveries in the last minute + // // penalty activates at 1 minute and expects ~0.4 blocks + // MeshMessageDeliveriesWeight: -576, // max penalty is -100 + // MeshMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Minute), + // MeshMessageDeliveriesCap: 10, // 10 blocks in a minute + // MeshMessageDeliveriesThreshold: 0.41666, // 10/12/2 blocks/min + // MeshMessageDeliveriesWindow: 10 * time.Millisecond, + // MeshMessageDeliveriesActivation: time.Minute, + // + // // decays after 15 min + // MeshFailurePenaltyWeight: -576, + // MeshFailurePenaltyDecay: pubsub.ScoreParameterDecay(15 * time.Minute), + + // invalid messages decay after 1 hour + InvalidMessageDeliveriesWeight: -1000, + InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour), + }, + msgTopic: { + // expected > 1 tx/second + TopicWeight: 0.1, // max cap is 5, single invalid message is -100 + + // 1 tick per second, maxes at 1 hour + TimeInMeshWeight: 0.0002778, // ~1/3600 + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 1, + + // deliveries decay after 10min, cap at 100 tx + FirstMessageDeliveriesWeight: 0.5, // max value is 50 + FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(10 * time.Minute), + FirstMessageDeliveriesCap: 100, // 100 messages in 10 minutes + + // Mesh Delivery Failure is currently turned off for messages + // This is on purpose as the network is still too small, which results in + // asymmetries and potential unmeshing from negative scores. 
+ // // tracks deliveries in the last minute + // // penalty activates at 1 min and expects 2.5 txs + // MeshMessageDeliveriesWeight: -16, // max penalty is -100 + // MeshMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Minute), + // MeshMessageDeliveriesCap: 100, // 100 txs in a minute + // MeshMessageDeliveriesThreshold: 2.5, // 60/12/2 txs/minute + // MeshMessageDeliveriesWindow: 10 * time.Millisecond, + // MeshMessageDeliveriesActivation: time.Minute, + + // // decays after 5min + // MeshFailurePenaltyWeight: -16, + // MeshFailurePenaltyDecay: pubsub.ScoreParameterDecay(5 * time.Minute), + + // invalid messages decay after 1 hour + InvalidMessageDeliveriesWeight: -1000, + InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour), + }, + } + + pgTopicWeights := map[string]float64{ + blockTopic: 10, + msgTopic: 1, + } + + ingestTopicParams := &pubsub.TopicScoreParams{ + // expected ~0.5 confirmed deals / min. sampled + TopicWeight: 0.1, + + TimeInMeshWeight: 0.00027, // ~1/3600 + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 1, + + FirstMessageDeliveriesWeight: 0.5, + FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour), + FirstMessageDeliveriesCap: 100, // allowing for burstiness + + InvalidMessageDeliveriesWeight: -1000, + InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour), + } + + drandTopicParams := &pubsub.TopicScoreParams{ + // expected 2 beaconsn/min + TopicWeight: 0.5, // 5x block topic; max cap is 62.5 + + // 1 tick per second, maxes at 1 after 1 hour + TimeInMeshWeight: 0.00027, // ~1/3600 + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 1, + + // deliveries decay after 1 hour, cap at 25 beacons + FirstMessageDeliveriesWeight: 5, // max value is 125 + FirstMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour), + FirstMessageDeliveriesCap: 25, // the maximum expected in an hour is ~26, including the decay + + // Mesh Delivery Failure is currently turned off for beacons + // This is on purpose 
as + // - the traffic is very low for meaningful distribution of incoming edges. + // - the reaction time needs to be very slow -- in the order of 10 min at least + // so we might as well let opportunistic grafting repair the mesh on its own + // pace. + // - the network is too small, so large asymmetries can be expected between mesh + // edges. + // We should revisit this once the network grows. + + // invalid messages decay after 1 hour + InvalidMessageDeliveriesWeight: -1000, + InvalidMessageDeliveriesDecay: pubsub.ScoreParameterDecay(time.Hour), + } + + var drandTopics []string + drandBootstrappers := make(map[peer.ID]struct{}) + for _, drandEnum := range drandSchedule { + df := config.DrandConfigs[drandEnum] + addrInfo, _ := parseDrandBootstrap(df) + for _, info := range addrInfo { + drandBootstrappers[info.ID] = struct{}{} + } + + topic, err := types.DrandTopic(df.ChainInfoJSON) + if err != nil { + return nil, err + } + topicParams[topic] = drandTopicParams + pgTopicWeights[topic] = 5 + drandTopics = append(drandTopics, topic) + } + + // Index ingestion whitelist + topicParams[indexerIngestTopic] = ingestTopicParams + + isBootstrapNode := false + + // IP colocation whitelist + var ipcoloWhitelist []*net.IPNet + + options := []pubsub.Option{ + // Gossipsubv1.1 configuration + pubsub.WithFloodPublish(true), + pubsub.WithMessageIdFn(hashMsgId), + pubsub.WithPeerScore( + &pubsub.PeerScoreParams{ + AppSpecificScore: func(p peer.ID) float64 { + // return a heavy positive score for bootstrappers so that we don't unilaterally prune + // them and accept PX from them. 
+ // we don't do that in the bootstrappers themselves to avoid creating a closed mesh + // between them (however we might want to consider doing just that) + _, ok := bootstrappers[p] + if ok && !isBootstrapNode { + return 2500 + } + + // todo: + // _, ok = drandBootstrappers[p] + // if ok && !isBootstrapNode { + // return 1500 + // } + + // TODO: we want to plug the application specific score to the node itself in order + // to provide feedback to the pubsub system based on observed behaviour + return 0 + }, + AppSpecificWeight: 1, + + // This sets the IP colocation threshold to 5 peers before we apply penalties + IPColocationFactorThreshold: 5, + IPColocationFactorWeight: -100, + IPColocationFactorWhitelist: ipcoloWhitelist, + + // P7: behavioural penalties, decay after 1hr + BehaviourPenaltyThreshold: 6, + BehaviourPenaltyWeight: -10, + BehaviourPenaltyDecay: pubsub.ScoreParameterDecay(time.Hour), + + DecayInterval: pubsub.DefaultDecayInterval, + DecayToZero: pubsub.DefaultDecayToZero, + + // this retains non-positive scores for 6 hours + RetainScore: 6 * time.Hour, + + // topic parameters + Topics: topicParams, + }, + &pubsub.PeerScoreThresholds{ + GossipThreshold: GossipScoreThreshold, + PublishThreshold: PublishScoreThreshold, + GraylistThreshold: GraylistScoreThreshold, + AcceptPXThreshold: AcceptPXScoreThreshold, + OpportunisticGraftThreshold: OpportunisticGraftScoreThreshold, + }, + ), + pubsub.WithPeerScoreInspect(sk.Update, 10*time.Second), + } + + // enable Peer eXchange on bootstrappers + if isBootstrapNode { + // turn off the mesh in bootstrappers -- only do gossip and PX + pubsub.GossipSubD = 0 + pubsub.GossipSubDscore = 0 + pubsub.GossipSubDlo = 0 + pubsub.GossipSubDhi = 0 + pubsub.GossipSubDout = 0 + pubsub.GossipSubDlazy = 64 + pubsub.GossipSubGossipFactor = 0.25 + pubsub.GossipSubPruneBackoff = 5 * time.Minute + // turn on PX + options = append(options, pubsub.WithPeerExchange(true)) + } + + // validation queue RED + var pgParams 
*pubsub.PeerGaterParams + + if isBootstrapNode { + pgParams = pubsub.NewPeerGaterParams( + 0.33, + pubsub.ScoreParameterDecay(2*time.Minute), + pubsub.ScoreParameterDecay(10*time.Minute), + ).WithTopicDeliveryWeights(pgTopicWeights) + } else { + pgParams = pubsub.NewPeerGaterParams( + 0.33, + pubsub.ScoreParameterDecay(2*time.Minute), + pubsub.ScoreParameterDecay(time.Hour), + ).WithTopicDeliveryWeights(pgTopicWeights) + } + + options = append(options, pubsub.WithPeerGater(pgParams)) + + allowTopics := []string{ + blockTopic, + msgTopic, + indexerIngestTopic, + } + allowTopics = append(allowTopics, drandTopics...) + options = append(options, + pubsub.WithSubscriptionFilter( + pubsub.WrapLimitSubscriptionFilter( + pubsub.NewAllowlistSubscriptionFilter(allowTopics...), + 100))) + + return pubsub.NewGossipSub(ctx, h, options...) +} + +func parseDrandBootstrap(df config.DrandConf) ([]peer.AddrInfo, error) { + // TODO: retry resolving, don't fail if at least one resolve succeeds + addrs, err := ParseAddresses(context.TODO(), df.Relays) + if err != nil { + gossipsubLog.Errorf("reoslving drand relays addresses: %+v", err) + return nil, nil + } + + return addrs, nil +} + +func hashMsgId(m *pubsub_pb.Message) string { + hash := blake2b.Sum256(m.Data) + return string(hash[:]) +} diff --git a/pkg/net/helloprotocol/cbor_gen.go b/pkg/net/helloprotocol/cbor_gen.go new file mode 100644 index 0000000000..94ac83406b --- /dev/null +++ b/pkg/net/helloprotocol/cbor_gen.go @@ -0,0 +1,259 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 

package helloprotocol

import (
	"fmt"
	"io"
	"math"
	"sort"

	abi "github.com/filecoin-project/go-state-types/abi"
	cid "github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"
	xerrors "golang.org/x/xerrors"
)

var _ = xerrors.Errorf
var _ = cid.Undef
var _ = math.E
var _ = sort.Sort

// 132 = 0x84: CBOR header for a fixed 4-element array (the four
// HelloMessage fields, matching the `extra != 4` check in UnmarshalCBOR).
var lengthBufHelloMessage = []byte{132}

func (t *HelloMessage) MarshalCBOR(w io.Writer) error {
	if t == nil {
		_, err := w.Write(cbg.CborNull)
		return err
	}

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufHelloMessage); err != nil {
		return err
	}

	// t.HeaviestTipSetCids (types.TipSetKey) (struct)
	if err := t.HeaviestTipSetCids.MarshalCBOR(cw); err != nil {
		return err
	}

	// t.HeaviestTipSetHeight (abi.ChainEpoch) (int64)
	if t.HeaviestTipSetHeight >= 0 {
		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.HeaviestTipSetHeight)); err != nil {
			return err
		}
	} else {
		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.HeaviestTipSetHeight-1)); err != nil {
			return err
		}
	}

	// t.HeaviestTipSetWeight (big.Int) (struct)
	if err := t.HeaviestTipSetWeight.MarshalCBOR(cw); err != nil {
		return err
	}

	// t.GenesisHash (cid.Cid) (struct)

	if err := cbg.WriteCid(cw, t.GenesisHash); err != nil {
		return xerrors.Errorf("failed to write cid field t.GenesisHash: %w", err)
	}

	return nil
}

func (t *HelloMessage) UnmarshalCBOR(r io.Reader) (err error) {
	*t = HelloMessage{}

	cr := cbg.NewCborReader(r)

	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}

	if extra != 4 {
		return fmt.Errorf("cbor input had wrong number of fields")
	}

	// t.HeaviestTipSetCids (types.TipSetKey) (struct)

	{

		if err := t.HeaviestTipSetCids.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.HeaviestTipSetCids: %w", err)
		}

	}
	// t.HeaviestTipSetHeight (abi.ChainEpoch) (int64)
	{
		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
		}
		switch maj {
		case cbg.MajUnsignedInt:
			extraI = int64(extra)
			if extraI < 0 {
				return fmt.Errorf("int64 positive overflow")
			}
		case cbg.MajNegativeInt:
			extraI = int64(extra)
			if extraI < 0 {
				return fmt.Errorf("int64 negative oveflow")
			}
			extraI = -1 - extraI
		default:
			return fmt.Errorf("wrong type for int64 field: %d", maj)
		}

		t.HeaviestTipSetHeight = abi.ChainEpoch(extraI)
	}
	// t.HeaviestTipSetWeight (big.Int) (struct)

	{

		if err := t.HeaviestTipSetWeight.UnmarshalCBOR(cr); err != nil {
			return xerrors.Errorf("unmarshaling t.HeaviestTipSetWeight: %w", err)
		}

	}
	// t.GenesisHash (cid.Cid) (struct)

	{

		c, err := cbg.ReadCid(cr)
		if err != nil {
			return xerrors.Errorf("failed to read cid field t.GenesisHash: %w", err)
		}

		t.GenesisHash = c

	}
	return nil
}

// 130 = 0x82: CBOR header for a fixed 2-element array (the two
// LatencyMessage fields, matching the `extra != 2` check in UnmarshalCBOR).
var lengthBufLatencyMessage = []byte{130}

func (t *LatencyMessage) MarshalCBOR(w io.Writer) error {
	if t == nil {
		_, err := w.Write(cbg.CborNull)
		return err
	}

	cw := cbg.NewCborWriter(w)

	if _, err := cw.Write(lengthBufLatencyMessage); err != nil {
		return err
	}

	// t.TArrival (int64) (int64)
	if t.TArrival >= 0 {
		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TArrival)); err != nil {
			return err
		}
	} else {
		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TArrival-1)); err != nil {
			return err
		}
	}

	// t.TSent (int64) (int64)
	if t.TSent >= 0 {
		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TSent)); err != nil {
			return err
		}
	} else {
		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TSent-1)); err != nil {
			return err
		}
	}
	return nil
}

func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) (err error) {
	*t = LatencyMessage{}

	cr := cbg.NewCborReader(r)

	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajArray {
		return fmt.Errorf("cbor input should be of type array")
	}

	if extra != 2 {
		return fmt.Errorf("cbor input had wrong number of fields")
	}

	// t.TArrival (int64) (int64)
	{
		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
		}
		switch maj {
		case cbg.MajUnsignedInt:
			extraI = int64(extra)
			if extraI < 0 {
				return fmt.Errorf("int64 positive overflow")
			}
		case cbg.MajNegativeInt:
			extraI = int64(extra)
			if extraI < 0 {
				return fmt.Errorf("int64 negative oveflow")
			}
			extraI = -1 - extraI
		default:
			return fmt.Errorf("wrong type for int64 field: %d", maj)
		}

		t.TArrival = int64(extraI)
	}
	// t.TSent (int64) (int64)
	{
		maj, extra, err := cr.ReadHeader()
		var extraI int64
		if err != nil {
			return err
		}
		switch maj {
		case cbg.MajUnsignedInt:
			extraI = int64(extra)
			if extraI < 0 {
				return fmt.Errorf("int64 positive overflow")
			}
		case cbg.MajNegativeInt:
			extraI = int64(extra)
			if extraI < 0 {
				return fmt.Errorf("int64 negative oveflow")
			}
			extraI = -1 - extraI
		default:
			return fmt.Errorf("wrong type for int64 field: %d", maj)
		}

		t.TSent = int64(extraI)
	}
	return nil
}
diff --git a/pkg/net/helloprotocol/hello_protocol.go b/pkg/net/helloprotocol/hello_protocol.go
new file mode 100644
index 0000000000..662b7768fb
--- /dev/null
+++ b/pkg/net/helloprotocol/hello_protocol.go
@@ -0,0 +1,333 @@
package helloprotocol

import (
	"bytes"
	"context"
	"fmt"
	"time"

	"github.com/filecoin-project/venus/pkg/chain"
	"github.com/filecoin-project/venus/pkg/net/exchange"
	"github.com/filecoin-project/venus/pkg/net/peermgr"
	"github.com/filecoin-project/venus/venus-shared/types"
	"github.com/libp2p/go-libp2p/core/peer"

"github.com/filecoin-project/go-state-types/abi" + fbig "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/host" + net "github.com/libp2p/go-libp2p/core/network" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/venus/pkg/metrics" +) + +var log = logging.Logger("/fil/hello") + +// helloProtocolID is the libp2p protocol identifier for the hello protocol. +const helloProtocolID = "/fil/hello/1.0.0" + +var ( + genesisErrCt = metrics.NewInt64Counter("hello_genesis_error", "Number of errors encountered in hello protocol due to incorrect genesis block") + helloMsgErrCt = metrics.NewInt64Counter("hello_message_error", "Number of errors encountered in hello protocol due to malformed message") +) + +// HelloMessage is the data structure of a single message in the hello protocol. +type HelloMessage struct { + HeaviestTipSetCids types.TipSetKey + HeaviestTipSetHeight abi.ChainEpoch + HeaviestTipSetWeight fbig.Int + GenesisHash cid.Cid +} + +// LatencyMessage is written in response to a hello message for measuring peer +// latency. +type LatencyMessage struct { + TArrival int64 + TSent int64 +} + +// HelloProtocolHandler implements the 'Hello' protocol handler. +// +// Upon connecting to a new node, we send them a message +// containing some information about the state of our chain, +// and receive the same information from them. This is used to +// initiate a chainsync and detect connections to forks. 
+type HelloProtocolHandler struct { //nolint + host host.Host + + genesis cid.Cid + + // peerDiscovered is called when new peers tell us about their chain + peerDiscovered PeerDiscoveredCallback + + // helloTimeOut is block delay + helloTimeOut time.Duration + + peerMgr peermgr.IPeerMgr + exchange exchange.Client + chainStore *chain.Store + messageStore *chain.MessageStore +} + +type PeerDiscoveredCallback func(ci *types.ChainInfo) + +type GetTipSetFunc func() (*types.TipSet, error) + +// NewHelloProtocolHandler creates a new instance of the hello protocol `Handler` and registers it to +// the given `host.Host`. +func NewHelloProtocolHandler(h host.Host, + peerMgr peermgr.IPeerMgr, + exchange exchange.Client, + chainStore *chain.Store, + messageStore *chain.MessageStore, + gen cid.Cid, + helloTimeOut time.Duration, +) *HelloProtocolHandler { + return &HelloProtocolHandler{ + host: h, + genesis: gen, + peerMgr: peerMgr, + exchange: exchange, + chainStore: chainStore, + messageStore: messageStore, + helloTimeOut: helloTimeOut, + } +} + +// Register registers the handler with the network. 
+func (h *HelloProtocolHandler) Register(peerDiscoveredCallback PeerDiscoveredCallback) { + // register callbacks + h.peerDiscovered = peerDiscoveredCallback + + // register a handle for when a new connection against someone is created + h.host.SetStreamHandler(helloProtocolID, h.handleNewStream) + + // register for connection notifications + h.host.Network().Notify((*helloProtocolNotifiee)(h)) +} + +func (h *HelloProtocolHandler) handleNewStream(s net.Stream) { + ctx, cancel := context.WithTimeout(context.Background(), h.helloTimeOut) + defer cancel() + + hello, err := h.receiveHello(ctx, s) + if err != nil { + helloMsgErrCt.Inc(ctx, 1) + log.Debugf("failed to receive hello message:%s", err) + // can't process a hello received in error, but leave this connection + // open because we connections are innocent until proven guilty + // (with bad genesis) + return + } + latencyMsg := &LatencyMessage{TArrival: time.Now().UnixNano()} + + // process the hello message + from := s.Conn().RemotePeer() + if !hello.GenesisHash.Equals(h.genesis) { + log.Debugf("peer genesis cid: %s does not match ours: %s, disconnecting from peer: %s", &hello.GenesisHash, h.genesis, from) + genesisErrCt.Inc(context.Background(), 1) + _ = s.Conn().Close() + return + } + + go func() { + defer s.Close() // nolint: errcheck + // Send the latendy message + latencyMsg.TSent = time.Now().UnixNano() + err = sendLatency(latencyMsg, s) + if err != nil { + log.Error(err) + } + }() + + protos, err := h.host.Peerstore().GetProtocols(from) + if err != nil { + log.Warnf("got error from peerstore.GetProtocols: %s", err) + } + if len(protos) == 0 { + log.Warn("other peer hasnt completed libp2p identify, waiting a bit") + // TODO: this better + time.Sleep(time.Millisecond * 300) + } + + fullTipSet, err := h.loadLocalFullTipset(ctx, hello.HeaviestTipSetCids) + if err != nil { + fullTipSet, err = h.exchange.GetFullTipSet(ctx, []peer.ID{from}, hello.HeaviestTipSetCids) //nolint + if err == nil { + for _, b := range 
fullTipSet.Blocks { + _, err = h.chainStore.PutObject(ctx, b.Header) + if err != nil { + log.Errorf("fail to save block to tipset") + return + } + _, err = h.messageStore.StoreMessages(ctx, b.SECPMessages, b.BLSMessages) + if err != nil { + log.Errorf("fail to save block to tipset") + return + } + } + } + h.host.ConnManager().TagPeer(from, "new-block", 40) + } + if err != nil { + log.Warnf("failed to get tipset message from peer %s", from) + return + } + if fullTipSet == nil { + log.Warnf("handleNewStream get null full tipset, it's scarce!") + return + } + + // notify the local node of the new `block.ChainInfo` + h.peerMgr.AddFilecoinPeer(from) + ci := types.NewChainInfo(from, from, fullTipSet.TipSet()) + h.peerDiscovered(ci) +} + +func (h *HelloProtocolHandler) loadLocalFullTipset(ctx context.Context, tsk types.TipSetKey) (*types.FullTipSet, error) { + ts, err := h.chainStore.GetTipSet(ctx, tsk) + if err != nil { + return nil, err + } + + fts := &types.FullTipSet{} + for _, b := range ts.Blocks() { + smsgs, bmsgs, err := h.messageStore.LoadMetaMessages(ctx, b.Messages) + if err != nil { + return nil, err + } + + fb := &types.FullBlock{ + Header: b, + BLSMessages: bmsgs, + SECPMessages: smsgs, + } + fts.Blocks = append(fts.Blocks, fb) + } + + return fts, nil +} + +// ErrBadGenesis is the error returned when a mismatch in genesis blocks happens. 
+var ErrBadGenesis = fmt.Errorf("bad genesis block") + +func (h *HelloProtocolHandler) getOurHelloMessage() (*HelloMessage, error) { + heaviest := h.chainStore.GetHead() + height := heaviest.Height() + weight := heaviest.ParentWeight() + + return &HelloMessage{ + GenesisHash: h.genesis, + HeaviestTipSetCids: heaviest.Key(), + HeaviestTipSetHeight: height, + HeaviestTipSetWeight: weight, + }, nil +} + +func (h *HelloProtocolHandler) receiveHello(ctx context.Context, s net.Stream) (*HelloMessage, error) { + var hello HelloMessage + err := hello.UnmarshalCBOR(s) + return &hello, err +} + +func (h *HelloProtocolHandler) receiveLatency(ctx context.Context, s net.Stream) (*LatencyMessage, error) { + var latency LatencyMessage + err := latency.UnmarshalCBOR(s) + if err != nil { + return nil, err + } + return &latency, nil +} + +// sendHello send a hello message on stream `s`. +func (h *HelloProtocolHandler) sendHello(s net.Stream) error { + msg, err := h.getOurHelloMessage() + if err != nil { + return err + } + buf := new(bytes.Buffer) + if err := msg.MarshalCBOR(buf); err != nil { + return err + } + + n, err := s.Write(buf.Bytes()) + if err != nil { + return err + } + if n != buf.Len() { + return fmt.Errorf("could not write all hello message bytes") + } + return nil +} + +// responding to latency +func sendLatency(msg *LatencyMessage, s net.Stream) error { + buf := new(bytes.Buffer) + if err := msg.MarshalCBOR(buf); err != nil { + return err + } + n, err := s.Write(buf.Bytes()) + if err != nil { + return err + } + if n != buf.Len() { + return fmt.Errorf("could not write all latency message bytes") + } + return nil +} + +// Note: hide `net.Notifyee` impl using a new-type +type helloProtocolNotifiee HelloProtocolHandler + +const helloTimeout = time.Second * 10 + +func (hn *helloProtocolNotifiee) asHandler() *HelloProtocolHandler { + return (*HelloProtocolHandler)(hn) +} + +// +// `net.Notifyee` impl for `helloNotify` +// + +func (hn *helloProtocolNotifiee) Connected(n 
net.Network, c net.Conn) { + // Connected is invoked when a connection is made to a libp2p node. + // + // - open stream on connection + // - send HelloMessage` on stream + // - read LatencyMessage response on stream + // + // Terminate the connection if it has a different genesis block + go func() { + // add timeout + ctx, cancel := context.WithTimeout(context.Background(), helloTimeout) + defer cancel() + s, err := hn.asHandler().host.NewStream(ctx, c.RemotePeer(), helloProtocolID) + if err != nil { + // If peer does not do hello keep connection open + return + } + defer func() { _ = s.Close() }() + // send out the hello message + err = hn.asHandler().sendHello(s) + if err != nil { + log.Debugf("failed to send hello handshake to peer %s: %s", c.RemotePeer(), err) + // Don't close connection for failed hello protocol impl + return + } + + // now receive latency message + _, err = hn.asHandler().receiveLatency(ctx, s) + if err != nil { + log.Debugf("failed to receive hello latency msg from peer %s: %s", c.RemotePeer(), err) + return + } + }() +} + +func (hn *helloProtocolNotifiee) Listen(n net.Network, a ma.Multiaddr) { /* empty */ } +func (hn *helloProtocolNotifiee) ListenClose(n net.Network, a ma.Multiaddr) { /* empty */ } +func (hn *helloProtocolNotifiee) Disconnected(n net.Network, c net.Conn) { /* empty */ } +func (hn *helloProtocolNotifiee) OpenedStream(n net.Network, s net.Stream) { /* empty */ } +func (hn *helloProtocolNotifiee) ClosedStream(n net.Network, s net.Stream) { /* empty */ } diff --git a/pkg/net/helloprotocol/hello_protocol_test.go b/pkg/net/helloprotocol/hello_protocol_test.go new file mode 100644 index 0000000000..4aee711056 --- /dev/null +++ b/pkg/net/helloprotocol/hello_protocol_test.go @@ -0,0 +1,208 @@ +// stm: #unit +package helloprotocol_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/filecoin-project/venus/pkg/net/helloprotocol" + "github.com/filecoin-project/venus/pkg/net/peermgr" + + ds 
"github.com/ipfs/go-datastore" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/host" + + "github.com/filecoin-project/go-address" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/net" + "github.com/filecoin-project/venus/pkg/repo" + th "github.com/filecoin-project/venus/pkg/testhelpers" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type mockHelloCallback struct { + mock.Mock +} + +func (msb *mockHelloCallback) HelloCallback(ci *types.ChainInfo) { + msb.Called(ci.Sender, ci.Head.Key()) +} + +func TestHelloHandshake(t *testing.T) { + tf.UnitTest(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mn, err := mocknet.WithNPeers(2) + require.NoError(t, err) + + a := mn.Hosts()[0] + b := mn.Hosts()[1] + + builder := chain.NewBuilder(t, address.Undef) + + genesisA := builder.Genesis() + store := builder.Store() + mstore := builder.Mstore() + heavy1 := builder.AppendOn(ctx, genesisA, 1) + oldStore := copyStoreAndSetHead(ctx, t, store, heavy1) + + heavy2 := builder.AppendOn(ctx, heavy1, 1) + _ = store.SetHead(ctx, heavy2) + msc1, msc2 := new(mockHelloCallback), new(mockHelloCallback) + + // peer manager + aPeerMgr, err := mockPeerMgr(ctx, t, a) + require.NoError(t, err) + + // stm: @DISCOVERY_HELLO_REGISTER_001 + helloprotocol.NewHelloProtocolHandler(a, aPeerMgr, nil, oldStore, mstore, genesisA.Blocks()[0].Cid(), time.Second*30).Register(msc1.HelloCallback) + helloprotocol.NewHelloProtocolHandler(b, aPeerMgr, nil, store, mstore, genesisA.Blocks()[0].Cid(), time.Second*30).Register(msc2.HelloCallback) + + msc1.On("HelloCallback", b.ID(), heavy2.Key()).Return() + msc2.On("HelloCallback", a.ID(), heavy1.Key()).Return() + + 
require.NoError(t, mn.LinkAll()) + require.NoError(t, mn.ConnectAllButSelf()) + + require.NoError(t, th.WaitForIt(10, 50*time.Millisecond, func() (bool, error) { + var msc1Done bool + var msc2Done bool + for _, call := range msc1.Calls { + if call.Method == "HelloCallback" { + if _, differences := msc1.ExpectedCalls[0].Arguments.Diff(call.Arguments); differences == 0 { + msc1Done = true + break + } + } + } + for _, call := range msc2.Calls { + if call.Method == "HelloCallback" { + if _, differences := msc2.ExpectedCalls[0].Arguments.Diff(call.Arguments); differences == 0 { + msc2Done = true + break + } + } + } + + return msc1Done && msc2Done, nil + })) +} + +func TestHelloBadGenesis(t *testing.T) { + tf.UnitTest(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mn, err := mocknet.WithNPeers(2) + assert.NoError(t, err) + + a := mn.Hosts()[0] + b := mn.Hosts()[1] + + builder := chain.NewBuilder(t, address.Undef) + store := builder.Store() + mstore := builder.Mstore() + + genesisA := builder.AppendOn(ctx, types.UndefTipSet, 1) + heavy1 := builder.AppendOn(ctx, genesisA, 1) + heavy2 := builder.AppendOn(ctx, heavy1, 1) + _ = store.SetHead(ctx, heavy2) + + builder2 := chain.NewBuilder(t, address.Undef) + genesisB := builder2.Build(ctx, types.UndefTipSet, 1, func(b *chain.BlockBuilder, i int) { + b.SetTicket([]byte{1, 3, 4, 5, 6, 1, 3, 6, 7, 8}) + }) + + fmt.Println(genesisB, genesisA) + msc1, msc2 := new(mockHelloCallback), new(mockHelloCallback) + + // peer manager + peerMgr, err := mockPeerMgr(ctx, t, a) + require.NoError(t, err) + + helloprotocol.NewHelloProtocolHandler(a, peerMgr, nil, store, mstore, genesisA.Blocks()[0].Cid(), time.Second*30).Register(msc1.HelloCallback) + helloprotocol.NewHelloProtocolHandler(b, peerMgr, nil, builder2.Store(), builder2.Mstore(), genesisB.Blocks()[0].Cid(), time.Second*30).Register(msc2.HelloCallback) + + msc1.On("HelloCallback", mock.Anything, mock.Anything, mock.Anything).Return() + 
msc2.On("HelloCallback", mock.Anything, mock.Anything, mock.Anything).Return() + + require.NoError(t, mn.LinkAll()) + require.NoError(t, mn.ConnectAllButSelf()) + + time.Sleep(time.Second) + + msc1.AssertNumberOfCalls(t, "HelloCallback", 0) + msc2.AssertNumberOfCalls(t, "HelloCallback", 0) +} + +func TestHelloMultiBlock(t *testing.T) { + tf.UnitTest(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mn, err := mocknet.WithNPeers(2) + assert.NoError(t, err) + + a := mn.Hosts()[0] + b := mn.Hosts()[1] + + builder := chain.NewBuilder(t, address.Undef) + store := builder.Store() + mstore := builder.Mstore() + + genesisTipset := builder.Genesis() + assert.Equal(t, 1, genesisTipset.Len()) + + heavy1 := builder.AppendOn(ctx, genesisTipset, 3) + heavy1 = builder.AppendOn(ctx, heavy1, 3) + oldStore := copyStoreAndSetHead(ctx, t, store, heavy1) + + heavy2 := builder.AppendOn(ctx, heavy1, 3) + _ = store.SetHead(ctx, heavy2) + msc1, msc2 := new(mockHelloCallback), new(mockHelloCallback) + + // peer manager + peerMgr, err := mockPeerMgr(ctx, t, a) + require.NoError(t, err) + + helloprotocol.NewHelloProtocolHandler(a, peerMgr, nil, oldStore, mstore, genesisTipset.At(0).Cid(), time.Second*30).Register(msc1.HelloCallback) + helloprotocol.NewHelloProtocolHandler(b, peerMgr, nil, store, mstore, genesisTipset.At(0).Cid(), time.Second*30).Register(msc2.HelloCallback) + + msc1.On("HelloCallback", b.ID(), heavy2.Key()).Return() + msc2.On("HelloCallback", a.ID(), heavy1.Key()).Return() + + assert.NoError(t, mn.LinkAll()) + assert.NoError(t, mn.ConnectAllButSelf()) + + time.Sleep(time.Second * 5) + + msc1.AssertExpectations(t) + msc2.AssertExpectations(t) +} + +func mockPeerMgr(ctx context.Context, t *testing.T, h host.Host) (*peermgr.PeerMgr, error) { + addrInfo, err := net.ParseAddresses(ctx, repo.NewInMemoryRepo().Config().Bootstrap.Addresses) + require.NoError(t, err) + + return peermgr.NewPeerMgr(h, dht.NewDHT(ctx, h, ds.NewMapDatastore()), 10, 
addrInfo) +} + +func copyStoreAndSetHead(ctx context.Context, t *testing.T, store *chain.Store, ts *types.TipSet) *chain.Store { + storeCopy := *store //nolint + err := storeCopy.SetHead(ctx, ts) + require.NoError(t, err) + return &storeCopy +} diff --git a/pkg/net/msgsub/validator.go b/pkg/net/msgsub/validator.go new file mode 100644 index 0000000000..d158601b44 --- /dev/null +++ b/pkg/net/msgsub/validator.go @@ -0,0 +1,67 @@ +package msgsub + +import ( + "bytes" + "context" + + logging "github.com/ipfs/go-log" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/pkg/consensus" + "github.com/filecoin-project/venus/pkg/metrics" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var ( + messageTopicLogger = logging.Logger("net/message_validator") + mDecodeMsgFail = metrics.NewInt64Counter("net/pubsub_message_decode_failure", "Number of messages that fail to decode seen on message pubsub channel") + mInvalidMsg = metrics.NewInt64Counter("net/pubsub_invalid_message", "Number of messages that fail syntax validation seen on message pubsub channel") +) + +// MessageTopicValidator may be registered on go-libp3p-pubsub to validate msgsub payloads. +type MessageTopicValidator struct { + validator pubsub.Validator + opts []pubsub.ValidatorOpt +} + +// NewMessageTopicValidator returns a MessageTopicValidator using the input +// signature and syntax validators. 
+func NewMessageTopicValidator(syntaxVal *consensus.DefaultMessageSyntaxValidator, sigVal *consensus.MessageSignatureValidator, opts ...pubsub.ValidatorOpt) *MessageTopicValidator { + return &MessageTopicValidator{ + opts: opts, + validator: func(ctx context.Context, p peer.ID, msg *pubsub.Message) bool { + unmarshaled := &types.SignedMessage{} + if err := unmarshaled.UnmarshalCBOR(bytes.NewReader(msg.GetData())); err != nil { + messageTopicLogger.Debugf("message from peer: %s failed to decode: %s", p.String(), err.Error()) + mDecodeMsgFail.Inc(ctx, 1) + return false + } + if err := syntaxVal.ValidateSignedMessageSyntax(ctx, unmarshaled); err != nil { + mCid := unmarshaled.Cid() + messageTopicLogger.Debugf("message %s from peer: %s failed to syntax validate: %s", mCid.String(), p.String(), err.Error()) + mInvalidMsg.Inc(ctx, 1) + return false + } + if err := sigVal.Validate(ctx, unmarshaled); err != nil { + mCid := unmarshaled.Cid() + messageTopicLogger.Debugf("message %s from peer: %s failed to signature validate: %s", mCid.String(), p.String(), err.Error()) + mInvalidMsg.Inc(ctx, 1) + return false + } + return true + }, + } +} + +func (mtv *MessageTopicValidator) Topic(network string) string { + return types.MessageTopic(network) +} + +func (mtv *MessageTopicValidator) Validator() pubsub.Validator { + return mtv.validator +} + +func (mtv *MessageTopicValidator) Opts() []pubsub.ValidatorOpt { + return mtv.opts +} diff --git a/pkg/net/network.go b/pkg/net/network.go new file mode 100644 index 0000000000..8dd1df68ec --- /dev/null +++ b/pkg/net/network.go @@ -0,0 +1,211 @@ +package net + +import ( + "context" + "sort" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/metrics" + network2 "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + basichost "github.com/libp2p/go-libp2p/p2p/host/basic" + swarm "github.com/libp2p/go-libp2p/p2p/net/swarm" + ma 
"github.com/multiformats/go-multiaddr" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +// Network is a unified interface for dealing with libp2p +type Network struct { + host host.Host + rawHost types.RawHost + metrics.Reporter + *Router +} + +// New returns a new Network +func New( + host host.Host, + rawHost types.RawHost, + router *Router, + reporter metrics.Reporter, +) *Network { + return &Network{ + host: host, + rawHost: rawHost, + Reporter: reporter, + Router: router, + } +} + +// GetPeerAddresses gets the current addresses of the node +func (network *Network) GetPeerAddresses() []ma.Multiaddr { + return network.host.Addrs() +} + +// GetPeerID gets the current peer id from libp2p-host +func (network *Network) GetPeerID() peer.ID { + return network.host.ID() +} + +// GetBandwidthStats gets stats on the current bandwidth usage of the network +func (network *Network) GetBandwidthStats() metrics.Stats { + return network.Reporter.GetBandwidthTotals() +} + +// GetBandwidthStatsByPeer returns statistics about the nodes bandwidth +// usage and current rate per peer +func (network *Network) GetBandwidthStatsByPeer() (map[string]metrics.Stats, error) { + out := make(map[string]metrics.Stats) + for p, s := range network.Reporter.GetBandwidthByPeer() { + out[p.String()] = s + } + return out, nil +} + +// GetBandwidthStatsByProtocol returns statistics about the nodes bandwidth +// usage and current rate per protocol +func (network *Network) GetBandwidthStatsByProtocol() (map[protocol.ID]metrics.Stats, error) { + return network.Reporter.GetBandwidthByProtocol(), nil +} + +// Connect connects to peer at the given address. Does not retry. 
+func (network *Network) Connect(ctx context.Context, p peer.AddrInfo) error { + if swarm, ok := network.host.Network().(*swarm.Swarm); ok { + swarm.Backoff().Clear(p.ID) + } + return network.host.Connect(ctx, p) +} + +// Peers lists peers currently available on the network +func (network *Network) Peers(ctx context.Context) ([]peer.AddrInfo, error) { + if network.host == nil { + return nil, errors.New("node must be online") + } + + conns := network.host.Network().Conns() + peers := make([]peer.AddrInfo, 0, len(conns)) + for _, conn := range conns { + peers = append(peers, peer.AddrInfo{ + ID: conn.RemotePeer(), + Addrs: []ma.Multiaddr{conn.RemoteMultiaddr()}, + }) + } + + return peers, nil +} + +// PeerInfo searches the peer info for a given peer id +func (network *Network) PeerInfo(ctx context.Context, p peer.ID) (*types.ExtendedPeerInfo, error) { + info := &types.ExtendedPeerInfo{ID: p} + + agent, err := network.host.Peerstore().Get(p, "AgentVersion") + if err == nil { + info.Agent = agent.(string) + } + + for _, a := range network.host.Peerstore().Addrs(p) { + info.Addrs = append(info.Addrs, a.String()) + } + sort.Strings(info.Addrs) + + protocols, err := network.host.Peerstore().GetProtocols(p) + if err == nil { + sort.Strings(protocols) + info.Protocols = protocols + } + + if cm := network.host.ConnManager().GetTagInfo(p); cm != nil { + info.ConnMgrMeta = &types.ConnMgrInfo{ + FirstSeen: cm.FirstSeen, + Value: cm.Value, + Tags: cm.Tags, + Conns: cm.Conns, + } + } + + return info, nil +} + +// AgentVersion returns agent version for a given peer id +func (network *Network) AgentVersion(ctx context.Context, p peer.ID) (string, error) { + ag, err := network.host.Peerstore().Get(p, "AgentVersion") + if err != nil { + return "", err + } + + if ag == nil { + return "unknown", nil + } + + return ag.(string), nil +} + +// Disconnect disconnect to peer at the given address +func (network *Network) Disconnect(p peer.ID) error { + return 
network.host.Network().ClosePeer(p) +} + +const apiProtectTag = "api" + +// ProtectAdd protect peer at the given peers id +func (network *Network) ProtectAdd(peers []peer.ID) error { + for _, p := range peers { + network.host.ConnManager().Protect(p, apiProtectTag) + } + + return nil +} + +// ProtectRemove unprotect peer at the given peers id +func (network *Network) ProtectRemove(peers []peer.ID) error { + for _, p := range peers { + network.host.ConnManager().Unprotect(p, apiProtectTag) + } + + return nil +} + +// ProtectList returns the peers that are protected +func (network *Network) ProtectList() ([]peer.ID, error) { + result := make([]peer.ID, 0) + for _, conn := range network.host.Network().Conns() { + if network.host.ConnManager().IsProtected(conn.RemotePeer(), apiProtectTag) { + result = append(result, conn.RemotePeer()) + } + } + + return result, nil +} + +// Connectedness returns a state signaling connection capabilities +func (network *Network) Connectedness(p peer.ID) (network2.Connectedness, error) { + return network.host.Network().Connectedness(p), nil +} + +// AutoNatStatus return a struct with current NAT status and public dial address +func (network *Network) AutoNatStatus() (types.NatInfo, error) { + autonat := network.rawHost.(*basichost.BasicHost).GetAutoNat() + + if autonat == nil { + return types.NatInfo{ + Reachability: network2.ReachabilityUnknown, + }, nil + } + + var maddr string + if autonat.Status() == network2.ReachabilityPublic { + pa, err := autonat.PublicAddr() + if err != nil { + return types.NatInfo{}, err + } + maddr = pa.String() + } + + return types.NatInfo{ + Reachability: autonat.Status(), + PublicAddr: maddr, + }, nil +} diff --git a/pkg/net/parse.go b/pkg/net/parse.go new file mode 100644 index 0000000000..cd8653a935 --- /dev/null +++ b/pkg/net/parse.go @@ -0,0 +1,89 @@ +package net + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + 
madns "github.com/multiformats/go-multiaddr-dns" +) + +// ParseAddresses is a function that takes in a slice of string peer addresses +// (multiaddr + peerid) and returns a slice of properly constructed peers +func ParseAddresses(ctx context.Context, addrs []string) ([]peer.AddrInfo, error) { + // resolve addresses + maddrs, err := resolveAddresses(ctx, addrs) + if err != nil { + return nil, err + } + + return peer.AddrInfosFromP2pAddrs(maddrs...) +} + +const ( + dnsResolveTimeout = 10 * time.Second +) + +// resolveAddresses resolves addresses parallelly +func resolveAddresses(ctx context.Context, addrs []string) ([]ma.Multiaddr, error) { + ctx, cancel := context.WithTimeout(ctx, dnsResolveTimeout) + defer cancel() + + var maddrs []ma.Multiaddr + var wg sync.WaitGroup + resolveErrC := make(chan error, len(addrs)) + + maddrC := make(chan ma.Multiaddr) + + for _, addr := range addrs { + maddr, err := ma.NewMultiaddr(addr) + if err != nil { + return nil, err + } + + // check whether address ends in `ipfs/Qm...` + if _, last := ma.SplitLast(maddr); last.Protocol().Code == ma.P_IPFS { + maddrs = append(maddrs, maddr) + continue + } + wg.Add(1) + go func(maddr ma.Multiaddr) { + defer wg.Done() + raddrs, err := madns.Resolve(ctx, maddr) + if err != nil { + resolveErrC <- err + return + } + // filter out addresses that still doesn't end in `ipfs/Qm...` + found := 0 + for _, raddr := range raddrs { + if _, last := ma.SplitLast(raddr); last != nil && last.Protocol().Code == ma.P_IPFS { + maddrC <- raddr + found++ + } + } + if found == 0 { + resolveErrC <- fmt.Errorf("found no ipfs peers at %s", maddr) + } + }(maddr) + } + go func() { + wg.Wait() + close(maddrC) + }() + + for maddr := range maddrC { + maddrs = append(maddrs, maddr) + } + + select { + case err := <-resolveErrC: + return nil, err + default: + } + + return maddrs, nil +} diff --git a/pkg/net/peermgr/peermgr.go b/pkg/net/peermgr/peermgr.go new file mode 100644 index 0000000000..e770228242 --- /dev/null +++ 
b/pkg/net/peermgr/peermgr.go @@ -0,0 +1,238 @@ +package peermgr + +import ( + "context" + "fmt" + "sync" + "time" + + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/event" + host "github.com/libp2p/go-libp2p/core/host" + net "github.com/libp2p/go-libp2p/core/network" + peer "github.com/libp2p/go-libp2p/core/peer" + + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("peermgr") + +const ( + MaxFilPeers = 320 + MinFilPeers = 128 +) + +type IPeerMgr interface { + AddFilecoinPeer(p peer.ID) + GetPeerLatency(p peer.ID) (time.Duration, bool) + SetPeerLatency(p peer.ID, latency time.Duration) + Disconnect(p peer.ID) + Stop(ctx context.Context) error + Run(ctx context.Context) +} + +var ( + _ IPeerMgr = &PeerMgr{} + _ IPeerMgr = &MockPeerMgr{} +) + +type PeerMgr struct { + bootstrappers []peer.AddrInfo + + // peerLeads is a set of peers we hear about through the network + // and who may be good peers to connect to for expanding our peer set + // peerLeads map[peer.ID]time.Time // TODO: unused + + peersLk sync.Mutex + peers map[peer.ID]time.Duration + + maxFilPeers int + minFilPeers int + + expanding chan struct{} + + h host.Host + dht *dht.IpfsDHT + + notifee *net.NotifyBundle + filPeerEmitter event.Emitter + + period time.Duration + done chan struct{} +} + +type FilPeerEvt struct { + Type FilPeerEvtType + ID peer.ID +} + +type FilPeerEvtType int + +const ( + AddFilPeerEvt FilPeerEvtType = iota + RemoveFilPeerEvt +) + +func NewPeerMgr(h host.Host, dht *dht.IpfsDHT, period time.Duration, bootstrap []peer.AddrInfo) (*PeerMgr, error) { + pm := &PeerMgr{ + h: h, + dht: dht, + bootstrappers: bootstrap, + + peers: make(map[peer.ID]time.Duration), + expanding: make(chan struct{}, 1), + + maxFilPeers: MaxFilPeers, + minFilPeers: MinFilPeers, + + done: make(chan struct{}), + period: period, + } + emitter, err := h.EventBus().Emitter(new(FilPeerEvt)) + if err != nil { + return nil, fmt.Errorf("creating NewFilPeer emitter: %w", err) + } 
+ pm.filPeerEmitter = emitter + + pm.notifee = &net.NotifyBundle{ + DisconnectedF: func(_ net.Network, c net.Conn) { + pm.Disconnect(c.RemotePeer()) + }, + } + + h.Network().Notify(pm.notifee) + + return pm, nil +} + +func (pmgr *PeerMgr) AddFilecoinPeer(p peer.ID) { + _ = pmgr.filPeerEmitter.Emit(FilPeerEvt{Type: AddFilPeerEvt, ID: p}) //nolint:errcheck + pmgr.peersLk.Lock() + defer pmgr.peersLk.Unlock() + pmgr.peers[p] = time.Duration(0) +} + +func (pmgr *PeerMgr) GetPeerLatency(p peer.ID) (time.Duration, bool) { + pmgr.peersLk.Lock() + defer pmgr.peersLk.Unlock() + dur, ok := pmgr.peers[p] + return dur, ok +} + +func (pmgr *PeerMgr) SetPeerLatency(p peer.ID, latency time.Duration) { + pmgr.peersLk.Lock() + defer pmgr.peersLk.Unlock() + if _, ok := pmgr.peers[p]; ok { + pmgr.peers[p] = latency + } +} + +func (pmgr *PeerMgr) Disconnect(p peer.ID) { + disconnected := false + + if pmgr.h.Network().Connectedness(p) == net.NotConnected { + pmgr.peersLk.Lock() + _, disconnected = pmgr.peers[p] + if disconnected { + delete(pmgr.peers, p) + } + pmgr.peersLk.Unlock() + } + + if disconnected { + _ = pmgr.filPeerEmitter.Emit(FilPeerEvt{Type: RemoveFilPeerEvt, ID: p}) //nolint:errcheck + } +} + +func (pmgr *PeerMgr) Stop(ctx context.Context) error { + log.Warn("closing peermgr done") + _ = pmgr.filPeerEmitter.Close() + close(pmgr.done) + return nil +} + +func (pmgr *PeerMgr) Run(ctx context.Context) { + tick := time.NewTicker(pmgr.period) + defer tick.Stop() + + for { + pCount := pmgr.getPeerCount() + if pCount < pmgr.minFilPeers { + pmgr.expandPeers() + } else if pCount > pmgr.maxFilPeers { + log.Debugf("peer count about threshold: %d > %d", pCount, pmgr.maxFilPeers) + } + + select { + case <-tick.C: + continue + case <-pmgr.done: + log.Warn("exiting peermgr run") + return + } + } +} + +func (pmgr *PeerMgr) getPeerCount() int { + pmgr.peersLk.Lock() + defer pmgr.peersLk.Unlock() + return len(pmgr.peers) +} + +func (pmgr *PeerMgr) expandPeers() { + select { + case 
pmgr.expanding <- struct{}{}: + default: + return + } + + go func() { + ctx, cancel := context.WithTimeout(context.TODO(), time.Second*30) + defer cancel() + + pmgr.doExpand(ctx) + + <-pmgr.expanding + }() +} + +func (pmgr *PeerMgr) doExpand(ctx context.Context) { + pcount := pmgr.getPeerCount() + if pcount == 0 { + if len(pmgr.bootstrappers) == 0 { + log.Warn("no peers connected, and no bootstrappers configured") + return + } + + log.Info("connecting to bootstrap peers") + for _, bsp := range pmgr.bootstrappers { + if err := pmgr.h.Connect(ctx, bsp); err != nil { + log.Warnf("failed to connect to bootstrap peer: %s", err) + } + } + return + } + + // if we already have some peers and need more, the dht is really good at connecting to most peers. Use that for now until something better comes along. + if err := pmgr.dht.Bootstrap(ctx); err != nil { + log.Warnf("dht bootstrapping failed: %s", err) + } +} + +type MockPeerMgr struct{} + +func (m MockPeerMgr) AddFilecoinPeer(p peer.ID) {} + +func (m MockPeerMgr) GetPeerLatency(p peer.ID) (time.Duration, bool) { + return time.Duration(0), true +} + +func (m MockPeerMgr) SetPeerLatency(p peer.ID, latency time.Duration) {} + +func (m MockPeerMgr) Disconnect(p peer.ID) {} + +func (m MockPeerMgr) Stop(ctx context.Context) error { + return nil +} + +func (m MockPeerMgr) Run(ctx context.Context) {} diff --git a/pkg/net/pubsub/testing.go b/pkg/net/pubsub/testing.go new file mode 100644 index 0000000000..67a3448601 --- /dev/null +++ b/pkg/net/pubsub/testing.go @@ -0,0 +1,100 @@ +package pubsub + +import ( + "context" + "sync" + + "github.com/libp2p/go-libp2p/core/peer" +) + +// FakeMessage is a simple pubsub message +type FakeMessage struct { + peerID peer.ID + data []byte +} + +// GetFrom returns the message's sender ID +func (m *FakeMessage) GetFrom() peer.ID { + return m.peerID +} + +// GetData returns the message's payload +func (m *FakeMessage) GetData() []byte { + return m.data +} + +// FakeSubscription is a fake pubsub 
subscription. +type FakeSubscription struct { + topic string + pending chan Message + err error + cancelled bool + awaitCancel sync.WaitGroup +} + +// NewFakeSubscription builds a new fake subscription to a topic. +func NewFakeSubscription(topic string, bufSize int) *FakeSubscription { + sub := &FakeSubscription{ + topic: topic, + pending: make(chan Message, bufSize), + awaitCancel: sync.WaitGroup{}, + } + sub.awaitCancel.Add(1) + return sub +} + +// Subscription interface + +// Topic returns this subscription's topic. +func (s *FakeSubscription) Topic() string { + return s.topic +} + +// Next returns the next messages from this subscription. +func (s *FakeSubscription) Next(ctx context.Context) (Message, error) { + if s.err != nil { + return nil, s.err + } + select { + case msg := <-s.pending: + return msg, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +// Cancel cancels this subscription, after which no subsequently posted messages will be received. +func (s *FakeSubscription) Cancel() { + if s.cancelled { + panic("subscription already cancelled") + } + s.cancelled = true + s.awaitCancel.Done() +} + +// Manipulators + +// Post posts a new message to this subscription. +func (s *FakeSubscription) Post(msg Message) { + if s.err != nil { + panic("subscription has failed") + } + if !s.cancelled { + s.pending <- msg + } +} + +// Fail causes subsequent reads from this subscription to fail. +func (s *FakeSubscription) Fail(err error) { + if err != nil { + panic("error is nil") + } + if !s.cancelled { + s.err = err + } +} + +// AwaitCancellation waits for the subscription to be canceled by the subscriber. 
+func (s *FakeSubscription) AwaitCancellation() { + s.awaitCancel.Wait() +} diff --git a/pkg/net/pubsub/topic.go b/pkg/net/pubsub/topic.go new file mode 100644 index 0000000000..2bd2213207 --- /dev/null +++ b/pkg/net/pubsub/topic.go @@ -0,0 +1,82 @@ +package pubsub + +import ( + "context" + + libp2p "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" +) + +// Topic publishes and subscribes to a libp2p pubsub topic +type Topic struct { + pubsubTopic *libp2p.Topic +} + +// Message defines the common interface for venus message consumers. +// It's a subset of the go-libp2p-pubsub/pubsub.go Message type. +type Message interface { + GetSource() peer.ID + GetSender() peer.ID + GetData() []byte +} + +type message struct { + inner *libp2p.Message +} + +// Subscription is a handle to a pubsub subscription. +// This matches part of the interface to a libp2p.pubsub.Subscription. +type Subscription interface { + // Topic returns this subscription's topic name + Topic() string + // Next returns the next message from this subscription + Next(ctx context.Context) (Message, error) + // Cancel cancels this subscription + Cancel() +} + +// NewTopic builds a new topic. +func NewTopic(topic *libp2p.Topic) *Topic { + return &Topic{pubsubTopic: topic} +} + +// Subscribe subscribes to a pubsub topic +func (t *Topic) Subscribe() (Subscription, error) { + sub, err := t.pubsubTopic.Subscribe() + return &subscriptionWrapper{sub}, err +} + +// Publish publishes to a pubsub topic. It blocks until there is at least one +// peer on the mesh that can receive the publish. +func (t *Topic) Publish(ctx context.Context, data []byte) error { + // return t.pubsubTopic.Publish(ctx, data) + return t.pubsubTopic.Publish(ctx, data, libp2p.WithReadiness(libp2p.MinTopicSize(1))) +} + +// subscriptionWrapper extends a pubsub.Subscription in order to wrap the Message type. 
+type subscriptionWrapper struct { + *libp2p.Subscription +} + +// Next wraps pubsub.Subscription.Next, implicitly adapting *pubsub.Message to the Message interface. +func (w subscriptionWrapper) Next(ctx context.Context) (Message, error) { + msg, err := w.Subscription.Next(ctx) + if err != nil { + return nil, err + } + return message{ + inner: msg, + }, nil +} + +func (m message) GetSender() peer.ID { + return m.inner.ReceivedFrom +} + +func (m message) GetSource() peer.ID { + return m.inner.GetFrom() +} + +func (m message) GetData() []byte { + return m.inner.GetData() +} diff --git a/internal/pkg/net/router.go b/pkg/net/router.go similarity index 90% rename from internal/pkg/net/router.go rename to pkg/net/router.go index 8c4574ad48..bb7599ea0d 100644 --- a/internal/pkg/net/router.go +++ b/pkg/net/router.go @@ -5,13 +5,13 @@ import ( "errors" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/routing" - "github.com/libp2p/go-libp2p-kad-dht" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" ) // This struct wraps the filecoin nodes router. This router is a -// go-libp2p-core/routing.Routing interface that provides both PeerRouting, +// go-libp2p/core/routing.Routing interface that provides both PeerRouting, // ContentRouting and a Bootstrap init process. Filecoin nodes in online mode // use a go-libp2p-kad-dht DHT to satisfy this interface. Nodes run the // Bootstrap function to join the DHT on start up. The PeerRouting functionality @@ -49,7 +49,7 @@ func (r *Router) FindPeer(ctx context.Context, peerID peer.ID) (peer.AddrInfo, e // GetClosestPeers returns a channel of the K closest peers to the given key, // K is the 'K Bucket' parameter of the Kademlia DHT protocol. 
-func (r *Router) GetClosestPeers(ctx context.Context, key string) (<-chan peer.ID, error) { +func (r *Router) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) { ipfsDHT, ok := r.routing.(*dht.IpfsDHT) if !ok { return nil, errors.New("underlying routing should be pointer of IpfsDHT") diff --git a/pkg/net/scorekeeper.go b/pkg/net/scorekeeper.go new file mode 100644 index 0000000000..c37cd33cd2 --- /dev/null +++ b/pkg/net/scorekeeper.go @@ -0,0 +1,31 @@ +package net + +import ( + "sync" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +type ScoreKeeper struct { + lk sync.Mutex + scores map[peer.ID]*pubsub.PeerScoreSnapshot +} + +func NewScoreKeeper() *ScoreKeeper { + return &ScoreKeeper{ + scores: make(map[peer.ID]*pubsub.PeerScoreSnapshot), + } +} + +func (sk *ScoreKeeper) Update(scores map[peer.ID]*pubsub.PeerScoreSnapshot) { + sk.lk.Lock() + sk.scores = scores + sk.lk.Unlock() +} + +func (sk *ScoreKeeper) Get() map[peer.ID]*pubsub.PeerScoreSnapshot { + sk.lk.Lock() + defer sk.lk.Unlock() + return sk.scores +} diff --git a/pkg/paychmgr/accessorcache.go b/pkg/paychmgr/accessorcache.go new file mode 100644 index 0000000000..358cf79006 --- /dev/null +++ b/pkg/paychmgr/accessorcache.go @@ -0,0 +1,71 @@ +package paychmgr + +import ( + "context" + + "github.com/filecoin-project/go-address" +) + +// accessorByFromTo gets a channel accessor for a given from / to pair. +// The channel accessor facilitates locking a channel so that operations +// must be performed sequentially on a channel (but can be performed at +// the same time on different channels). 
+func (pm *Manager) accessorByFromTo(from address.Address, to address.Address) (*channelAccessor, error) { + key := pm.accessorCacheKey(from, to) + + // First take a read lock and check the cache + pm.lk.RLock() + ca, ok := pm.channels[key] + pm.lk.RUnlock() + if ok { + return ca, nil + } + + // Not in cache, so take a write lock + pm.lk.Lock() + defer pm.lk.Unlock() + + // Need to check cache again in case it was updated between releasing read + // lock and taking write lock + ca, ok = pm.channels[key] + if !ok { + // Not in cache, so create a new one and store in cache + ca = pm.addAccessorToCache(from, to) + } + + return ca, nil +} + +// accessorByAddress gets a channel accessor for a given channel address. +// The channel accessor facilitates locking a channel so that operations +// must be performed sequentially on a channel (but can be performed at +// the same time on different channels). +func (pm *Manager) accessorByAddress(ctx context.Context, ch address.Address) (*channelAccessor, error) { + // Get the channel from / to + pm.lk.RLock() + channelInfo, err := pm.store.ByAddress(ctx, ch) + pm.lk.RUnlock() + if err != nil { + return nil, err + } + + // TODO: cache by channel address so we can get by address instead of using from / to + return pm.accessorByFromTo(channelInfo.Control, channelInfo.Target) +} + +// accessorCacheKey returns the cache key use to reference a channel accessor +func (pm *Manager) accessorCacheKey(from address.Address, to address.Address) string { + return from.String() + "->" + to.String() +} + +// addAccessorToCache adds a channel accessor to the cache. 
Note that the +// channel may not have been created yet, but we still want to reference +// the same channel accessor for a given from/to, so that all attempts to +// access a channel use the same lock (the lock on the accessor) +func (pm *Manager) addAccessorToCache(from address.Address, to address.Address) *channelAccessor { + key := pm.accessorCacheKey(from, to) + ca := newChannelAccessor(pm, from, to) + // TODO: Use LRU + pm.channels[key] = ca + return ca +} diff --git a/pkg/paychmgr/channellock.go b/pkg/paychmgr/channellock.go new file mode 100644 index 0000000000..0dc785ec0f --- /dev/null +++ b/pkg/paychmgr/channellock.go @@ -0,0 +1,33 @@ +package paychmgr + +import "sync" + +type rwlock interface { + RLock() + RUnlock() +} + +// channelLock manages locking for a specific channel. +// Some operations update the state of a single channel, and need to block +// other operations only on the same channel's state. +// Some operations update state that affects all channels, and need to block +// any operation against any channel. +type channelLock struct { + globalLock rwlock + chanLock sync.Mutex +} + +func (l *channelLock) Lock() { + // Wait for other operations by this channel to finish. + // Exclusive per-channel (no other ops by this channel allowed). + l.chanLock.Lock() + // Wait for operations affecting all channels to finish. 
+ // Allows ops by other channels in parallel, but blocks all operations + // if global lock is taken exclusively (eg when adding a channel) + l.globalLock.RLock() +} + +func (l *channelLock) Unlock() { + l.globalLock.RUnlock() + l.chanLock.Unlock() +} diff --git a/pkg/paychmgr/manager.go b/pkg/paychmgr/manager.go new file mode 100644 index 0000000000..683830b728 --- /dev/null +++ b/pkg/paychmgr/manager.go @@ -0,0 +1,366 @@ +package paychmgr + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/pkg/statemanger" + "github.com/filecoin-project/venus/venus-shared/types" + pchTypes "github.com/filecoin-project/venus/venus-shared/types/market" +) + +var log = logging.Logger("paych") + +var errProofNotSupported = errors.New("payment channel proof parameter is not supported") + +// managerAPI defines all methods needed by the manager +type managerAPI interface { + statemanger.IStateManager + paychDependencyAPI +} + +// managerAPIImpl is used to create a composite that implements managerAPI +type managerAPIImpl struct { + statemanger.IStateManager + paychDependencyAPI +} + +type Manager struct { + // The Manager context is used to terminate wait operations on shutdown + ctx context.Context + shutdown context.CancelFunc + + store *Store + sa *stateAccessor + pchapi managerAPI + + lk sync.RWMutex + channels map[string]*channelAccessor +} +type ManagerParams struct { + MPoolAPI IMessagePush + ChainInfoAPI IChainInfo + WalletAPI IWalletAPI + SM statemanger.IStateManager +} + +func NewManager(ctx context.Context, ds datastore.Batching, params *ManagerParams) (*Manager, error) { + ctx, shutdown := context.WithCancel(ctx) + impl := &managerAPIImpl{ + IStateManager: params.SM, + paychDependencyAPI: newPaychDependencyAPI(params.MPoolAPI, 
params.ChainInfoAPI, params.WalletAPI), + } + pm := &Manager{ + ctx: ctx, + shutdown: shutdown, + store: &Store{ds}, + sa: &stateAccessor{sm: impl}, + channels: make(map[string]*channelAccessor), + pchapi: impl, + } + return pm, pm.Start(ctx) +} + +// newManager is used by the tests to supply mocks +func newManager(ctx context.Context, pchStore *Store, pchapi managerAPI) (*Manager, error) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + pm := &Manager{ + store: pchStore, + sa: &stateAccessor{sm: pchapi}, + channels: make(map[string]*channelAccessor), + pchapi: pchapi, + shutdown: cancel, + } + return pm, pm.Start(ctx) +} + +// Start restarts tracking of any messages that were sent to chain. +func (pm *Manager) Start(ctx context.Context) error { + return pm.restartPending(ctx) +} + +// Stop shuts down any processes used by the manager +func (pm *Manager) Stop() { + pm.shutdown() +} + +type GetOpts struct { + Reserve bool + OffChain bool +} + +func (pm *Manager) GetPaych(ctx context.Context, from, to address.Address, amt big.Int, opts GetOpts) (address.Address, cid.Cid, error) { + if !opts.Reserve && opts.OffChain { + return address.Undef, cid.Undef, fmt.Errorf("can't fund payment channels without on-chain operations") + } + chanAccessor, err := pm.accessorByFromTo(from, to) + if err != nil { + return address.Undef, cid.Undef, err + } + + return chanAccessor.getPaych(ctx, amt, opts) +} + +func (pm *Manager) AvailableFunds(ctx context.Context, ch address.Address) (*types.ChannelAvailableFunds, error) { + ca, err := pm.accessorByAddress(ctx, ch) + if err != nil { + return nil, err + } + + ci, err := ca.getChannelInfo(ctx, ch) + if err != nil { + return nil, err + } + + return ca.availableFunds(ctx, ci.ChannelID) +} + +func (pm *Manager) AvailableFundsByFromTo(ctx context.Context, from address.Address, to address.Address) (*types.ChannelAvailableFunds, error) { + ca, err := pm.accessorByFromTo(from, to) + if err != nil { + return nil, err + } 
+ + ci, err := ca.outboundActiveByFromTo(ctx, from, to) + if err == ErrChannelNotTracked { + // If there is no active channel between from / to we still want to + // return an empty ChannelAvailableFunds, so that clients can check + // for the existence of a channel between from / to without getting + // an error. + return &types.ChannelAvailableFunds{ + Channel: nil, + From: from, + To: to, + ConfirmedAmt: big.NewInt(0), + PendingAmt: big.NewInt(0), + NonReservedAmt: big.NewInt(0), + PendingAvailableAmt: big.NewInt(0), + PendingWaitSentinel: nil, + QueuedAmt: big.NewInt(0), + VoucherReedeemedAmt: big.NewInt(0), + }, nil + } + if err != nil { + return nil, err + } + + return ca.availableFunds(ctx, ci.ChannelID) +} + +// GetPaychWaitReady waits until the create channel / add funds message with the +// given message CID arrives. +// The returned channel address can safely be used against the Manager methods. +func (pm *Manager) GetPaychWaitReady(ctx context.Context, mcid cid.Cid) (address.Address, error) { + // Find the channel associated with the message CID + pm.lk.Lock() + ci, err := pm.store.ByMessageCid(ctx, mcid) + pm.lk.Unlock() + + if err != nil { + if err == datastore.ErrNotFound { + return address.Undef, fmt.Errorf("could not find wait msg cid %s", mcid) + } + return address.Undef, err + } + + chanAccessor, err := pm.accessorByFromTo(ci.Control, ci.Target) + if err != nil { + return address.Undef, err + } + + return chanAccessor.getPaychWaitReady(ctx, mcid) +} + +func (pm *Manager) ListChannels(ctx context.Context) ([]address.Address, error) { + // Need to take an exclusive lock here so that channel operations can't run + // in parallel (see channelLock) + pm.lk.Lock() + defer pm.lk.Unlock() + + return pm.store.ListChannels(ctx) +} + +func (pm *Manager) GetChannelInfo(ctx context.Context, addr address.Address) (*pchTypes.ChannelInfo, error) { + ca, err := pm.accessorByAddress(ctx, addr) + if err != nil { + return nil, err + } + return ca.getChannelInfo(ctx, 
addr) +} + +func (pm *Manager) CreateVoucher(ctx context.Context, ch address.Address, voucher types.SignedVoucher) (*types.VoucherCreateResult, error) { + ca, err := pm.accessorByAddress(ctx, ch) + if err != nil { + return nil, err + } + return ca.createVoucher(ctx, ch, voucher) +} + +// CheckVoucherValid checks if the given voucher is valid (is or could become spendable at some point). +// If the channel is not in the store, fetches the channel from state (and checks that +// the channel To address is owned by the wallet). +func (pm *Manager) CheckVoucherValid(ctx context.Context, ch address.Address, sv *types.SignedVoucher) error { + // Get an accessor for the channel, creating it from state if necessary + ca, err := pm.inboundChannelAccessor(ctx, ch) + if err != nil { + return err + } + + _, err = ca.checkVoucherValid(ctx, ch, sv) + return err +} + +// CheckVoucherSpendable checks if the given voucher is currently spendable +func (pm *Manager) CheckVoucherSpendable(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (bool, error) { + if len(proof) > 0 { + return false, errProofNotSupported + } + ca, err := pm.accessorByAddress(ctx, ch) + if err != nil { + return false, err + } + + return ca.checkVoucherSpendable(ctx, ch, sv, secret) +} + +// AddVoucherOutbound adds a voucher for an outbound channel. +// Returns an error if the channel is not already in the store. +func (pm *Manager) AddVoucherOutbound(ctx context.Context, ch address.Address, sv *types.SignedVoucher, proof []byte, minDelta big.Int) (big.Int, error) { + if len(proof) > 0 { + return big.NewInt(0), errProofNotSupported + } + ca, err := pm.accessorByAddress(ctx, ch) + if err != nil { + return big.NewInt(0), err + } + return ca.addVoucher(ctx, ch, sv, minDelta) +} + +// AddVoucherInbound adds a voucher for an inbound channel. 
+// If the channel is not in the store, fetches the channel from state (and checks that +// the channel To address is owned by the wallet). +func (pm *Manager) AddVoucherInbound(ctx context.Context, ch address.Address, sv *types.SignedVoucher, proof []byte, minDelta big.Int) (big.Int, error) { + if len(proof) > 0 { + return big.NewInt(0), errProofNotSupported + } + // Get an accessor for the channel, creating it from state if necessary + ca, err := pm.inboundChannelAccessor(ctx, ch) + if err != nil { + return big.Int{}, err + } + return ca.addVoucher(ctx, ch, sv, minDelta) +} + +// inboundChannelAccessor gets an accessor for the given channel. The channel +// must either exist in the store, or be an inbound channel that can be created +// from state. +func (pm *Manager) inboundChannelAccessor(ctx context.Context, ch address.Address) (*channelAccessor, error) { + // Make sure channel is in store, or can be fetched from state, and that + // the channel To address is owned by the wallet + ci, err := pm.trackInboundChannel(ctx, ch) + if err != nil { + return nil, err + } + + // This is an inbound channel, so To is the Control address (this node) + from := ci.Target + to := ci.Control + return pm.accessorByFromTo(from, to) +} + +func (pm *Manager) trackInboundChannel(ctx context.Context, ch address.Address) (*pchTypes.ChannelInfo, error) { + // Need to take an exclusive lock here so that channel operations can't run + // in parallel (see channelLock) + pm.lk.Lock() + defer pm.lk.Unlock() + + // Check if channel is in store + ci, err := pm.store.ByAddress(ctx, ch) + if err == nil { + // Channel is in store, so it's already being tracked + return ci, nil + } + + // If there's an error (besides channel not in store) return err + if err != ErrChannelNotTracked { + return nil, err + } + + // Channel is not in store, so get channel from state + stateCi, err := pm.sa.loadStateChannelInfo(ctx, ch, pchTypes.DirInbound) + if err != nil { + return nil, err + } + + // Check that 
channel To address is in wallet + to := stateCi.Control // Inbound channel so To addr is Control (this node) + toKey, err := pm.pchapi.StateAccountKey(ctx, to, types.EmptyTSK) + if err != nil { + return nil, err + } + has, err := pm.pchapi.WalletHas(ctx, toKey) + if err != nil { + return nil, err + } + if !has { + msg := "cannot add voucher for channel %s: wallet does not have key for address %s" + return nil, fmt.Errorf(msg, ch, to) + } + + // Save channel to store + return pm.store.TrackChannel(ctx, stateCi) +} + +// TODO: secret vs proof doesn't make sense, there is only one, not two +func (pm *Manager) SubmitVoucher(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) { + if len(proof) > 0 { + return cid.Undef, errProofNotSupported + } + ca, err := pm.accessorByAddress(ctx, ch) + if err != nil { + return cid.Undef, err + } + return ca.submitVoucher(ctx, ch, sv, secret) +} + +func (pm *Manager) AllocateLane(ctx context.Context, ch address.Address) (uint64, error) { + ca, err := pm.accessorByAddress(ctx, ch) + if err != nil { + return 0, err + } + return ca.allocateLane(ctx, ch) +} + +func (pm *Manager) ListVouchers(ctx context.Context, ch address.Address) ([]*pchTypes.VoucherInfo, error) { + ca, err := pm.accessorByAddress(ctx, ch) + if err != nil { + return nil, err + } + return ca.listVouchers(ctx, ch) +} + +func (pm *Manager) Settle(ctx context.Context, addr address.Address) (cid.Cid, error) { + ca, err := pm.accessorByAddress(ctx, addr) + if err != nil { + return cid.Undef, err + } + return ca.settle(ctx, addr) +} + +func (pm *Manager) Collect(ctx context.Context, addr address.Address) (cid.Cid, error) { + ca, err := pm.accessorByAddress(ctx, addr) + if err != nil { + return cid.Undef, err + } + return ca.collect(ctx, addr) +} diff --git a/pkg/paychmgr/mock_test.go b/pkg/paychmgr/mock_test.go new file mode 100644 index 0000000000..3530645316 --- /dev/null +++ b/pkg/paychmgr/mock_test.go @@ -0,0 
+1,256 @@ +package paychmgr + +import ( + "context" + "errors" + "sync" + + crypto2 "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" +) + +type mockManagerAPI struct { + *mockStateManager + *mockPaychAPI +} + +func (m mockManagerAPI) GetMarketState(ctx context.Context, ts *types.TipSet) (market.State, error) { + return nil, nil +} + +func newMockManagerAPI() *mockManagerAPI { + return &mockManagerAPI{ + mockStateManager: newMockStateManager(), + mockPaychAPI: newMockPaychAPI(), + } +} + +type mockPchState struct { + actor *types.Actor + state paych.State +} + +type mockStateManager struct { + lk sync.Mutex + accountState map[address.Address]address.Address + paychState map[address.Address]mockPchState + response *vm.Ret + lastCall *types.Message +} + +func newMockStateManager() *mockStateManager { + return &mockStateManager{ + accountState: make(map[address.Address]address.Address), + paychState: make(map[address.Address]mockPchState), + } +} + +func (sm *mockStateManager) setAccountAddress(a address.Address, lookup address.Address) { + sm.lk.Lock() + defer sm.lk.Unlock() + sm.accountState[a] = lookup +} + +func (sm *mockStateManager) setPaychState(a address.Address, actor *types.Actor, state paych.State) { + sm.lk.Lock() + defer sm.lk.Unlock() + sm.paychState[a] = mockPchState{actor, state} +} + +func (sm *mockStateManager) ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + sm.lk.Lock() + defer sm.lk.Unlock() + keyAddr, ok := 
sm.accountState[addr] + if !ok { + return address.Undef, errors.New("not found") + } + return keyAddr, nil +} + +func (sm *mockStateManager) GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error) { + sm.lk.Lock() + defer sm.lk.Unlock() + info, ok := sm.paychState[addr] + if !ok { + return nil, nil, errors.New("not found") + } + return info.actor, info.state, nil +} + +func (sm *mockStateManager) setCallResponse(response *vm.Ret) { + sm.lk.Lock() + defer sm.lk.Unlock() + + sm.response = response +} + +func (sm *mockStateManager) getLastCall() *types.Message { + sm.lk.Lock() + defer sm.lk.Unlock() + + return sm.lastCall +} + +func (sm *mockStateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*vm.Ret, error) { + sm.lk.Lock() + defer sm.lk.Unlock() + + sm.lastCall = msg + + return sm.response, nil +} + +type waitingCall struct { + response chan types.MessageReceipt +} + +type waitingResponse struct { + receipt types.MessageReceipt + done chan struct{} +} + +type mockPaychAPI struct { + lk sync.Mutex + messages map[cid.Cid]*types.SignedMessage + waitingCalls map[cid.Cid]*waitingCall + waitingResponses map[cid.Cid]*waitingResponse + wallet map[address.Address]struct{} + signingKey []byte +} + +func newMockPaychAPI() *mockPaychAPI { + return &mockPaychAPI{ + messages: make(map[cid.Cid]*types.SignedMessage), + waitingCalls: make(map[cid.Cid]*waitingCall), + waitingResponses: make(map[cid.Cid]*waitingResponse), + wallet: make(map[address.Address]struct{}), + } +} + +func (pchapi *mockPaychAPI) StateWaitMsg(ctx context.Context, mcid cid.Cid, confidence uint64) (*types.MsgLookup, error) { + pchapi.lk.Lock() + response := make(chan types.MessageReceipt) + + if response, ok := pchapi.waitingResponses[mcid]; ok { + defer pchapi.lk.Unlock() + defer func() { + go close(response.done) + }() + + delete(pchapi.waitingResponses, mcid) + return &types.MsgLookup{Receipt: response.receipt}, nil + } + + 
pchapi.waitingCalls[mcid] = &waitingCall{response: response} + pchapi.lk.Unlock() + + receipt := <-response + return &types.MsgLookup{Receipt: receipt}, nil +} + +func (pchapi *mockPaychAPI) receiveMsgResponse(mcid cid.Cid, receipt types.MessageReceipt) { + pchapi.lk.Lock() + + if call, ok := pchapi.waitingCalls[mcid]; ok { + defer pchapi.lk.Unlock() + + delete(pchapi.waitingCalls, mcid) + call.response <- receipt + return + } + + done := make(chan struct{}) + pchapi.waitingResponses[mcid] = &waitingResponse{receipt: receipt, done: done} + + pchapi.lk.Unlock() + + <-done +} + +// Send success response for any waiting calls +func (pchapi *mockPaychAPI) close() { + pchapi.lk.Lock() + defer pchapi.lk.Unlock() + + success := types.MessageReceipt{ + ExitCode: 0, + Return: []byte{}, + } + for mcid, call := range pchapi.waitingCalls { + delete(pchapi.waitingCalls, mcid) + call.response <- success + } +} + +func (pchapi *mockPaychAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) (*types.SignedMessage, error) { + pchapi.lk.Lock() + defer pchapi.lk.Unlock() + + smsg := &types.SignedMessage{Message: *msg} + smsgCid := smsg.Cid() + pchapi.messages[smsgCid] = smsg + return smsg, nil +} + +func (pchapi *mockPaychAPI) pushedMessages(c cid.Cid) *types.SignedMessage { + pchapi.lk.Lock() + defer pchapi.lk.Unlock() + + return pchapi.messages[c] +} + +func (pchapi *mockPaychAPI) pushedMessageCount() int { + pchapi.lk.Lock() + defer pchapi.lk.Unlock() + + return len(pchapi.messages) +} + +func (pchapi *mockPaychAPI) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { + return addr, nil +} + +func (pchapi *mockPaychAPI) WalletHas(ctx context.Context, addr address.Address) (bool, error) { + pchapi.lk.Lock() + defer pchapi.lk.Unlock() + + _, ok := pchapi.wallet[addr] + return ok, nil +} + +func (pchapi *mockPaychAPI) addWalletAddress(addr address.Address) { + pchapi.lk.Lock() + defer 
pchapi.lk.Unlock() + + pchapi.wallet[addr] = struct{}{} +} + +func (pchapi *mockPaychAPI) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) { + pchapi.lk.Lock() + defer pchapi.lk.Unlock() + + return crypto2.Sign(msg, pchapi.signingKey, crypto.SigTypeSecp256k1) +} + +func (pchapi *mockPaychAPI) addSigningKey(key []byte) { + pchapi.lk.Lock() + defer pchapi.lk.Unlock() + + pchapi.signingKey = key +} + +func (pchapi *mockPaychAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { + return constants.TestNetworkVersion, nil +} diff --git a/pkg/paychmgr/msglistener.go b/pkg/paychmgr/msglistener.go new file mode 100644 index 0000000000..1f8841243f --- /dev/null +++ b/pkg/paychmgr/msglistener.go @@ -0,0 +1,56 @@ +package paychmgr + +import ( + "fmt" + + "github.com/hannahhoward/go-pubsub" + + "github.com/ipfs/go-cid" +) + +type msgListeners struct { + ps *pubsub.PubSub +} + +type msgCompleteEvt struct { + mcid cid.Cid + err error +} + +type subscriberFn func(msgCompleteEvt) + +func newMsgListeners() msgListeners { + ps := pubsub.New(func(event pubsub.Event, subFn pubsub.SubscriberFn) error { + evt, ok := event.(msgCompleteEvt) + if !ok { + return fmt.Errorf("wrong type of event") + } + sub, ok := subFn.(subscriberFn) + if !ok { + return fmt.Errorf("wrong type of subscriber") + } + sub(evt) + return nil + }) + return msgListeners{ps: ps} +} + +// onMsgComplete registers a callback for when the message with the given cid +// completes +func (ml *msgListeners) onMsgComplete(mcid cid.Cid, cb func(error)) pubsub.Unsubscribe { + var fn subscriberFn = func(evt msgCompleteEvt) { + if mcid.Equals(evt.mcid) { + cb(evt.err) + } + } + return ml.ps.Subscribe(fn) +} + +// fireMsgComplete is called when a message completes +func (ml *msgListeners) fireMsgComplete(mcid cid.Cid, err error) { + e := ml.ps.Publish(msgCompleteEvt{mcid: mcid, err: err}) + if e != nil { + // In theory we shouldn't ever get an error 
here + log.Errorf("unexpected error publishing message complete: %s", e) + } +} diff --git a/pkg/paychmgr/msglistener_test.go b/pkg/paychmgr/msglistener_test.go new file mode 100644 index 0000000000..e1e5478d5d --- /dev/null +++ b/pkg/paychmgr/msglistener_test.go @@ -0,0 +1,100 @@ +package paychmgr + +import ( + "fmt" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +func testCids() []cid.Cid { + c1, _ := cid.Decode("QmdmGQmRgRjazArukTbsXuuxmSHsMCcRYPAZoGhd6e3MuS") + c2, _ := cid.Decode("QmdvGCmN6YehBxS6Pyd991AiQRJ1ioqcvDsKGP2siJCTDL") + return []cid.Cid{c1, c2} +} + +func TestMsgListener(t *testing.T) { + tf.UnitTest(t) + ml := newMsgListeners() + + done := false + experr := fmt.Errorf("some err") + cids := testCids() + ml.onMsgComplete(cids[0], func(err error) { + require.Equal(t, experr, err) + done = true + }) + + ml.fireMsgComplete(cids[0], experr) + + if !done { + t.Fatal("failed to fire event") + } +} + +func TestMsgListenerNilErr(t *testing.T) { + tf.UnitTest(t) + ml := newMsgListeners() + + done := false + cids := testCids() + ml.onMsgComplete(cids[0], func(err error) { + require.Nil(t, err) + done = true + }) + + ml.fireMsgComplete(cids[0], nil) + + if !done { + t.Fatal("failed to fire event") + } +} + +func TestMsgListenerUnsub(t *testing.T) { + tf.UnitTest(t) + ml := newMsgListeners() + + done := false + experr := fmt.Errorf("some err") + cids := testCids() + unsub := ml.onMsgComplete(cids[0], func(err error) { + t.Fatal("should not call unsubscribed listener") + }) + ml.onMsgComplete(cids[0], func(err error) { + require.Equal(t, experr, err) + done = true + }) + + unsub() + ml.fireMsgComplete(cids[0], experr) + + if !done { + t.Fatal("failed to fire event") + } +} + +func TestMsgListenerMulti(t *testing.T) { + tf.UnitTest(t) + ml := newMsgListeners() + + count := 0 + cids := testCids() + ml.onMsgComplete(cids[0], func(err error) { + count++ + }) + 
ml.onMsgComplete(cids[0], func(err error) { + count++ + }) + ml.onMsgComplete(cids[1], func(err error) { + count++ + }) + + ml.fireMsgComplete(cids[0], nil) + require.Equal(t, 2, count) + + ml.fireMsgComplete(cids[1], nil) + require.Equal(t, 3, count) +} diff --git a/pkg/paychmgr/paych.go b/pkg/paychmgr/paych.go new file mode 100644 index 0000000000..e0aae820d5 --- /dev/null +++ b/pkg/paychmgr/paych.go @@ -0,0 +1,633 @@ +package paychmgr + +import ( + "context" + "errors" + "fmt" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/pkg/crypto" + lpaych "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" + "github.com/filecoin-project/venus/venus-shared/types" + pchTypes "github.com/filecoin-project/venus/venus-shared/types/market" +) + +// insufficientFundsErr indicates that there are not enough funds in the +// channel to create a voucher +type insufficientFundsErr interface { + Shortfall() big.Int +} + +type ErrInsufficientFunds struct { + shortfall big.Int +} + +func newErrInsufficientFunds(shortfall big.Int) *ErrInsufficientFunds { + return &ErrInsufficientFunds{shortfall: shortfall} +} + +func (e *ErrInsufficientFunds) Error() string { + return fmt.Sprintf("not enough funds in channel to cover voucher - shortfall: %d", e.shortfall) +} + +func (e *ErrInsufficientFunds) Shortfall() big.Int { + return e.shortfall +} + +type laneState struct { + redeemed big.Int + nonce uint64 +} + +func (ls laneState) Redeemed() (big.Int, error) { + return ls.redeemed, nil +} + +func (ls laneState) Nonce() (uint64, error) { + return ls.nonce, nil +} + +// channelAccessor is used to simplify locking when accessing a channel +type channelAccessor struct { + from address.Address + to address.Address + + // chctx is used by background 
processes (eg when waiting for things to be + // confirmed on chain) + chctx context.Context + sa *stateAccessor + api managerAPI + store *Store + lk *channelLock + fundsReqQueue []*fundsReq + msgListeners msgListeners +} + +func newChannelAccessor(pm *Manager, from address.Address, to address.Address) *channelAccessor { + return &channelAccessor{ + from: from, + to: to, + chctx: pm.ctx, + sa: pm.sa, + api: pm.pchapi, + store: pm.store, + lk: &channelLock{globalLock: &pm.lk}, + msgListeners: newMsgListeners(), + } +} + +func (ca *channelAccessor) messageBuilder(ctx context.Context, from address.Address) (lpaych.MessageBuilder, error) { + nwVersion, err := ca.api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return nil, err + } + + ver, err := actorstypes.VersionForNetwork(nwVersion) + if err != nil { + return nil, err + } + return lpaych.Message(ver, from), nil +} + +func (ca *channelAccessor) getChannelInfo(ctx context.Context, addr address.Address) (*pchTypes.ChannelInfo, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + return ca.store.ByAddress(ctx, addr) +} + +func (ca *channelAccessor) outboundActiveByFromTo(ctx context.Context, from, to address.Address) (*pchTypes.ChannelInfo, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + return ca.store.OutboundActiveByFromTo(ctx, ca.api, from, to) +} + +// createVoucher creates a voucher with the given specification, setting its +// nonce, signing the voucher and storing it in the local datastore. +// If there are not enough funds in the channel to create the voucher, returns +// the shortfall in funds. 
+func (ca *channelAccessor) createVoucher(ctx context.Context, ch address.Address, voucher types.SignedVoucher) (*types.VoucherCreateResult, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + // Find the channel for the voucher + ci, err := ca.store.ByAddress(ctx, ch) + if err != nil { + return nil, fmt.Errorf("failed to get channel info by address: %w", err) + } + + // Set the voucher channel + sv := &voucher + sv.ChannelAddr = ch + + // Get the next nonce on the given lane + sv.Nonce = ca.nextNonceForLane(ci, voucher.Lane) + + // Sign the voucher + vb, err := sv.SigningBytes() + if err != nil { + return nil, fmt.Errorf("failed to get voucher signing bytes: %w", err) + } + + sig, err := ca.api.WalletSign(ctx, ci.Control, vb) + if err != nil { + return nil, fmt.Errorf("failed to sign voucher: %w", err) + } + sv.Signature = sig + + // Store the voucher + if _, err := ca.addVoucherUnlocked(ctx, ch, sv, big.NewInt(0)); err != nil { + // If there are not enough funds in the channel to cover the voucher, + // return a voucher create result with the shortfall + var ife insufficientFundsErr + if errors.As(err, &ife) { + return &types.VoucherCreateResult{ + Shortfall: ife.Shortfall(), + }, nil + } + + return nil, fmt.Errorf("failed to persist voucher: %w", err) + } + + return &types.VoucherCreateResult{Voucher: sv, Shortfall: big.NewInt(0)}, nil +} + +func (ca *channelAccessor) nextNonceForLane(ci *pchTypes.ChannelInfo, lane uint64) uint64 { + var maxnonce uint64 + for _, v := range ci.Vouchers { + if v.Voucher.Lane == lane { + if v.Voucher.Nonce > maxnonce { + maxnonce = v.Voucher.Nonce + } + } + } + + return maxnonce + 1 +} + +func (ca *channelAccessor) checkVoucherValid(ctx context.Context, ch address.Address, sv *types.SignedVoucher) (map[uint64]lpaych.LaneState, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + return ca.checkVoucherValidUnlocked(ctx, ch, sv) +} + +func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch address.Address, sv 
*types.SignedVoucher) (map[uint64]lpaych.LaneState, error) { + if sv.ChannelAddr != ch { + return nil, fmt.Errorf("voucher ChannelAddr doesn't match channel address, got %s, expected %s", sv.ChannelAddr, ch) + } + + // check voucher is unlocked + if sv.Extra != nil { + return nil, fmt.Errorf("voucher is Message Locked") + } + if sv.TimeLockMax != 0 { + return nil, fmt.Errorf("voucher is Max Time Locked") + } + if sv.TimeLockMin != 0 { + return nil, fmt.Errorf("voucher is Min Time Locked") + } + if len(sv.SecretHash) != 0 { + return nil, fmt.Errorf("voucher is Hash Locked") + } + + // Load payment channel actor state + act, pchState, err := ca.sa.loadPaychActorState(ctx, ch) + if err != nil { + return nil, err + } + + // Load channel "From" account actor state + f, err := pchState.From() + if err != nil { + return nil, err + } + + from, err := ca.api.ResolveToKeyAddress(ctx, f, nil) + if err != nil { + return nil, err + } + // verify voucher signature + vb, err := sv.SigningBytes() + if err != nil { + return nil, err + } + + // TODO: technically, either party may create and sign a voucher. + // However, for now, we only accept them from the channel creator. 
+ // More complex handling logic can be added later + if err := crypto.Verify(sv.Signature, from, vb); err != nil { + return nil, err + } + + // Check the voucher against the highest known voucher nonce / value + laneStates, err := ca.laneState(ctx, pchState, ch) + if err != nil { + return nil, err + } + + // If the new voucher nonce value is less than the highest known + // nonce for the lane + ls, lsExists := laneStates[sv.Lane] + if lsExists { + n, err := ls.Nonce() + if err != nil { + return nil, err + } + + if sv.Nonce <= n { + return nil, fmt.Errorf("nonce too low") + } + + // If the voucher amount is less than the highest known voucher amount + r, err := ls.Redeemed() + if err != nil { + return nil, err + } + if sv.Amount.LessThanEqual(r) { + return nil, fmt.Errorf("voucher amount is lower than amount for voucher with lower nonce") + } + } + + // Total redeemed is the total redeemed amount for all lanes, including + // the new voucher + // eg + // + // lane 1 redeemed: 3 + // lane 2 redeemed: 2 + // voucher for lane 1: 5 + // + // Voucher supersedes lane 1 redeemed, therefore + // effective lane 1 redeemed: 5 + // + // lane 1: 5 + // lane 2: 2 + // - + // total: 7 + totalRedeemed, err := ca.totalRedeemedWithVoucher(laneStates, sv) + if err != nil { + return nil, err + } + + // Total required balance must not exceed actor balance + if act.Balance.LessThan(totalRedeemed) { + return nil, newErrInsufficientFunds(big.Sub(totalRedeemed, act.Balance)) + } + + if len(sv.Merges) != 0 { + return nil, fmt.Errorf("dont currently support paych lane merges") + } + + return laneStates, nil +} + +func (ca *channelAccessor) checkVoucherSpendable(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte) (bool, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + recipient, err := ca.getPaychRecipient(ctx, ch) + if err != nil { + return false, err + } + + ci, err := ca.store.ByAddress(ctx, ch) + if err != nil { + return false, err + } + + // Check if 
voucher has already been submitted + submitted, err := ci.WasVoucherSubmitted(sv) + if err != nil { + return false, err + } + if submitted { + return false, nil + } + + mb, err := ca.messageBuilder(ctx, recipient) + if err != nil { + return false, err + } + + mes, err := mb.Update(ch, sv, secret) + if err != nil { + return false, err + } + + ret, err := ca.api.Call(ctx, mes, nil) + if err != nil { + return false, err + } + + if ret.Receipt.ExitCode != 0 { + return false, nil + } + + return true, nil +} + +func (ca *channelAccessor) getPaychRecipient(ctx context.Context, ch address.Address) (address.Address, error) { + _, state, err := ca.api.GetPaychState(ctx, ch, nil) + if err != nil { + return address.Address{}, err + } + + return state.To() +} + +func (ca *channelAccessor) addVoucher(ctx context.Context, ch address.Address, sv *types.SignedVoucher, minDelta big.Int) (big.Int, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + return ca.addVoucherUnlocked(ctx, ch, sv, minDelta) +} + +func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Address, sv *types.SignedVoucher, minDelta big.Int) (big.Int, error) { + ci, err := ca.store.ByAddress(ctx, ch) + if err != nil { + return big.Int{}, err + } + + // Check if the voucher has already been added + for _, v := range ci.Vouchers { + eq, err := cborutil.Equals(sv, v.Voucher) + if err != nil { + return big.Int{}, err + } + if eq { + // Ignore the duplicate voucher. 
+ log.Warnf("AddVoucher: voucher re-added") + return big.NewInt(0), nil + } + + } + + // Check voucher validity + laneStates, err := ca.checkVoucherValidUnlocked(ctx, ch, sv) + if err != nil { + return big.NewInt(0), err + } + + // The change in value is the delta between the voucher amount and + // the highest previous voucher amount for the lane + laneState, exists := laneStates[sv.Lane] + redeemed := big.NewInt(0) + if exists { + redeemed, err = laneState.Redeemed() + if err != nil { + return big.NewInt(0), err + } + } + + delta := big.Sub(sv.Amount, redeemed) + if minDelta.GreaterThan(delta) { + return delta, fmt.Errorf("addVoucher: supplied token amount too low; minD=%s, D=%s; laneAmt=%s; v.Amt=%s", minDelta, delta, redeemed, sv.Amount) + } + + ci.Vouchers = append(ci.Vouchers, &pchTypes.VoucherInfo{ + Voucher: sv, + }) + + if ci.NextLane <= sv.Lane { + ci.NextLane = sv.Lane + 1 + } + + return delta, ca.store.putChannelInfo(ctx, ci) +} + +func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte) (cid.Cid, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + ci, err := ca.store.ByAddress(ctx, ch) + if err != nil { + return cid.Undef, err + } + + has, err := ci.HasVoucher(sv) + if err != nil { + return cid.Undef, err + } + + // If the channel has the voucher + if has { + // Check that the voucher hasn't already been submitted + submitted, err := ci.WasVoucherSubmitted(sv) + if err != nil { + return cid.Undef, err + } + if submitted { + return cid.Undef, fmt.Errorf("cannot submit voucher that has already been submitted") + } + } + + mb, err := ca.messageBuilder(ctx, ci.Control) + if err != nil { + return cid.Undef, err + } + + msg, err := mb.Update(ch, sv, secret) + if err != nil { + return cid.Undef, err + } + + smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return cid.Undef, err + } + + // If the channel didn't already have the voucher + if !has { + // Add the voucher to the 
channel + ci.Vouchers = append(ci.Vouchers, &pchTypes.VoucherInfo{ + Voucher: sv, + }) + } + + // Mark the voucher and any lower-nonce vouchers as having been submitted + err = ca.store.MarkVoucherSubmitted(ctx, ci, sv) + if err != nil { + return cid.Undef, err + } + return smsg.Cid(), nil +} + +func (ca *channelAccessor) allocateLane(ctx context.Context, ch address.Address) (uint64, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + return ca.store.AllocateLane(ctx, ch) +} + +func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address) ([]*pchTypes.VoucherInfo, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + // TODO: just having a passthrough method like this feels odd. Seems like + // there should be some filtering we're doing here + return ca.store.VouchersForPaych(ctx, ch) +} + +// laneState gets the LaneStates from chain, then applies all vouchers in +// the data store over the chain state +func (ca *channelAccessor) laneState(ctx context.Context, state lpaych.State, ch address.Address) (map[uint64]lpaych.LaneState, error) { + // TODO: we probably want to call UpdateChannelState with all vouchers to be fully correct + // (but technically dont't need to) + + laneCount, err := state.LaneCount() + if err != nil { + return nil, err + } + + // Note: we use a map instead of an array to store laneStates because the + // api sets the lane ID (the index) and potentially they could use a + // very large index. 
+ laneStates := make(map[uint64]lpaych.LaneState, laneCount) + err = state.ForEachLaneState(func(idx uint64, ls lpaych.LaneState) error { + laneStates[idx] = ls + return nil + }) + if err != nil { + return nil, err + } + + // Apply locally stored vouchers + vouchers, err := ca.store.VouchersForPaych(ctx, ch) + if err != nil && err != ErrChannelNotTracked { + return nil, err + } + + for _, v := range vouchers { + for range v.Voucher.Merges { + return nil, fmt.Errorf("paych merges not handled yet") + } + + // Check if there is an existing laneState in the payment channel + // for this voucher's lane + ls, ok := laneStates[v.Voucher.Lane] + + // If the voucher does not have a higher nonce than the existing + // laneState for this lane, ignore it + if ok { + n, err := ls.Nonce() + if err != nil { + return nil, err + } + if v.Voucher.Nonce < n { + continue + } + } + + // Voucher has a higher nonce, so replace laneState with this voucher + laneStates[v.Voucher.Lane] = laneState{v.Voucher.Amount, v.Voucher.Nonce} + } + + return laneStates, nil +} + +// Get the total redeemed amount across all lanes, after applying the voucher +func (ca *channelAccessor) totalRedeemedWithVoucher(laneStates map[uint64]lpaych.LaneState, sv *types.SignedVoucher) (big.Int, error) { + // TODO: merges + if len(sv.Merges) != 0 { + return big.Int{}, fmt.Errorf("dont currently support paych lane merges") + } + + total := big.NewInt(0) + for _, ls := range laneStates { + r, err := ls.Redeemed() + if err != nil { + return big.Int{}, err + } + total = big.Add(total, r) + } + + lane, ok := laneStates[sv.Lane] + if ok { + // If the voucher is for an existing lane, and the voucher nonce + // is higher than the lane nonce + n, err := lane.Nonce() + if err != nil { + return big.Int{}, err + } + + if sv.Nonce > n { + // Add the delta between the redeemed amount and the voucher + // amount to the total + r, err := lane.Redeemed() + if err != nil { + return big.Int{}, err + } + + delta := big.Sub(sv.Amount, 
r) + total = big.Add(total, delta) + } + } else { + // If the voucher is *not* for an existing lane, just add its + // value (implicitly a new lane will be created for the voucher) + total = big.Add(total, sv.Amount) + } + + return total, nil +} + +func (ca *channelAccessor) settle(ctx context.Context, ch address.Address) (cid.Cid, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + ci, err := ca.store.ByAddress(ctx, ch) + if err != nil { + return cid.Undef, err + } + + mb, err := ca.messageBuilder(ctx, ci.Control) + if err != nil { + return cid.Undef, err + } + msg, err := mb.Settle(ch) + if err != nil { + return cid.Undef, err + } + smgs, err := ca.api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return cid.Undef, err + } + + ci.Settling = true + err = ca.store.putChannelInfo(ctx, ci) + if err != nil { + log.Errorf("Error marking channel as settled: %s", err) + } + return smgs.Cid(), nil +} + +func (ca *channelAccessor) collect(ctx context.Context, ch address.Address) (cid.Cid, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + ci, err := ca.store.ByAddress(ctx, ch) + if err != nil { + return cid.Undef, err + } + + mb, err := ca.messageBuilder(ctx, ci.Control) + if err != nil { + return cid.Undef, err + } + + msg, err := mb.Collect(ch) + if err != nil { + return cid.Undef, err + } + + smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return cid.Undef, err + } + return smsg.Cid(), nil +} diff --git a/pkg/paychmgr/paych_test.go b/pkg/paychmgr/paych_test.go new file mode 100644 index 0000000000..dc4d1bc028 --- /dev/null +++ b/pkg/paychmgr/paych_test.go @@ -0,0 +1,843 @@ +// stm: #unit +package paychmgr + +import ( + "bytes" + "context" + "testing" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/specs-actors/v2/actors/builtin" + paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych" + tutils "github.com/filecoin-project/specs-actors/v6/support/testing" + + crypto2 "github.com/filecoin-project/venus/pkg/crypto" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ "github.com/filecoin-project/venus/pkg/crypto/secp" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" + paychmock "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych/mock" + "github.com/filecoin-project/venus/venus-shared/types" + pchTypes "github.com/filecoin-project/venus/venus-shared/types/market" +) + +func TestCheckVoucherValid(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t) + toKeyPrivate, toKeyPublic := testGenerateKeyPair(t) + randKeyPrivate, _ := testGenerateKeyPair(t) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic)) + to := tutils.NewSECP256K1Addr(t, string(toKeyPublic)) + fromAcct := tutils.NewActorAddr(t, "fromAct") + toAcct := tutils.NewActorAddr(t, "toAct") + + mock := newMockManagerAPI() + mock.setAccountAddress(fromAcct, from) + mock.setAccountAddress(toAcct, to) + + tcases := []struct { + name string + expectError bool + key []byte + actorBalance big.Int + voucherAmount big.Int + voucherLane uint64 + voucherNonce uint64 + laneStates map[uint64]paych.LaneState + }{{ + name: "passes when voucher amount < balance", + key: fromKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(5), + }, { + name: "fails when funds too low", + expectError: true, + key: fromKeyPrivate, + actorBalance: big.NewInt(5), + voucherAmount: big.NewInt(10), + }, { + 
name: "fails when invalid signature", + expectError: true, + key: randKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(5), + }, { + name: "fails when signed by channel To account (instead of From account)", + expectError: true, + key: toKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(5), + }, { + name: "fails when nonce too low", + expectError: true, + key: fromKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(5), + voucherLane: 1, + voucherNonce: 2, + laneStates: map[uint64]paych.LaneState{ + 1: paychmock.NewMockLaneState(big.NewInt(2), 3), + }, + }, { + name: "passes when nonce higher", + key: fromKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(5), + voucherLane: 1, + voucherNonce: 3, + laneStates: map[uint64]paych.LaneState{ + 1: paychmock.NewMockLaneState(big.NewInt(2), 2), + }, + }, { + name: "passes when nonce for different lane", + key: fromKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(5), + voucherLane: 2, + voucherNonce: 2, + laneStates: map[uint64]paych.LaneState{ + 1: paychmock.NewMockLaneState(big.NewInt(2), 3), + }, + }, { + name: "fails when voucher has higher nonce but lower value than lane state", + expectError: true, + key: fromKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(5), + voucherLane: 1, + voucherNonce: 3, + laneStates: map[uint64]paych.LaneState{ + 1: paychmock.NewMockLaneState(big.NewInt(6), 2), + }, + }, { + // voucher supersedes lane 1 redeemed so + // lane 1 effective redeemed = voucher amount + // + // required balance = voucher amt + // = 7 + // So required balance: 7 < actor balance: 10 + name: "passes when voucher total redeemed <= balance", + key: fromKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(6), + voucherLane: 1, + voucherNonce: 2, + laneStates: map[uint64]paych.LaneState{ + // Lane 1 (same as voucher lane 1) + 1: paychmock.NewMockLaneState(big.NewInt(4), 1), 
+ }, + }, { + // required balance = total redeemed + // = 6 (voucher lane 1) + 5 (lane 2) + // = 11 + // So required balance: 11 > actor balance: 10 + name: "fails when voucher total redeemed > balance", + expectError: true, + key: fromKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(6), + voucherLane: 1, + voucherNonce: 1, + laneStates: map[uint64]paych.LaneState{ + // Lane 2 (different from voucher lane 1) + 2: paychmock.NewMockLaneState(big.NewInt(5), 1), + }, + }, { + // voucher supersedes lane 1 redeemed so + // lane 1 effective redeemed = voucher amount + // + // required balance = total redeemed + // = 6 (new voucher lane 1) + 5 (lane 2) + // = 11 + // So required balance: 11 > actor balance: 10 + name: "fails when voucher total redeemed > balance", + expectError: true, + key: fromKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(6), + voucherLane: 1, + voucherNonce: 2, + laneStates: map[uint64]paych.LaneState{ + // Lane 1 (superseded by new voucher in voucher lane 1) + 1: paychmock.NewMockLaneState(big.NewInt(5), 1), + // Lane 2 (different from voucher lane 1) + 2: paychmock.NewMockLaneState(big.NewInt(5), 1), + }, + }, { + // voucher supersedes lane 1 redeemed so + // lane 1 effective redeemed = voucher amount + // + // required balance = total redeemed + // = 5 (new voucher lane 1) + 5 (lane 2) + // = 10 + // So required balance: 10 <= actor balance: 10 + name: "passes when voucher total redeemed <= balance", + expectError: false, + key: fromKeyPrivate, + actorBalance: big.NewInt(10), + voucherAmount: big.NewInt(5), + voucherLane: 1, + voucherNonce: 2, + laneStates: map[uint64]paych.LaneState{ + // Lane 1 (superseded by new voucher in voucher lane 1) + 1: paychmock.NewMockLaneState(big.NewInt(4), 1), + // Lane 2 (different from voucher lane 1) + 2: paychmock.NewMockLaneState(big.NewInt(5), 1), + }, + }} + + for _, tcase := range tcases { + tcase := tcase + t.Run(tcase.name, func(t *testing.T) { + // Create an 
actor for the channel with the test case balance + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: tcase.actorBalance, + } + + mock.setPaychState(ch, act, paychmock.NewMockPayChState( + fromAcct, toAcct, abi.ChainEpoch(0), tcase.laneStates)) + + // Create a manager + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Add channel To address to wallet + mock.addWalletAddress(to) + + // Create the test case signed voucher + sv := createTestVoucher(t, ch, tcase.voucherLane, tcase.voucherNonce, tcase.voucherAmount, tcase.key) + + // Check the voucher's validity + // stm: @PAYCHMGR_MANAGER_CHECK_VOUCHER_VALD_001 + err = mgr.CheckVoucherValid(ctx, ch, sv) + if tcase.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestCreateVoucher(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + // Set up a manager with a single payment channel + s := testSetupMgrWithChannel(t) + + // Create a voucher in lane 1 + voucherLane1Amt := big.NewInt(5) + voucher := types.SignedVoucher{ + Lane: 1, + Amount: voucherLane1Amt, + } + // stm: @PAYCHMGR_MANAGER_CREATE_VOUCHER_001 + res, err := s.mgr.CreateVoucher(ctx, s.ch, voucher) + require.NoError(t, err) + require.NotNil(t, res.Voucher) + require.Equal(t, s.ch, res.Voucher.ChannelAddr) + require.Equal(t, voucherLane1Amt, res.Voucher.Amount) + require.EqualValues(t, 0, res.Shortfall.Int64()) + + nonce := res.Voucher.Nonce + + // Create a voucher in lane 1 again, with a higher amount + voucherLane1Amt = big.NewInt(8) + voucher = types.SignedVoucher{ + Lane: 1, + Amount: voucherLane1Amt, + } + res, err = s.mgr.CreateVoucher(ctx, s.ch, voucher) + require.NoError(t, err) + require.NotNil(t, res.Voucher) + require.Equal(t, s.ch, res.Voucher.ChannelAddr) + require.Equal(t, voucherLane1Amt, res.Voucher.Amount) + require.EqualValues(t, 0, 
res.Shortfall.Int64()) + require.Equal(t, nonce+1, res.Voucher.Nonce) + + // Create a voucher in lane 2 that covers all the remaining funds + // in the channel + voucherLane2Amt := big.Sub(s.amt, voucherLane1Amt) + voucher = types.SignedVoucher{ + Lane: 2, + Amount: voucherLane2Amt, + } + res, err = s.mgr.CreateVoucher(ctx, s.ch, voucher) + require.NoError(t, err) + require.NotNil(t, res.Voucher) + require.Equal(t, s.ch, res.Voucher.ChannelAddr) + require.Equal(t, voucherLane2Amt, res.Voucher.Amount) + require.EqualValues(t, 0, res.Shortfall.Int64()) + + // Create a voucher in lane 2 that exceeds the remaining funds in the + // channel + voucherLane2Amt = big.Add(voucherLane2Amt, big.NewInt(1)) + voucher = types.SignedVoucher{ + Lane: 2, + Amount: voucherLane2Amt, + } + res, err = s.mgr.CreateVoucher(ctx, s.ch, voucher) + require.NoError(t, err) + + // Expect a shortfall value equal to the amount required to add the voucher + // to the channel + require.Nil(t, res.Voucher) + require.EqualValues(t, 1, res.Shortfall.Int64()) +} + +func TestAddVoucherDelta(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + // Set up a manager with a single payment channel + s := testSetupMgrWithChannel(t) + + voucherLane := uint64(1) + + // Expect error when adding a voucher whose amount is less than minDelta + minDelta := big.NewInt(2) + nonce := uint64(1) + voucherAmount := big.NewInt(1) + sv := createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + // stm: @PAYCHMGR_MANAGER_ADD_VOUCHER_OUTBOUND_001 + _, err := s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) + require.Error(t, err) + + // Expect success when adding a voucher whose amount is equal to minDelta + nonce++ + voucherAmount = big.NewInt(2) + sv = createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + delta, err := s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) + require.NoError(t, err) + require.EqualValues(t, delta.Int64(), 2) + + // 
Check that delta is correct when there's an existing voucher + nonce++ + voucherAmount = big.NewInt(5) + sv = createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + delta, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) + require.NoError(t, err) + require.EqualValues(t, delta.Int64(), 3) + + // Check that delta is correct when voucher added to a different lane + nonce = uint64(1) + voucherAmount = big.NewInt(6) + voucherLane = uint64(2) + sv = createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + delta, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) + require.NoError(t, err) + require.EqualValues(t, delta.Int64(), 6) +} + +func TestAddVoucherNextLane(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + // Set up a manager with a single payment channel + s := testSetupMgrWithChannel(t) + + minDelta := big.NewInt(0) + voucherAmount := big.NewInt(2) + + // Add a voucher in lane 2 + nonce := uint64(1) + voucherLane := uint64(2) + sv := createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + _, err := s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) + require.NoError(t, err) + + ci, err := s.mgr.GetChannelInfo(ctx, s.ch) + require.NoError(t, err) + require.EqualValues(t, ci.NextLane, 3) + + // Allocate a lane (should be lane 3) + lane, err := s.mgr.AllocateLane(ctx, s.ch) + require.NoError(t, err) + require.EqualValues(t, lane, 3) + + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) + require.NoError(t, err) + require.EqualValues(t, ci.NextLane, 4) + + // Add a voucher in lane 1 + voucherLane = uint64(1) + sv = createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) + require.NoError(t, err) + + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) + require.NoError(t, err) + require.EqualValues(t, ci.NextLane, 4) + + // Add a voucher in lane 7 + voucherLane = uint64(7) + sv 
= createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) + require.NoError(t, err) + + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) + require.NoError(t, err) + require.EqualValues(t, ci.NextLane, 8) +} + +func TestAllocateLane(t *testing.T) { + tf.UnitTest(t) + // Set up a manager with a single payment channel + s := testSetupMgrWithChannel(t) + + ctx := context.Background() + + // First lane should be 0 + lane, err := s.mgr.AllocateLane(ctx, s.ch) + require.NoError(t, err) + require.EqualValues(t, lane, 0) + + // Next lane should be 1 + lane, err = s.mgr.AllocateLane(ctx, s.ch) + require.NoError(t, err) + require.EqualValues(t, lane, 1) +} + +func TestAllocateLaneWithExistingLaneState(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic)) + to := tutils.NewSECP256K1Addr(t, "secpTo") + fromAcct := tutils.NewActorAddr(t, "fromAct") + toAcct := tutils.NewActorAddr(t, "toAct") + + mock := newMockManagerAPI() + mock.setAccountAddress(fromAcct, from) + mock.setAccountAddress(toAcct, to) + mock.addWalletAddress(to) + + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + // Create a channel that will be retrieved from state + actorBalance := big.NewInt(10) + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: actorBalance, + } + + mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + // stm: @PAYCHMGR_MANAGER_START_001 + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + // stm: @PAYCHMGR_MANAGER_STOP_001 + defer mgr.Stop() + + // Create a voucher on lane 2 + // (also reads the channel from state and puts it in the store) + voucherLane := uint64(2) + minDelta := 
big.NewInt(0) + nonce := uint64(2) + voucherAmount := big.NewInt(5) + sv := createTestVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate) + // stm: @PAYCHMGR_MANAGER_ADD_VOUCHER_INBOUND_001 + _, err = mgr.AddVoucherInbound(ctx, ch, sv, nil, minDelta) + require.NoError(t, err) + + // Allocate lane should return the next lane (lane 3) + lane, err := mgr.AllocateLane(ctx, ch) + require.NoError(t, err) + require.EqualValues(t, 3, lane) +} + +func TestAddVoucherInboundWalletKey(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic)) + to := tutils.NewSECP256K1Addr(t, "secpTo") + fromAcct := tutils.NewActorAddr(t, "fromAct") + toAcct := tutils.NewActorAddr(t, "toAct") + + // Create an actor for the channel in state + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: big.NewInt(20), + } + + mock := newMockManagerAPI() + + mock.setAccountAddress(fromAcct, from) + mock.setAccountAddress(toAcct, to) + + mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + // Create a manager + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Add a voucher + nonce := uint64(1) + voucherLane := uint64(1) + minDelta := big.NewInt(0) + voucherAmount := big.NewInt(2) + sv := createTestVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate) + _, err = mgr.AddVoucherInbound(ctx, ch, sv, nil, minDelta) + + // Should fail because there is no wallet key matching the channel To + // address (ie, the channel is not "owned" by this node) + require.Error(t, err) + + // Add wallet key for To address + mock.addWalletAddress(to) + + // Add voucher again + sv = createTestVoucher(t, ch, voucherLane, nonce, 
voucherAmount, fromKeyPrivate) + _, err = mgr.AddVoucherInbound(ctx, ch, sv, nil, minDelta) + + // Should now pass because there is a wallet key matching the channel To + // address + require.NoError(t, err) +} + +func TestBestSpendable(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + // Set up a manager with a single payment channel + s := testSetupMgrWithChannel(t) + + // Add vouchers to lane 1 with amounts: [1, 2, 3] + voucherLane := uint64(1) + minDelta := big.NewInt(0) + nonce := uint64(1) + voucherAmount := big.NewInt(1) + svL1V1 := createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + _, err := s.mgr.AddVoucherInbound(ctx, s.ch, svL1V1, nil, minDelta) + require.NoError(t, err) + + nonce++ + voucherAmount = big.NewInt(2) + svL1V2 := createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + _, err = s.mgr.AddVoucherInbound(ctx, s.ch, svL1V2, nil, minDelta) + require.NoError(t, err) + + nonce++ + voucherAmount = big.NewInt(3) + svL1V3 := createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + _, err = s.mgr.AddVoucherInbound(ctx, s.ch, svL1V3, nil, minDelta) + require.NoError(t, err) + + // Add voucher to lane 2 with amounts: [2] + voucherLane = uint64(2) + nonce = uint64(1) + voucherAmount = big.NewInt(2) + svL2V1 := createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + _, err = s.mgr.AddVoucherInbound(ctx, s.ch, svL2V1, nil, minDelta) + require.NoError(t, err) + + // Return success exit code from calls to check if voucher is spendable + bsapi := newMockBestSpendableAPI(s.mgr) + s.mock.setCallResponse(&vm.Ret{ + Receipt: types.MessageReceipt{ + ExitCode: 0, + }, + }) + + // Verify best spendable vouchers on each lane + vouchers, err := BestSpendableByLane(ctx, bsapi, s.ch) + require.NoError(t, err) + require.Len(t, vouchers, 2) + + vchr, ok := vouchers[1] + require.True(t, ok) + require.EqualValues(t, 3, vchr.Amount.Int64()) + + vchr, ok 
= vouchers[2] + require.True(t, ok) + require.EqualValues(t, 2, vchr.Amount.Int64()) + + // Submit voucher from lane 2 + // stm: @PAYCHMGR_MANAGER_SUBMIT_VOUCHER_001 + _, err = s.mgr.SubmitVoucher(ctx, s.ch, svL2V1, nil, nil) + require.NoError(t, err) + + // Best spendable voucher should no longer include lane 2 + // (because voucher has not been submitted) + vouchers, err = BestSpendableByLane(ctx, bsapi, s.ch) + require.NoError(t, err) + require.Len(t, vouchers, 1) + + // Submit first voucher from lane 1 + _, err = s.mgr.SubmitVoucher(ctx, s.ch, svL1V1, nil, nil) + require.NoError(t, err) + + // Best spendable voucher for lane 1 should still be highest value voucher + vouchers, err = BestSpendableByLane(ctx, bsapi, s.ch) + require.NoError(t, err) + require.Len(t, vouchers, 1) + + vchr, ok = vouchers[1] + require.True(t, ok) + require.EqualValues(t, 3, vchr.Amount.Int64()) +} + +func TestCheckSpendable(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + // Set up a manager with a single payment channel + s := testSetupMgrWithChannel(t) + + // Create voucher with Extra + voucherLane := uint64(1) + nonce := uint64(1) + voucherAmount := big.NewInt(1) + voucher := createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + + // Add voucher + minDelta := big.NewInt(0) + _, err := s.mgr.AddVoucherInbound(ctx, s.ch, voucher, nil, minDelta) + require.NoError(t, err) + + // Return success exit code from VM call, which indicates that voucher is + // spendable + successResponse := &vm.Ret{ + Receipt: types.MessageReceipt{ + ExitCode: 0, + }, + } + s.mock.setCallResponse(successResponse) + + // Check that spendable is true + secret := []byte("secret") + // stm: @PAYCHMGR_MANAGER_CHECK_VOUCHER_SPENDABLE_001 + spendable, err := s.mgr.CheckVoucherSpendable(ctx, s.ch, voucher, secret, nil) + require.NoError(t, err) + require.True(t, spendable) + + // Check that the secret was passed through correctly + lastCall := s.mock.getLastCall() + var p 
paych2.UpdateChannelStateParams + err = p.UnmarshalCBOR(bytes.NewReader(lastCall.Params)) + require.NoError(t, err) + require.Equal(t, secret, p.Secret) + + // Check that if LegacyVM call returns non-success exit code, spendable is false + s.mock.setCallResponse(&vm.Ret{ + Receipt: types.MessageReceipt{ + ExitCode: 1, + }, + }) + spendable, err = s.mgr.CheckVoucherSpendable(ctx, s.ch, voucher, secret, nil) + require.NoError(t, err) + require.False(t, spendable) + + // Return success exit code (indicating voucher is spendable) + s.mock.setCallResponse(successResponse) + spendable, err = s.mgr.CheckVoucherSpendable(ctx, s.ch, voucher, secret, nil) + require.NoError(t, err) + require.True(t, spendable) + + // Check that voucher is no longer spendable once it has been submitted + _, err = s.mgr.SubmitVoucher(ctx, s.ch, voucher, nil, nil) + require.NoError(t, err) + + spendable, err = s.mgr.CheckVoucherSpendable(ctx, s.ch, voucher, secret, nil) + require.NoError(t, err) + require.False(t, spendable) +} + +func TestSubmitVoucher(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + // Set up a manager with a single payment channel + s := testSetupMgrWithChannel(t) + + // Create voucher with Extra + voucherLane := uint64(1) + nonce := uint64(1) + voucherAmount := big.NewInt(1) + voucher := createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + + // Add voucher + minDelta := big.NewInt(0) + _, err := s.mgr.AddVoucherInbound(ctx, s.ch, voucher, nil, minDelta) + require.NoError(t, err) + + // Submit voucher + secret := []byte("secret") + submitCid, err := s.mgr.SubmitVoucher(ctx, s.ch, voucher, secret, nil) + require.NoError(t, err) + + // Check that the secret was passed through correctly + msg := s.mock.pushedMessages(submitCid) + var p paych2.UpdateChannelStateParams + err = p.UnmarshalCBOR(bytes.NewReader(msg.Message.Params)) + require.NoError(t, err) + + // Submit a voucher without first adding it + nonce++ + voucherAmount = 
big.NewInt(3) + voucher = createTestVoucher(t, s.ch, voucherLane, nonce, voucherAmount, s.fromKeyPrivate) + submitCid, err = s.mgr.SubmitVoucher(ctx, s.ch, voucher, nil, nil) + require.NoError(t, err) + + msg = s.mock.pushedMessages(submitCid) + var p3 paych2.UpdateChannelStateParams + err = p3.UnmarshalCBOR(bytes.NewReader(msg.Message.Params)) + require.NoError(t, err) + + // Verify that vouchers are marked as submitted + // stm: @PAYCHMGR_MANAGER_LIST_VOUCHERS_001 + vis, err := s.mgr.ListVouchers(ctx, s.ch) + require.NoError(t, err) + require.Len(t, vis, 2) + + for _, vi := range vis { + require.True(t, vi.Submitted) + } + + // Attempting to submit the same voucher again should fail + _, err = s.mgr.SubmitVoucher(ctx, s.ch, voucher, nil, nil) + require.Error(t, err) +} + +type testScaffold struct { + mgr *Manager + mock *mockManagerAPI + ch address.Address + amt big.Int + fromAcct address.Address + fromKeyPrivate []byte +} + +func testSetupMgrWithChannel(t *testing.T) *testScaffold { + ctx := context.Background() + fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic)) + to := tutils.NewSECP256K1Addr(t, "secpTo") + fromAcct := tutils.NewActorAddr(t, "fromAct") + toAcct := tutils.NewActorAddr(t, "toAct") + + mock := newMockManagerAPI() + mock.setAccountAddress(fromAcct, from) + mock.setAccountAddress(toAcct, to) + + // Create channel in state + balance := big.NewInt(20) + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: balance, + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Create the channel in the manager's store + ci := &pchTypes.ChannelInfo{ + Channel: &ch, + Control: fromAcct, + Target: 
toAcct, + Direction: pchTypes.DirOutbound, + } + err = mgr.store.putChannelInfo(ctx, ci) + require.NoError(t, err) + + // Add the from signing key to the wallet + mock.addSigningKey(fromKeyPrivate) + + return &testScaffold{ + mgr: mgr, + mock: mock, + ch: ch, + amt: balance, + fromAcct: fromAcct, + fromKeyPrivate: fromKeyPrivate, + } +} + +func testGenerateKeyPair(t *testing.T) ([]byte, []byte) { + // stm: @CRYPTO_SIG_GENERATE_001 + priv, err := crypto2.Generate(crypto.SigTypeSecp256k1) + require.NoError(t, err) + // stm: @CRYPTO_SIG_TO_PUBLIC_001 + pub, err := crypto2.ToPublic(crypto.SigTypeSecp256k1, priv) + require.NoError(t, err) + return priv, pub +} + +func createTestVoucher(t *testing.T, ch address.Address, voucherLane uint64, nonce uint64, voucherAmount big.Int, key []byte) *types.SignedVoucher { + sv := &types.SignedVoucher{ + ChannelAddr: ch, + Lane: voucherLane, + Nonce: nonce, + Amount: voucherAmount, + } + + signingBytes, err := sv.SigningBytes() + require.NoError(t, err) + sig, err := crypto2.Sign(signingBytes, key, crypto.SigTypeSecp256k1) + require.NoError(t, err) + sv.Signature = sig + return sv +} + +type mockBestSpendableAPI struct { + mgr *Manager +} + +func (m *mockBestSpendableAPI) PaychVoucherList(ctx context.Context, ch address.Address) ([]*types.SignedVoucher, error) { + vi, err := m.mgr.ListVouchers(ctx, ch) + if err != nil { + return nil, err + } + + out := make([]*types.SignedVoucher, len(vi)) + for k, v := range vi { + out[k] = v.Voucher + } + + return out, nil +} + +func (m *mockBestSpendableAPI) PaychVoucherCheckSpendable(ctx context.Context, ch address.Address, voucher *types.SignedVoucher, secret []byte, proof []byte) (bool, error) { + return m.mgr.CheckVoucherSpendable(ctx, ch, voucher, secret, proof) +} + +func newMockBestSpendableAPI(mgr *Manager) BestSpendableAPI { + return &mockBestSpendableAPI{mgr: mgr} +} diff --git a/pkg/paychmgr/paychget_test.go b/pkg/paychmgr/paychget_test.go new file mode 100644 index 
0000000000..a4b0f08569 --- /dev/null +++ b/pkg/paychmgr/paychget_test.go @@ -0,0 +1,1749 @@ +// stm: #unit +package paychmgr + +import ( + "context" + "sync" + "testing" + "time" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" + + cborrpc "github.com/filecoin-project/go-cbor-util" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-actors/v2/actors/builtin" + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + tutils "github.com/filecoin-project/specs-actors/v6/support/testing" + + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ "github.com/filecoin-project/venus/pkg/crypto/secp" + lotusinit "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" + paychmock "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych/mock" +) + +var onChainReserve = GetOpts{ + Reserve: true, + OffChain: false, +} + +var onChainNoReserve = GetOpts{ + Reserve: false, + OffChain: false, +} + +var offChainReserve = GetOpts{ + Reserve: true, + OffChain: true, +} + +var offChainNoReserve = GetOpts{ + Reserve: false, + OffChain: true, +} + +func testChannelResponse(t *testing.T, ch address.Address) types.MessageReceipt { + createChannelRet := init2.ExecReturn{ + IDAddress: ch, + RobustAddress: ch, + } + createChannelRetBytes, err := cborrpc.Dump(&createChannelRet) + require.NoError(t, err) + createChannelResponse := types.MessageReceipt{ + ExitCode: 0, + Return: createChannelRetBytes, + } + return createChannelResponse +} + +// TestPaychGetCreateChannelMsg tests that GetPaych sends a message to create +// a new 
channel with the correct funds +func TestPaychGetCreateChannelMsg(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + amt := big.NewInt(10) + // stm: @PAYCHMGR_MANAGER_GET_PAYCH_001 + ch, mcid, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + require.Equal(t, address.Undef, ch) + + pushedMsg := mock.pushedMessages(mcid) + require.Equal(t, from, pushedMsg.Message.From) + require.Equal(t, lotusinit.Address, pushedMsg.Message.To) + require.Equal(t, amt, pushedMsg.Message.Value) +} + +func TestPaychGetOffchainNoReserveFails(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + amt := big.NewInt(10) + _, _, err = mgr.GetPaych(ctx, from, to, amt, offChainNoReserve) + require.Error(t, err) +} + +func TestPaychGetCreateOffchainReserveFails(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + amt := big.NewInt(10) + _, _, err = mgr.GetPaych(ctx, from, to, amt, offChainReserve) + require.Error(t, err) +} + +// TestPaychGetCreateChannelThenAddFunds tests creating a channel and then +// adding funds to it +func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := 
NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + amt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + // Should have no channels yet (message sent but channel not created) + // stm: @PAYCHMGR_MANAGER_LIST_CHANNELS_001 + cis, err := mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 0) + + // 1. Set up create channel response (sent in response to WaitForMsg()) + response := testChannelResponse(t, ch) + + done := make(chan struct{}) + go func() { + defer close(done) + + // 2. Request add funds - should block until create channel has completed + amt2 := big.NewInt(5) + ch2, addFundsMsgCid, err := mgr.GetPaych(ctx, from, to, amt2, onChainReserve) + + // 4. This GetPaych should return after create channel from first + // GetPaych completes + require.NoError(t, err) + + // Expect the channel to be the same + require.Equal(t, ch, ch2) + // Expect add funds message CID to be different to create message cid + require.NotEqual(t, createMsgCid, addFundsMsgCid) + + // Should have one channel, whose address is the channel that was created + cis, err := mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 1) + require.Equal(t, ch, cis[0]) + + // Amount should be amount sent to first GetPaych (to create + // channel). 
+ // PendingAmount should be amount sent in second GetPaych + // (second GetPaych triggered add funds, which has not yet been confirmed) + // stm: @PAYCHMGR_MANAGER_GET_CHANNEL_INFO_001 + ci, err := mgr.GetChannelInfo(ctx, ch) + require.NoError(t, err) + require.EqualValues(t, 10, ci.Amount.Int64()) + require.EqualValues(t, 5, ci.PendingAmount.Int64()) + require.Nil(t, ci.CreateMsg) + + // Trigger add funds confirmation + mock.receiveMsgResponse(addFundsMsgCid, types.MessageReceipt{ExitCode: 0}) + + // Wait for add funds confirmation to be processed by manager + _, err = mgr.GetPaychWaitReady(ctx, addFundsMsgCid) + require.NoError(t, err) + + // Should still have one channel + cis, err = mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 1) + require.Equal(t, ch, cis[0]) + + // Channel amount should include last amount sent to GetPaych + ci, err = mgr.GetChannelInfo(ctx, ch) + require.NoError(t, err) + require.EqualValues(t, 15, ci.Amount.Int64()) + require.EqualValues(t, 0, ci.PendingAmount.Int64()) + require.Nil(t, ci.AddFundsMsg) + }() + + // 3. 
Send create channel response + mock.receiveMsgResponse(createMsgCid, response) + + <-done +} + +func TestPaychGetCreatePrefundedChannelThenAddFunds(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + amt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, amt, onChainNoReserve) + require.NoError(t, err) + + // Should have no channels yet (message sent but channel not created) + cis, err := mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 0) + + // 1. Set up create channel response (sent in response to WaitForMsg()) + response := testChannelResponse(t, ch) + + done := make(chan struct{}) + go func() { + defer close(done) + + // 2. Request add funds - shouldn't block + amt2 := big.NewInt(3) + ch2, addFundsMsgCid, err := mgr.GetPaych(ctx, from, to, amt2, offChainReserve) + + // 4. This GetPaych should return after create channel from first + // GetPaych completes + require.NoError(t, err) + + // Expect the channel to be the same + require.Equal(t, ch, ch2) + require.Equal(t, cid.Undef, addFundsMsgCid) + + // Should have one channel, whose address is the channel that was created + cis, err := mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 1) + require.Equal(t, ch, cis[0]) + + // Amount should be amount sent to first GetPaych (to create + // channel). 
+ // PendingAmount should be zero, AvailableAmount should be Amount minus what we requested + + ci, err := mgr.GetChannelInfo(ctx, ch) + require.NoError(t, err) + require.EqualValues(t, 10, ci.Amount.Int64()) + require.EqualValues(t, 0, ci.PendingAmount.Int64()) + require.EqualValues(t, 7, ci.AvailableAmount.Int64()) + require.Nil(t, ci.CreateMsg) + require.Nil(t, ci.AddFundsMsg) + }() + + // 3. Send create channel response + mock.receiveMsgResponse(createMsgCid, response) + + <-done +} + +// TestPaychGetCreateChannelWithErrorThenCreateAgain tests that if an +// operation is queued up behind a create channel operation, and the create +// channel fails, then the waiting operation can succeed. +func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel + amt := big.NewInt(10) + _, mcid1, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + // 1. Set up create channel response (sent in response to WaitForMsg()) + // This response indicates an error. + errResponse := types.MessageReceipt{ + ExitCode: 1, // error + Return: []byte{}, + } + + done := make(chan struct{}) + go func() { + defer close(done) + + // 2. Should block until create channel has completed. + // Because first channel create fails, this request + // should be for channel create again. + amt2 := big.NewInt(5) + ch2, mcid2, err := mgr.GetPaych(ctx, from, to, amt2, onChainReserve) + require.NoError(t, err) + require.Equal(t, address.Undef, ch2) + + // 4. 
Send a success response + ch := tutils.NewIDAddr(t, 100) + successResponse := testChannelResponse(t, ch) + mock.receiveMsgResponse(mcid2, successResponse) + + _, err = mgr.GetPaychWaitReady(ctx, mcid2) + require.NoError(t, err) + + // Should have one channel, whose address is the channel that was created + cis, err := mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 1) + require.Equal(t, ch, cis[0]) + + ci, err := mgr.GetChannelInfo(ctx, ch) + require.NoError(t, err) + require.Equal(t, amt2, ci.Amount) + }() + + // 3. Send error response to first channel create + mock.receiveMsgResponse(mcid1, errResponse) + + <-done +} + +// TestPaychGetRecoverAfterError tests that after a create channel fails, the +// next attempt to create channel can succeed. +func TestPaychGetRecoverAfterError(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel + amt := big.NewInt(10) + _, mcid, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + // Send error create channel response + mock.receiveMsgResponse(mcid, types.MessageReceipt{ + ExitCode: 1, // error + Return: []byte{}, + }) + + // Send create message for a channel again + amt2 := big.NewInt(7) + _, mcid2, err := mgr.GetPaych(ctx, from, to, amt2, onChainReserve) + require.NoError(t, err) + + // Send success create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(mcid2, response) + + _, err = mgr.GetPaychWaitReady(ctx, mcid2) + require.NoError(t, err) + + // Should have one channel, whose address is the channel that was created + cis, err := mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 1) 
+ require.Equal(t, ch, cis[0]) + + ci, err := mgr.GetChannelInfo(ctx, ch) + require.NoError(t, err) + require.Equal(t, amt2, ci.Amount) + require.EqualValues(t, 0, ci.PendingAmount.Int64()) + require.Nil(t, ci.CreateMsg) +} + +// TestPaychGetRecoverAfterAddFundsError tests that after an add funds fails, the +// next attempt to add funds can succeed. +func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel + amt := big.NewInt(10) + _, mcid1, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + // Send success create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(mcid1, response) + + // Send add funds message for channel + amt2 := big.NewInt(5) + _, mcid2, err := mgr.GetPaych(ctx, from, to, amt2, onChainReserve) + require.NoError(t, err) + + // Send error add funds response + mock.receiveMsgResponse(mcid2, types.MessageReceipt{ + ExitCode: 1, // error + Return: []byte{}, + }) + + _, err = mgr.GetPaychWaitReady(ctx, mcid2) + require.Error(t, err) + + // Should have one channel, whose address is the channel that was created + cis, err := mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 1) + require.Equal(t, ch, cis[0]) + + ci, err := mgr.GetChannelInfo(ctx, ch) + require.NoError(t, err) + require.Equal(t, amt, ci.Amount) + require.EqualValues(t, 0, 
ci.PendingAmount.Int64()) + require.Nil(t, ci.CreateMsg) + require.Nil(t, ci.AddFundsMsg) + + // Send add funds message for channel again + amt3 := big.NewInt(2) + _, mcid3, err := mgr.GetPaych(ctx, from, to, amt3, onChainReserve) + require.NoError(t, err) + + // Send success add funds response + mock.receiveMsgResponse(mcid3, types.MessageReceipt{ + ExitCode: 0, + Return: []byte{}, + }) + + _, err = mgr.GetPaychWaitReady(ctx, mcid3) + require.NoError(t, err) + + // Should have one channel, whose address is the channel that was created + cis, err = mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 1) + require.Equal(t, ch, cis[0]) + + // Amount should include amount for successful add funds msg + ci, err = mgr.GetChannelInfo(ctx, ch) + require.NoError(t, err) + require.Equal(t, amt.Int64()+amt3.Int64(), ci.Amount.Int64()) + require.EqualValues(t, 0, ci.PendingAmount.Int64()) + require.Nil(t, ci.CreateMsg) + require.Nil(t, ci.AddFundsMsg) +} + +// TestPaychGetRecoverAfterAddFundsError tests that after an add funds fails, the +// next attempt to add funds can succeed. 
+func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + amt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + // Simulate shutting down system + mock.close() + + // Create a new manager with the same datastore + mock2 := newMockManagerAPI() + defer mock2.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock2.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr2, err := newManager(ctx, store, mock2) + require.NoError(t, err) + + // Should have no channels yet (message sent but channel not created) + cis, err := mgr2.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 0) + + // 1. Set up create channel response (sent in response to WaitForMsg()) + response := testChannelResponse(t, ch) + + done := make(chan struct{}) + go func() { + defer close(done) + + // 2. Request add funds - should block until create channel has completed + amt2 := big.NewInt(5) + ch2, addFundsMsgCid, err := mgr2.GetPaych(ctx, from, to, amt2, onChainReserve) + + // 4. 
This GetPaych should return after create channel from first + // GetPaych completes + require.NoError(t, err) + + // Expect the channel to have been created + require.Equal(t, ch, ch2) + // Expect add funds message CID to be different to create message cid + require.NotEqual(t, createMsgCid, addFundsMsgCid) + + // Should have one channel, whose address is the channel that was created + cis, err := mgr2.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 1) + require.Equal(t, ch, cis[0]) + + // Amount should be amount sent to first GetPaych (to create + // channel). + // PendingAmount should be amount sent in second GetPaych + // (second GetPaych triggered add funds, which has not yet been confirmed) + ci, err := mgr2.GetChannelInfo(ctx, ch) + require.NoError(t, err) + require.EqualValues(t, 10, ci.Amount.Int64()) + require.EqualValues(t, 5, ci.PendingAmount.Int64()) + require.Nil(t, ci.CreateMsg) + }() + + // 3. Send create channel response + mock2.receiveMsgResponse(createMsgCid, response) + + <-done +} + +// TestPaychGetRestartAfterAddFundsMsg tests that if the system stops +// right after the add funds message is sent, the add funds will be +// processed when the system restarts. 
+func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel + amt := big.NewInt(10) + _, mcid1, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + // Send success create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(mcid1, response) + + // Send add funds message for channel + amt2 := big.NewInt(5) + _, mcid2, err := mgr.GetPaych(ctx, from, to, amt2, onChainReserve) + require.NoError(t, err) + + // Simulate shutting down system + mock.close() + + // Create a new manager with the same datastore + mock2 := newMockManagerAPI() + defer mock2.close() + + mock2.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr2, err := newManager(ctx, store, mock2) + require.NoError(t, err) + + // Send success add funds response + mock2.receiveMsgResponse(mcid2, types.MessageReceipt{ + ExitCode: 0, + Return: []byte{}, + }) + + _, err = mgr2.GetPaychWaitReady(ctx, mcid2) + require.NoError(t, err) + + // Should have one channel, whose address is the channel that was created + cis, err := mgr2.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 1) + require.Equal(t, ch, cis[0]) + + // Amount should include amount for successful add funds msg + ci, err := mgr2.GetChannelInfo(ctx, ch) + require.NoError(t, err) + require.Equal(t, 
amt.Int64()+amt2.Int64(), ci.Amount.Int64()) + require.EqualValues(t, 0, ci.PendingAmount.Int64()) + require.Nil(t, ci.CreateMsg) + require.Nil(t, ci.AddFundsMsg) +} + +// TestPaychGetWait tests that GetPaychWaitReady correctly waits for the +// channel to be created or funds to be added +func TestPaychGetWait(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + expch := tutils.NewIDAddr(t, 100) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(expch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // 1. Get + amt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + go func() { + // 3. Send response + response := testChannelResponse(t, expch) + mock.receiveMsgResponse(createMsgCid, response) + }() + + // 2. Wait till ready + ch, err := mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + require.Equal(t, expch, ch) + + // 4. Wait again - message has already been received so should + // return immediately + ch, err = mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + require.Equal(t, expch, ch) + + // Request add funds + amt2 := big.NewInt(15) + _, addFundsMsgCid, err := mgr.GetPaych(ctx, from, to, amt2, onChainReserve) + require.NoError(t, err) + + go func() { + // 6. Send add funds response + addFundsResponse := types.MessageReceipt{ + ExitCode: 0, + Return: []byte{}, + } + mock.receiveMsgResponse(addFundsMsgCid, addFundsResponse) + }() + + // 5. 
Wait for add funds + ch, err = mgr.GetPaychWaitReady(ctx, addFundsMsgCid) + require.NoError(t, err) + require.Equal(t, expch, ch) +} + +// TestPaychGetWaitErr tests that GetPaychWaitReady correctly handles errors +func TestPaychGetWaitErr(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // 1. Create channel + amt := big.NewInt(10) + _, mcid, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + done := make(chan address.Address) + go func() { + defer close(done) + + // 2. Wait for channel to be created + _, err := mgr.GetPaychWaitReady(ctx, mcid) + + // 4. Channel creation should have failed + require.NotNil(t, err) + + // 5. Call wait again with the same message CID + _, err = mgr.GetPaychWaitReady(ctx, mcid) + + // 6. Should return immediately with the same error + require.NotNil(t, err) + }() + + // 3. 
Send error response to create channel + response := types.MessageReceipt{ + ExitCode: 1, // error + Return: []byte{}, + } + mock.receiveMsgResponse(mcid, response) + + <-done +} + +// TestPaychGetWaitCtx tests that GetPaychWaitReady returns early if the context +// is cancelled +func TestPaychGetWaitCtx(t *testing.T) { + tf.UnitTest(t) + ctx, cancel := context.WithCancel(context.Background()) + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + amt := big.NewInt(10) + _, mcid, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + // When the context is cancelled, should unblock wait + go func() { + cancel() + }() + + _, err = mgr.GetPaychWaitReady(ctx, mcid) + require.Error(t, ctx.Err(), err) +} + +// TestPaychGetMergeAddFunds tests that if a create channel is in +// progress and two add funds are queued up behind it, the two add funds +// will be merged +func TestPaychGetMergeAddFunds(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainReserve) + require.NoError(t, err) + + // Queue up two add funds requests behind create channel + var 
addFundsSent sync.WaitGroup + addFundsSent.Add(2) + + addFundsAmt1 := big.NewInt(5) + addFundsAmt2 := big.NewInt(3) + var addFundsCh1 address.Address + var addFundsCh2 address.Address + var addFundsMcid1 cid.Cid + var addFundsMcid2 cid.Cid + go func() { + defer addFundsSent.Done() + + // Request add funds - should block until create channel has completed + var err error + addFundsCh1, addFundsMcid1, err = mgr.GetPaych(ctx, from, to, addFundsAmt1, onChainReserve) + require.NoError(t, err) + }() + + go func() { + defer addFundsSent.Done() + + // Request add funds again - should merge with waiting add funds request + var err error + addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2, onChainReserve) + require.NoError(t, err) + }() + // Wait for add funds requests to be queued up + waitForQueueSize(t, mgr, from, to, 2) + + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + + // Wait for create channel response + chres, err := mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + require.Equal(t, ch, chres) + + // Wait for add funds requests to be sent + addFundsSent.Wait() + + // Expect add funds requests to have same channel as create channel and + // same message cid as each other (because they should have been merged) + require.Equal(t, ch, addFundsCh1) + require.Equal(t, ch, addFundsCh2) + require.Equal(t, addFundsMcid1, addFundsMcid2) + + // Send success add funds response + mock.receiveMsgResponse(addFundsMcid1, types.MessageReceipt{ + ExitCode: 0, + Return: []byte{}, + }) + + // Wait for add funds response + addFundsCh, err := mgr.GetPaychWaitReady(ctx, addFundsMcid1) + require.NoError(t, err) + require.Equal(t, ch, addFundsCh) + + // Make sure that one create channel message and one add funds message was + // sent + require.Equal(t, 2, mock.pushedMessageCount()) + + // Check create message amount is correct + createMsg := 
mock.pushedMessages(createMsgCid) + require.Equal(t, from, createMsg.Message.From) + require.Equal(t, lotusinit.Address, createMsg.Message.To) + require.Equal(t, createAmt, createMsg.Message.Value) + + // Check merged add funds amount is the sum of the individual + // amounts + addFundsMsg := mock.pushedMessages(addFundsMcid1) + require.Equal(t, from, addFundsMsg.Message.From) + require.Equal(t, ch, addFundsMsg.Message.To) + require.Equal(t, big.Add(addFundsAmt1, addFundsAmt2), addFundsMsg.Message.Value) +} + +func TestPaychGetMergePrefundAndReserve(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainReserve) + require.NoError(t, err) + + // Queue up two add funds requests behind create channel + var addFundsSent sync.WaitGroup + addFundsSent.Add(2) + + addFundsAmt1 := big.NewInt(5) // 1 prefunds + addFundsAmt2 := big.NewInt(3) // 2 reserves + var addFundsCh1 address.Address + var addFundsCh2 address.Address + var addFundsMcid1 cid.Cid + var addFundsMcid2 cid.Cid + go func() { + defer addFundsSent.Done() + + // Request add funds - should block until create channel has completed + var err error + addFundsCh1, addFundsMcid1, err = mgr.GetPaych(ctx, from, to, addFundsAmt1, onChainNoReserve) + require.NoError(t, err) + }() + + go func() { + defer addFundsSent.Done() + + // 
Request add funds again - should merge with waiting add funds request + var err error + addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2, onChainReserve) + require.NoError(t, err) + }() + // Wait for add funds requests to be queued up + waitForQueueSize(t, mgr, from, to, 2) + + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + + // Wait for create channel response + chres, err := mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + require.Equal(t, ch, chres) + + // Wait for add funds requests to be sent + addFundsSent.Wait() + + // Expect add funds requests to have same channel as create channel and + // same message cid as each other (because they should have been merged) + require.Equal(t, ch, addFundsCh1) + require.Equal(t, ch, addFundsCh2) + require.Equal(t, addFundsMcid1, addFundsMcid2) + + // Send success add funds response + mock.receiveMsgResponse(addFundsMcid1, types.MessageReceipt{ + ExitCode: 0, + Return: []byte{}, + }) + + // Wait for add funds response + addFundsCh, err := mgr.GetPaychWaitReady(ctx, addFundsMcid1) + require.NoError(t, err) + require.Equal(t, ch, addFundsCh) + + // Make sure that one create channel message and one add funds message was + // sent + require.Equal(t, 2, mock.pushedMessageCount()) + + // Check create message amount is correct + createMsg := mock.pushedMessages(createMsgCid) + require.Equal(t, from, createMsg.Message.From) + require.Equal(t, lotusinit.Address, createMsg.Message.To) + require.Equal(t, createAmt, createMsg.Message.Value) + + // Check merged add funds amount is the sum of the individual + // amounts + addFundsMsg := mock.pushedMessages(addFundsMcid1) + require.Equal(t, from, addFundsMsg.Message.From) + require.Equal(t, ch, addFundsMsg.Message.To) + require.Equal(t, types.BigAdd(addFundsAmt1, addFundsAmt2), addFundsMsg.Message.Value) +} + +func TestPaychGetMergePrefundAndReservePrefunded(t 
*testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainNoReserve) + require.NoError(t, err) + + // Queue up two add funds requests behind create channel + var addFundsSent sync.WaitGroup + addFundsSent.Add(2) + + addFundsAmt1 := big.NewInt(5) // 1 prefunds + addFundsAmt2 := big.NewInt(3) // 2 reserves + var addFundsCh1 address.Address + var addFundsCh2 address.Address + var addFundsMcid1 cid.Cid + var addFundsMcid2 cid.Cid + go func() { + defer addFundsSent.Done() + + // Request add funds - should block until create channel has completed + var err error + addFundsCh1, addFundsMcid1, err = mgr.GetPaych(ctx, from, to, addFundsAmt1, onChainNoReserve) + require.NoError(t, err) + }() + + go func() { + defer addFundsSent.Done() + + // Request add funds again - should merge with waiting add funds request + var err error + addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2, onChainReserve) + require.NoError(t, err) + }() + // Wait for add funds requests to be queued up + waitForQueueSize(t, mgr, from, to, 2) + + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + + // Wait for create channel response + chres, err := mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + 
require.Equal(t, ch, chres) + + // Wait for add funds requests to be sent + addFundsSent.Wait() + + // Expect add funds requests to have same channel as create channel and + // same message cid as each other (because they should have been merged) + require.Equal(t, ch, addFundsCh1) + require.Equal(t, ch, addFundsCh2) + require.NotEqual(t, cid.Undef, addFundsMcid1) + require.Equal(t, cid.Undef, addFundsMcid2) + + // Send success add funds response + mock.receiveMsgResponse(addFundsMcid1, types.MessageReceipt{ + ExitCode: 0, + Return: []byte{}, + }) + + // Wait for add funds response + addFundsCh, err := mgr.GetPaychWaitReady(ctx, addFundsMcid1) + require.NoError(t, err) + require.Equal(t, ch, addFundsCh) + + // Make sure that one create channel message and one add funds message was + // sent + require.Equal(t, 2, mock.pushedMessageCount()) + + // Check create message amount is correct + createMsg := mock.pushedMessages(createMsgCid) + require.Equal(t, from, createMsg.Message.From) + require.Equal(t, lotusinit.Address, createMsg.Message.To) + require.Equal(t, createAmt, createMsg.Message.Value) + + // Check merged add funds amount is the sum of the individual + // amounts + addFundsMsg := mock.pushedMessages(addFundsMcid1) + require.Equal(t, from, addFundsMsg.Message.From) + require.Equal(t, ch, addFundsMsg.Message.To) + require.Equal(t, addFundsAmt1, addFundsMsg.Message.Value) +} + +func TestPaychGetMergePrefundAndReservePrefundedOneOffchain(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + 
mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainNoReserve) + require.NoError(t, err) + + // Queue up two add funds requests behind create channel + var addFundsSent sync.WaitGroup + addFundsSent.Add(2) + + addFundsAmt1 := big.NewInt(5) // 1 reserves + addFundsAmt2 := big.NewInt(3) // 2 reserves + var addFundsCh1 address.Address + var addFundsCh2 address.Address + var addFundsMcid1 cid.Cid + var addFundsMcid2 cid.Cid + go func() { + defer addFundsSent.Done() + + // Request add funds - should block until create channel has completed + var err error + addFundsCh1, addFundsMcid1, err = mgr.GetPaych(ctx, from, to, addFundsAmt1, offChainReserve) + require.NoError(t, err) + }() + + go func() { + defer addFundsSent.Done() + + // Request add funds again - should merge with waiting add funds request + var err error + addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2, onChainReserve) + require.NoError(t, err) + }() + // Wait for add funds requests to be queued up + waitForQueueSize(t, mgr, from, to, 2) + + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + + // Wait for create channel response + chres, err := mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + require.Equal(t, ch, chres) + + // Wait for add funds requests to be sent + addFundsSent.Wait() + + // Expect add funds requests to have same channel as create channel and + // same message cid as each other (because they should have been merged) + require.Equal(t, ch, addFundsCh1) + require.Equal(t, ch, addFundsCh2) + require.Equal(t, cid.Undef, addFundsMcid1) + require.Equal(t, cid.Undef, addFundsMcid2) + + // Make sure that one create channel message was sent + require.Equal(t, 1, mock.pushedMessageCount()) + + // Check create 
message amount is correct + createMsg := mock.pushedMessages(createMsgCid) + require.Equal(t, from, createMsg.Message.From) + require.Equal(t, lotusinit.Address, createMsg.Message.To) + require.Equal(t, createAmt, createMsg.Message.Value) +} + +func TestPaychGetMergePrefundAndReservePrefundedBothOffchainOneFail(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainNoReserve) + require.NoError(t, err) + + // Queue up two add funds requests behind create channel + var addFundsSent sync.WaitGroup + addFundsSent.Add(2) + + addFundsAmt1 := big.NewInt(5) // 1 reserves + addFundsAmt2 := big.NewInt(6) // 2 reserves too much + var addFundsCh1 address.Address + var addFundsCh2 address.Address + var addFundsMcid1 cid.Cid + var addFundsMcid2 cid.Cid + go func() { + defer addFundsSent.Done() + + // Request add funds - should block until create channel has completed + var err error + addFundsCh1, addFundsMcid1, err = mgr.GetPaych(ctx, from, to, addFundsAmt1, offChainReserve) + require.NoError(t, err) + }() + + go func() { + defer addFundsSent.Done() + + // Request add funds again - should merge with waiting add funds request + var err error + addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2, offChainReserve) + require.Error(t, err) + }() + // Wait for add funds 
requests to be queued up + waitForQueueSize(t, mgr, from, to, 2) + + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + + // Wait for create channel response + chres, err := mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + require.Equal(t, ch, chres) + + // Wait for add funds requests to be sent + addFundsSent.Wait() + + // Expect add funds requests to have same channel as create channel and + // same message cid as each other (because they should have been merged) + require.Equal(t, ch, addFundsCh1) + require.Equal(t, ch, addFundsCh2) + require.Equal(t, cid.Undef, addFundsMcid1) + require.Equal(t, cid.Undef, addFundsMcid2) + + // Make sure that one create channel message was sent + require.Equal(t, 1, mock.pushedMessageCount()) + + // Check create message amount is correct + createMsg := mock.pushedMessages(createMsgCid) + require.Equal(t, from, createMsg.Message.From) + require.Equal(t, lotusinit.Address, createMsg.Message.To) + require.Equal(t, createAmt, createMsg.Message.Value) +} + +func TestPaychGetMergePrefundAndReserveOneOffchainOneFail(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainReserve) + require.NoError(t, err) + + // Queue up two add funds requests behind create 
channel + var addFundsSent sync.WaitGroup + addFundsSent.Add(2) + + addFundsAmt1 := big.NewInt(5) // 1 reserves + addFundsAmt2 := big.NewInt(6) // 2 reserves + var addFundsCh1 address.Address + var addFundsCh2 address.Address + var addFundsMcid1 cid.Cid + var addFundsMcid2 cid.Cid + go func() { + defer addFundsSent.Done() + + // Request add funds - should block until create channel has completed + var err error + addFundsCh1, addFundsMcid1, err = mgr.GetPaych(ctx, from, to, addFundsAmt1, onChainReserve) + require.NoError(t, err) + }() + + go func() { + defer addFundsSent.Done() + + // Request add funds again - should merge with waiting add funds request + var err error + addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2, offChainReserve) + require.Error(t, err) + }() + // Wait for add funds requests to be queued up + waitForQueueSize(t, mgr, from, to, 2) + + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + + // Wait for create channel response + chres, err := mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + require.Equal(t, ch, chres) + + // Wait for add funds requests to be sent + addFundsSent.Wait() + + // Expect add funds requests to have same channel as create channel and + // same message cid as each other (because they should have been merged) + require.Equal(t, ch, addFundsCh1) + require.Equal(t, ch, addFundsCh2) + require.NotEqual(t, cid.Undef, addFundsMcid1) + require.Equal(t, cid.Undef, addFundsMcid2) + + // Make sure that one create channel message was sent + require.Equal(t, 2, mock.pushedMessageCount()) + + // Check create message amount is correct + createMsg := mock.pushedMessages(createMsgCid) + require.Equal(t, from, createMsg.Message.From) + require.Equal(t, lotusinit.Address, createMsg.Message.To) + require.Equal(t, createAmt, createMsg.Message.Value) + + // Check merged add funds amount is the sum of the individual + // 
amounts + addFundsMsg := mock.pushedMessages(addFundsMcid1) + require.Equal(t, from, addFundsMsg.Message.From) + require.Equal(t, ch, addFundsMsg.Message.To) + require.Equal(t, addFundsAmt1, addFundsMsg.Message.Value) +} + +// TestPaychGetMergeAddFundsCtxCancelOne tests that when a queued add funds +// request is cancelled, its amount is removed from the total merged add funds +func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) { + tf.UnitTest(t) + // stm: @TOKEN_PAYCH_WAIT_READY_001 + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: types.NewInt(20), + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(from, to, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainReserve) + require.NoError(t, err) + + // Queue up two add funds requests behind create channel + var addFundsSent sync.WaitGroup + addFundsSent.Add(2) + addFundsAmt1 := big.NewInt(5) + addFundsAmt2 := big.NewInt(3) + var addFundsCh2 address.Address + var addFundsMcid2 cid.Cid + var addFundsErr1 error + addFundsCtx1, cancelAddFundsCtx1 := context.WithCancel(ctx) + go func() { + defer addFundsSent.Done() + + // Request add funds - should block until create channel has completed + _, _, addFundsErr1 = mgr.GetPaych(addFundsCtx1, from, to, addFundsAmt1, onChainReserve) + }() + + go func() { + defer addFundsSent.Done() + + // Request add funds again - should merge with waiting add funds request + var err error + addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, 
addFundsAmt2, onChainReserve) + require.NoError(t, err) + }() + // Wait for add funds requests to be queued up + waitForQueueSize(t, mgr, from, to, 2) + // Cancel the first add funds request + cancelAddFundsCtx1() + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + // Wait for create channel response + chres, err := mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + require.Equal(t, ch, chres) + // Wait for add funds requests to be sent + addFundsSent.Wait() + // Expect first add funds request to have been cancelled + require.NotNil(t, addFundsErr1) + require.Equal(t, ch, addFundsCh2) + // Send success add funds response + mock.receiveMsgResponse(addFundsMcid2, types.MessageReceipt{ + ExitCode: 0, + Return: []byte{}, + }) + // Wait for add funds response + addFundsCh, err := mgr.GetPaychWaitReady(ctx, addFundsMcid2) + require.NoError(t, err) + require.Equal(t, ch, addFundsCh) + // Make sure that one create channel message and one add funds message was + // sent + require.Equal(t, 2, mock.pushedMessageCount()) + // Check create message amount is correct + createMsg := mock.pushedMessages(createMsgCid) + require.Equal(t, from, createMsg.Message.From) + require.Equal(t, lotusinit.Address, createMsg.Message.To) + require.Equal(t, createAmt, createMsg.Message.Value) + // Check merged add funds amount only includes the second add funds amount + // (because first was cancelled) + addFundsMsg := mock.pushedMessages(addFundsMcid2) + require.Equal(t, from, addFundsMsg.Message.From) + require.Equal(t, ch, addFundsMsg.Message.To) + require.Equal(t, addFundsAmt2, addFundsMsg.Message.Value) +} + +// TestPaychGetMergeAddFundsCtxCancelAll tests that when all queued add funds +// requests are cancelled, no add funds message is sent +func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := 
NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainReserve) + require.NoError(t, err) + + // Queue up two add funds requests behind create channel + var addFundsSent sync.WaitGroup + addFundsSent.Add(2) + + var addFundsErr1 error + var addFundsErr2 error + addFundsCtx1, cancelAddFundsCtx1 := context.WithCancel(ctx) + addFundsCtx2, cancelAddFundsCtx2 := context.WithCancel(ctx) + go func() { + defer addFundsSent.Done() + + // Request add funds - should block until create channel has completed + _, _, addFundsErr1 = mgr.GetPaych(addFundsCtx1, from, to, big.NewInt(5), onChainReserve) + }() + + go func() { + defer addFundsSent.Done() + + // Request add funds again - should merge with waiting add funds request + _, _, addFundsErr2 = mgr.GetPaych(addFundsCtx2, from, to, big.NewInt(3), onChainReserve) + }() + // Wait for add funds requests to be queued up + waitForQueueSize(t, mgr, from, to, 2) + + // Cancel all add funds requests + cancelAddFundsCtx1() + cancelAddFundsCtx2() + + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + + // Wait for create channel response + chres, err := mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + require.Equal(t, ch, chres) + + // Wait for add funds requests to error out + addFundsSent.Wait() + + require.NotNil(t, addFundsErr1) + require.NotNil(t, addFundsErr2) + + // Make sure that just the create channel message was sent + require.Equal(t, 1, mock.pushedMessageCount()) + + // Check create message amount is correct + createMsg := mock.pushedMessages(createMsgCid) + 
require.Equal(t, from, createMsg.Message.From) + require.Equal(t, lotusinit.Address, createMsg.Message.To) + require.Equal(t, createAmt, createMsg.Message.Value) +} + +// TestPaychAvailableFunds tests that PaychAvailableFunds returns the correct +// channel state +func TestPaychAvailableFunds(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t) + ch := tutils.NewIDAddr(t, 100) + from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic)) + to := tutils.NewIDAddr(t, 102) + fromAcct := tutils.NewActorAddr(t, "fromAct") + toAcct := tutils.NewActorAddr(t, "toAct") + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // No channel created yet so available funds should be all zeroes + // stm: @PAYCHMGR_MANAGER_AVAILABLE_FUNDS_BY_FROM_TO_001 + av, err := mgr.AvailableFundsByFromTo(ctx, from, to) + require.NoError(t, err) + require.Nil(t, av.Channel) + require.Nil(t, av.PendingWaitSentinel) + require.EqualValues(t, 0, av.ConfirmedAmt.Int64()) + require.EqualValues(t, 0, av.PendingAmt.Int64()) + require.EqualValues(t, 0, av.QueuedAmt.Int64()) + require.EqualValues(t, 0, av.VoucherReedeemedAmt.Int64()) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainReserve) + require.NoError(t, err) + + // Available funds should reflect create channel message sent + av, err = mgr.AvailableFundsByFromTo(ctx, from, to) + require.NoError(t, err) + require.Nil(t, av.Channel) + require.EqualValues(t, 0, av.ConfirmedAmt.Int64()) + require.EqualValues(t, createAmt, av.PendingAmt) + require.EqualValues(t, 0, av.QueuedAmt.Int64()) + require.EqualValues(t, 0, av.VoucherReedeemedAmt.Int64()) + // Should now have a pending wait sentinel + require.NotNil(t, av.PendingWaitSentinel) + + // Queue up 
an add funds request behind create channel + var addFundsSent sync.WaitGroup + addFundsSent.Add(1) + + addFundsAmt := big.NewInt(5) + var addFundsMcid cid.Cid + go func() { + defer addFundsSent.Done() + + // Request add funds - should block until create channel has completed + var err error + _, addFundsMcid, err = mgr.GetPaych(ctx, from, to, addFundsAmt, onChainReserve) + require.NoError(t, err) + }() + + // Wait for add funds request to be queued up + waitForQueueSize(t, mgr, from, to, 1) + + // Available funds should now include queued funds + av, err = mgr.AvailableFundsByFromTo(ctx, from, to) + require.NoError(t, err) + require.Nil(t, av.Channel) + require.NotNil(t, av.PendingWaitSentinel) + require.EqualValues(t, 0, av.ConfirmedAmt.Int64()) + // create amount is still pending + require.EqualValues(t, createAmt, av.PendingAmt) + // queued amount now includes add funds amount + require.EqualValues(t, addFundsAmt, av.QueuedAmt) + require.EqualValues(t, 0, av.VoucherReedeemedAmt.Int64()) + + // Create channel in state + mock.setAccountAddress(fromAcct, from) + mock.setAccountAddress(toAcct, to) + act := &types.Actor{ + Code: builtin.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: createAmt, + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState))) + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + + // Wait for create channel response + chres, err := mgr.GetPaychWaitReady(ctx, *av.PendingWaitSentinel) + require.NoError(t, err) + require.Equal(t, ch, chres) + + // Wait for add funds request to be sent + addFundsSent.Wait() + + // Available funds should now include the channel and also a wait sentinel + // for the add funds message + // stm: @PAYCHMGR_MANAGER_AVAILABLE_FUNDS_001 + av, err = mgr.AvailableFunds(ctx, ch) + require.NoError(t, err) + require.NotNil(t, av.Channel) + require.NotNil(t, 
av.PendingWaitSentinel) + // create amount is now confirmed + require.EqualValues(t, createAmt, av.ConfirmedAmt) + // add funds amount it now pending + require.EqualValues(t, addFundsAmt, av.PendingAmt) + require.EqualValues(t, 0, av.QueuedAmt.Int64()) + require.EqualValues(t, 0, av.VoucherReedeemedAmt.Int64()) + + // Send success add funds response + mock.receiveMsgResponse(addFundsMcid, types.MessageReceipt{ + ExitCode: 0, + Return: []byte{}, + }) + + // Wait for add funds response + _, err = mgr.GetPaychWaitReady(ctx, *av.PendingWaitSentinel) + require.NoError(t, err) + + // Available funds should no longer have a wait sentinel + av, err = mgr.AvailableFunds(ctx, ch) + require.NoError(t, err) + require.NotNil(t, av.Channel) + require.Nil(t, av.PendingWaitSentinel) + // confirmed amount now includes create and add funds amounts + require.EqualValues(t, big.Add(createAmt, addFundsAmt), av.ConfirmedAmt) + require.EqualValues(t, 0, av.PendingAmt.Int64()) + require.EqualValues(t, 0, av.QueuedAmt.Int64()) + require.EqualValues(t, 0, av.VoucherReedeemedAmt.Int64()) + + // Add some vouchers + voucherAmt1 := big.NewInt(3) + voucher := createTestVoucher(t, ch, 1, 1, voucherAmt1, fromKeyPrivate) + _, err = mgr.AddVoucherOutbound(ctx, ch, voucher, nil, big.NewInt(0)) + require.NoError(t, err) + + voucherAmt2 := big.NewInt(2) + voucher = createTestVoucher(t, ch, 2, 1, voucherAmt2, fromKeyPrivate) + _, err = mgr.AddVoucherOutbound(ctx, ch, voucher, nil, big.NewInt(0)) + require.NoError(t, err) + + av, err = mgr.AvailableFunds(ctx, ch) + require.NoError(t, err) + require.NotNil(t, av.Channel) + require.Nil(t, av.PendingWaitSentinel) + require.EqualValues(t, big.Add(createAmt, addFundsAmt), av.ConfirmedAmt) + require.EqualValues(t, 0, av.PendingAmt.Int64()) + require.EqualValues(t, 0, av.QueuedAmt.Int64()) + // voucher redeemed amount now includes vouchers + require.EqualValues(t, big.Add(voucherAmt1, voucherAmt2), av.VoucherReedeemedAmt) +} + +// waitForQueueSize waits for the 
funds request queue to be of the given size +func waitForQueueSize(t *testing.T, mgr *Manager, from address.Address, to address.Address, size int) { + ca, err := mgr.accessorByFromTo(from, to) + require.NoError(t, err) + + for { + if ca.queueSize() == size { + return + } + + time.Sleep(time.Millisecond) + } +} diff --git a/pkg/paychmgr/paychvoucherfunds_test.go b/pkg/paychmgr/paychvoucherfunds_test.go new file mode 100644 index 0000000000..e8511a23a3 --- /dev/null +++ b/pkg/paychmgr/paychvoucherfunds_test.go @@ -0,0 +1,107 @@ +package paychmgr + +import ( + "context" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + tutils2 "github.com/filecoin-project/specs-actors/v6/support/testing" + + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + lpaych "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" + paychmock "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych/mock" +) + +// TestPaychAddVoucherAfterAddFunds tests adding a voucher to a channel with +// insufficient funds, then adding funds to the channel, then adding the +// voucher again +func TestPaychAddVoucherAfterAddFunds(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t) + ch := tutils2.NewIDAddr(t, 100) + from := tutils2.NewSECP256K1Addr(t, string(fromKeyPublic)) + to := tutils2.NewSECP256K1Addr(t, "secpTo") + fromAcct := tutils2.NewActorAddr(t, "fromAct") + toAcct := tutils2.NewActorAddr(t, "toAct") + + mock := 
newMockManagerAPI() + defer mock.close() + + // Add the from signing key to the wallet + mock.setAccountAddress(fromAcct, from) + mock.setAccountAddress(toAcct, to) + mock.addSigningKey(fromKeyPrivate) + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + // Send create message for a channel with value 10 + createAmt := big.NewInt(10) + _, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt, onChainReserve) + require.NoError(t, err) + + // Send create channel response + response := testChannelResponse(t, ch) + mock.receiveMsgResponse(createMsgCid, response) + + // Create an actor in state for the channel with the initial channel balance + act := &types.Actor{ + Code: builtin2.AccountActorCodeID, + Head: cid.Cid{}, + Nonce: 0, + Balance: createAmt, + } + mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]lpaych.LaneState))) + + // Wait for create response to be processed by manager + _, err = mgr.GetPaychWaitReady(ctx, createMsgCid) + require.NoError(t, err) + + // Create a voucher with a value equal to the channel balance + voucher := paych.SignedVoucher{Amount: createAmt, Lane: 1} + res, err := mgr.CreateVoucher(ctx, ch, voucher) + require.NoError(t, err) + require.NotNil(t, res.Voucher) + + // Create a voucher in a different lane with an amount that exceeds the + // channel balance + excessAmt := big.NewInt(5) + voucher = paych.SignedVoucher{Amount: excessAmt, Lane: 2} + res, err = mgr.CreateVoucher(ctx, ch, voucher) + require.NoError(t, err) + require.Nil(t, res.Voucher) + require.Equal(t, res.Shortfall, excessAmt) + + // Add funds so as to cover the voucher shortfall + _, addFundsMsgCid, err := mgr.GetPaych(ctx, from, to, excessAmt, onChainReserve) + require.NoError(t, err) + + // Trigger add funds confirmation + mock.receiveMsgResponse(addFundsMsgCid, types.MessageReceipt{ExitCode: 0}) + + // Update actor test case balance to reflect added funds + act.Balance = 
big.Add(createAmt, excessAmt) + + // Wait for add funds confirmation to be processed by manager + _, err = mgr.GetPaychWaitReady(ctx, addFundsMsgCid) + require.NoError(t, err) + + // Adding same voucher that previously exceeded channel balance + // should succeed now that the channel balance has been increased + res, err = mgr.CreateVoucher(ctx, ch, voucher) + require.NoError(t, err) + require.NotNil(t, res.Voucher) +} diff --git a/pkg/paychmgr/pcapi.go b/pkg/paychmgr/pcapi.go new file mode 100644 index 0000000000..215992679e --- /dev/null +++ b/pkg/paychmgr/pcapi.go @@ -0,0 +1,72 @@ +package paychmgr + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// paychDependencyAPI defines the API methods needed by the payment channel manager +type paychDependencyAPI interface { + StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*types.MsgLookup, error) + WalletHas(ctx context.Context, addr address.Address) (bool, error) + WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) + StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + MpoolPushMessage(ctx context.Context, msg *types.Message, maxFee *types.MessageSendSpec) (*types.SignedMessage, error) +} + +type IMessagePush interface { + MpoolPushMessage(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) (*types.SignedMessage, error) +} + +type IChainInfo interface { + StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateNetworkVersion(ctx context.Context, tsk 
types.TipSetKey) (network.Version, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) +} + +type IWalletAPI interface { + WalletHas(ctx context.Context, addr address.Address) (bool, error) + WalletSign(ctx context.Context, k address.Address, msg []byte, meta types.MsgMeta) (*crypto.Signature, error) +} +type pcAPI struct { + mpAPI IMessagePush + chainInfoAPI IChainInfo + walletAPI IWalletAPI +} + +func newPaychDependencyAPI(mpAPI IMessagePush, c IChainInfo, w IWalletAPI) paychDependencyAPI { + return &pcAPI{mpAPI: mpAPI, chainInfoAPI: c, walletAPI: w} +} + +func (o *pcAPI) StateAccountKey(ctx context.Context, address address.Address, tsk types.TipSetKey) (address.Address, error) { + return o.chainInfoAPI.StateAccountKey(ctx, address, tsk) +} + +func (o *pcAPI) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*types.MsgLookup, error) { + return o.chainInfoAPI.StateWaitMsg(ctx, msg, confidence, constants.LookbackNoLimit, true) +} + +func (o *pcAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, maxFee *types.MessageSendSpec) (*types.SignedMessage, error) { + return o.mpAPI.MpoolPushMessage(ctx, msg, maxFee) +} + +func (o *pcAPI) WalletHas(ctx context.Context, addr address.Address) (bool, error) { + return o.walletAPI.WalletHas(ctx, addr) +} + +func (o *pcAPI) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) { + return o.walletAPI.WalletSign(ctx, k, msg, types.MsgMeta{Type: types.MTSignedVoucher}) +} + +func (o *pcAPI) StateNetworkVersion(ctx context.Context, ts types.TipSetKey) (network.Version, error) { + return o.chainInfoAPI.StateNetworkVersion(ctx, ts) +} diff --git a/pkg/paychmgr/settle_test.go b/pkg/paychmgr/settle_test.go new file mode 100644 index 0000000000..1c208cdd90 --- /dev/null +++ b/pkg/paychmgr/settle_test.go @@ -0,0 +1,72 @@ +package paychmgr + +import ( + "context" + "testing" + + tf 
"github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/big" + tutils "github.com/filecoin-project/specs-actors/support/testing" + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" +) + +func TestPaychSettle(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + expch := tutils.NewIDAddr(t, 100) + expch2 := tutils.NewIDAddr(t, 101) + from := tutils.NewIDAddr(t, 101) + to := tutils.NewIDAddr(t, 102) + + mock := newMockManagerAPI() + defer mock.close() + + mgr, err := newManager(ctx, store, mock) + require.NoError(t, err) + + amt := big.NewInt(10) + _, mcid, err := mgr.GetPaych(ctx, from, to, amt, onChainReserve) + require.NoError(t, err) + + // Send channel create response + response := testChannelResponse(t, expch) + mock.receiveMsgResponse(mcid, response) + + // Get the channel address + ch, err := mgr.GetPaychWaitReady(ctx, mcid) + require.NoError(t, err) + require.Equal(t, expch, ch) + + // Settle the channel + _, err = mgr.Settle(ctx, ch) + require.NoError(t, err) + + // Send another request for funds to the same from/to + // (should create a new channel because the previous channel + // is settling) + amt2 := big.NewInt(5) + _, mcid2, err := mgr.GetPaych(ctx, from, to, amt2, onChainReserve) + require.NoError(t, err) + require.NotEqual(t, cid.Undef, mcid2) + + // Send new channel create response + response2 := testChannelResponse(t, expch2) + mock.receiveMsgResponse(mcid2, response2) + + // Make sure the new channel is different from the old channel + ch2, err := mgr.GetPaychWaitReady(ctx, mcid2) + require.NoError(t, err) + require.NotEqual(t, ch, ch2) + + // There should now be two channels + cis, err := mgr.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, cis, 2) +} diff --git a/pkg/paychmgr/settler/settler.go 
b/pkg/paychmgr/settler/settler.go new file mode 100644 index 0000000000..7906f9de73 --- /dev/null +++ b/pkg/paychmgr/settler/settler.go @@ -0,0 +1,106 @@ +package settler + +import ( + "context" + "sync" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/events" + "github.com/filecoin-project/venus/pkg/paychmgr" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("payment-channel-settler") + +type API struct { + events.IEvent + Settler +} +type PaymentChannelSettler interface { + check(ts *types.TipSet) (done bool, more bool, err error) + messageHandler(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) + revertHandler(ctx context.Context, ts *types.TipSet) error + matcher(msg *types.Message) (matched bool, err error) +} +type paymentChannelSettler struct { + ctx context.Context + api Settler +} + +func NewPaymentChannelSettler(ctx context.Context, api Settler) PaymentChannelSettler { + return &paymentChannelSettler{ + ctx: ctx, + api: api, + } +} + +func (pcs *paymentChannelSettler) check(ts *types.TipSet) (done bool, more bool, err error) { + return false, true, nil +} + +func (pcs *paymentChannelSettler) messageHandler(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { + // Ignore unsuccessful settle messages + if rec.ExitCode != 0 { + return true, nil + } + + bestByLane, err := paychmgr.BestSpendableByLane(pcs.ctx, pcs.api, msg.To) + if err != nil { + return true, err + } + var wg sync.WaitGroup + wg.Add(len(bestByLane)) + for _, voucher := range bestByLane { + submitMessageCID, err := pcs.api.PaychVoucherSubmit(pcs.ctx, msg.To, 
voucher, nil, nil) + if err != nil { + return true, err + } + go func(voucher *paych.SignedVoucher, submitMessageCID cid.Cid) { + defer wg.Done() + msgLookup, err := pcs.api.StateWaitMsg(pcs.ctx, submitMessageCID, constants.MessageConfidence, constants.LookbackNoLimit, true) + if err != nil { + log.Errorf("submitting voucher: %s", err.Error()) + } + if msgLookup.Receipt.ExitCode != 0 { + log.Errorf("failed submitting voucher: %+v", voucher) + } + }(voucher, submitMessageCID) + } + wg.Wait() + return true, nil +} + +func (pcs *paymentChannelSettler) revertHandler(ctx context.Context, ts *types.TipSet) error { + return nil +} + +func (pcs *paymentChannelSettler) matcher(msg *types.Message) (matched bool, err error) { + // Check if this is a settle payment channel message + if msg.Method != builtin.MethodsPaych.Settle { + return false, nil + } + // Check if this payment channel is of concern to this node (i.e. tracked in payment channel store), + // and its inbound (i.e. we're getting vouchers that we may need to redeem) + trackedAddresses, err := pcs.api.PaychList(pcs.ctx) + if err != nil { + return false, err + } + for _, addr := range trackedAddresses { + if msg.To == addr { + status, err := pcs.api.PaychStatus(pcs.ctx, addr) + if err != nil { + return false, err + } + if status.Direction == types.PCHInbound { + return true, nil + } + } + } + return false, nil +} diff --git a/pkg/paychmgr/settler/stl.go b/pkg/paychmgr/settler/stl.go new file mode 100644 index 0000000000..c1f04b2144 --- /dev/null +++ b/pkg/paychmgr/settler/stl.go @@ -0,0 +1,70 @@ +package settler + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + "github.com/filecoin-project/venus/pkg/paychmgr" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +type Settler interface { + PaychList(context.Context) ([]address.Address, error) + 
PaychStatus(ctx context.Context, pch address.Address) (*types.Status, error) + PaychVoucherCheckSpendable(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) + PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) + PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) +} + +type settler struct { + mgr *paychmgr.Manager + ciAPI paychmgr.IChainInfo +} + +func NewSetter(mgr *paychmgr.Manager, chainInfoAPI paychmgr.IChainInfo) Settler { + return &settler{mgr, chainInfoAPI} +} + +func (o *settler) PaychList(ctx context.Context) ([]address.Address, error) { + return o.mgr.ListChannels(ctx) +} + +func (o *settler) PaychStatus(ctx context.Context, pch address.Address) (*types.Status, error) { + ci, err := o.mgr.GetChannelInfo(ctx, pch) + if err != nil { + return nil, err + } + return &types.Status{ + ControlAddr: ci.Control, + Direction: types.PCHDir(ci.Direction), + }, nil +} + +func (o *settler) PaychVoucherCheckSpendable(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) { + return o.mgr.CheckVoucherSpendable(ctx, ch, sv, secret, proof) +} + +func (o *settler) PaychVoucherList(ctx context.Context, pch address.Address) ([]*paych.SignedVoucher, error) { + vi, err := o.mgr.ListVouchers(ctx, pch) + if err != nil { + return nil, err + } + + out := make([]*paych.SignedVoucher, len(vi)) + for k, v := range vi { + out[k] = v.Voucher + } + return out, nil +} + +func (o *settler) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) { + return o.mgr.SubmitVoucher(ctx, ch, sv, secret, proof) +} + +func (o *settler) StateWaitMsg(ctx context.Context, cid 
cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) { + return o.ciAPI.StateWaitMsg(ctx, cid, confidence, lookbackLimit, allowReplaced) +} diff --git a/pkg/paychmgr/simple.go b/pkg/paychmgr/simple.go new file mode 100644 index 0000000000..221d13e792 --- /dev/null +++ b/pkg/paychmgr/simple.go @@ -0,0 +1,832 @@ +package paychmgr + +import ( + "bytes" + "context" + "errors" + "fmt" + "sort" + "sync" + + "github.com/ipfs/go-cid" + "golang.org/x/sync/errgroup" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" + pchTypes "github.com/filecoin-project/venus/venus-shared/types/market" +) + +// paychFundsRes is the response to a create channel or add funds request +type paychFundsRes struct { + channel address.Address + mcid cid.Cid + err error +} + +// fundsReq is a request to create a channel or add funds to a channel +type fundsReq struct { + ctx context.Context + promise chan *paychFundsRes + amt big.Int + opts GetOpts + + lk sync.Mutex + // merge parent, if this req is part of a merge + merge *mergedFundsReq +} + +func newFundsReq(ctx context.Context, amt big.Int, opts GetOpts) *fundsReq { + promise := make(chan *paychFundsRes, 1) + return &fundsReq{ + ctx: ctx, + promise: promise, + amt: amt, + opts: opts, + } +} + +// onComplete is called when the funds request has been executed +func (r *fundsReq) onComplete(res *paychFundsRes) { + select { + case <-r.ctx.Done(): + case r.promise <- res: + } +} + +// cancel is called when the req's context is cancelled +func (r *fundsReq) cancel() { + r.lk.Lock() + defer r.lk.Unlock() + + // If there's a merge parent, tell the merge parent to check if it has any + // active reqs left + if r.merge != nil { + r.merge.checkActive() + } +} + +// isActive 
indicates whether the req's context has been cancelled +func (r *fundsReq) isActive() bool { + return r.ctx.Err() == nil +} + +// setMergeParent sets the merge that this req is part of +func (r *fundsReq) setMergeParent(m *mergedFundsReq) { + r.lk.Lock() + defer r.lk.Unlock() + + r.merge = m +} + +// mergedFundsReq merges together multiple add funds requests that are queued +// up, so that only one message is sent for all the requests (instead of one +// message for each request) +type mergedFundsReq struct { + ctx context.Context + cancel context.CancelFunc + reqs []*fundsReq +} + +func newMergedFundsReq(reqs []*fundsReq) *mergedFundsReq { + ctx, cancel := context.WithCancel(context.Background()) + + rqs := make([]*fundsReq, len(reqs)) + copy(rqs, reqs) + m := &mergedFundsReq{ + ctx: ctx, + cancel: cancel, + reqs: rqs, + } + + for _, r := range m.reqs { + r.setMergeParent(m) + } + + sort.Slice(m.reqs, func(i, j int) bool { + if m.reqs[i].opts.OffChain != m.reqs[j].opts.OffChain { // off-chain first + return m.reqs[i].opts.OffChain + } + + if m.reqs[i].opts.Reserve != m.reqs[j].opts.Reserve { // non-reserve after off-chain + return m.reqs[i].opts.Reserve + } + + // sort by amount asc (reducing latency for smaller requests) + return m.reqs[i].amt.LessThan(m.reqs[j].amt) + }) + + // If the requests were all cancelled while being added, cancel the context + // immediately + m.checkActive() + + return m +} + +// Called when a fundsReq is cancelled +func (m *mergedFundsReq) checkActive() { + // Check if there are any active fundsReqs + for _, r := range m.reqs { + if r.isActive() { + return + } + } + + // If all fundsReqs have been cancelled, cancel the context + m.cancel() +} + +// onComplete is called when the queue has executed the mergeFundsReq. +// Calls onComplete on each fundsReq in the mergeFundsReq. 
+func (m *mergedFundsReq) onComplete(res *paychFundsRes) { + for _, r := range m.reqs { + if r.isActive() { + r.onComplete(res) + } + } +} + +// sum is the sum of the amounts in all requests in the merge +func (m *mergedFundsReq) sum() (big.Int, big.Int) { + sum := big.NewInt(0) + avail := types.NewInt(0) + for _, r := range m.reqs { + if r.isActive() { + sum = big.Add(sum, r.amt) + if !r.opts.Reserve { + avail = types.BigAdd(avail, r.amt) + } + } + } + return sum, avail +} + +// completeAmount completes first non-reserving requests up to the available amount +func (m *mergedFundsReq) completeAmount(avail types.BigInt, channelInfo *pchTypes.ChannelInfo) (*paychFundsRes, types.BigInt, types.BigInt) { + used, failed := types.NewInt(0), types.NewInt(0) + next := 0 + + // order: [offchain+reserve, !offchain+reserve, !offchain+!reserve] + for i, r := range m.reqs { + if !r.opts.Reserve { + // non-reserving request are put after reserving requests, so we are done here + break + } + + // don't try to fill inactive requests + if !r.isActive() { + continue + } + + if r.amt.GreaterThan(types.BigSub(avail, used)) { + // requests are sorted by amount ascending, so if we hit this, there aren't any more requests we can fill + + if r.opts.OffChain { + // can't fill, so OffChain want an error + if r.isActive() { + failed = types.BigAdd(failed, r.amt) + r.onComplete(&paychFundsRes{ + channel: *channelInfo.Channel, + err: fmt.Errorf("not enough funds available in the payment channel %s; add funds with 'lotus paych add-funds %s %s %s'", channelInfo.Channel, channelInfo.From(), channelInfo.To(), types.FIL(r.amt).Unitless()), + }) + } + next = i + 1 + continue + } + + break + } + + used = types.BigAdd(used, r.amt) + r.onComplete(&paychFundsRes{channel: *channelInfo.Channel}) + next = i + 1 + } + + m.reqs = m.reqs[next:] + if len(m.reqs) == 0 { + return &paychFundsRes{channel: *channelInfo.Channel}, used, failed + } + return nil, used, failed +} + +func (m *mergedFundsReq) 
failOffChainNoChannel(from, to address.Address) (*paychFundsRes, types.BigInt) { + next := 0 + freed := types.NewInt(0) + + for i, r := range m.reqs { + if !r.opts.OffChain { + break + } + + freed = types.BigAdd(freed, r.amt) + if !r.isActive() { + continue + } + r.onComplete(&paychFundsRes{err: fmt.Errorf("payment channel doesn't exist, create with 'lotus paych add-funds %s %s %s'", from, to, types.FIL(r.amt).Unitless())}) + next = i + 1 + } + + m.reqs = m.reqs[next:] + if len(m.reqs) == 0 { + return &paychFundsRes{err: fmt.Errorf("payment channel doesn't exist, create with 'lotus paych add-funds %s %s 0'", from, to)}, freed + } + + return nil, freed +} + +// getPaych ensures that a channel exists between the from and to addresses, +// and reserves (or adds as available) the given amount of funds. +// If the channel does not exist a create channel message is sent and the +// message CID is returned. +// If the channel does exist an add funds message is sent and both the channel +// address and message CID are returned. +// If there is an in progress operation (create channel / add funds), getPaych +// blocks until the previous operation completes, then returns both the channel +// address and the CID of the new add funds message. +// If an operation returns an error, subsequent waiting operations will still +// be attempted. 
+func (ca *channelAccessor) getPaych(ctx context.Context, amt big.Int, opts GetOpts) (address.Address, cid.Cid, error) { + // Add the request to add funds to a queue and wait for the result + freq := newFundsReq(ctx, amt, opts) + ca.enqueue(ctx, freq) + select { + case res := <-freq.promise: + return res.channel, res.mcid, res.err + case <-ctx.Done(): + freq.cancel() + return address.Undef, cid.Undef, ctx.Err() + } +} + +// Queue up an add funds operation +func (ca *channelAccessor) enqueue(ctx context.Context, task *fundsReq) { + ca.lk.Lock() + defer ca.lk.Unlock() + + ca.fundsReqQueue = append(ca.fundsReqQueue, task) + go ca.processQueue(ctx, "") // nolint: errcheck +} + +// Run the operations in the queue +func (ca *channelAccessor) processQueue(ctx context.Context, channelID string) (*types.ChannelAvailableFunds, error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + // Remove cancelled requests + ca.filterQueue() + + // If there's nothing in the queue, bail out + if len(ca.fundsReqQueue) == 0 { + return ca.currentAvailableFunds(ctx, channelID, big.NewInt(0)) + } + + // Merge all pending requests into one. + // For example if there are pending requests for 3, 2, 4 then + // amt = 3 + 2 + 4 = 9 + merged := newMergedFundsReq(ca.fundsReqQueue) + amt, avail := merged.sum() + if amt.IsZero() { + // Note: The amount can be zero if requests are cancelled as we're + // building the mergedFundsReq + return ca.currentAvailableFunds(ctx, channelID, amt) + } + + res := ca.processTask(merged, amt, avail) + + // If the task is waiting on an external event (eg something to appear on + // chain) it will return nil + if res == nil { + // Stop processing the fundsReqQueue and wait. 
When the event occurs it will + // call processQueue() again + return ca.currentAvailableFunds(ctx, channelID, amt) + } + + // Finished processing so clear the queue + ca.fundsReqQueue = nil + + // Call the task callback with its results + merged.onComplete(res) + + return ca.currentAvailableFunds(ctx, channelID, big.NewInt(0)) +} + +// filterQueue filters cancelled requests out of the queue +func (ca *channelAccessor) filterQueue() { + if len(ca.fundsReqQueue) == 0 { + return + } + + // Remove cancelled requests + i := 0 + for _, r := range ca.fundsReqQueue { + if r.isActive() { + ca.fundsReqQueue[i] = r + i++ + } + } + + // Allow GC of remaining slice elements + for rem := i; rem < len(ca.fundsReqQueue); rem++ { + ca.fundsReqQueue[i] = nil + } + + // Resize slice + ca.fundsReqQueue = ca.fundsReqQueue[:i] +} + +// queueSize is the size of the funds request queue (used by tests) +func (ca *channelAccessor) queueSize() int { + ca.lk.Lock() + defer ca.lk.Unlock() + + return len(ca.fundsReqQueue) +} + +// msgWaitComplete is called when the message for a previous task is confirmed +// or there is an error. 
+func (ca *channelAccessor) msgWaitComplete(ctx context.Context, mcid cid.Cid, err error) { + ca.lk.Lock() + defer ca.lk.Unlock() + + // Save the message result to the store + dserr := ca.store.SaveMessageResult(ctx, mcid, err) + if dserr != nil { + log.Errorf("saving message result: %s", dserr) + } + + // Inform listeners that the message has completed + ca.msgListeners.fireMsgComplete(mcid, err) + + // The queue may have been waiting for msg completion to proceed, so + // process the next queue item + if len(ca.fundsReqQueue) > 0 { + go ca.processQueue(ctx, "") // nolint: errcheck + } +} + +func (ca *channelAccessor) currentAvailableFunds(ctx context.Context, channelID string, queuedAmt big.Int) (*types.ChannelAvailableFunds, error) { + if len(channelID) == 0 { + return nil, nil + } + + channelInfo, err := ca.store.ByChannelID(ctx, channelID) + if err != nil { + return nil, err + } + + // The channel may have a pending create or add funds message + waitSentinel := channelInfo.CreateMsg + if waitSentinel == nil { + waitSentinel = channelInfo.AddFundsMsg + } + + // Get the total amount redeemed by vouchers. + // This includes vouchers that have been submitted, and vouchers that are + // in the datastore but haven't yet been submitted. 
+ totalRedeemed := big.NewInt(0) + if channelInfo.Channel != nil { + ch := *channelInfo.Channel + _, pchState, err := ca.sa.loadPaychActorState(ca.chctx, ch) + if err != nil { + return nil, err + } + + laneStates, err := ca.laneState(ctx, pchState, ch) + if err != nil { + return nil, err + } + + for _, ls := range laneStates { + r, err := ls.Redeemed() + if err != nil { + return nil, err + } + totalRedeemed = big.Add(totalRedeemed, r) + } + } + + return &types.ChannelAvailableFunds{ + Channel: channelInfo.Channel, + From: channelInfo.From(), + To: channelInfo.To(), + ConfirmedAmt: channelInfo.Amount, + PendingAmt: channelInfo.PendingAmount, + NonReservedAmt: channelInfo.AvailableAmount, + PendingAvailableAmt: channelInfo.PendingAvailableAmount, + PendingWaitSentinel: waitSentinel, + QueuedAmt: queuedAmt, + VoucherReedeemedAmt: totalRedeemed, + }, nil +} + +// processTask checks the state of the channel and takes appropriate action +// (see description of getPaych). +// Note that processTask may be called repeatedly in the same state, and should +// return nil if there is no state change to be made (eg when waiting for a +// message to be confirmed on chain) +func (ca *channelAccessor) processTask(merged *mergedFundsReq, amt, avail types.BigInt) *paychFundsRes { + ctx := merged.ctx + + // Get the payment channel for the from/to addresses. + // Note: It's ok if we get ErrChannelNotTracked. It just means we need to + // create a channel. + channelInfo, err := ca.store.OutboundActiveByFromTo(ctx, ca.api, ca.from, ca.to) + if err != nil && err != ErrChannelNotTracked { + return &paychFundsRes{err: err} + } + + // If a channel has not yet been created, create one. 
+ if channelInfo == nil { + res, freed := merged.failOffChainNoChannel(ca.from, ca.to) + if res != nil { + return res + } + amt = types.BigSub(amt, freed) + + mcid, err := ca.createPaych(ctx, amt, avail) + if err != nil { + return &paychFundsRes{err: err} + } + + return &paychFundsRes{mcid: mcid} + } + + // If the create channel message has been sent but the channel hasn't + // been created on chain yet + if channelInfo.CreateMsg != nil { + // Wait for the channel to be created before trying again + return nil + } + + // If an add funds message was sent to the chain but hasn't been confirmed + // on chain yet + if channelInfo.AddFundsMsg != nil { + // Wait for the add funds message to be confirmed before trying again + return nil + } + + // Try to fill requests using available funds, without going to the chain + res, amt := ca.completeAvailable(ctx, merged, channelInfo, amt, avail) + + if res != nil || amt.LessThanEqual(types.NewInt(0)) { + return res + } + + // We need to add more funds, so send an add funds message to + // cover the amount for this request + mcid, err := ca.addFunds(ctx, channelInfo, amt, avail) + if err != nil { + return &paychFundsRes{err: err} + } + return &paychFundsRes{channel: *channelInfo.Channel, mcid: *mcid} +} + +// createPaych sends a message to create the channel and returns the message cid +func (ca *channelAccessor) createPaych(ctx context.Context, amt, avail big.Int) (cid.Cid, error) { + mb, err := ca.messageBuilder(ctx, ca.from) + if err != nil { + return cid.Undef, err + } + msg, err := mb.Create(ca.to, amt) + if err != nil { + return cid.Undef, err + } + + smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return cid.Undef, fmt.Errorf("initializing paych actor: %w", err) + } + mcid := smsg.Cid() + // Create a new channel in the store + ci, err := ca.store.CreateChannel(ctx, ca.from, ca.to, mcid, amt, avail) + if err != nil { + log.Errorf("creating channel: %s", err) + return cid.Undef, err + } + + // Wait 
for the channel to be created on chain + go ca.waitForPaychCreateMsg(ctx, ci.ChannelID, mcid) + + return mcid, nil +} + +// waitForPaychCreateMsg waits for mcid to appear on chain and stores the robust address of the +// created payment channel +func (ca *channelAccessor) waitForPaychCreateMsg(ctx context.Context, channelID string, mcid cid.Cid) { + err := ca.waitPaychCreateMsg(ctx, channelID, mcid) + ca.msgWaitComplete(ctx, mcid, err) +} + +func (ca *channelAccessor) waitPaychCreateMsg(ctx context.Context, channelID string, mcid cid.Cid) error { + mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, constants.MessageConfidence) + if err != nil { + log.Errorf("wait msg: %w", err) + return err + } + // If channel creation failed + if mwait.Receipt.ExitCode != 0 { + ca.lk.Lock() + defer ca.lk.Unlock() + + // Channel creation failed, so remove the channel from the datastore + dserr := ca.store.RemoveChannel(ctx, channelID) + if dserr != nil { + log.Errorf("failed to remove channel %s: %s", channelID, dserr) + } + + err := fmt.Errorf("payment channel creation failed (exit code %d)", mwait.Receipt.ExitCode) + log.Error(err) + return err + } + + // TODO: ActorUpgrade abstract over this. + // This "works" because it hasn't changed from v0 to v2, but we still + // need an abstraction here. 
+ var decodedReturn init2.ExecReturn + err = decodedReturn.UnmarshalCBOR(bytes.NewReader(mwait.Receipt.Return)) + if err != nil { + log.Error(err) + return err + } + + ca.lk.Lock() + defer ca.lk.Unlock() + + // Store robust address of channel + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *pchTypes.ChannelInfo) { + channelInfo.Channel = &decodedReturn.RobustAddress + channelInfo.Amount = channelInfo.PendingAmount + channelInfo.AvailableAmount = channelInfo.PendingAvailableAmount + channelInfo.PendingAmount = big.NewInt(0) + channelInfo.PendingAvailableAmount = big.NewInt(0) + channelInfo.CreateMsg = nil + }) + + return nil +} + +// completeAvailable fills reserving fund requests using already available funds, without interacting with the chain +func (ca *channelAccessor) completeAvailable(ctx context.Context, merged *mergedFundsReq, channelInfo *pchTypes.ChannelInfo, amt, av types.BigInt) (*paychFundsRes, types.BigInt) { + toReserve := types.BigSub(amt, av) + avail := types.NewInt(0) + + // reserve at most what we need + ca.mutateChannelInfo(ctx, channelInfo.ChannelID, func(ci *pchTypes.ChannelInfo) { + avail = ci.AvailableAmount + if avail.GreaterThan(toReserve) { + avail = toReserve + } + ci.AvailableAmount = big.Sub(ci.AvailableAmount, avail) + }) + + res, used, failed := merged.completeAmount(avail, channelInfo) + + // return any unused reserved funds (e.g. 
from cancelled requests) + ca.mutateChannelInfo(ctx, channelInfo.ChannelID, func(ci *pchTypes.ChannelInfo) { + ci.AvailableAmount = types.BigAdd(ci.AvailableAmount, types.BigSub(avail, used)) + }) + + return res, types.BigSub(amt, types.BigAdd(used, failed)) +} + +// addFunds sends a message to add funds to the channel and returns the message cid +func (ca *channelAccessor) addFunds(ctx context.Context, channelInfo *pchTypes.ChannelInfo, amt, avail big.Int) (*cid.Cid, error) { + msg := &types.Message{ + To: *channelInfo.Channel, + From: channelInfo.Control, + Value: amt, + Method: 0, + } + + smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return nil, err + } + mcid := smsg.Cid() + // Store the add funds message CID on the channel + ca.mutateChannelInfo(ctx, channelInfo.ChannelID, func(ci *pchTypes.ChannelInfo) { + ci.PendingAmount = amt + ci.PendingAvailableAmount = avail + ci.AddFundsMsg = &mcid + }) + + // Store a reference from the message CID to the channel, so that we can + // look up the channel from the message CID + err = ca.store.SaveNewMessage(ctx, channelInfo.ChannelID, mcid) + if err != nil { + log.Errorf("saving add funds message CID %s: %s", mcid, err) + } + + go ca.waitForAddFundsMsg(ctx, channelInfo.ChannelID, mcid) + + return &mcid, nil +} + +// TODO func (ca *channelAccessor) freeFunds(ctx context.Context, channelInfo *ChannelInfo, amt, avail types.BigInt) (*cid.Cid, error) { + +// waitForAddFundsMsg waits for mcid to appear on chain and returns error, if any +func (ca *channelAccessor) waitForAddFundsMsg(ctx context.Context, channelID string, mcid cid.Cid) { + err := ca.waitAddFundsMsg(ctx, channelID, mcid) + ca.msgWaitComplete(ctx, mcid, err) +} + +func (ca *channelAccessor) waitAddFundsMsg(ctx context.Context, channelID string, mcid cid.Cid) error { + mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, constants.MessageConfidence) + if err != nil { + log.Error(err) + return err + } + + if mwait.Receipt.ExitCode != 0 { + 
err := fmt.Errorf("voucher channel creation failed: adding funds (exit code %d)", mwait.Receipt.ExitCode) + log.Error(err) + + ca.lk.Lock() + defer ca.lk.Unlock() + + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *pchTypes.ChannelInfo) { + channelInfo.PendingAmount = big.NewInt(0) + channelInfo.PendingAvailableAmount = big.NewInt(0) + channelInfo.AddFundsMsg = nil + }) + + return err + } + + ca.lk.Lock() + defer ca.lk.Unlock() + + // Store updated amount + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *pchTypes.ChannelInfo) { + channelInfo.Amount = big.Add(channelInfo.Amount, channelInfo.PendingAmount) + channelInfo.AvailableAmount = types.BigAdd(channelInfo.AvailableAmount, channelInfo.PendingAvailableAmount) + channelInfo.PendingAmount = big.NewInt(0) + channelInfo.PendingAvailableAmount = big.NewInt(0) + channelInfo.AddFundsMsg = nil + }) + + return nil +} + +// Change the state of the channel in the store +func (ca *channelAccessor) mutateChannelInfo(ctx context.Context, channelID string, mutate func(*pchTypes.ChannelInfo)) { + channelInfo, err := ca.store.ByChannelID(ctx, channelID) + // If there's an error reading or writing to the store just log an error. + // For now we're assuming it's unlikely to happen in practice. + // Later we may want to implement a transactional approach, whereby + // we record to the store that we're going to send a message, send + // the message, and then record that the message was sent. + if err != nil { + log.Errorf("Error reading channel info from store: %s", err) + return + } + + mutate(channelInfo) + + err = ca.store.putChannelInfo(ctx, channelInfo) + if err != nil { + log.Errorf("Error writing channel info to store: %s", err) + } +} + +// restartPending checks the datastore to see if there are any channels that +// have outstanding create / add funds messages, and if so, waits on the +// messages. 
+// Outstanding messages can occur if a create / add funds message was sent and +// then the system was shut down or crashed before the result was received. +func (pm *Manager) restartPending(ctx context.Context) error { + cis, err := pm.store.WithPendingAddFunds(ctx) + if err != nil { + return err + } + + group := errgroup.Group{} + for _, chanInfo := range cis { + ci := chanInfo + if ci.CreateMsg != nil { + group.Go(func() error { + ca, err := pm.accessorByFromTo(ci.Control, ci.Target) + if err != nil { + return fmt.Errorf("error initializing payment channel manager %s -> %s: %s", ci.Control, ci.Target, err) + } + go ca.waitForPaychCreateMsg(ctx, ci.ChannelID, *ci.CreateMsg) + return nil + }) + } else if ci.AddFundsMsg != nil { + group.Go(func() error { + ca, err := pm.accessorByAddress(ctx, *ci.Channel) + if err != nil { + return fmt.Errorf("error initializing payment channel manager %s: %s", ci.Channel, err) + } + go ca.waitForAddFundsMsg(ctx, ci.ChannelID, *ci.AddFundsMsg) + return nil + }) + } + } + + return group.Wait() +} + +// getPaychWaitReady waits for a the response to the message with the given cid +func (ca *channelAccessor) getPaychWaitReady(ctx context.Context, mcid cid.Cid) (address.Address, error) { + ca.lk.Lock() + + // First check if the message has completed + msgInfo, err := ca.store.GetMessage(ctx, mcid) + if err != nil { + ca.lk.Unlock() + + return address.Undef, err + } + + // If the create channel / add funds message failed, return an error + if len(msgInfo.Err) > 0 { + ca.lk.Unlock() + + return address.Undef, errors.New(msgInfo.Err) + } + + // If the message has completed successfully + if msgInfo.Received { + ca.lk.Unlock() + + // Get the channel address + ci, err := ca.store.ByMessageCid(ctx, mcid) + if err != nil { + return address.Undef, err + } + + if ci.Channel == nil { + panic(fmt.Sprintf("create / add funds message %s succeeded but channelInfo.Channel is nil", mcid)) + } + return *ci.Channel, nil + } + + // The message hasn't 
completed yet so wait for it to complete + promise := ca.msgPromise(ctx, mcid) + + // Unlock while waiting + ca.lk.Unlock() + + select { + case res := <-promise: + return res.channel, res.err + case <-ctx.Done(): + return address.Undef, ctx.Err() + } +} + +type onMsgRes struct { + channel address.Address + err error +} + +// msgPromise returns a channel that receives the result of the message with +// the given CID +func (ca *channelAccessor) msgPromise(ctx context.Context, mcid cid.Cid) chan onMsgRes { + promise := make(chan onMsgRes) + triggerUnsub := make(chan struct{}) + unsub := ca.msgListeners.onMsgComplete(mcid, func(err error) { + close(triggerUnsub) + + // Use a go-routine so as not to block the event handler loop + go func() { + res := onMsgRes{err: err} + if res.err == nil { + // Get the channel associated with the message cid + ci, err := ca.store.ByMessageCid(ctx, mcid) + if err != nil { + res.err = err + } else { + res.channel = *ci.Channel + } + } + + // Pass the result to the caller + select { + case promise <- res: + case <-ctx.Done(): + } + }() + }) + + // Unsubscribe when the message is received or the context is done + go func() { + select { + case <-ctx.Done(): + case <-triggerUnsub: + } + + unsub() + }() + + return promise +} + +func (ca *channelAccessor) availableFunds(ctx context.Context, channelID string) (*types.ChannelAvailableFunds, error) { + return ca.processQueue(ctx, channelID) +} diff --git a/pkg/paychmgr/state.go b/pkg/paychmgr/state.go new file mode 100644 index 0000000000..ba78b880fc --- /dev/null +++ b/pkg/paychmgr/state.go @@ -0,0 +1,88 @@ +package paychmgr + +import ( + "context" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/venus/pkg/statemanger" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" + "github.com/filecoin-project/venus/venus-shared/types" + pchTypes "github.com/filecoin-project/venus/venus-shared/types/market" +) + +type stateAccessor struct { + sm 
statemanger.IStateManager +} + +func (ca *stateAccessor) loadPaychActorState(ctx context.Context, ch address.Address) (*types.Actor, paych.State, error) { + return ca.sm.GetPaychState(ctx, ch, nil) +} + +func (ca *stateAccessor) loadStateChannelInfo(ctx context.Context, ch address.Address, dir uint64) (*pchTypes.ChannelInfo, error) { + _, st, err := ca.loadPaychActorState(ctx, ch) + if err != nil { + return nil, err + } + + // Load channel "From" account actor state + f, err := st.From() + if err != nil { + return nil, err + } + from, err := ca.sm.ResolveToKeyAddress(ctx, f, nil) + if err != nil { + return nil, err + } + t, err := st.To() + if err != nil { + return nil, err + } + to, err := ca.sm.ResolveToKeyAddress(ctx, t, nil) + if err != nil { + return nil, err + } + + nextLane, err := ca.nextLaneFromState(ctx, st) + if err != nil { + return nil, err + } + + ci := &pchTypes.ChannelInfo{ + Channel: &ch, + Direction: dir, + NextLane: nextLane, + } + + if dir == pchTypes.DirOutbound { + ci.Control = from + ci.Target = to + } else { + ci.Control = to + ci.Target = from + } + + return ci, nil +} + +func (ca *stateAccessor) nextLaneFromState(ctx context.Context, st paych.State) (uint64, error) { + laneCount, err := st.LaneCount() + if err != nil { + return 0, err + } + if laneCount == 0 { + return 0, nil + } + + maxID := uint64(0) + if err := st.ForEachLaneState(func(idx uint64, _ paych.LaneState) error { + if idx > maxID { + maxID = idx + } + return nil + }); err != nil { + return 0, err + } + + return maxID + 1, nil +} diff --git a/pkg/paychmgr/store.go b/pkg/paychmgr/store.go new file mode 100644 index 0000000000..6f48ab33df --- /dev/null +++ b/pkg/paychmgr/store.go @@ -0,0 +1,393 @@ +package paychmgr + +import ( + "bytes" + "context" + "errors" + "fmt" + + "github.com/google/uuid" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + dsq "github.com/ipfs/go-datastore/query" + + 
"github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + fbig "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/venus/pkg/repo" + pchTypes "github.com/filecoin-project/venus/venus-shared/types/market" +) + +var ErrChannelNotTracked = errors.New("channel not tracked") + +type Store struct { + ds datastore.Datastore +} + +// for test +func NewStore(ds repo.Datastore) *Store { + dsTmp := namespace.Wrap(ds, datastore.NewKey("/paych/")) + return &Store{ + ds: dsTmp, + } +} + +const ( + dsKeyChannelInfo = "ChannelInfo" + dsKeyMsgCid = "MsgCid" +) + +// TrackChannel stores a channel, returning an error if the channel was already +// being tracked +func (ps *Store) TrackChannel(ctx context.Context, ci *pchTypes.ChannelInfo) (*pchTypes.ChannelInfo, error) { + _, err := ps.ByAddress(ctx, *ci.Channel) + switch err { + default: + return nil, err + case nil: + return nil, fmt.Errorf("already tracking channel: %s", ci.Channel) + case ErrChannelNotTracked: + err = ps.putChannelInfo(ctx, ci) + if err != nil { + return nil, err + } + + return ps.ByAddress(ctx, *ci.Channel) + } +} + +// ListChannels returns the addresses of all channels that have been created +func (ps *Store) ListChannels(ctx context.Context) ([]address.Address, error) { + cis, err := ps.findChans(ctx, func(ci *pchTypes.ChannelInfo) bool { + return ci.Channel != nil + }, 0) + if err != nil { + return nil, err + } + + addrs := make([]address.Address, 0, len(cis)) + for _, ci := range cis { + addrs = append(addrs, *ci.Channel) + } + + return addrs, nil +} + +// findChan finds a single channel using the given filter. 
+// If there isn't a channel that matches the filter, returns ErrChannelNotTracked +func (ps *Store) findChan(ctx context.Context, filter func(ci *pchTypes.ChannelInfo) bool) (*pchTypes.ChannelInfo, error) { + cis, err := ps.findChans(ctx, filter, 1) + if err != nil { + return nil, err + } + + if len(cis) == 0 { + return nil, ErrChannelNotTracked + } + + return &cis[0], err +} + +// findChans loops over all channels, only including those that pass the filter. +// max is the maximum number of channels to return. Set to zero to return unlimited channels. +func (ps *Store) findChans(ctx context.Context, filter func(*pchTypes.ChannelInfo) bool, max int) ([]pchTypes.ChannelInfo, error) { + res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyChannelInfo}) + if err != nil { + return nil, err + } + defer res.Close() //nolint:errcheck + + var stored pchTypes.ChannelInfo + var matches []pchTypes.ChannelInfo + + for { + res, ok := res.NextSync() + if !ok { + break + } + + if res.Error != nil { + return nil, err + } + + ci, err := unmarshallChannelInfo(&stored, res.Value) + if err != nil { + return nil, err + } + + if !filter(ci) { + continue + } + + matches = append(matches, *ci) + + // If we've reached the maximum number of matches, return. + // Note that if max is zero we return an unlimited number of matches + // because len(matches) will always be at least 1. 
+ if len(matches) == max { + return matches, nil + } + } + + return matches, nil +} + +// AllocateLane allocates a new lane for the given channel +func (ps *Store) AllocateLane(ctx context.Context, ch address.Address) (uint64, error) { + ci, err := ps.ByAddress(ctx, ch) + if err != nil { + return 0, err + } + + out := ci.NextLane + ci.NextLane++ + + return out, ps.putChannelInfo(ctx, ci) +} + +// VouchersForPaych gets the vouchers for the given channel +func (ps *Store) VouchersForPaych(ctx context.Context, ch address.Address) ([]*pchTypes.VoucherInfo, error) { + ci, err := ps.ByAddress(ctx, ch) + if err != nil { + return nil, err + } + + return ci.Vouchers, nil +} + +func (ps *Store) MarkVoucherSubmitted(ctx context.Context, ci *pchTypes.ChannelInfo, sv *paych.SignedVoucher) error { + err := ci.MarkVoucherSubmitted(sv) + if err != nil { + return err + } + return ps.putChannelInfo(ctx, ci) +} + +// ByAddress gets the channel that matches the given address +func (ps *Store) ByAddress(ctx context.Context, addr address.Address) (*pchTypes.ChannelInfo, error) { + return ps.findChan(ctx, func(ci *pchTypes.ChannelInfo) bool { + return ci.Channel != nil && *ci.Channel == addr + }) +} + +// The datastore key used to identify the message +func dskeyForMsg(mcid cid.Cid) datastore.Key { + return datastore.KeyWithNamespaces([]string{dsKeyMsgCid, mcid.String()}) +} + +// SaveNewMessage is called when a message is sent +func (ps *Store) SaveNewMessage(ctx context.Context, channelID string, mcid cid.Cid) error { + k := dskeyForMsg(mcid) + + b, err := cborutil.Dump(&pchTypes.MsgInfo{ChannelID: channelID, MsgCid: mcid}) + if err != nil { + return err + } + + return ps.ds.Put(ctx, k, b) +} + +// SaveMessageResult is called when the result of a message is received +func (ps *Store) SaveMessageResult(ctx context.Context, mcid cid.Cid, msgErr error) error { + minfo, err := ps.GetMessage(ctx, mcid) + if err != nil { + return err + } + + k := dskeyForMsg(mcid) + minfo.Received = true + 
if msgErr != nil { + minfo.Err = msgErr.Error() + } + + b, err := cborutil.Dump(minfo) + if err != nil { + return err + } + + return ps.ds.Put(ctx, k, b) +} + +// ByMessageCid gets the channel associated with a message +func (ps *Store) ByMessageCid(ctx context.Context, mcid cid.Cid) (*pchTypes.ChannelInfo, error) { + minfo, err := ps.GetMessage(ctx, mcid) + if err != nil { + return nil, err + } + + ci, err := ps.findChan(ctx, func(ci *pchTypes.ChannelInfo) bool { + return ci.ChannelID == minfo.ChannelID + }) + if err != nil { + return nil, err + } + + return ci, err +} + +// GetMessage gets the message info for a given message CID +func (ps *Store) GetMessage(ctx context.Context, mcid cid.Cid) (*pchTypes.MsgInfo, error) { + k := dskeyForMsg(mcid) + + val, err := ps.ds.Get(ctx, k) + if err != nil { + return nil, err + } + + var minfo pchTypes.MsgInfo + if err := minfo.UnmarshalCBOR(bytes.NewReader(val)); err != nil { + return nil, err + } + + return &minfo, nil +} + +// OutboundActiveByFromTo looks for outbound channels that have not been +// settled, with the given from / to addresses +func (ps *Store) OutboundActiveByFromTo(ctx context.Context, sma managerAPI, from address.Address, to address.Address) (*pchTypes.ChannelInfo, error) { + return ps.findChan(ctx, func(ci *pchTypes.ChannelInfo) bool { + if ci.Direction != pchTypes.DirOutbound { + return false + } + if ci.Settling { + return false + } + if ci.Channel != nil { + _, st, err := sma.GetPaychState(ctx, *ci.Channel, nil) + if err != nil { + return false + } + sat, err := st.SettlingAt() + if err != nil { + return false + } + if sat != 0 { + return false + } + } + return ci.Control == from && ci.Target == to + }) +} + +// WithPendingAddFunds is used on startup to find channels for which a +// create channel or add funds message has been sent, but lotus shut down +// before the response was received. 
+func (ps *Store) WithPendingAddFunds(ctx context.Context) ([]pchTypes.ChannelInfo, error) { + return ps.findChans(ctx, func(ci *pchTypes.ChannelInfo) bool { + if ci.Direction != pchTypes.DirOutbound { + return false + } + return ci.CreateMsg != nil || ci.AddFundsMsg != nil + }, 0) +} + +// ByChannelID gets channel info by channel ID +func (ps *Store) ByChannelID(ctx context.Context, channelID string) (*pchTypes.ChannelInfo, error) { + var stored pchTypes.ChannelInfo + + res, err := ps.ds.Get(ctx, dskeyForChannel(channelID)) + if err != nil { + if err == datastore.ErrNotFound { + return nil, ErrChannelNotTracked + } + return nil, err + } + + return unmarshallChannelInfo(&stored, res) +} + +// CreateChannel creates an outbound channel for the given from / to +func (ps *Store) CreateChannel(ctx context.Context, from address.Address, to address.Address, createMsgCid cid.Cid, amt, avail fbig.Int) (*pchTypes.ChannelInfo, error) { + ci := &pchTypes.ChannelInfo{ + Direction: pchTypes.DirOutbound, + NextLane: 0, + Control: from, + Target: to, + CreateMsg: &createMsgCid, + PendingAmount: amt, + PendingAvailableAmount: avail, + } + + // Save the new channel + err := ps.putChannelInfo(ctx, ci) + if err != nil { + return nil, err + } + + // Save a reference to the create message + err = ps.SaveNewMessage(ctx, ci.ChannelID, createMsgCid) + if err != nil { + return nil, err + } + + return ci, err +} + +// RemoveChannel removes the channel with the given channel ID +func (ps *Store) RemoveChannel(ctx context.Context, channelID string) error { + return ps.ds.Delete(ctx, dskeyForChannel(channelID)) +} + +// The datastore key used to identify the channel info +func dskeyForChannel(channelID string) datastore.Key { + return datastore.KeyWithNamespaces([]string{dsKeyChannelInfo, channelID}) +} + +// putChannelInfo stores the channel info in the datastore +func (ps *Store) putChannelInfo(ctx context.Context, ci *pchTypes.ChannelInfo) error { + if len(ci.ChannelID) == 0 { + ci.ChannelID 
= uuid.New().String() + } + k := dskeyForChannel(ci.ChannelID) + + b, err := marshallChannelInfo(ci) + if err != nil { + return err + } + + return ps.ds.Put(ctx, k, b) +} + +// TODO: This is a hack to get around not being able to CBOR marshall a nil +// address.Address. It's been fixed in address.Address but we need to wait +// for the change to propagate to specs-actors before we can remove this hack. +var emptyAddr address.Address + +func init() { + addr, err := address.NewActorAddress([]byte("empty")) + if err != nil { + panic(err) + } + emptyAddr = addr +} + +func marshallChannelInfo(ci *pchTypes.ChannelInfo) ([]byte, error) { + // See note above about CBOR marshalling address.Address + if ci.Channel == nil { + ci.Channel = &emptyAddr + } + return cborutil.Dump(ci) +} + +func unmarshallChannelInfo(stored *pchTypes.ChannelInfo, value []byte) (*pchTypes.ChannelInfo, error) { + if err := stored.UnmarshalCBOR(bytes.NewReader(value)); err != nil { + return nil, err + } + + // See note above about CBOR marshalling address.Address + if stored.Channel != nil && *stored.Channel == emptyAddr { + stored.Channel = nil + } + + // backwards compat + if stored.AvailableAmount.Int == nil { + stored.AvailableAmount = fbig.NewInt(0) + stored.PendingAvailableAmount = fbig.NewInt(0) + } + + return stored, nil +} diff --git a/pkg/paychmgr/store_test.go b/pkg/paychmgr/store_test.go new file mode 100644 index 0000000000..fcc8bac80b --- /dev/null +++ b/pkg/paychmgr/store_test.go @@ -0,0 +1,90 @@ +package paychmgr + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-address" + tutils "github.com/filecoin-project/specs-actors/support/testing" + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + pchTypes "github.com/filecoin-project/venus/venus-shared/types/market" +) + +func TestStore(t *testing.T) { + tf.UnitTest(t) + store := 
NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) + ctx := context.Background() + addrs, err := store.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, addrs, 0) + + ch := tutils.NewIDAddr(t, 100) + ci := &pchTypes.ChannelInfo{ + Channel: &ch, + Control: tutils.NewIDAddr(t, 101), + Target: tutils.NewIDAddr(t, 102), + + Direction: pchTypes.DirOutbound, + Vouchers: []*pchTypes.VoucherInfo{{Voucher: nil, Proof: []byte{}}}, + } + + ch2 := tutils.NewIDAddr(t, 200) + ci2 := &pchTypes.ChannelInfo{ + Channel: &ch2, + Control: tutils.NewIDAddr(t, 201), + Target: tutils.NewIDAddr(t, 202), + + Direction: pchTypes.DirOutbound, + Vouchers: []*pchTypes.VoucherInfo{{Voucher: nil, Proof: []byte{}}}, + } + + // Track the channel + _, err = store.TrackChannel(ctx, ci) + require.NoError(t, err) + + // Tracking same channel again should error + _, err = store.TrackChannel(ctx, ci) + require.Error(t, err) + + // Track another channel + _, err = store.TrackChannel(ctx, ci2) + require.NoError(t, err) + + // List channels should include all channels + addrs, err = store.ListChannels(ctx) + require.NoError(t, err) + require.Len(t, addrs, 2) + t0100, err := address.NewIDAddress(100) + require.NoError(t, err) + t0200, err := address.NewIDAddress(200) + require.NoError(t, err) + require.Contains(t, addrs, t0100) + require.Contains(t, addrs, t0200) + + // Request vouchers for channel + vouchers, err := store.VouchersForPaych(ctx, *ci.Channel) + require.NoError(t, err) + require.Len(t, vouchers, 1) + + // Requesting voucher for non-existent channel should error + _, err = store.VouchersForPaych(ctx, tutils.NewIDAddr(t, 300)) + require.Equal(t, err, ErrChannelNotTracked) + + // Allocate lane for channel + lane, err := store.AllocateLane(ctx, *ci.Channel) + require.NoError(t, err) + require.Equal(t, lane, uint64(0)) + + // Allocate next lane for channel + lane, err = store.AllocateLane(ctx, *ci.Channel) + require.NoError(t, err) + require.Equal(t, lane, uint64(1)) + + // Allocate next 
lane for non-existent channel should error + _, err = store.AllocateLane(ctx, tutils.NewIDAddr(t, 300)) + require.Equal(t, err, ErrChannelNotTracked) +} diff --git a/pkg/paychmgr/util.go b/pkg/paychmgr/util.go new file mode 100644 index 0000000000..1f03d2c818 --- /dev/null +++ b/pkg/paychmgr/util.go @@ -0,0 +1,36 @@ +package paychmgr + +import ( + "context" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/go-state-types/builtin/v8/paych" +) + +type BestSpendableAPI interface { + PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) + PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) +} + +// BestSpendableByLane return spendable voucher in channel address +func BestSpendableByLane(ctx context.Context, api BestSpendableAPI, ch address.Address) (map[uint64]*paych.SignedVoucher, error) { + vouchers, err := api.PaychVoucherList(ctx, ch) + if err != nil { + return nil, err + } + + bestByLane := make(map[uint64]*paych.SignedVoucher) + for _, voucher := range vouchers { + spendable, err := api.PaychVoucherCheckSpendable(ctx, ch, voucher, nil, nil) + if err != nil { + return nil, err + } + if spendable { + if bestByLane[voucher.Lane] == nil || voucher.Amount.GreaterThan(bestByLane[voucher.Lane].Amount) { + bestByLane[voucher.Lane] = voucher + } + } + } + return bestByLane, nil +} diff --git a/pkg/repo/fskeystore/fskeystore.go b/pkg/repo/fskeystore/fskeystore.go new file mode 100644 index 0000000000..15fcc1b559 --- /dev/null +++ b/pkg/repo/fskeystore/fskeystore.go @@ -0,0 +1,163 @@ +package fskeystore + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("fskeystore") + +// ErrNoSuchKey is returned if a key of the given name is not found in the store +var ErrNoSuchKey = fmt.Errorf("no key by the given name was found") + +// ErrKeyExists is returned when writing a key would overwrite 
an existing key +var ErrKeyExists = fmt.Errorf("key by that name already exists, refusing to overwrite") + +// ErrKeyFmt is returned when the key's format is invalid +var ErrKeyFmt = fmt.Errorf("key has invalid format") + +// FSKeystore is a keystore backed by files in a given directory stored on disk. +type FSKeystore struct { + dir string +} + +// NewFSKeystore returns a new filesystem keystore at directory `dir` +func NewFSKeystore(dir string) (*FSKeystore, error) { + _, err := os.Stat(dir) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + if err := os.Mkdir(dir, 0o700); err != nil { + return nil, err + } + } + + return &FSKeystore{dir}, nil +} + +// Has returns whether or not a key exists in the Keystore +func (ks *FSKeystore) Has(name string) (bool, error) { + kp := filepath.Join(ks.dir, name) + + _, err := os.Stat(kp) + + if os.IsNotExist(err) { + return false, nil + } + + if err != nil { + return false, err + } + + if err := validateName(name); err != nil { + return false, err + } + + return true, nil +} + +// Put stores a key in the Keystore, if a key with the same name already exists, returns ErrKeyExists +func (ks *FSKeystore) Put(name string, data []byte) error { + if err := validateName(name); err != nil { + return err + } + + kp := filepath.Join(ks.dir, name) + + _, err := os.Stat(kp) + if err == nil { + return ErrKeyExists + } else if !os.IsNotExist(err) { + return err + } + + fi, err := os.Create(kp) + if err != nil { + return err + } + defer func() { + _ = fi.Close() + }() + + _, err = fi.Write(data) + + return err +} + +// Get retrieves a key from the Keystore if it exists, and returns ErrNoSuchKey +// otherwise. 
+func (ks *FSKeystore) Get(name string) ([]byte, error) { + if err := validateName(name); err != nil { + return nil, err + } + + kp := filepath.Join(ks.dir, name) + + data, err := os.ReadFile(kp) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrNoSuchKey + } + return nil, err + } + + return data, nil +} + +// Delete removes a key from the Keystore +func (ks *FSKeystore) Delete(name string) error { + if err := validateName(name); err != nil { + return err + } + + kp := filepath.Join(ks.dir, name) + + return os.Remove(kp) +} + +// List returns a list of key identifiers +func (ks *FSKeystore) List() ([]string, error) { + dir, err := os.Open(ks.dir) + if err != nil { + return nil, err + } + + dirs, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + list := make([]string, 0, len(dirs)) + + for _, name := range dirs { + err := validateName(name) + if err == nil { + list = append(list, name) + } else { + log.Warnf("Ignoring the invalid keyfile: %s", name) + } + } + + return list, nil +} + +func validateName(name string) error { + if name == "" { + return fmt.Errorf("key names must be at least one character: %v", ErrKeyFmt) + } + + if strings.Contains(name, "/") { + return fmt.Errorf("key names may not contain slashes: %v", ErrKeyFmt) + } + + if strings.HasPrefix(name, ".") { + return fmt.Errorf("key names may not begin with a period: %v", ErrKeyFmt) + } + + return nil +} diff --git a/pkg/repo/fskeystore/fskeystore_test.go b/pkg/repo/fskeystore/fskeystore_test.go new file mode 100644 index 0000000000..fbbbc2455c --- /dev/null +++ b/pkg/repo/fskeystore/fskeystore_test.go @@ -0,0 +1,271 @@ +package fskeystore + +import ( + "bytes" + "fmt" + "math/rand" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/libp2p/go-libp2p/core/crypto" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func assertDirContents(dir string, exp []string) error { + finfos, err := os.ReadDir(dir) + if err != nil { + return err + } + + 
if len(finfos) != len(exp) { + return fmt.Errorf("expected %d directory entries", len(exp)) + } + + var names []string + for _, fi := range finfos { + names = append(names, fi.Name()) + } + + sort.Strings(names) + sort.Strings(exp) + if len(names) != len(exp) { + return fmt.Errorf("directory had wrong number of entries in it") + } + + for i, v := range names { + if v != exp[i] { + return fmt.Errorf("had wrong entry in directory") + } + } + return nil +} + +func TestKeystoreBasics(t *testing.T) { + tf.UnitTest(t) + tdir := t.TempDir() + + ks, err := NewFSKeystore(tdir) + if err != nil { + t.Fatal(err) + } + + l, err := ks.List() + if err != nil { + t.Fatal(err) + } + + if len(l) != 0 { + t.Fatal("expected no keys") + } + + k1 := privKeyOrFatal(t) + k2 := privKeyOrFatal(t) + k3 := privKeyOrFatal(t) + k4 := privKeyOrFatal(t) + + err = ks.Put("foo", k1) + if err != nil { + t.Fatal(err) + } + + err = ks.Put("bar", k2) + if err != nil { + t.Fatal(err) + } + + l, err = ks.List() + if err != nil { + t.Fatal(err) + } + + sort.Strings(l) + if l[0] != "bar" || l[1] != "foo" { + t.Fatal("wrong entries listed") + } + + if err := assertDirContents(tdir, []string{"foo", "bar"}); err != nil { + t.Fatal(err) + } + + err = ks.Put("foo", k3) + if err == nil { + t.Fatal("should not be able to overwrite key") + } + + if err := assertDirContents(tdir, []string{"foo", "bar"}); err != nil { + t.Fatal(err) + } + + exist, err := ks.Has("foo") + if !exist { + t.Fatal("should know it has a key named foo") + } + if err != nil { + t.Fatal(err) + } + + exist, err = ks.Has("nonexistingkey") + if exist { + t.Fatal("should know it doesn't have a key named nonexistingkey") + } + if err != nil { + t.Fatal(err) + } + + if err := ks.Delete("bar"); err != nil { + t.Fatal(err) + } + + if err := assertDirContents(tdir, []string{"foo"}); err != nil { + t.Fatal(err) + } + + if err := ks.Put("beep", k3); err != nil { + t.Fatal(err) + } + + if err := ks.Put("boop", k4); err != nil { + t.Fatal(err) + } + + if 
err := assertDirContents(tdir, []string{"foo", "beep", "boop"}); err != nil { + t.Fatal(err) + } + + if err := assertGetKey(ks, "foo", k1); err != nil { + t.Fatal(err) + } + + if err := assertGetKey(ks, "beep", k3); err != nil { + t.Fatal(err) + } + + if err := assertGetKey(ks, "boop", k4); err != nil { + t.Fatal(err) + } + + if err := ks.Put("..///foo/", k1); err == nil { + t.Fatal("shouldnt be able to put a poorly named key") + } + + if err := ks.Put("", k1); err == nil { + t.Fatal("shouldnt be able to put a key with no name") + } + + if err := ks.Put(".foo", k1); err == nil { + t.Fatal("shouldnt be able to put a key with a 'hidden' name") + } +} + +func TestInvalidKeyFiles(t *testing.T) { + tf.UnitTest(t) + + tdir := t.TempDir() + + ks, err := NewFSKeystore(tdir) + if err != nil { + t.Fatal(err) + } + + bytes := privKeyOrFatal(t) + + err = os.WriteFile(filepath.Join(ks.dir, "valid"), bytes, 0o644) + if err != nil { + t.Fatal(err) + } + + err = os.WriteFile(filepath.Join(ks.dir, ".invalid"), bytes, 0o644) + if err != nil { + t.Fatal(err) + } + + l, err := ks.List() + if err != nil { + t.Fatal(err) + } + + sort.Strings(l) + if len(l) != 1 { + t.Fatal("wrong entry count") + } + + if l[0] != "valid" { + t.Fatal("wrong entries listed") + } + + exist, err := ks.Has("valid") + if err != nil { + t.Fatal(err) + } + if !exist { + t.Fatal("should know it has a key named valid") + } + + if _, err = ks.Has(".invalid"); err == nil { + t.Fatal("shouldnt be able to put a key with a 'hidden' name") + } +} + +func TestNonExistingKey(t *testing.T) { + tf.UnitTest(t) + + tdir := t.TempDir() + + ks, err := NewFSKeystore(tdir) + if err != nil { + t.Fatal(err) + } + + k, err := ks.Get("does-it-exist") + if err != ErrNoSuchKey { + t.Fatalf("expected: %s, got %s", ErrNoSuchKey, err) + } + if k != nil { + t.Fatalf("Get on nonexistant key should give nil") + } +} + +func TestMakeKeystoreNoDir(t *testing.T) { + tf.UnitTest(t) + + _, err := NewFSKeystore("/this/is/not/a/real/dir") + if err 
== nil { + t.Fatal("shouldnt be able to make a keystore in a nonexistant directory") + } +} + +type rr struct{} + +func (rr rr) Read(b []byte) (int, error) { + return rand.Read(b) +} + +func privKeyOrFatal(t *testing.T) []byte { + priv, _, err := crypto.GenerateEd25519Key(rr{}) + if err != nil { + t.Fatal(err) + } + + kbytes, err := crypto.MarshalPrivateKey(priv) + if err != nil { + t.Fatal(err) + } + + return kbytes +} + +func assertGetKey(ks Keystore, name string, exp []byte) error { + outK, err := ks.Get(name) + if err != nil { + return err + } + + if !bytes.Equal(outK, exp) { + return fmt.Errorf("key we got out didnt match expectation") + } + + return nil +} diff --git a/pkg/repo/fskeystore/keystore.go b/pkg/repo/fskeystore/keystore.go new file mode 100644 index 0000000000..0404bacbc1 --- /dev/null +++ b/pkg/repo/fskeystore/keystore.go @@ -0,0 +1,16 @@ +package fskeystore + +// Keystore provides a key management interface +type Keystore interface { + // Has returns whether or not a key exist in the Keystore + Has(string) (bool, error) + // Put stores a key in the Keystore, if a key with the same name already exists, returns ErrKeyExists + Put(string, []byte) error + // Get retrieves a key from the Keystore if it exists, and returns ErrNoSuchKey + // otherwise. 
+ Get(string) ([]byte, error) + // Delete removes a key from the Keystore + Delete(string) error + // List returns a list of key identifier + List() ([]string, error) +} diff --git a/pkg/repo/fskeystore/memkeystore.go b/pkg/repo/fskeystore/memkeystore.go new file mode 100644 index 0000000000..ca342fce50 --- /dev/null +++ b/pkg/repo/fskeystore/memkeystore.go @@ -0,0 +1,81 @@ +package fskeystore + +type keyMap map[string][]byte + +// MemKeystore is a keystore backed by an in-memory map +type MemKeystore struct { + values keyMap +} + +// NewMemKeystore returns a new map-based keystore +func NewMemKeystore() *MemKeystore { + return &MemKeystore{ + values: keyMap{}, + } +} + +// Has returns whether or not a key exists in the keystore map +func (mk *MemKeystore) Has(k string) (bool, error) { + if err := validateName(k); err != nil { + return false, err + } + _, found := mk.values[k] + return found, nil +} + +// Put stores a key in the map, if a key with the same name already exists, +// returns ErrKeyExists +func (mk *MemKeystore) Put(k string, b []byte) error { + if err := validateName(k); err != nil { + return err + } + + _, found := mk.values[k] + if found { + return ErrKeyExists + } + + mk.values[k] = b + return nil +} + +// Get retrieves a key from the map if it exists, else it returns ErrNoSuchKey +func (mk *MemKeystore) Get(k string) ([]byte, error) { + if err := validateName(k); err != nil { + return nil, err + } + + v, found := mk.values[k] + if !found { + return nil, ErrNoSuchKey + } + + return v, nil +} + +// Delete removes a key from the map +func (mk *MemKeystore) Delete(k string) error { + if err := validateName(k); err != nil { + return err + } + if _, found := mk.values[k]; !found { + return ErrNoSuchKey + } + + delete(mk.values, k) + return nil +} + +// List returns a list of key identifiers in random order +func (mk *MemKeystore) List() ([]string, error) { + out := make([]string, 0, len(mk.values)) + for k := range mk.values { + err := validateName(k) + 
if err == nil { + out = append(out, k) + } else { + log.Warningf("ignoring the invalid keyfile: %s", k) + } + } + return out, nil +} diff --git a/pkg/repo/fskeystore/memkeystore_test.go b/pkg/repo/fskeystore/memkeystore_test.go new file mode 100644 index 0000000000..f3b989dd07 --- /dev/null +++ b/pkg/repo/fskeystore/memkeystore_test.go @@ -0,0 +1,103 @@ +package fskeystore + +import ( + "sort" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestMemKeyStoreBasics(t *testing.T) { + tf.UnitTest(t) + + ks := NewMemKeystore() + + l, err := ks.List() + if err != nil { + t.Fatal(err) + } + + if len(l) != 0 { + t.Fatal("expected no keys") + } + + k1 := privKeyOrFatal(t) + k2 := privKeyOrFatal(t) + k3 := privKeyOrFatal(t) + k4 := privKeyOrFatal(t) + + err = ks.Put("foo", k1) + if err != nil { + t.Fatal(err) + } + + err = ks.Put("bar", k2) + if err != nil { + t.Fatal(err) + } + + l, err = ks.List() + if err != nil { + t.Fatal(err) + } + + sort.Strings(l) + if l[0] != "bar" || l[1] != "foo" { + t.Fatal("wrong entries listed") + } + + err = ks.Put("foo", k3) + if err == nil { + t.Fatal("should not be able to overwrite key") + } + + exist, err := ks.Has("foo") + if !exist { + t.Fatal("should know it has a key named foo") + } + if err != nil { + t.Fatal(err) + } + + exist, err = ks.Has("nonexistingkey") + if exist { + t.Fatal("should know it doesn't have a key named nonexistingkey") + } + if err != nil { + t.Fatal(err) + } + + if err := ks.Delete("bar"); err != nil { + t.Fatal(err) + } + if err := ks.Put("beep", k3); err != nil { + t.Fatal(err) + } + + if err := ks.Put("boop", k4); err != nil { + t.Fatal(err) + } + if err := assertGetKey(ks, "foo", k1); err != nil { + t.Fatal(err) + } + + if err := assertGetKey(ks, "beep", k3); err != nil { + t.Fatal(err) + } + + if err := assertGetKey(ks, "boop", k4); err != nil { + t.Fatal(err) + } + + if err := ks.Put("..///foo/", k1); err == nil { + t.Fatal("shouldnt be able to put a poorly named 
key") + } + + if err := ks.Put("", k1); err == nil { + t.Fatal("shouldnt be able to put a key with no name") + } + + if err := ks.Put(".foo", k1); err == nil { + t.Fatal("shouldnt be able to put a key with a 'hidden' name") + } +} diff --git a/pkg/repo/fskeystore/sync.go b/pkg/repo/fskeystore/sync.go new file mode 100644 index 0000000000..ccbbd53a59 --- /dev/null +++ b/pkg/repo/fskeystore/sync.go @@ -0,0 +1,59 @@ +package fskeystore + +import ( + "sync" +) + +// MutexKeystore contains a child keystore and a mutex. +// used for coarse sync +type MutexKeystore struct { + sync.RWMutex + + child Keystore +} + +// MutexWrap constructs a keystore with a coarse lock around +// the entire keystore, for every single operation +func MutexWrap(k Keystore) *MutexKeystore { + return &MutexKeystore{child: k} +} + +// Children implements Shim +func (mk *MutexKeystore) Children() []Keystore { + return []Keystore{mk.child} +} + +// Put implements Keystore.Put +func (mk *MutexKeystore) Put(k string, data []byte) error { + mk.Lock() + defer mk.Unlock() + return mk.child.Put(k, data) +} + +// Get implements Keystore.Get +func (mk *MutexKeystore) Get(k string) ([]byte, error) { + mk.RLock() + defer mk.RUnlock() + return mk.child.Get(k) +} + +// Has implements Keystore.Has +func (mk *MutexKeystore) Has(k string) (bool, error) { + mk.RLock() + defer mk.RUnlock() + return mk.child.Has(k) +} + +// Delete implements Keystore.Delete +func (mk *MutexKeystore) Delete(k string) error { + mk.Lock() + defer mk.Unlock() + return mk.child.Delete(k) +} + +// List implements Keystore.List +func (mk *MutexKeystore) List() ([]string, error) { + mk.RLock() + defer mk.RUnlock() + return mk.child.List() +} diff --git a/pkg/repo/fsrepo.go b/pkg/repo/fsrepo.go new file mode 100644 index 0000000000..a78376aa93 --- /dev/null +++ b/pkg/repo/fsrepo.go @@ -0,0 +1,695 @@ +package repo + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + 
"github.com/filecoin-project/venus/pkg/repo/fskeystore" + + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + bstore "github.com/ipfs/go-ipfs-blockstore" + + badgerds "github.com/ipfs/go-ds-badger2" + lockfile "github.com/ipfs/go-fs-lock" + logging "github.com/ipfs/go-log/v2" + "github.com/mitchellh/go-homedir" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/config" +) + +// Version is the version of repo schema that this code understands. +const LatestVersion uint = 10 + +const ( + // apiFile is the filename containing the filecoin node's api address. + apiToken = "token" + apiFile = "api" + configFilename = "config.json" + tempConfigFilename = ".config.json.temp" + lockFile = "repo.lock" + versionFilename = "version" + walletDatastorePrefix = "wallet" + chainDatastorePrefix = "chain" + metaDatastorePrefix = "metadata" + paychDatastorePrefix = "paych" + snapshotStorePrefix = "snapshots" + snapshotFilenamePrefix = "snapshot" + dataTransfer = "data-transfer" +) + +var log = logging.Logger("repo") + +// FSRepo is a repo implementation backed by a filesystem. +type FSRepo struct { + // Path to the repo root directory. + path string + version uint + + // lk protects the config file + lk sync.RWMutex + cfg *config.Config + + ds *blockstoreutil.BadgerBlockstore + keystore fskeystore.Keystore + walletDs Datastore + chainDs Datastore + metaDs Datastore + // marketDs Datastore + paychDs Datastore + // lockfile is the file system lock to prevent others from opening the same repo. + lockfile io.Closer +} + +var _ Repo = (*FSRepo)(nil) + +// InitFSRepo initializes a new repo at the target path with the provided configuration. +// The successful result creates a symlink at targetPath pointing to a sibling directory +// named with a timestamp and repo version number. +// The link path must be empty prior. If the computed actual directory exists, it must be empty. 
+func InitFSRepo(targetPath string, version uint, cfg *config.Config) error { + repoPath, err := homedir.Expand(targetPath) + if err != nil { + return err + } + + if repoPath == "" { // path contained no separator + repoPath = "./" + } + + exists, err := fileExists(repoPath) + if err != nil { + return errors.Wrapf(err, "error inspecting repo path %s", repoPath) + } else if exists { + return errors.Errorf("repo at %s, file exists", repoPath) + } + + // Create the actual directory and then the link to it. + return InitFSRepoDirect(repoPath, version, cfg) +} + +// InitFSRepoDirect initializes a new repo at a target path, establishing a provided configuration. +// The target path must not exist, or must reference an empty, read/writable directory. +func InitFSRepoDirect(targetPath string, version uint, cfg *config.Config) error { + repoPath, err := homedir.Expand(targetPath) + if err != nil { + return err + } + + if err := ensureWritableDirectory(repoPath); err != nil { + return errors.Wrap(err, "no writable directory") + } + + empty, err := isEmptyDir(repoPath) + if err != nil { + return errors.Wrapf(err, "failed to list repo directory %s", repoPath) + } + if !empty { + return fmt.Errorf("refusing to initialize repo in non-empty directory %s", repoPath) + } + + if err := WriteVersion(repoPath, version); err != nil { + return errors.Wrap(err, "initializing repo version failed") + } + + if err := initConfig(repoPath, cfg); err != nil { + return errors.Wrap(err, "initializing config file failed") + } + if err := initDataTransfer(repoPath); err != nil { + return errors.Wrap(err, "initializing data-transfer directory failed") + } + return nil +} + +func Exists(repoPath string) (bool, error) { + _, err := os.Stat(filepath.Join(repoPath, walletDatastorePrefix)) + notExist := os.IsNotExist(err) + if notExist { + err = nil + + _, err = os.Stat(filepath.Join(repoPath, configFilename)) + notExist = os.IsNotExist(err) + if notExist { + err = nil + } + } + return !notExist, err +} 
+ +// OpenFSRepo opens an initialized fsrepo, expecting a specific version. +// The provided path may be to a directory, or a symbolic link pointing at a directory, which +// will be resolved just once at open. +func OpenFSRepo(repoPath string, version uint) (*FSRepo, error) { + repoPath, err := homedir.Expand(repoPath) + if err != nil { + return nil, err + } + + hasConfig, err := hasConfig(repoPath) + if err != nil { + return nil, errors.Wrap(err, "failed to check for repo config") + } + + if !hasConfig { + return nil, errors.Errorf("no config found at %s", repoPath) + } + + info, err := os.Stat(repoPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to stat repo link %s", repoPath) + } + + // Resolve path if it's a symlink. + var actualPath string + if info.IsDir() { + actualPath = repoPath + } else { + actualPath, err = os.Readlink(repoPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to follow repo symlink %s", repoPath) + } + } + + r := &FSRepo{path: actualPath, version: version} + + r.lockfile, err = lockfile.Lock(r.path, lockFile) + if err != nil { + return nil, errors.Wrap(err, "failed to take repo lock") + } + + if err := r.loadFromDisk(); err != nil { + _ = r.lockfile.Close() + return nil, err + } + + return r, nil +} + +// MakeRepoDirName constructs a name for a concrete repo directory, which includes its +// version number and a timestamp. The name will begin with prefix and, if uniqueifier is +// non-zero, end with that (intended as an ordinal for finding a free name). +// E.g. ".filecoin-20190102-140425-012-1 +// This is exported for use by migrations. 
+func MakeRepoDirName(prefix string, ts time.Time, version uint, uniqueifier uint) string { + name := strings.Join([]string{ + prefix, + ts.Format("20060102-150405"), + fmt.Sprintf("v%03d", version), + }, "-") + if uniqueifier != 0 { + name = name + fmt.Sprintf("-%d", uniqueifier) + } + return name +} + +func (r *FSRepo) loadFromDisk() error { + localVersion, err := r.readVersion() + if err != nil { + return errors.Wrap(err, "failed to read version") + } + + if localVersion > r.version { + return fmt.Errorf("binary needs update to handle repo version, got %d expected %d. Update binary to latest release", localVersion, LatestVersion) + } + + if err := r.loadConfig(); err != nil { + return errors.Wrap(err, "failed to load config file") + } + + if err := r.openDatastore(); err != nil { + return errors.Wrap(err, "failed to open datastore") + } + + if err := r.openKeystore(); err != nil { + return errors.Wrap(err, "failed to open keystore") + } + + if err := r.openWalletDatastore(); err != nil { + return errors.Wrap(err, "failed to open wallet datastore") + } + + if err := r.openChainDatastore(); err != nil { + return errors.Wrap(err, "failed to open chain datastore") + } + + if err := r.openMetaDatastore(); err != nil { + return errors.Wrap(err, "failed to open metadata datastore") + } + + if err := r.openPaychDataStore(); err != nil { + return errors.Wrap(err, "failed to open paych datastore") + } + + return nil +} + +// configModule returns the configuration object. +func (r *FSRepo) Config() *config.Config { + r.lk.RLock() + defer r.lk.RUnlock() + + return r.cfg +} + +// ReplaceConfig replaces the current config with the newly passed in one. 
+func (r *FSRepo) ReplaceConfig(cfg *config.Config) error { + if err := r.SnapshotConfig(r.Config()); err != nil { + log.Warnf("failed to create snapshot: %s", err.Error()) + } + r.lk.Lock() + defer r.lk.Unlock() + + r.cfg = cfg + tmp := filepath.Join(r.path, tempConfigFilename) + err := os.RemoveAll(tmp) + if err != nil { + return err + } + err = r.cfg.WriteFile(tmp) + if err != nil { + return err + } + return os.Rename(tmp, filepath.Join(r.path, configFilename)) +} + +// SnapshotConfig stores a copy `cfg` in /snapshots/ appending the +// time of snapshot to the filename. +func (r *FSRepo) SnapshotConfig(cfg *config.Config) error { + snapshotFile := filepath.Join(r.path, snapshotStorePrefix, genSnapshotFileName()) + exists, err := fileExists(snapshotFile) + if err != nil { + return errors.Wrap(err, "error checking snapshot file") + } else if exists { + // this should never happen + return fmt.Errorf("file already exists: %s", snapshotFile) + } + return cfg.WriteFile(snapshotFile) +} + +// Datastore returns the datastore. +func (r *FSRepo) Datastore() blockstoreutil.Blockstore { + return r.ds +} + +// WalletDatastore returns the wallet datastore. +func (r *FSRepo) WalletDatastore() Datastore { + return r.walletDs +} + +// ChainDatastore returns the chain datastore. +func (r *FSRepo) ChainDatastore() Datastore { + return r.chainDs +} + +func (r *FSRepo) MetaDatastore() Datastore { + return r.metaDs +} + +/*func (r *FSRepo) MarketDatastore() Datastore { + return r.marketDs +}*/ + +func (r *FSRepo) PaychDatastore() Datastore { + return r.paychDs +} + +// Version returns the version of the repo +func (r *FSRepo) Version() uint { + return r.version +} + +// Keystore returns the keystore +func (r *FSRepo) Keystore() fskeystore.Keystore { + return r.keystore +} + +// Close closes the repo. 
+func (r *FSRepo) Close() error { + if err := r.ds.Close(); err != nil { + return errors.Wrap(err, "failed to close datastore") + } + + if err := r.walletDs.Close(); err != nil { + return errors.Wrap(err, "failed to close wallet datastore") + } + + if err := r.chainDs.Close(); err != nil { + return errors.Wrap(err, "failed to close chain datastore") + } + + if err := r.metaDs.Close(); err != nil { + return errors.Wrap(err, "failed to close meta datastore") + } + + if err := r.paychDs.Close(); err != nil { + return errors.Wrap(err, "failed to close paych datastore") + } + + /*if err := r.marketDs.Close(); err != nil { + return errors.Wrap(err, "failed to close market datastore") + }*/ + + if err := r.removeAPIFile(); err != nil { + return errors.Wrap(err, "error removing API file") + } + + return r.lockfile.Close() +} + +func (r *FSRepo) removeFile(path string) error { + if err := os.Remove(path); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +func (r *FSRepo) removeAPIFile() error { + return r.removeFile(filepath.Join(r.path, apiFile)) +} + +// Tests whether a repo directory contains the expected config file. +func hasConfig(p string) (bool, error) { + configPath := filepath.Join(p, configFilename) + + _, err := os.Lstat(configPath) + switch { + case err == nil: + return true, nil + case os.IsNotExist(err): + return false, nil + default: + return false, err + } +} + +func (r *FSRepo) loadConfig() error { + configFile := filepath.Join(r.path, configFilename) + + cfg, err := config.ReadFile(configFile) + if err != nil { + return errors.Wrapf(err, "failed to read config file at %q", configFile) + } + + r.cfg = cfg + return nil +} + +// readVersion reads the repo's version file (but does not change r.version). 
+func (r *FSRepo) readVersion() (uint, error) { + return ReadVersion(r.path) +} + +func (r *FSRepo) openDatastore() error { + switch r.cfg.Datastore.Type { + case "badgerds": + path := filepath.Join(r.path, r.cfg.Datastore.Path) + opts, err := blockstoreutil.BadgerBlockstoreOptions(path, false) + if err != nil { + return err + } + opts.Prefix = bstore.BlockPrefix.String() + ds, err := blockstoreutil.Open(opts) + if err != nil { + return err + } + r.ds = ds + default: + return fmt.Errorf("unknown datastore type in config: %s", r.cfg.Datastore.Type) + } + + return nil +} + +func (r *FSRepo) openKeystore() error { + ksp := filepath.Join(r.path, "keystore") + + ks, err := fskeystore.NewFSKeystore(ksp) + if err != nil { + return err + } + + r.keystore = ks + + return nil +} + +func (r *FSRepo) openChainDatastore() error { + ds, err := badgerds.NewDatastore(filepath.Join(r.path, chainDatastorePrefix), badgerOptions()) + if err != nil { + return err + } + + r.chainDs = ds + + return nil +} + +func (r *FSRepo) openMetaDatastore() error { + ds, err := badgerds.NewDatastore(filepath.Join(r.path, metaDatastorePrefix), badgerOptions()) + if err != nil { + return err + } + + r.metaDs = ds + + return nil +} + +func (r *FSRepo) openPaychDataStore() error { + var err error + r.paychDs, err = badgerds.NewDatastore(filepath.Join(r.path, paychDatastorePrefix), badgerOptions()) + if err != nil { + return err + } + return nil +} + +func (r *FSRepo) openWalletDatastore() error { + // TODO: read wallet datastore info from config, use that to open it up + ds, err := badgerds.NewDatastore(filepath.Join(r.path, walletDatastorePrefix), badgerOptions()) + if err != nil { + return err + } + + r.walletDs = ds + + return nil +} + +// WriteVersion writes the given version to the repo version file. 
+func WriteVersion(p string, version uint) error { + return os.WriteFile(filepath.Join(p, versionFilename), []byte(strconv.Itoa(int(version))), 0o644) +} + +// ReadVersion returns the unparsed (string) version +// from the version file in the specified repo. +func ReadVersion(repoPath string) (uint, error) { + file, err := os.ReadFile(filepath.Join(repoPath, versionFilename)) + if err != nil { + return 0, err + } + verStr := strings.Trim(string(file), "\n") + version, err := strconv.ParseUint(verStr, 10, 32) + if err != nil { + return 0, err + } + return uint(version), nil +} + +func initConfig(p string, cfg *config.Config) error { + configFile := filepath.Join(p, configFilename) + exists, err := fileExists(configFile) + if err != nil { + return errors.Wrap(err, "error inspecting config file") + } else if exists { + return fmt.Errorf("config file already exists: %s", configFile) + } + + if err := cfg.WriteFile(configFile); err != nil { + return err + } + + // make the snapshot dir + snapshotDir := filepath.Join(p, snapshotStorePrefix) + return ensureWritableDirectory(snapshotDir) +} + +func initDataTransfer(p string) error { + dataTransferDir := filepath.Join(p, dataTransfer) + state, err := os.Stat(dataTransferDir) + if err == nil { + if state.IsDir() { + return nil + } + return errors.New("error must be a directory") + } + if !os.IsNotExist(err) { + return err + } + // create data-transfer state + return os.MkdirAll(dataTransferDir, 0o777) +} + +func genSnapshotFileName() string { + return fmt.Sprintf("%s-%d.json", snapshotFilenamePrefix, time.Now().UTC().UnixNano()) +} + +// Ensures that path points to a read/writable directory, creating it if necessary. +func ensureWritableDirectory(path string) error { + // Attempt to create the requested directory, accepting that something might already be there. + err := os.Mkdir(path, 0o775) + + if err == nil { + return nil // Skip the checks below, we just created it. 
+ } else if !os.IsExist(err) { + return errors.Wrapf(err, "failed to create directory %s", path) + } + + // Inspect existing directory. + stat, err := os.Stat(path) + if err != nil { + return errors.Wrapf(err, "failed to stat path \"%s\"", path) + } + if !stat.IsDir() { + return errors.Errorf("%s is not a directory", path) + } + if (stat.Mode() & 0o600) != 0o600 { + return errors.Errorf("insufficient permissions for path %s, got %04o need %04o", path, stat.Mode(), 0o600) + } + return nil +} + +// Tests whether the directory at path is empty +func isEmptyDir(path string) (bool, error) { + infos, err := os.ReadDir(path) + if err != nil { + return false, err + } + return len(infos) == 0, nil +} + +func fileExists(file string) (bool, error) { + _, err := os.Stat(file) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// SetAPIAddr writes the address to the API file. SetAPIAddr expects parameter +// `port` to be of the form `:`. +func (r *FSRepo) SetAPIAddr(maddr string) error { + f, err := os.Create(filepath.Join(r.path, apiFile)) + if err != nil { + return errors.Wrap(err, "could not create API file") + } + + defer f.Close() // nolint: errcheck + + _, err = f.WriteString(maddr) + if err != nil { + // If we encounter an error writing to the API file, + // delete the API file. The error encountered while + // deleting the API file will be returned (if one + // exists) instead of the write-error. + if err := r.removeAPIFile(); err != nil { + return errors.Wrap(err, "failed to remove API file") + } + + return errors.Wrap(err, "failed to write to API file") + } + + return nil +} + +// Path returns the path the fsrepo is at +func (r *FSRepo) Path() (string, error) { + return r.path, nil +} + +// JournalPath returns the path the journal is at. 
+func (r *FSRepo) JournalPath() string { + return fmt.Sprintf("%s/journal.json", r.path) +} + +// APIAddrFromRepoPath returns the api addr from the filecoin repo +func APIAddrFromRepoPath(repoPath string) (string, error) { + repoPath, err := homedir.Expand(repoPath) + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("can't resolve local repo path %s", repoPath)) + } + return apiAddrFromFile(repoPath) +} + +// APIAddrFromRepoPath returns the token from the filecoin repo +func APITokenFromRepoPath(repoPath string) (string, error) { + repoPath, err := homedir.Expand(repoPath) + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("can't resolve local repo path %s", repoPath)) + } + return apiTokenFromFile(repoPath) +} + +// APIAddrFromFile reads the address from the API file at the given path. +// A relevant comment from a similar function at go-ipfs/repo/fsrepo/fsrepo.go: +// This is a concurrent operation, meaning that any process may read this file. +// Modifying this file, therefore, should use "mv" to replace the whole file +// and avoid interleaved read/writes +func apiAddrFromFile(repoPath string) (string, error) { + jsonrpcFile := filepath.Join(repoPath, apiFile) + jsonrpcAPI, err := os.ReadFile(jsonrpcFile) + if err != nil { + return "", errors.Wrap(err, "failed to read API file") + } + + return string(jsonrpcAPI), nil +} + +// apiTokenFromFile reads the token from the token file at the given path. 
+func apiTokenFromFile(repoPath string) (string, error) { + tokenFile := filepath.Join(repoPath, apiToken) + token, err := os.ReadFile(tokenFile) + if err != nil { + return "", errors.Wrap(err, "failed to read API file") + } + + return strings.TrimSpace(string(token)), nil +} + +// APIAddr reads the FSRepo's api file and returns the api address +func (r *FSRepo) APIAddr() (string, error) { + return apiAddrFromFile(filepath.Clean(r.path)) +} + +func (r *FSRepo) SetAPIToken(token []byte) error { + return os.WriteFile(filepath.Join(r.path, apiToken), token, 0o600) +} + +func (r *FSRepo) APIToken() (string, error) { + tkBuff, err := os.ReadFile(filepath.Join(r.path, apiToken)) + if err != nil { + return "", err + } + return strings.TrimSpace(string(tkBuff)), nil +} + +func badgerOptions() *badgerds.Options { + result := &badgerds.DefaultOptions + result.Truncate = true + result.MaxTableSize = 64 << 21 + return result +} + +func (r *FSRepo) Repo() Repo { + return r +} diff --git a/pkg/repo/fsrepo_test.go b/pkg/repo/fsrepo_test.go new file mode 100644 index 0000000000..e4581fe8cc --- /dev/null +++ b/pkg/repo/fsrepo_test.go @@ -0,0 +1,353 @@ +// stm: #unit +package repo + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "testing" + + ds "github.com/ipfs/go-datastore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/config" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestInitRepoDirect(t *testing.T) { + tf.UnitTest(t) + cfg := config.NewDefaultConfig() + + // Inits a repo and opens it (ensuring it is openable) + initAndOpenRepoDirect := func(repoPath string, version uint, cfg *config.Config) (*FSRepo, error) { + // stm: @REPO_FSREPO_INIT_DIRECT_001 + if err := InitFSRepoDirect(repoPath, version, cfg); err != nil { + return nil, err + } + return OpenFSRepo(repoPath, version) + } + + t.Run("successfully creates when directory 
exists", func(t *testing.T) { + dir := t.TempDir() + + _, err := initAndOpenRepoDirect(dir, 42, cfg) + assert.NoError(t, err) + checkNewRepoFiles(t, dir, 42) + }) + + t.Run("successfully creates when directory does not exist", func(t *testing.T) { + dir := filepath.Join(t.TempDir(), "nested") + + _, err := initAndOpenRepoDirect(dir, 42, cfg) + assert.NoError(t, err) + checkNewRepoFiles(t, dir, 42) + }) + + t.Run("fails with error if directory is not writeable", func(t *testing.T) { + // make read only dir + dir := filepath.Join(t.TempDir(), "readonly") + err := os.Mkdir(dir, 0o444) + assert.NoError(t, err) + assert.False(t, ConfigExists(dir)) + + _, err = initAndOpenRepoDirect(dir, 42, cfg) + assert.Contains(t, err.Error(), "permission") + }) + + t.Run("fails with error if directory not empty", func(t *testing.T) { + dir := t.TempDir() + + err := os.WriteFile(filepath.Join(dir, "hi"), []byte("hello"), 0o644) + assert.NoError(t, err) + + _, err = initAndOpenRepoDirect(dir, 42, cfg) + assert.Contains(t, err.Error(), "empty") + }) +} + +func TestFSRepoOpen(t *testing.T) { + tf.UnitTest(t) + + t.Run("[fail] repo version newer than binary", func(t *testing.T) { + repoPath := path.Join(t.TempDir(), "repo") + + // stm: @REPO_FSREPO_INIT_001 + assert.NoError(t, InitFSRepo(repoPath, 1, config.NewDefaultConfig())) + // set wrong version + // stm:@REPO_FSREPO_READ_VERSION_001 + assert.NoError(t, WriteVersion(repoPath, 99)) + + _, err := OpenFSRepo(repoPath, 1) + expected := fmt.Sprintf("binary needs update to handle repo version, got 99 expected %d. 
Update binary to latest release", LatestVersion) + assert.EqualError(t, err, expected) + }) + + t.Run("[fail] version corrupt", func(t *testing.T) { + repoPath := path.Join(t.TempDir(), "repo") + + assert.NoError(t, InitFSRepo(repoPath, 1, config.NewDefaultConfig())) + // set wrong version + assert.NoError(t, os.WriteFile(filepath.Join(repoPath, versionFilename), []byte("v.8"), 0o644)) + + _, err := OpenFSRepo(repoPath, 1) + assert.EqualError(t, err, "failed to read version: strconv.ParseUint: parsing \"v.8\": invalid syntax") + }) +} + +func TestFSRepoRoundtrip(t *testing.T) { + tf.UnitTest(t) + + cfg := config.NewDefaultConfig() + cfg.API.APIAddress = "foo" // testing that what we get back isnt just the default + + repoPath := path.Join(t.TempDir(), "repo") + assert.NoError(t, InitFSRepo(repoPath, 42, cfg)) + + r, err := OpenFSRepo(repoPath, 42) + assert.NoError(t, err) + + assert.Equal(t, cfg, r.Config()) + assert.NoError(t, r.ChainDatastore().Put(context.Background(), ds.NewKey("beep"), []byte("boop"))) + assert.NoError(t, r.Close()) + + r2, err := OpenFSRepo(repoPath, 42) + assert.NoError(t, err) + + val, err := r2.ChainDatastore().Get(context.Background(), ds.NewKey("beep")) + assert.NoError(t, err) + assert.Equal(t, []byte("boop"), val) + + assert.NoError(t, r2.Close()) +} + +func TestFSRepoReplaceAndSnapshotConfig(t *testing.T) { + tf.UnitTest(t) + + repoPath := path.Join(t.TempDir(), "repo") + + cfg := config.NewDefaultConfig() + cfg.API.APIAddress = "foo" + assert.NoError(t, InitFSRepo(repoPath, 42, cfg)) + + expSnpsht, err := os.ReadFile(filepath.Join(repoPath, configFilename)) + require.NoError(t, err) + + r1, err := OpenFSRepo(repoPath, 42) + assert.NoError(t, err) + + newCfg := config.NewDefaultConfig() + newCfg.API.APIAddress = "bar" + + // stm: @REPO_FSREPO_REPLACE_CONFIG_001, @REPO_FSREPO_SNAPSHOT_CONFIG_001 + assert.NoError(t, r1.ReplaceConfig(newCfg)) + assert.Equal(t, "bar", r1.Config().API.APIAddress) + // stm: REPO_FSREPO_CLOSE_001 + 
assert.NoError(t, r1.Close()) + + r2, err := OpenFSRepo(repoPath, 42) + assert.NoError(t, err) + assert.Equal(t, "bar", r2.Config().API.APIAddress) + assert.NoError(t, r2.Close()) + + // assert that a single snapshot was created when replacing the config + // get the snapshot file name + snpFiles := getSnapshotFilenames(t, filepath.Join(repoPath, snapshotStorePrefix)) + require.Equal(t, 1, len(snpFiles)) + + snpsht, err := os.ReadFile(filepath.Join(repoPath, snapshotStorePrefix, snpFiles[0])) + require.NoError(t, err) + assert.Equal(t, string(expSnpsht), string(snpsht)) +} + +func TestRepoLock(t *testing.T) { + tf.UnitTest(t) + + repoPath := path.Join(t.TempDir(), "repo") + + cfg := config.NewDefaultConfig() + assert.NoError(t, InitFSRepo(repoPath, 42, cfg)) + + // stm: @REPO_FSREPO_OPEN_REPO_001 + r, err := OpenFSRepo(repoPath, 42) + assert.NoError(t, err) + assert.FileExists(t, filepath.Join(repoPath, lockFile)) + + // stm: @REPO_FSREPO_EXISTS_001 + exist, err := Exists(repoPath) + assert.NoError(t, err) + assert.True(t, exist) + + _, err = OpenFSRepo(repoPath, 42) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to take repo lock") + assert.NoError(t, r.Close()) + + _, err = os.Lstat(filepath.Join(repoPath, lockFile)) + assert.True(t, os.IsNotExist(err)) +} + +func TestRepoLockFail(t *testing.T) { + tf.UnitTest(t) + + repoPath := path.Join(t.TempDir(), "repo") + + cfg := config.NewDefaultConfig() + assert.NoError(t, InitFSRepo(repoPath, 42, cfg)) + + // set invalid version, to make opening the repo fail + assert.NoError(t, + os.WriteFile(filepath.Join(repoPath, versionFilename), []byte("hello"), 0o644), + ) + + _, err := OpenFSRepo(repoPath, 42) + assert.Error(t, err) + + _, err = os.Lstat(filepath.Join(repoPath, lockFile)) + assert.True(t, os.IsNotExist(err)) +} + +func TestRepoAPIFile(t *testing.T) { + tf.UnitTest(t) + + t.Run("APIAddr returns last value written to API file", func(t *testing.T) { + withFSRepo(t, func(r *FSRepo) { + // stm: 
@REPO_FSREPO_SET_API_ADDRESS + mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") + + rpcAPI := mustGetAPIAddr(t, r) + assert.Equal(t, "/ip4/127.0.0.1/tcp/1234", rpcAPI) + + mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/4567") + + rpcAPI = mustGetAPIAddr(t, r) + assert.Equal(t, "/ip4/127.0.0.1/tcp/4567", rpcAPI) + }) + }) + + t.Run("SetAPIAddr is idempotent", func(t *testing.T) { + withFSRepo(t, func(r *FSRepo) { + mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") + + rpcAPI := mustGetAPIAddr(t, r) + assert.Equal(t, "/ip4/127.0.0.1/tcp/1234", rpcAPI) + + mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") + + rpcAPI = mustGetAPIAddr(t, r) + assert.Equal(t, "/ip4/127.0.0.1/tcp/1234", rpcAPI) + }) + }) + + t.Run("APIAddr fails if called before SetAPIAddr", func(t *testing.T) { + withFSRepo(t, func(r *FSRepo) { + addr, err := r.APIAddr() + assert.Error(t, err) + assert.Equal(t, "", addr) + }) + }) + + t.Run("Close deletes API file", func(t *testing.T) { + withFSRepo(t, func(r *FSRepo) { + mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") + + info, err := os.Stat(filepath.Join(r.path, apiFile)) + assert.NoError(t, err) + assert.Equal(t, apiFile, info.Name()) + + require.NoError(t, r.Close()) + + _, err = os.Stat(filepath.Join(r.path, apiFile)) + assert.Error(t, err) + }) + }) + + t.Run("Close will succeed in spite of missing API file", func(t *testing.T) { + withFSRepo(t, func(r *FSRepo) { + mustSetAPIAddr(t, r, "/ip4/127.0.0.1/tcp/1234") + + err := os.Remove(filepath.Join(r.path, apiFile)) + assert.NoError(t, err) + + assert.NoError(t, r.Close()) + }) + }) + + t.Run("SetAPI fails if unable to create API file", func(t *testing.T) { + withFSRepo(t, func(r *FSRepo) { + // create a file with permission bits that prevent us from truncating + err := os.WriteFile(filepath.Join(r.path, apiFile), []byte("/ip4/127.0.0.1/tcp/9999"), 0o000) + assert.NoError(t, err) + + // try to os.Create to same path - will see a failure + err = r.SetAPIAddr("/ip4/127.0.0.1/tcp/1234") + assert.Error(t, err) + 
}) + }) +} + +func checkNewRepoFiles(t *testing.T, path string, version uint) { + content, err := os.ReadFile(filepath.Join(path, configFilename)) + assert.NoError(t, err) + + t.Log("snapshot path was created during FSRepo Init") + exists, err := fileExists(filepath.Join(path, snapshotStorePrefix)) + assert.NoError(t, err) + assert.True(t, exists) + + // Asserting the exact content here is gonna get old real quick + t.Log("config file matches expected value") + config.SanityCheck(t, string(content)) + + actualVersion, err := os.ReadFile(filepath.Join(path, versionFilename)) + assert.NoError(t, err) + assert.Equal(t, strconv.FormatUint(uint64(version), 10), string(actualVersion)) +} + +func getSnapshotFilenames(t *testing.T, dir string) []string { + files, err := os.ReadDir(dir) + require.NoError(t, err) + + var snpFiles []string + for _, f := range files { + if strings.Contains(f.Name(), "snapshot") { + snpFiles = append(snpFiles, f.Name()) + } + } + return snpFiles +} + +func withFSRepo(t *testing.T, f func(*FSRepo)) { + dir := t.TempDir() + + cfg := config.NewDefaultConfig() + require.NoError(t, InitFSRepoDirect(dir, 42, cfg)) + + r, err := OpenFSRepo(dir, 42) + require.NoError(t, err) + + f(r) +} + +func mustGetAPIAddr(t *testing.T, r *FSRepo) string { + rpcAddr, err := r.APIAddr() + require.NoError(t, err) + + return rpcAddr +} + +func mustSetAPIAddr(t *testing.T, r *FSRepo, addr string) { + require.NoError(t, r.SetAPIAddr(addr)) +} + +func ConfigExists(dir string) bool { + _, err := os.Stat(filepath.Join(dir, "config.json")) + if os.IsNotExist(err) { + return false + } + return err == nil +} diff --git a/pkg/repo/mem.go b/pkg/repo/mem.go new file mode 100644 index 0000000000..8e41978683 --- /dev/null +++ b/pkg/repo/mem.go @@ -0,0 +1,157 @@ +package repo + +import ( + "errors" + "sync" + + "github.com/filecoin-project/venus/pkg/repo/fskeystore" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + + "github.com/ipfs/go-datastore" + dss 
"github.com/ipfs/go-datastore/sync" + + "github.com/filecoin-project/venus/app/paths" + "github.com/filecoin-project/venus/pkg/config" +) + +// MemRepo is an in-memory implementation of the repo interface. +type MemRepo struct { + // lk guards the config + lk sync.RWMutex + C *config.Config + D blockstoreutil.Blockstore + Ks fskeystore.Keystore + W Datastore + Chain Datastore + Meta Datastore + Paych Datastore + // Market Datastore + version uint + apiAddress string + token []byte +} + +var _ Repo = (*MemRepo)(nil) + +// NewInMemoryRepo makes a new instance of MemRepo +func NewInMemoryRepo() *MemRepo { + defConfig := config.NewDefaultConfig() + // Reduce the time it takes to encrypt wallet password, default ScryptN is 1 << 21 + // for test + defConfig.Wallet.PassphraseConfig = config.TestPassphraseConfig() + return &MemRepo{ + C: defConfig, + D: blockstoreutil.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())), + Ks: fskeystore.MutexWrap(fskeystore.NewMemKeystore()), + W: dss.MutexWrap(datastore.NewMapDatastore()), + Chain: dss.MutexWrap(datastore.NewMapDatastore()), + Meta: dss.MutexWrap(datastore.NewMapDatastore()), + Paych: dss.MutexWrap(datastore.NewMapDatastore()), + // Market: dss.MutexWrap(datastore.NewMapDatastore()), + version: LatestVersion, + } +} + +// configModule returns the configuration object. +func (mr *MemRepo) Config() *config.Config { + mr.lk.RLock() + defer mr.lk.RUnlock() + + return mr.C +} + +// ReplaceConfig replaces the current config with the newly passed in one. +func (mr *MemRepo) ReplaceConfig(cfg *config.Config) error { + mr.lk.Lock() + defer mr.lk.Unlock() + + mr.C = cfg + + return nil +} + +// Datastore returns the datastore. +func (mr *MemRepo) Datastore() blockstoreutil.Blockstore { + return mr.D +} + +// Keystore returns the keystore. +func (mr *MemRepo) Keystore() fskeystore.Keystore { + return mr.Ks +} + +// WalletDatastore returns the wallet datastore. 
+func (mr *MemRepo) WalletDatastore() Datastore { + return mr.W +} + +// ChainDatastore returns the chain datastore. +func (mr *MemRepo) ChainDatastore() Datastore { + return mr.Chain +} + +// ChainDatastore returns the chain datastore. +func (mr *MemRepo) PaychDatastore() Datastore { + return mr.Paych +} + +/*// ChainDatastore returns the chain datastore. +func (mr *MemRepo) MarketDatastore() Datastore { + return mr.Market +} +*/ +// ChainDatastore returns the chain datastore. +func (mr *MemRepo) MetaDatastore() Datastore { + return mr.Meta +} + +// Version returns the version of the repo. +func (mr *MemRepo) Version() uint { + return mr.version +} + +// Close deletes the temporary directories which hold staged piece data and +// sealed sectors. +func (mr *MemRepo) Close() error { + return nil +} + +// SetAPIAddr writes the address of the running API to memory. +func (mr *MemRepo) SetAPIAddr(addr string) error { + mr.apiAddress = addr + return nil +} + +// APIAddr reads the address of the running API from memory. +func (mr *MemRepo) APIAddr() (string, error) { + return mr.apiAddress, nil +} + +func (mr *MemRepo) SetAPIToken(token []byte) error { + if len(mr.token) == 0 { + mr.token = token + } + return nil +} + +func (mr *MemRepo) APIToken() (string, error) { + if len(mr.token) == 0 { + return "", errors.New("token not exists") + } + return string(mr.token), nil +} + +// Path returns the default path. +func (mr *MemRepo) Path() (string, error) { + return paths.GetRepoPath("") +} + +// JournalPath returns a string to satisfy the repo interface. 
+func (mr *MemRepo) JournalPath() string { + return "in_memory_filecoin_journal_path" +} + +// repo return the repo +func (mr *MemRepo) Repo() Repo { + return mr +} diff --git a/pkg/repo/repo.go b/pkg/repo/repo.go new file mode 100644 index 0000000000..6cd625fbff --- /dev/null +++ b/pkg/repo/repo.go @@ -0,0 +1,62 @@ +package repo + +import ( + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/repo/fskeystore" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/ipfs/go-datastore" +) + +// Datastore is the datastore interface provided by the repo +type Datastore datastore.Batching + +// repo is a representation of all persistent data in a filecoin node. +type Repo interface { + Config() *config.Config + // ReplaceConfig replaces the current config, with the newly passed in one. + ReplaceConfig(cfg *config.Config) error + + // Datastore is a general storage solution for things like blocks. + Datastore() blockstoreutil.Blockstore + + Keystore() fskeystore.Keystore + + // WalletDatastore is a specific storage solution, only used to store sensitive wallet information. + WalletDatastore() Datastore + + // ChainDatastore is a specific storage solution, only used to store already validated chain data. + ChainDatastore() Datastore + + // MetaDatastore is a specific storage solution, only used to store mpool data. + MetaDatastore() Datastore + + // MarketDatastore() Datastore + + PaychDatastore() Datastore + // SetJsonrpcAPIAddr sets the address of the running jsonrpc API. + SetAPIAddr(maddr string) error + + // APIAddr returns the address of the running API. + APIAddr() (string, error) + + // SetAPIToken set api token + SetAPIToken(token []byte) error + + // APIToken get api token + APIToken() (string, error) + + // Version returns the current repo version. + Version() uint + + // Path returns the repo path. + Path() (string, error) + + // JournalPath returns the journal path. 
+ JournalPath() string + + // Close shuts down the repo. + Close() error + + // repo return the repo + Repo() Repo +} diff --git a/pkg/repo/testing.go b/pkg/repo/testing.go new file mode 100644 index 0000000000..b8c7093427 --- /dev/null +++ b/pkg/repo/testing.go @@ -0,0 +1,31 @@ +package repo + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +// RequireMakeTempDir ensures that a temporary directory is created +func RequireMakeTempDir(t *testing.T, dirname string) string { + newdir, err := os.MkdirTemp("", dirname) + require.NoError(t, err) + return newdir +} + +// RequireOpenTempFile is a shortcut for opening a given temp file with a given +// suffix, then returning both a filename and a file pointer. +func RequireOpenTempFile(t *testing.T, suffix string) (*os.File, string) { + file, err := os.CreateTemp("", suffix) + require.NoError(t, err) + name := file.Name() + return file, name +} + +// RequireReadLink reads a symlink that is expected to resolve successfully. +func RequireReadLink(t *testing.T, path string) string { + target, err := os.Readlink(path) + require.NoError(t, err) + return target +} diff --git a/pkg/state/power_table_view.go b/pkg/state/power_table_view.go new file mode 100644 index 0000000000..07d73892db --- /dev/null +++ b/pkg/state/power_table_view.go @@ -0,0 +1,90 @@ +package state + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" +) + +// PowerStateView is a view of chain state for election computations, typically at some lookback from the +// immediate parent state. 
+// This type isn't doing much that the state view doesn't already do, consider removing it. +type PowerStateView interface { + ResolveToKeyAddr(ctx context.Context, maddr address.Address) (address.Address, error) + GetMinerWorkerRaw(ctx context.Context, maddr address.Address) (address.Address, error) + MinerInfo(ctx context.Context, maddr address.Address, nv network.Version) (*miner.MinerInfo, error) + MinerSectorInfo(ctx context.Context, maddr address.Address, sectorNum abi.SectorNumber) (*miner.SectorOnChainInfo, error) + PowerNetworkTotal(ctx context.Context) (*NetworkPower, error) + MinerClaimedPower(ctx context.Context, miner address.Address) (raw, qa abi.StoragePower, err error) + GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, maddr address.Address, rand abi.PoStRandomness) ([]builtin.ExtendedSectorInfo, error) +} + +type NetworkPower struct { + RawBytePower abi.StoragePower + QualityAdjustedPower abi.StoragePower + MinerCount int64 + MinPowerMinerCount int64 +} + +// FaultStateView is a view of chain state for adjustment of miner power claims based on changes since the +// power state's lookback (primarily, the miner ceasing to be registered). +type FaultStateView interface { + MinerExists(ctx context.Context, maddr address.Address) (bool, error) +} + +// An interface to the network power table for elections. +// Elections use the quality-adjusted power, rather than raw byte power. +type PowerTableView struct { + state PowerStateView + faultState FaultStateView +} + +func NewPowerTableView(state PowerStateView, faultState FaultStateView) PowerTableView { + return PowerTableView{ + state: state, + faultState: faultState, + } +} + +// Returns the network's total quality-adjusted power. 
+func (v PowerTableView) NetworkTotalPower(ctx context.Context) (abi.StoragePower, error) { + total, err := v.state.PowerNetworkTotal(ctx) + if err != nil { + return big.Zero(), err + } + return total.QualityAdjustedPower, nil +} + +// Returns a miner's claimed quality-adjusted power. +func (v PowerTableView) MinerClaimedPower(ctx context.Context, mAddr address.Address) (abi.StoragePower, error) { + _, qa, err := v.state.MinerClaimedPower(ctx, mAddr) + if err != nil { + return big.Zero(), err + } + // Only return claim if fault state still tracks miner + exists, err := v.faultState.MinerExists(ctx, mAddr) + if err != nil { + return big.Zero(), err + } + if !exists { // miner was slashed + return big.Zero(), nil + } + return qa, nil +} + +// WorkerAddr returns the worker address for a miner actor. +func (v PowerTableView) WorkerAddr(ctx context.Context, mAddr address.Address, nv network.Version) (address.Address, error) { + minerInfo, err := v.state.MinerInfo(ctx, mAddr, nv) + return minerInfo.Worker, err +} + +// SignerAddress returns the public key address associated with the given address. 
+func (v PowerTableView) SignerAddress(ctx context.Context, addr address.Address) (address.Address, error) { + return v.state.ResolveToKeyAddr(ctx, addr) +} diff --git a/pkg/state/power_table_view_test.go b/pkg/state/power_table_view_test.go new file mode 100644 index 0000000000..737b2d77c7 --- /dev/null +++ b/pkg/state/power_table_view_test.go @@ -0,0 +1,145 @@ +package state_test + +import ( + "context" + "testing" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/repo" + "github.com/filecoin-project/venus/pkg/state" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + gengen "github.com/filecoin-project/venus/tools/gengen/util" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func TestTotal(t *testing.T) { + // todo think a way to mock power directly + t.Skipf("skip it due to cant mock power directly ") + tf.UnitTest(t) + + ctx := context.Background() + numCommittedSectors := uint64(19) + numMiners := 3 + kis := testhelpers.MustGenerateBLSKeyInfo(numMiners, 0) + + cst, _, root := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis) + + table := state.NewPowerTableView(state.NewView(cst, root), state.NewView(cst, root)) + networkPower, err := table.NetworkTotalPower(ctx) + require.NoError(t, err) + + // TODO: test that the QA power is used when it differs from raw byte power after gengen computes it properly + // https://github.com/filecoin-project/venus/issues/4011 + expected := big.NewIntUnsigned(uint64(constants.DevSectorSize) * numCommittedSectors * uint64(numMiners)) + 
assert.True(t, expected.Equals(networkPower)) +} + +func TestMiner(t *testing.T) { + // todo think a way to mock power directly + t.Skipf("skip it due to cant mock power directly ") + tf.UnitTest(t) + + ctx := context.Background() + kis := testhelpers.MustGenerateBLSKeyInfo(1, 0) + + numCommittedSectors := uint64(10) + cst, addrs, root := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis) + addr := addrs[0] + + table := state.NewPowerTableView(state.NewView(cst, root), state.NewView(cst, root)) + actual, err := table.MinerClaimedPower(ctx, addr) + require.NoError(t, err) + + expected := abi.NewStoragePower(int64(uint64(constants.DevSectorSize) * numCommittedSectors)) + assert.True(t, expected.Equals(actual)) + assert.Equal(t, expected, actual) +} + +func TestNoPowerAfterSlash(t *testing.T) { + // todo think a way to mock power directly + t.Skipf("skip it due to cant mock power directly ") + tf.UnitTest(t) + // setup lookback state with 3 miners + ctx := context.Background() + numCommittedSectors := uint64(19) + numMiners := 3 + kis := testhelpers.MustGenerateBLSKeyInfo(numMiners, 0) + cstPower, addrsPower, rootPower := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis) + cstFaults, _, rootFaults := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis[0:2]) // drop the third key + table := state.NewPowerTableView(state.NewView(cstPower, rootPower), state.NewView(cstFaults, rootFaults)) + + // verify that faulted miner claim is 0 power + claim, err := table.MinerClaimedPower(ctx, addrsPower[2]) + require.NoError(t, err) + assert.Equal(t, abi.NewStoragePower(0), claim) +} + +func TestTotalPowerUnaffectedBySlash(t *testing.T) { + // todo think a way to mock power directly + t.Skipf("skip it due to cant mock power directly ") + tf.UnitTest(t) + ctx := context.Background() + numCommittedSectors := uint64(19) + numMiners := 3 + kis := testhelpers.MustGenerateBLSKeyInfo(numMiners, 0) + cstPower, _, rootPower := 
requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis) + cstFaults, _, rootFaults := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis[0:2]) // drop the third key + table := state.NewPowerTableView(state.NewView(cstPower, rootPower), state.NewView(cstFaults, rootFaults)) + + // verify that faulted miner claim is 0 power + total, err := table.NetworkTotalPower(ctx) + require.NoError(t, err) + expected := abi.NewStoragePower(int64(uint64(constants.DevSectorSize) * numCommittedSectors * uint64(numMiners))) + + assert.Equal(t, expected, total) +} + +// nolint +func requireMinerWithNumCommittedSectors(ctx context.Context, t *testing.T, numCommittedSectors uint64, ownerKeys []crypto.KeyInfo) (cbor.IpldStore, []address.Address, cid.Cid) { + // todo think a way to mock power directly + r := repo.NewInMemoryRepo() + bs := r.Datastore() + cst := cbor.NewCborStore(bs) + + numMiners := len(ownerKeys) + minerConfigs := make([]*gengen.CreateStorageMinerConfig, numMiners) + for i := 0; i < numMiners; i++ { + commCfgs, err := gengen.MakeCommitCfgs(int(numCommittedSectors)) + require.NoError(t, err) + minerConfigs[i] = &gengen.CreateStorageMinerConfig{ + Owner: i, + CommittedSectors: commCfgs, + SealProofType: constants.DevSealProofType, + MarketBalance: abi.NewTokenAmount(0), + } + } + + // set up genesis block containing some miners with non-zero power + genCfg := &gengen.GenesisCfg{} + require.NoError(t, gengen.MinerConfigs(minerConfigs)(genCfg)) + require.NoError(t, gengen.NetworkName("ptvtest")(genCfg)) + require.NoError(t, gengen.ImportKeys(ownerKeys, "1000000")(genCfg)) + + info, err := gengen.GenGen(ctx, genCfg, bs) + require.NoError(t, err) + + var genesis types.BlockHeader + require.NoError(t, cst.Get(ctx, info.GenesisCid, &genesis)) + retAddrs := make([]address.Address, numMiners) + for i := 0; i < numMiners; i++ { + retAddrs[i] = info.Miners[i].Address + } + return cst, retAddrs, genesis.ParentStateRoot +} diff --git 
a/pkg/state/signer.go b/pkg/state/signer.go new file mode 100644 index 0000000000..0dd1c32106 --- /dev/null +++ b/pkg/state/signer.go @@ -0,0 +1,65 @@ +package state + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// todo remove Account view a nd headsignerview +type AccountView interface { + ResolveToKeyAddr(ctx context.Context, address address.Address) (address.Address, error) +} + +type tipSignerView interface { + GetHead() *types.TipSet + ResolveToKeyAddr(ctx context.Context, ts *types.TipSet, address address.Address) (address.Address, error) +} + +// Signer looks up non-signing addresses before signing +type Signer struct { + wallet *wallet.Wallet + signerView AccountView +} + +// NewSigner creates a new signer +func NewSigner(signerView AccountView, wallet *wallet.Wallet) *Signer { + return &Signer{ + signerView: signerView, + wallet: wallet, + } +} + +// SignBytes creates a signature for the given data using either the given addr or its associated signing address +func (s *Signer) SignBytes(ctx context.Context, data []byte, addr address.Address) (*crypto.Signature, error) { + signingAddr, err := s.signerView.ResolveToKeyAddr(ctx, addr) + if err != nil { + return nil, err + } + return s.wallet.SignBytes(ctx, data, signingAddr) +} + +// HasAddress returns whether this signer can sign with the given address +func (s *Signer) HasAddress(ctx context.Context, addr address.Address) (bool, error) { + signingAddr, err := s.signerView.ResolveToKeyAddr(ctx, addr) + if err != nil { + return false, err + } + return s.wallet.HasAddress(ctx, signingAddr), nil +} + +type HeadSignView struct { + tipSignerView +} + +func NewHeadSignView(tipSignerView tipSignerView) *HeadSignView { + return &HeadSignView{tipSignerView: tipSignerView} +} + +func (headSignView *HeadSignView) ResolveToKeyAddr(ctx 
context.Context, addr address.Address) (address.Address, error) { + head := headSignView.GetHead() + return headSignView.tipSignerView.ResolveToKeyAddr(ctx, head, addr) // nil will use latest +} diff --git a/pkg/state/signer_test.go b/pkg/state/signer_test.go new file mode 100644 index 0000000000..57f3bda0fb --- /dev/null +++ b/pkg/state/signer_test.go @@ -0,0 +1,43 @@ +// stm: #unit +package state + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/wallet" + "github.com/ipfs/go-datastore" + "github.com/stretchr/testify/assert" +) + +type mockAccountView struct { +} + +func (mac *mockAccountView) ResolveToKeyAddr(_ context.Context, addr address.Address) (address.Address, error) { + return addr, nil +} + +func TestSigner(t *testing.T) { + ctx := context.Background() + ds := datastore.NewMapDatastore() + fs, err := wallet.NewDSBackend(ctx, ds, config.TestPassphraseConfig(), wallet.TestPassword) + assert.NoError(t, err) + wallet := wallet.New(fs) + walletAddr, err := wallet.NewAddress(ctx, address.SECP256K1) + if err != nil { + t.Fatal(err) + } + + signer := NewSigner(&mockAccountView{}, wallet) + // stm: @STATE_VIEW_SIGN_BYTES_001 + _, err = signer.SignBytes(ctx, []byte("to sign data"), walletAddr) + assert.NoError(t, err) + + // stm: @STATE_VIEW_HAS_ADDRESS_001 + has, err := signer.HasAddress(ctx, walletAddr) + assert.NoError(t, err) + assert.True(t, has) + +} diff --git a/pkg/state/sigval.go b/pkg/state/sigval.go new file mode 100644 index 0000000000..797fe6d553 --- /dev/null +++ b/pkg/state/sigval.go @@ -0,0 +1,66 @@ +package state + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// SignatureValidator resolves account actor addresses to their pubkey-style address for signature validation. 
+type SignatureValidator struct { + signerView AccountView +} + +func NewSignatureValidator(signerView AccountView) *SignatureValidator { + return &SignatureValidator{signerView: signerView} +} + +// ValidateSignature check the signature is valid or not +func (v *SignatureValidator) ValidateSignature(ctx context.Context, data []byte, signer address.Address, sig crypto.Signature) error { + signerAddress, err := v.signerView.ResolveToKeyAddr(ctx, signer) + if err != nil { + return errors.Wrapf(err, "failed to load signer address for %v", signer) + } + return crypto.Verify(&sig, signerAddress, data) +} + +// ValidateSignature check the signature of message is valid or not. first get the cid of message and than checkout signature of messager cid and address +func (v *SignatureValidator) ValidateMessageSignature(ctx context.Context, msg *types.SignedMessage) error { + mCid := msg.Message.Cid() + return v.ValidateSignature(ctx, mCid.Bytes(), msg.Message.From, msg.Signature) +} + +// ValidateBLSMessageAggregate validate bls aggregate message +func (v *SignatureValidator) ValidateBLSMessageAggregate(ctx context.Context, msgs []*types.Message, sig *crypto.Signature) error { + if sig == nil { + if len(msgs) > 0 { + return errors.New("Invalid empty BLS sig over messages") + } + return nil + } + + if len(msgs) == 0 { + return nil + } + + var pubKeys [][]byte + var encodedMsgCids [][]byte + for _, msg := range msgs { + signerAddress, err := v.signerView.ResolveToKeyAddr(ctx, msg.From) + if err != nil { + return errors.Wrapf(err, "failed to load signer address for %v", msg.From) + } + pubKeys = append(pubKeys, signerAddress.Payload()) + mCid := msg.Cid() + encodedMsgCids = append(encodedMsgCids, mCid.Bytes()) + } + + if crypto.VerifyAggregate(pubKeys, encodedMsgCids, sig.Data) != nil { + return errors.New("BLS signature invalid") + } + return nil +} diff --git a/pkg/state/sigval_test.go b/pkg/state/sigval_test.go new file mode 100644 index 0000000000..98635032c0 --- /dev/null 
+++ b/pkg/state/sigval_test.go @@ -0,0 +1,162 @@ +package state_test + +import ( + "bytes" + "context" + "fmt" + "testing" + + "github.com/filecoin-project/venus/pkg/testhelpers" + + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ "github.com/filecoin-project/venus/pkg/crypto/secp" +) + +type fakeStateView struct { + keys map[address.Address]address.Address +} + +func (f *fakeStateView) ResolveToKeyAddr(_ context.Context, a address.Address) (address.Address, error) { + if a.Protocol() == address.SECP256K1 || a.Protocol() == address.BLS { + return a, nil + } + resolved, ok := f.keys[a] + if !ok { + return address.Undef, fmt.Errorf("not found") + } + return resolved, nil +} + +func TestSignMessageOk(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + ms, kis := testhelpers.NewMockSignersAndKeyInfo(1) + keyAddr, err := kis[0].Address() + require.NoError(t, err) + + t.Run("no resolution", func(t *testing.T) { + v := state.NewSignatureValidator(&fakeStateView{}) // No resolution needed. + msg := testhelpers.NewMeteredMessage(keyAddr, keyAddr, 1, types.ZeroFIL, builtin.MethodSend, nil, types.FromFil(0), types.FromFil(0), 1) + smsg, err := testhelpers.NewSignedMessage(ctx, *msg, ms) + require.NoError(t, err) + assert.NoError(t, v.ValidateMessageSignature(ctx, smsg)) + }) + t.Run("resolution required", func(t *testing.T) { + idAddress := testhelpers.RequireIDAddress(t, 1) + // Use ID address in message but sign with corresponding key address. 
+ stateView := &fakeStateView{keys: map[address.Address]address.Address{ + idAddress: keyAddr, + }} + v := state.NewSignatureValidator(stateView) + msg := testhelpers.NewMeteredMessage(idAddress, idAddress, 1, types.ZeroFIL, builtin.MethodSend, nil, types.FromFil(0), types.FromFil(0), 1) + msgCid := msg.Cid() + sig, err := ms.SignBytes(ctx, msgCid.Bytes(), keyAddr) + require.NoError(t, err) + smsg := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + + assert.NoError(t, v.ValidateMessageSignature(ctx, smsg)) + }) +} + +// Signature is valid but signer does not match From address. +func TestBadFrom(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + signer, kis := testhelpers.NewMockSignersAndKeyInfo(2) + keyAddr, err := kis[0].Address() + require.NoError(t, err) + otherAddr, err := kis[1].Address() + require.NoError(t, err) + + t.Run("no resolution", func(t *testing.T) { + v := state.NewSignatureValidator(&fakeStateView{}) + + // Can't use NewSignedMessage constructor as it always signs with msg.From. + msg := testhelpers.NewMeteredMessage(keyAddr, keyAddr, 1, types.ZeroFIL, builtin.MethodSend, nil, types.FromFil(0), types.FromFil(0), 1) + buf := new(bytes.Buffer) + err = msg.MarshalCBOR(buf) + require.NoError(t, err) + sig, err := signer.SignBytes(ctx, buf.Bytes(), otherAddr) // sign with addr != msg.From + require.NoError(t, err) + smsg := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + assert.Error(t, v.ValidateMessageSignature(ctx, smsg)) + }) + t.Run("resolution required", func(t *testing.T) { + idAddress := testhelpers.RequireIDAddress(t, 1) + // Use ID address in message but sign with corresponding key address. + stateView := &fakeStateView{keys: map[address.Address]address.Address{ + idAddress: keyAddr, + }} + v := state.NewSignatureValidator(stateView) + + // Can't use NewSignedMessage constructor as it always signs with msg.From. 
+ msg := testhelpers.NewMeteredMessage(idAddress, idAddress, 1, types.ZeroFIL, builtin.MethodSend, nil, types.FromFil(0), types.FromFil(0), 1) + buf := new(bytes.Buffer) + err = msg.MarshalCBOR(buf) + require.NoError(t, err) + sig, err := signer.SignBytes(ctx, buf.Bytes(), otherAddr) // sign with addr != msg.From (resolved) + require.NoError(t, err) + smsg := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + assert.Error(t, v.ValidateMessageSignature(ctx, smsg)) + }) +} + +// Signature corrupted. +func TestSignedMessageBadSignature(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + signer, kis := testhelpers.NewMockSignersAndKeyInfo(1) + keyAddr, err := kis[0].Address() + require.NoError(t, err) + + v := state.NewSignatureValidator(&fakeStateView{}) // no resolution needed + msg := testhelpers.NewMeteredMessage(keyAddr, keyAddr, 1, types.ZeroFIL, builtin.MethodSend, nil, types.FromFil(0), types.FromFil(0), 1) + smsg, err := testhelpers.NewSignedMessage(ctx, *msg, signer) + require.NoError(t, err) + + assert.NoError(t, v.ValidateMessageSignature(ctx, smsg)) + smsg.Signature.Data[0] = smsg.Signature.Data[0] ^ 0xFF + assert.Error(t, v.ValidateMessageSignature(ctx, smsg)) +} + +// Message corrupted. 
+func TestSignedMessageCorrupted(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + + signer, kis := testhelpers.NewMockSignersAndKeyInfo(1) + keyAddr, err := kis[0].Address() + require.NoError(t, err) + + v := state.NewSignatureValidator(&fakeStateView{}) // no resolution needed + msg := testhelpers.NewMeteredMessage(keyAddr, keyAddr, 1, types.ZeroFIL, builtin.MethodSend, nil, types.FromFil(0), types.FromFil(0), 1) + smsg, err := testhelpers.NewSignedMessage(ctx, *msg, signer) + require.NoError(t, err) + + assert.NoError(t, v.ValidateMessageSignature(ctx, smsg)) + smsg.Message.Nonce = uint64(42) + assert.Error(t, v.ValidateMessageSignature(ctx, smsg)) +} diff --git a/pkg/state/testing.go b/pkg/state/testing.go new file mode 100644 index 0000000000..5eabe427f2 --- /dev/null +++ b/pkg/state/testing.go @@ -0,0 +1,151 @@ +package state + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" +) + +// FakeStateView is a fake state view. +type FakeStateView struct { + NetworkName string + Power *NetworkPower + Miners map[address.Address]*FakeMinerState +} + +// NewFakeStateView creates a new fake state view. 
+func NewFakeStateView(rawBytePower, qaPower abi.StoragePower, minerCount, minPowerMinerCount int64) *FakeStateView { + return &FakeStateView{ + Power: &NetworkPower{ + RawBytePower: rawBytePower, + QualityAdjustedPower: qaPower, + MinerCount: minerCount, + MinPowerMinerCount: minPowerMinerCount, + }, + Miners: make(map[address.Address]*FakeMinerState), + } +} + +// FakeMinerState is fake state for a single miner. +type FakeMinerState struct { + Owner address.Address + Worker address.Address + PeerID peer.ID + ProvingPeriodStart abi.ChainEpoch + ProvingPeriodEnd abi.ChainEpoch + PoStFailures int + Sectors []miner.SectorOnChainInfo + Deadlines []*bitfield.BitField + ClaimedRawPower abi.StoragePower + ClaimedQAPower abi.StoragePower + PledgeRequirement abi.TokenAmount + PledgeBalance abi.TokenAmount +} + +// FakeSectorInfo fakes a subset of sector onchain info +type FakeSectorInfo struct { + ID abi.SectorNumber + SealedCID cid.Cid +} + +func (v *FakeStateView) InitNetworkName(_ context.Context) (string, error) { + return v.NetworkName, nil +} + +// MinerSectorCount reports the number of sectors a miner has pledged +func (v *FakeStateView) MinerSectorCount(ctx context.Context, maddr address.Address) (uint64, error) { + m, ok := v.Miners[maddr] + if !ok { + return 0, errors.Errorf("no miner %s", maddr) + } + + return uint64(len(m.Sectors)), nil +} + +func (v *FakeStateView) MinerSectorInfo(_ context.Context, maddr address.Address, sectorNum abi.SectorNumber) (*miner.SectorOnChainInfo, error) { + m, ok := v.Miners[maddr] + if !ok { + return nil, errors.Errorf("no miner %s", maddr) + } + for _, s := range m.Sectors { + if s.SectorNumber == sectorNum { + return &s, nil + } + } + return nil, nil +} + +func (v *FakeStateView) MinerExists(_ context.Context, _ address.Address) (bool, error) { + return true, nil +} + +func (v *FakeStateView) MinerProvingPeriod(ctx context.Context, maddr address.Address) (start abi.ChainEpoch, end abi.ChainEpoch, failureCount int, err error) { 
+ m, ok := v.Miners[maddr] + if !ok { + return 0, 0, 0, errors.Errorf("no miner %s", maddr) + } + return m.ProvingPeriodStart, m.ProvingPeriodEnd, m.PoStFailures, nil +} + +func (v *FakeStateView) PowerNetworkTotal(_ context.Context) (*NetworkPower, error) { + return v.Power, nil +} + +func (v *FakeStateView) MinerClaimedPower(ctx context.Context, miner address.Address) (abi.StoragePower, abi.StoragePower, error) { + m, ok := v.Miners[miner] + if !ok { + return big.Zero(), big.Zero(), errors.Errorf("no miner %s", miner) + } + return m.ClaimedRawPower, m.ClaimedQAPower, nil +} + +func (v *FakeStateView) GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, maddr address.Address, rand abi.PoStRandomness) ([]builtin.ExtendedSectorInfo, error) { + _, ok := v.Miners[maddr] + if !ok { + return nil, errors.Errorf("no miner %s", maddr) + } + return []builtin.ExtendedSectorInfo{}, nil +} + +func (v *FakeStateView) MinerPledgeCollateral(_ context.Context, maddr address.Address) (locked abi.TokenAmount, total abi.TokenAmount, err error) { + m, ok := v.Miners[maddr] + if !ok { + return big.Zero(), big.Zero(), errors.Errorf("no miner %s", maddr) + } + return m.PledgeRequirement, m.PledgeBalance, nil +} + +func (v *FakeStateView) MinerInfo(ctx context.Context, maddr address.Address, nv network.Version) (*miner.MinerInfo, error) { + m, ok := v.Miners[maddr] + if !ok { + return nil, errors.Errorf("no miner %s", maddr) + } + return &miner.MinerInfo{ + Owner: m.Owner, + Worker: m.Worker, + PeerId: []byte(m.PeerID), + }, nil +} + +func (v *FakeStateView) GetMinerWorkerRaw(ctx context.Context, maddr address.Address) (address.Address, error) { + m, ok := v.Miners[maddr] + if !ok { + return address.Undef, errors.Errorf("no miner %s", maddr) + } + return m.Worker, nil +} + +func (v *FakeStateView) ResolveToKeyAddr(ctx context.Context, addr address.Address) (address.Address, error) { + return addr, nil +} diff --git a/pkg/state/tree/cbor_gen.go 
b/pkg/state/tree/cbor_gen.go new file mode 100644 index 0000000000..be3e247100 --- /dev/null +++ b/pkg/state/tree/cbor_gen.go @@ -0,0 +1,118 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package tree + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufStateRoot = []byte{131} + +func (t *StateRoot) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufStateRoot); err != nil { + return err + } + + // t.Version (tree.StateTreeVersion) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Version)); err != nil { + return err + } + + // t.Actors (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Actors); err != nil { + return xerrors.Errorf("failed to write cid field t.Actors: %w", err) + } + + // t.Info (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Info); err != nil { + return xerrors.Errorf("failed to write cid field t.Info: %w", err) + } + + return nil +} + +func (t *StateRoot) UnmarshalCBOR(r io.Reader) (err error) { + *t = StateRoot{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Version (tree.StateTreeVersion) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Version = StateTreeVersion(extra) + + } + // t.Actors (cid.Cid) (struct) + + { + + c, err := 
cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Actors: %w", err) + } + + t.Actors = c + + } + // t.Info (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Info: %w", err) + } + + t.Info = c + + } + return nil +} diff --git a/pkg/state/tree/snapshot.go b/pkg/state/tree/snapshot.go new file mode 100644 index 0000000000..de79f76d03 --- /dev/null +++ b/pkg/state/tree/snapshot.go @@ -0,0 +1,106 @@ +package tree + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type stateSnaps struct { + layers []*stateSnapLayer + lastMaybeNonEmptyResolveCache int +} + +type stateSnapLayer struct { + actors map[address.Address]streeOp + resolveCache map[address.Address]address.Address +} + +func newStateSnapLayer() *stateSnapLayer { + return &stateSnapLayer{ + actors: make(map[address.Address]streeOp), + resolveCache: make(map[address.Address]address.Address), + } +} + +type streeOp struct { + Act types.Actor + Delete bool +} + +func newStateSnaps() *stateSnaps { + ss := &stateSnaps{} + ss.addLayer() + return ss +} + +func (ss *stateSnaps) addLayer() { + ss.layers = append(ss.layers, newStateSnapLayer()) +} + +func (ss *stateSnaps) dropLayer() { + ss.layers[len(ss.layers)-1] = nil // allow it to be GCed + + ss.layers = ss.layers[:len(ss.layers)-1] + + if ss.lastMaybeNonEmptyResolveCache == len(ss.layers) { + ss.lastMaybeNonEmptyResolveCache = len(ss.layers) - 1 + } +} + +func (ss *stateSnaps) mergeLastLayer() { + last := ss.layers[len(ss.layers)-1] + nextLast := ss.layers[len(ss.layers)-2] + + for k, v := range last.actors { + nextLast.actors[k] = v + } + + for k, v := range last.resolveCache { + nextLast.resolveCache[k] = v + } + + ss.dropLayer() +} + +func (ss *stateSnaps) resolveAddress(addr address.Address) (address.Address, bool) { + for i := ss.lastMaybeNonEmptyResolveCache; i >= 0; i-- { + if 
len(ss.layers[i].resolveCache) == 0 { + if ss.lastMaybeNonEmptyResolveCache == i { + ss.lastMaybeNonEmptyResolveCache = i - 1 + } + continue + } + resa, ok := ss.layers[i].resolveCache[addr] + if ok { + return resa, true + } + } + return address.Undef, false +} + +func (ss *stateSnaps) cacheResolveAddress(addr, resa address.Address) { + ss.layers[len(ss.layers)-1].resolveCache[addr] = resa + ss.lastMaybeNonEmptyResolveCache = len(ss.layers) - 1 +} + +func (ss *stateSnaps) getActor(addr address.Address) (*types.Actor, error) { + for i := len(ss.layers) - 1; i >= 0; i-- { + act, ok := ss.layers[i].actors[addr] + if ok { + if act.Delete { + return nil, types.ErrActorNotFound + } + + return &act.Act, nil + } + } + return nil, nil +} + +func (ss *stateSnaps) setActor(addr address.Address, act *types.Actor) { + ss.layers[len(ss.layers)-1].actors[addr] = streeOp{Act: *act} +} + +func (ss *stateSnaps) deleteActor(addr address.Address) { + ss.layers[len(ss.layers)-1].actors[addr] = streeOp{Delete: true} +} diff --git a/pkg/state/tree/state.go b/pkg/state/tree/state.go new file mode 100644 index 0000000000..67134366e5 --- /dev/null +++ b/pkg/state/tree/state.go @@ -0,0 +1,597 @@ +package tree + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/trace" + + states0 "github.com/filecoin-project/specs-actors/actors/states" + states2 "github.com/filecoin-project/specs-actors/v2/actors/states" + states3 "github.com/filecoin-project/specs-actors/v3/actors/states" + states4 "github.com/filecoin-project/specs-actors/v4/actors/states" + states5 "github.com/filecoin-project/specs-actors/v5/actors/states" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + 
init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type StateTreeVersion uint64 //nolint + +type ActorKey = address.Address + +type Root = cid.Cid + +// Review: can we get rid of this? +type Tree interface { + GetActor(ctx context.Context, addr ActorKey) (*types.Actor, bool, error) + SetActor(ctx context.Context, addr ActorKey, act *types.Actor) error + DeleteActor(ctx context.Context, addr ActorKey) error + LookupID(addr ActorKey) (address.Address, error) + + Flush(ctx context.Context) (cid.Cid, error) + Snapshot(ctx context.Context) error + ClearSnapshot() + Revert() error + At(Root) error + RegisterNewAddress(addr ActorKey) (address.Address, error) + + MutateActor(addr ActorKey, f func(*types.Actor) error) error + ForEach(f func(ActorKey, *types.Actor) error) error + GetStore() cbor.IpldStore +} + +var stateLog = logging.Logger("vm.statetree") + +const ( + // StateTreeVersion0 corresponds to actors < v2. + StateTreeVersion0 StateTreeVersion = iota + // StateTreeVersion1 corresponds to actors v2 + StateTreeVersion1 + // StateTreeVersion2 corresponds to actors v3. + StateTreeVersion2 + // StateTreeVersion3 corresponds to actors v4. + StateTreeVersion3 + // StateTreeVersion4 corresponds to actors v5, v6. + StateTreeVersion4 +) + +type StateRoot struct { //nolint + // State tree version. + Version StateTreeVersion + // Actors tree. The structure depends on the state root version. + Actors cid.Cid + // Info. The structure depends on the state root version. + Info cid.Cid +} + +// TODO: version this. 
+type StateInfo0 struct{} //nolint + +var lengthBufStateInfo0 = []byte{128} + +func (t *StateInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufStateInfo0); err != nil { + return err + } + + return nil +} + +func (t *StateInfo0) UnmarshalCBOR(r io.Reader) error { + *t = StateInfo0{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 0 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + return nil +} + +// state stores actors state by their ID. +type State struct { + root adt.Map + version StateTreeVersion + info cid.Cid + Store cbor.IpldStore + lookupIDFun func(address.Address) (address.Address, error) + + snaps *stateSnaps +} + +// VersionForNetwork returns the state tree version for the given network +// version. +func VersionForNetwork(ver network.Version) (StateTreeVersion, error) { + switch ver { + case network.Version0, network.Version1, network.Version2, network.Version3: + return StateTreeVersion0, nil + case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9: + return StateTreeVersion1, nil + case network.Version10, network.Version11: + return StateTreeVersion2, nil + case network.Version12: + return StateTreeVersion3, nil + + /* inline-gen template + {{$lastNv := .latestNetworkVersion}} + case{{range .networkVersions}} {{if (ge . 13.)}} network.Version{{.}}{{if (lt . 
$lastNv)}},{{end}}{{end}}{{end}}: + /* inline-gen start */ + + case network.Version13, network.Version14, network.Version15, network.Version16, network.Version17: + /* inline-gen end */ + + return StateTreeVersion4, nil + default: + panic(fmt.Sprintf("unsupported network version %d", ver)) + } +} + +func NewState(cst cbor.IpldStore, ver StateTreeVersion) (*State, error) { + var info cid.Cid + switch ver { + case StateTreeVersion0: + // info is undefined + case StateTreeVersion1, StateTreeVersion2, StateTreeVersion3, StateTreeVersion4: + var err error + info, err = cst.Put(context.TODO(), new(StateInfo0)) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported state tree version: %d", ver) + } + + store := adt.WrapStore(context.TODO(), cst) + var hamt adt.Map + switch ver { + case StateTreeVersion0: + tree, err := states0.NewTree(store) + if err != nil { + return nil, fmt.Errorf("failed to create state tree: %v", err) + } + hamt = tree.Map + case StateTreeVersion1: + tree, err := states2.NewTree(store) + if err != nil { + return nil, fmt.Errorf("failed to create state tree: %v", err) + } + hamt = tree.Map + case StateTreeVersion2: + tree, err := states3.NewTree(store) + if err != nil { + return nil, fmt.Errorf("failed to create state tree: %v", err) + } + hamt = tree.Map + case StateTreeVersion3: + tree, err := states4.NewTree(store) + if err != nil { + return nil, fmt.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map + case StateTreeVersion4: + tree, err := states5.NewTree(store) + if err != nil { + return nil, fmt.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map + default: + return nil, fmt.Errorf("unsupported state tree version: %d", ver) + } + + s := &State{ + root: hamt, + info: info, + version: ver, + Store: cst, + snaps: newStateSnaps(), + } + s.lookupIDFun = s.lookupIDinternal + return s, nil +} + +func LoadState(ctx context.Context, cst cbor.IpldStore, c cid.Cid) (*State, error) { + var 
root StateRoot + // Try loading as a new-style state-tree (version/actors tuple). + if err := cst.Get(context.TODO(), c, &root); err != nil { + // We failed to decode as the new version, must be an old version. + root.Actors = c + root.Version = StateTreeVersion0 + } + + store := adt.WrapStore(context.TODO(), cst) + + var ( + hamt adt.Map + err error + ) + switch root.Version { + case StateTreeVersion0: + var tree *states0.Tree + tree, err = states0.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + case StateTreeVersion1: + var tree *states2.Tree + tree, err = states2.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + case StateTreeVersion2: + var tree *states3.Tree + tree, err = states3.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + case StateTreeVersion3: + var tree *states4.Tree + tree, err = states4.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + case StateTreeVersion4: + var tree *states5.Tree + tree, err = states5.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + default: + return nil, fmt.Errorf("unsupported state tree version: %d", root.Version) + } + if err != nil { + return nil, fmt.Errorf("failed to load state tree: %v %v", c, err) + } + + s := &State{ + root: hamt, + info: root.Info, + version: root.Version, + Store: cst, + snaps: newStateSnaps(), + } + s.lookupIDFun = s.lookupIDinternal + + return s, nil +} + +func (st *State) SetActor(ctx context.Context, addr ActorKey, act *types.Actor) error { + stateLog.Debugf("set actor addr:", addr.String(), " Balance:", act.Balance.String(), " Head:", act.Head, " Nonce:", act.Nonce) + iaddr, err := st.LookupID(addr) + if err != nil { + return fmt.Errorf("ID lookup failed: %v", err) + } + addr = iaddr + + st.snaps.setActor(addr, act) + return nil +} + +func (st *State) lookupIDinternal(addr address.Address) (address.Address, error) { + act, found, err := st.GetActor(context.Background(), 
init_.Address) + if !found || err != nil { + return address.Undef, fmt.Errorf("getting init actor: %v", err) + } + + ias, err := init_.Load(&AdtStore{st.Store}, act) + if err != nil { + return address.Undef, fmt.Errorf("loading init actor state: %v", err) + } + + a, found, err := ias.ResolveAddress(addr) + if err == nil && !found { + err = types.ErrActorNotFound + } + if err != nil { + return address.Undef, fmt.Errorf("resolve address %s: %w", addr, err) // ends with %w implements an Unwrap method + } + return a, err +} + +// LookupID gets the ID address of this actor's `addr` stored in the `InitActor`. +func (st *State) LookupID(addr ActorKey) (address.Address, error) { + if addr.Protocol() == address.ID { + return addr, nil + } + resa, ok := st.snaps.resolveAddress(addr) + if ok { + return resa, nil + } + a, err := st.lookupIDFun(addr) + if err != nil { + return a, err + } + st.snaps.cacheResolveAddress(addr, a) + + return a, nil +} + +// ToDo Return nil if it is actor not found[ErrActorNotFound],Because the basis for judgment is: err != nil ==> panic ??? +// GetActor returns the actor from any type of `addr` provided. 
+func (st *State) GetActor(ctx context.Context, addr ActorKey) (*types.Actor, bool, error) { + var err error + if addr.Protocol() != address.ID { + if addr, err = st.LookupID(addr); err != nil { + if errors.Is(err, types.ErrActorNotFound) { + return nil, false, nil + } + return nil, false, fmt.Errorf("address resolution: %v", err) + } + } + + snapAct, err := st.snaps.getActor(addr) + if err != nil { + if errors.Is(err, types.ErrActorNotFound) { + return nil, false, nil + } + return nil, false, err + } + + if snapAct != nil { + return snapAct, true, nil + } + + var act types.Actor + if found, err := st.root.Get(abi.AddrKey(addr), &act); err != nil { + return nil, false, fmt.Errorf("hamt find failed: %v", err) + } else if !found { + return nil, false, nil + } + + st.snaps.setActor(addr, &act) + + return &act, true, nil +} + +func (st *State) DeleteActor(ctx context.Context, addr ActorKey) error { + stateLog.Debugf("Delete Actor ", addr) + if addr == address.Undef { + return fmt.Errorf("DeleteActor called on undefined address") + } + + iaddr, err := st.LookupID(addr) + if err != nil { + if errors.Is(err, types.ErrActorNotFound) { + return fmt.Errorf("resolution lookup failed (%s): %v", addr, err) + } + return fmt.Errorf("address resolution: %v", err) + } + + addr = iaddr + + _, found, err := st.GetActor(ctx, addr) + if err != nil { + return err + } + if !found { + return fmt.Errorf("resolution lookup failed (%s): %v", addr, err) + } + + st.snaps.deleteActor(addr) + + return nil +} + +func (st *State) Flush(ctx context.Context) (cid.Cid, error) { + ctx, span := trace.StartSpan(ctx, "stateTree.Flush") //nolint:staticcheck + defer span.End() + if len(st.snaps.layers) != 1 { + return cid.Undef, fmt.Errorf("tried to flush state tree with snapshots on the stack") + } + + for addr, sto := range st.snaps.layers[0].actors { + if sto.Delete { + if err := st.root.Delete(abi.AddrKey(addr)); err != nil { + return cid.Undef, err + } + } else { + if err := 
st.root.Put(abi.AddrKey(addr), &sto.Act); err != nil { + return cid.Undef, err + } + } + } + + root, err := st.root.Root() + if err != nil { + return cid.Undef, fmt.Errorf("failed to flush state-tree hamt: %v", err) + } + // If we're version 0, return a raw tree. + if st.version == StateTreeVersion0 { + return root, nil + } + // Otherwise, return a versioned tree. + return st.Store.Put(ctx, &StateRoot{Version: st.version, Actors: root, Info: st.info}) +} + +func (st *State) Snapshot(ctx context.Context) error { + ctx, span := trace.StartSpan(ctx, "stateTree.SnapShot") //nolint:staticcheck + defer span.End() + + st.snaps.addLayer() + + return nil +} + +func (st *State) ClearSnapshot() { + st.snaps.mergeLastLayer() +} + +func (st *State) RegisterNewAddress(addr ActorKey) (address.Address, error) { + var out address.Address + err := st.MutateActor(init_.Address, func(initact *types.Actor) error { + ias, err := init_.Load(&AdtStore{st.Store}, initact) + if err != nil { + return err + } + + oaddr, err := ias.MapAddressToNewID(addr) + if err != nil { + return err + } + out = oaddr + + ncid, err := st.Store.Put(context.TODO(), ias) + if err != nil { + return err + } + + initact.Head = ncid + return nil + }) + if err != nil { + return address.Undef, err + } + + return out, nil +} + +type AdtStore struct{ cbor.IpldStore } + +func (a *AdtStore) Context() context.Context { + return context.TODO() +} + +var _ adt.Store = (*AdtStore)(nil) + +func (st *State) Revert() error { + st.snaps.dropLayer() + st.snaps.addLayer() + + return nil +} + +func (st *State) MutateActor(addr ActorKey, f func(*types.Actor) error) error { + act, found, err := st.GetActor(context.Background(), addr) + if !found || err != nil { + return err + } + + if err := f(act); err != nil { + return err + } + + return st.SetActor(context.Background(), addr, act) +} + +func (st *State) ForEach(f func(ActorKey, *types.Actor) error) error { + // Walk through layers, if any. 
+ seen := make(map[address.Address]struct{}) + for i := len(st.snaps.layers) - 1; i >= 0; i-- { + for addr, op := range st.snaps.layers[i].actors { + if _, ok := seen[addr]; ok { + continue + } + seen[addr] = struct{}{} + if op.Delete { + continue + } + act := op.Act // copy + if err := f(addr, &act); err != nil { + return err + } + } + } + + // Now walk through the saved actors. + var act types.Actor + return st.root.ForEach(&act, func(k string) error { + act := act // copy + addr, err := address.NewFromBytes([]byte(k)) + if err != nil { + return fmt.Errorf("invalid address (%x) found in state tree key: %w", []byte(k), err) + } + + // no need to record anything here, there are no duplicates in the actors HAMT + // iself. + if _, ok := seen[addr]; ok { + return nil + } + + return f(addr, &act) + }) +} + +// Version returns the version of the StateTree data structure in use. +func (st *State) Version() StateTreeVersion { + return st.version +} + +func (st *State) GetStore() cbor.IpldStore { + return st.Store +} + +func (st *State) At(root Root) error { + newState, err := LoadState(context.Background(), st.Store, root) + if err != nil { + return err + } + + *st = *newState + return nil +} + +func Diff(oldTree, newTree *State) (map[string]types.Actor, error) { + out := map[string]types.Actor{} + + var ( + ncval, ocval cbg.Deferred + buf = bytes.NewReader(nil) + ) + if err := newTree.root.ForEach(&ncval, func(k string) error { + var act types.Actor + + addr, err := address.NewFromBytes([]byte(k)) + if err != nil { + return fmt.Errorf("address in state tree was not valid: %v", err) + } + + found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval) + if err != nil { + return err + } + + if found && bytes.Equal(ocval.Raw, ncval.Raw) { + return nil // not changed + } + + buf.Reset(ncval.Raw) + err = act.UnmarshalCBOR(buf) + buf.Reset(nil) + + if err != nil { + return err + } + + out[addr.String()] = act + + return nil + }); err != nil { + return nil, err + } + return out, 
nil +} diff --git a/pkg/state/tree/state_test.go b/pkg/state/tree/state_test.go new file mode 100644 index 0000000000..fff3918414 --- /dev/null +++ b/pkg/state/tree/state_test.go @@ -0,0 +1,236 @@ +// stm: #unit +package tree + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/filecoin-project/venus/pkg/testhelpers" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/repo" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" +) + +func TestStatePutGet(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + + bs := repo.NewInMemoryRepo().Datastore() + cst := cbor.NewCborStore(bs) + tree, err := NewStateWithBuiltinActor(t, cst, StateTreeVersion1) + if err != nil { + t.Fatal(err) + } + + addrGetter := testhelpers.NewForTestGetter() + addr1 := addrGetter() + addr2 := addrGetter() + AddAccount(t, tree, cst, addr1) + AddAccount(t, tree, cst, addr2) + + UpdateAccount(t, tree, addr1, func(act1 *types.Actor) { + act1.IncrementSeqNum() + }) + + UpdateAccount(t, tree, addr2, func(act2 *types.Actor) { + act2.IncrementSeqNum() + act2.IncrementSeqNum() + }) + + // stm: @STATE_VIEW_GET_ACTOR_001 + act1out, found, err := tree.GetActor(ctx, addr1) + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, uint64(1), act1out.Nonce) + act2out, found, err := tree.GetActor(ctx, addr2) + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, uint64(2), act2out.Nonce) + + // now test it persists across recreation of tree + tcid, err := tree.Flush(ctx) + assert.NoError(t, err) + + tree2, err := LoadState(context.Background(), cst, tcid) + 
assert.NoError(t, err) + + act1out2, found, err := tree2.GetActor(ctx, addr1) + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, uint64(1), act1out2.Nonce) + act2out2, found, err := tree2.GetActor(ctx, addr2) + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, uint64(2), act2out2.Nonce) + + // stm: @STATE_VIEW_DELETE_ACTOR_001 + assert.NoError(t, tree2.DeleteActor(ctx, addr2)) + _, found, err = tree2.GetActor(ctx, addr2) + assert.NoError(t, err) + assert.False(t, found) +} + +func TestStateErrors(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + bs := repo.NewInMemoryRepo().Datastore() + cst := cbor.NewCborStore(bs) + tree, err := NewStateWithBuiltinActor(t, cst, StateTreeVersion1) + if err != nil { + t.Fatal(err) + } + + AddAccount(t, tree, cst, testhelpers.NewForTestGetter()()) + + c, err := constants.DefaultCidBuilder.Sum([]byte("cats")) + assert.NoError(t, err) + + // stm: @STATE_TREE_LOAD_STATE_001 + tr2, err := LoadState(ctx, cst, c) + assert.Error(t, err) + assert.Nil(t, tr2) +} + +func TestGetAllActors(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + bs := repo.NewInMemoryRepo().Datastore() + cst := cbor.NewCborStore(bs) + tree, err := NewStateWithBuiltinActor(t, cst, StateTreeVersion1) + if err != nil { + t.Fatal(err) + } + addr := testhelpers.NewForTestGetter()() + + newActor := types.Actor{Code: builtin2.AccountActorCodeID, Nonce: 1234, Balance: abi.NewTokenAmount(123)} + AddAccount(t, tree, cst, addr) + // stm: @STATE_VIEW_FLUSH_001 + _, err = tree.Flush(ctx) + require.NoError(t, err) + + // stm: @STATE_VIEW_FOR_EACH_001 + err = tree.ForEach(func(key ActorKey, result *types.Actor) error { + if addr != key { + return nil + } + assert.Equal(t, addr, key) + assert.Equal(t, newActor.Code, result.Code) + assert.Equal(t, newActor.Nonce, result.Nonce) + assert.Equal(t, newActor.Balance, result.Balance) + return nil + }) + if err != nil { + t.Error(err) + } +} + +func TestSnapshot(t *testing.T) { + 
tf.UnitTest(t) + + ctx := context.Background() + bs := repo.NewInMemoryRepo().Datastore() + cst := cbor.NewCborStore(bs) + tree, err := NewState(cst, StateTreeVersion1) + if err != nil { + t.Fatal(err) + } + + // stm: @STATE_VIEW_SNAPSHOT_001 + assert.NoError(t, tree.Snapshot(ctx)) + assert.Equal(t, len(tree.snaps.layers), 2) + + randomCid, err := cid.Decode("bafy2bzacecu7n7wbtogznrtuuvf73dsz7wasgyneqasksdblxupnyovmtwxxu") + if err != nil { + t.Fatal(err) + } + + addr, err := address.NewIDAddress(uint64(1007)) + assert.NoError(t, err) + + actor := &types.Actor{ + Code: randomCid, + Head: randomCid, + Balance: abi.NewTokenAmount(int64(10000)), + Nonce: 100, + } + + if err := tree.SetActor(ctx, addr, actor); err != nil { + t.Fatal(err) + } + + resActor, find, err := tree.GetActor(ctx, addr) + assert.NoError(t, err) + assert.True(t, find) + assert.Equal(t, actor, resActor) + + // stm: @STATE_VIEW_REVERT_001 + assert.NoError(t, tree.Revert()) + + resActor, find, err = tree.GetActor(ctx, addr) + assert.NoError(t, err) + assert.Nil(t, resActor) + assert.False(t, find) + + // stm: @STATE_VIEW_CLEAR_SNAPSHOT_001 + tree.ClearSnapshot() + assert.Equal(t, len(tree.snaps.layers), 1) +} + +func TestStateTreeConsistency(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + bs := repo.NewInMemoryRepo().Datastore() + cst := cbor.NewCborStore(bs) + // stm: @STATE_TREE_NEW_STATE_001 + tree, err := NewState(cst, StateTreeVersion1) + if err != nil { + t.Fatal(err) + } + + var addrs []address.Address + for i := 100; i < 150; i++ { + a, err := address.NewIDAddress(uint64(i)) + if err != nil { + t.Fatal(err) + } + + addrs = append(addrs, a) + } + + randomCid, err := cid.Decode("bafy2bzacecu7n7wbtogznrtuuvf73dsz7wasgyneqasksdblxupnyovmtwxxu") + if err != nil { + t.Fatal(err) + } + + for i, a := range addrs { + // stm: @STATE_VIEW_SET_ACTOR_001, @STATE_VIEW_LOOKUP_ID_001 + if err := tree.SetActor(ctx, a, &types.Actor{ + Code: randomCid, + Head: randomCid, + Balance: 
abi.NewTokenAmount(int64(10000 + i)), + Nonce: uint64(1000 - i), + }); err != nil { + t.Fatal(err) + } + } + + root, err := tree.Flush(ctx) + if err != nil { + t.Fatal(err) + } + if root.String() != "bafy2bzaceamis23jp44ofm4fh6jwc4gkxlzhnvxrdw4zsn3v2fj6at6pf2m4y" { + t.Fatalf("state state Mismatch. Expected: bafy2bzaceamis23jp44ofm4fh6jwc4gkxlzhnvxrdw4zsn3v2fj6at6pf2m4y Actual: %s", root.String()) + } +} diff --git a/pkg/state/tree/testing.go b/pkg/state/tree/testing.go new file mode 100644 index 0000000000..7d8667e2bb --- /dev/null +++ b/pkg/state/tree/testing.go @@ -0,0 +1,147 @@ +package tree + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/builtin/account" + init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/require" +) + +// NewFromString sets a state tree based on an int. +// +// TODO: we could avoid this if write a test cborStore that can map test cids to test states. 
+func NewFromString(t *testing.T, s string, store cbor.IpldStore) *State { + tree, err := NewStateWithBuiltinActor(t, store, StateTreeVersion0) + require.NoError(t, err) + + // create account + strAddr, err := address.NewSecp256k1Address([]byte(s)) + require.NoError(t, err) + + // add a account for t3 + AddAccount(t, tree, store, strAddr) + require.NoError(t, err) + return tree +} + +func NewStateWithBuiltinActor(t *testing.T, store cbor.IpldStore, ver StateTreeVersion) (*State, error) { + ctx := context.TODO() + tree, err := NewState(store, StateTreeVersion0) + require.NoError(t, err) + adtStore := &AdtStore{store} + + // create builtin init account + emptyMap := adt.MakeEmptyMap(adtStore) + emptyMapRoot, err := emptyMap.Root() + require.NoError(t, err) + initState := &init0.State{ + AddressMap: emptyMapRoot, + NextID: 20, + NetworkName: "test-net", + } + + initCodeID, err := store.Put(ctx, initState) + require.NoError(t, err) + initActor := &types.Actor{ + Code: builtin0.InitActorCodeID, + Head: initCodeID, + Nonce: 0, + Balance: abi.TokenAmount{}, + } + err = tree.SetActor(ctx, builtin0.InitActorAddr, initActor) + require.NoError(t, err) + return tree, nil +} + +func AddAccount(t *testing.T, tree *State, store cbor.IpldStore, addr address.Address) { + ctx := context.TODO() + adtStore := &AdtStore{store} + + initActor, _, err := tree.GetActor(ctx, builtin0.InitActorAddr) + require.NoError(t, err) + initState := &init0.State{} + err = adtStore.Get(ctx, initActor.Head, initState) + require.NoError(t, err) + // add a account for t3 + idAddr, err := initState.MapAddressToNewID(adtStore, addr) + require.NoError(t, err) + newInitStateId, err := store.Put(ctx, initState) //nolint + require.NoError(t, err) + initActor.Head = newInitStateId + err = tree.SetActor(ctx, builtin0.InitActorAddr, initActor) + require.NoError(t, err) + + emptyObject, err := store.Put(context.TODO(), []struct{}{}) + if err != nil { + panic(err) + } + accountActor := &types.Actor{ + Code: 
builtin0.AccountActorCodeID, + Head: emptyObject, + Nonce: 0, + Balance: abi.TokenAmount{}, + } + err = tree.SetActor(ctx, idAddr, accountActor) + require.NoError(t, err) + + // save t3 address + accountState := &account.State{Address: addr} + accountRoot, err := store.Put(context.TODO(), accountState) + if err != nil { + panic(err) + } + addrActor := &types.Actor{ + Code: builtin0.AccountActorCodeID, + Head: accountRoot, + Nonce: 0, + Balance: abi.TokenAmount{}, + } + err = tree.SetActor(context.Background(), addr, addrActor) + require.NoError(t, err) +} + +func UpdateAccount(t *testing.T, tree *State, addr address.Address, fn func(*types.Actor)) { + ctx := context.TODO() + actor, _, err := tree.GetActor(ctx, addr) + require.NoError(t, err) + fn(actor) + err = tree.SetActor(context.Background(), addr, actor) + require.NoError(t, err) +} + +// MustCommit flushes the state or panics if it can't. +func MustCommit(st State) cid.Cid { + cid, err := st.Flush(context.Background()) + if err != nil { + panic(err) + } + return cid +} + +// MustGetActor gets the actor or panics if it can't. +func MustGetActor(st State, a address.Address) (*types.Actor, bool) { + actor, found, err := st.GetActor(context.Background(), a) + if err != nil { + panic(err) + } + return actor, found +} + +// MustSetActor sets the actor or panics if it can't. 
+func MustSetActor(st State, address address.Address, actor *types.Actor) cid.Cid { + err := st.SetActor(context.Background(), address, actor) + if err != nil { + panic(err) + } + return MustCommit(st) +} diff --git a/pkg/state/view.go b/pkg/state/view.go new file mode 100644 index 0000000000..045d90b7d0 --- /dev/null +++ b/pkg/state/view.go @@ -0,0 +1,854 @@ +package state + +import ( + "context" + "fmt" + "strconv" + + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/pkg/errors" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + vmstate "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/account" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/datacap" + notinit "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + lminer "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + paychActor "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/power" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/reward" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/verifreg" +) + +// Viewer builds state views from state root CIDs. 
+// remove tipset argument in many function of viewer +type Viewer struct { + ipldStore cbor.IpldStore +} + +// NewViewer creates a new state +func NewViewer(store cbor.IpldStore) *Viewer { + return &Viewer{store} +} + +// StateView returns a new state view. +func (c *Viewer) StateView(root cid.Cid) *View { + return NewView(c.ipldStore, root) +} + +// View is a read-only interface to a snapshot of application-level actor state. +// This object interprets the actor state, abstracting the concrete on-chain structures so as to +// hide the complications of protocol versions. +// Exported methods on this type avoid exposing concrete state structures (which may be subject to versioning) +// where possible. +type View struct { + ipldStore cbor.IpldStore + root cid.Cid +} + +// NewView creates a new state view +func NewView(store cbor.IpldStore, root cid.Cid) *View { + return &View{ + ipldStore: store, + root: root, + } +} + +// InitNetworkName Returns the network name from the init actor state. +func (v *View) InitNetworkName(ctx context.Context) (string, error) { + initState, err := v.LoadInitState(ctx) + if err != nil { + return "", err + } + return initState.NetworkName() +} + +// InitResolveAddress Returns ID address if public key address is given. +func (v *View) InitResolveAddress(ctx context.Context, a addr.Address) (addr.Address, error) { + if a.Protocol() == addr.ID { + return a, nil + } + + initState, err := v.LoadInitState(ctx) + if err != nil { + return addr.Undef, err + } + rAddr, found, err := initState.ResolveAddress(a) + if err != nil { + return addr.Undef, err + } + + if !found { + return addr.Undef, fmt.Errorf("not found resolve address") + } + + return rAddr, nil +} + +// ResolveToKeyAddr returns the public key type of address (`BLS`/`SECP256K1`) of an account actor identified by `addr`. 
+func (v *View) GetMinerWorkerRaw(ctx context.Context, maddr addr.Address) (addr.Address, error) { + minerState, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return addr.Undef, err + } + + minerInfo, err := minerState.Info() + if err != nil { + return addr.Undef, err + } + return v.ResolveToKeyAddr(ctx, minerInfo.Worker) +} + +// MinerInfo returns info about the indicated miner +func (v *View) MinerInfo(ctx context.Context, maddr addr.Address, nv network.Version) (*lminer.MinerInfo, error) { + minerState, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return nil, err + } + + info, err := minerState.Info() + if err != nil { + return nil, err + } + + return &info, nil +} + +// Loads sector info from miner state. +func (v *View) MinerSectorInfo(ctx context.Context, maddr addr.Address, sectorNum abi.SectorNumber) (*types.SectorOnChainInfo, error) { + minerState, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return nil, err + } + + info, err := minerState.GetSector(sectorNum) + if err != nil { + return nil, err + } + + return info, nil +} + +// GetSectorsForWinningPoSt return sector of winning post challenge result +func (v *View) GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, maddr addr.Address, rand abi.PoStRandomness) ([]builtin.ExtendedSectorInfo, error) { + mas, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %s", err) + } + + var provingSectors bitfield.BitField + if nv < network.Version7 { + allSectors, err := lminer.AllPartSectors(mas, lminer.Partition.AllSectors) + if err != nil { + return nil, fmt.Errorf("get all sectors: %v", err) + } + + faultySectors, err := lminer.AllPartSectors(mas, lminer.Partition.FaultySectors) + if err != nil { + return nil, fmt.Errorf("get faulty sectors: %v", err) + } + + provingSectors, err = bitfield.SubtractBitField(allSectors, faultySectors) + if err != nil { + return nil, fmt.Errorf("calc 
proving sectors: %v", err) + } + } else { + provingSectors, err = lminer.AllPartSectors(mas, lminer.Partition.ActiveSectors) + if err != nil { + return nil, fmt.Errorf("get active sectors sectors: %v", err) + } + } + + numProvSect, err := provingSectors.Count() + if err != nil { + return nil, fmt.Errorf("failed to count bits: %s", err) + } + + // TODO(review): is this right? feels fishy to me + if numProvSect == 0 { + return nil, nil + } + + info, err := mas.Info() + if err != nil { + return nil, fmt.Errorf("getting miner info: %s", err) + } + + mid, err := addr.IDFromAddress(maddr) + if err != nil { + return nil, fmt.Errorf("getting miner ID: %s", err) + } + + proofType, err := lminer.WinningPoStProofTypeFromWindowPoStProofType(nv, info.WindowPoStProofType) + if err != nil { + return nil, fmt.Errorf("determining winning post proof type: %v", err) + } + + ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, proofType, abi.ActorID(mid), rand, numProvSect) + if err != nil { + return nil, fmt.Errorf("generating winning post challenges: %s", err) + } + + iter, err := provingSectors.BitIterator() + if err != nil { + return nil, fmt.Errorf("iterating over proving sectors: %s", err) + } + + // Select winning sectors by _index_ in the all-sectors bitfield. 
+ selectedSectors := bitfield.New() + prev := uint64(0) + for _, n := range ids { + sno, err := iter.Nth(n - prev) + if err != nil { + return nil, fmt.Errorf("iterating over proving sectors: %s", err) + } + selectedSectors.Set(sno) + prev = n + } + + sectors, err := mas.LoadSectors(&selectedSectors) + if err != nil { + return nil, fmt.Errorf("loading proving sectors: %s", err) + } + + out := make([]builtin.ExtendedSectorInfo, len(sectors)) + for i, sinfo := range sectors { + out[i] = builtin.ExtendedSectorInfo{ + SealProof: sinfo.SealProof, + SectorNumber: sinfo.SectorNumber, + SealedCID: sinfo.SealedCID, + SectorKey: sinfo.SectorKeyCID, + } + } + + return out, nil +} + +// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector +func (v *View) SectorPreCommitInfo(ctx context.Context, maddr addr.Address, sid abi.SectorNumber) (*types.SectorPreCommitOnChainInfo, error) { + mas, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return nil, fmt.Errorf("(get sset) failed to load miner actor: %v", err) + } + + return mas.GetPrecommittedSector(sid) +} + +// StateSectorPartition finds deadline/partition with the specified sector +func (v *View) StateSectorPartition(ctx context.Context, maddr addr.Address, sectorNumber abi.SectorNumber) (*lminer.SectorLocation, error) { + mas, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return nil, fmt.Errorf("(get sset) failed to load miner actor: %v", err) + } + + return mas.FindSector(sectorNumber) +} + +// MinerDeadlineInfo returns information relevant to the current proving deadline +func (v *View) MinerDeadlineInfo(ctx context.Context, maddr addr.Address, epoch abi.ChainEpoch) (index uint64, open, close, challenge abi.ChainEpoch, _ error) { + minerState, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return 0, 0, 0, 0, err + } + + deadlineInfo, err := minerState.DeadlineInfo(epoch) + if err != nil { + return 0, 0, 0, 0, err + } + + return deadlineInfo.Index, deadlineInfo.Open, 
deadlineInfo.Close, deadlineInfo.Challenge, nil +} + +// MinerExists Returns true iff the miner exists. +func (v *View) MinerExists(ctx context.Context, maddr addr.Address) (bool, error) { + _, err := v.LoadMinerState(ctx, maddr) + if err == nil { + return true, nil + } + if err == types.ErrActorNotFound { + return false, nil + } + return false, err +} + +// MinerGetPrecommittedSector Looks up info for a miners precommitted sector. +// NOTE: exposes on-chain structures directly for storage FSM API. +func (v *View) MinerGetPrecommittedSector(ctx context.Context, maddr addr.Address, sectorNum abi.SectorNumber) (*types.SectorPreCommitOnChainInfo, bool, error) { + minerState, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return nil, false, err + } + + info, err := minerState.GetPrecommittedSector(sectorNum) + if err != nil { + return nil, false, err + } + return info, true, nil +} + +// MarketEscrowBalance looks up a token amount in the escrow table for the given address +func (v *View) MarketEscrowBalance(ctx context.Context, addr addr.Address) (found bool, amount abi.TokenAmount, err error) { + marketState, err := v.LoadMarketState(ctx) + if err != nil { + return false, abi.NewTokenAmount(0), err + } + + state, err := marketState.EscrowTable() + if err != nil { + return false, abi.NewTokenAmount(0), err + } + + amount, err = state.Get(addr) + if err != nil { + return false, abi.NewTokenAmount(0), err + } + + return true, amount, nil +} + +// NOTE: exposes on-chain structures directly for storage FSM interface. 
+func (v *View) MarketDealProposal(ctx context.Context, dealID abi.DealID) (market.DealProposal, error) { + marketState, err := v.LoadMarketState(ctx) + if err != nil { + return market.DealProposal{}, err + } + + proposals, err := marketState.Proposals() + if err != nil { + return market.DealProposal{}, err + } + + // map deals to pieceInfo + proposal, bFound, err := proposals.Get(dealID) + if err != nil { + return market.DealProposal{}, err + } + + if !bFound { + return market.DealProposal{}, fmt.Errorf("deal %d not found", dealID) + } + return *proposal, nil +} + +// NOTE: exposes on-chain structures directly for storage FSM and market module interfaces. +func (v *View) MarketDealState(ctx context.Context, dealID abi.DealID) (*market.DealState, bool, error) { + marketState, err := v.LoadMarketState(ctx) + if err != nil { + return nil, false, err + } + + deals, err := marketState.States() + if err != nil { + return nil, false, err + } + + return deals.Get(dealID) +} + +// NOTE: exposes on-chain structures directly for market interface. +// The callback receives a pointer to a transient object; take a copy or drop the reference outside the callback. +func (v *View) MarketDealStatesForEach(ctx context.Context, f func(id abi.DealID, state *market.DealState) error) error { + marketState, err := v.LoadMarketState(ctx) + if err != nil { + return err + } + + deals, err := marketState.States() + if err != nil { + return err + } + + ff := func(id abi.DealID, ds market.DealState) error { + return f(id, &ds) + } + return deals.ForEach(ff) +} + +// StateVerifiedClientStatus returns the data cap for the given address. +// Returns nil if there is no entry in the data cap table for the +// address. 
+func (v *View) StateVerifiedClientStatus(ctx context.Context, addr addr.Address) (abi.StoragePower, error) { + act, err := v.loadActor(ctx, verifreg.Address) + if err != nil { + return abi.NewStoragePower(0), err + } + + state, err := verifreg.Load(adt.WrapStore(ctx, v.ipldStore), act) + if err != nil { + return abi.NewStoragePower(0), err + } + + found, storagePower, err := state.VerifiedClientDataCap(addr) + if err != nil { + return abi.NewStoragePower(0), err + } + + if !found { + return abi.NewStoragePower(0), errors.New("address not found") + } + + return storagePower, nil +} + +// StateMarketStorageDeal returns information about the indicated deal +func (v *View) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID) (*types.MarketDeal, error) { + state, err := v.LoadMarketState(ctx) + if err != nil { + return nil, err + } + + dealProposals, err := state.Proposals() + if err != nil { + return nil, err + } + + dealProposal, found, err := dealProposals.Get(dealID) + if err != nil { + return nil, err + } + + if !found { + return nil, errors.New("deal proposal not found") + } + + dealStates, err := state.States() + if err != nil { + return nil, err + } + + dealState, found, err := dealStates.Get(dealID) + if err != nil { + return nil, err + } + + if !found { + return nil, errors.New("deal state not found") + } + + return &types.MarketDeal{ + Proposal: *dealProposal, + State: *dealState, + }, nil +} + +// Returns the storage power actor's values for network total power. 
+func (v *View) PowerNetworkTotal(ctx context.Context) (*NetworkPower, error) { + st, err := v.LoadPowerActor(ctx) + if err != nil { + return nil, err + } + + tp, err := st.TotalPower() + if err != nil { + return nil, err + } + + minPowerMinerCount, minerCount, err := st.MinerCounts() + if err != nil { + return nil, err + } + + return &NetworkPower{ + RawBytePower: tp.RawBytePower, + QualityAdjustedPower: tp.QualityAdjPower, + MinerCount: int64(minerCount), + MinPowerMinerCount: int64(minPowerMinerCount), + }, nil +} + +// Returns the power of a miner's committed sectors. +func (v *View) MinerClaimedPower(ctx context.Context, miner addr.Address) (raw, qa abi.StoragePower, err error) { + st, err := v.LoadPowerActor(ctx) + if err != nil { + return big.Zero(), big.Zero(), err + } + + p, found, err := st.MinerPower(miner) + if err != nil { + return big.Zero(), big.Zero(), err + } + + if !found { + return big.Zero(), big.Zero(), errors.New("miner not found") + } + + return p.RawBytePower, p.QualityAdjPower, nil +} + +// MinerNominalPowerMeetsConsensusMinimum return whether miner meet consensus minmum power +func (v *View) MinerNominalPowerMeetsConsensusMinimum(ctx context.Context, addr addr.Address) (bool, error) { + st, err := v.LoadPowerActor(ctx) + if err != nil { + return false, err + } + + return st.MinerNominalPowerMeetsConsensusMinimum(addr) +} + +// PaychActorParties returns the From and To addresses for the given payment channel +func (v *View) PaychActorParties(ctx context.Context, paychAddr addr.Address) (from, to addr.Address, err error) { + a, err := v.loadActor(ctx, paychAddr) + if err != nil { + return addr.Undef, addr.Undef, err + } + + state, err := paychActor.Load(adt.WrapStore(ctx, v.ipldStore), a) + if err != nil { + return addr.Undef, addr.Undef, err + } + + from, err = state.From() + if err != nil { + return addr.Undef, addr.Undef, err + } + + to, err = state.To() + if err != nil { + return addr.Undef, addr.Undef, err + } + + return from, to, nil 
+} + +// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period +// and returns the deadline-related calculations. +func (v *View) StateMinerProvingDeadline(ctx context.Context, addr addr.Address, ts *types.TipSet) (*dline.Info, error) { + mas, err := v.LoadMinerState(ctx, addr) + if err != nil { + return nil, errors.WithMessage(err, "failed to get proving dealline") + } + + height := ts.Height() + di, err := mas.DeadlineInfo(height) + if err != nil { + return nil, fmt.Errorf("failed to get deadline info: %v", err) + } + + return di.NextNotElapsed(), nil +} + +// StateSectorExpiration returns epoch at which given sector will expire +func (v *View) StateSectorExpiration(ctx context.Context, maddr addr.Address, sectorNumber abi.SectorNumber, key types.TipSetKey) (*lminer.SectorExpiration, error) { + mas, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return nil, err + } + return mas.GetSectorExpiration(sectorNumber) +} + +// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent +func (v *View) StateMinerAvailableBalance(ctx context.Context, maddr addr.Address, ts *types.TipSet) (big.Int, error) { + resolvedAddr, err := v.InitResolveAddress(ctx, maddr) + if err != nil { + return big.Int{}, err + } + actor, err := v.loadActor(ctx, resolvedAddr) + if err != nil { + return big.Int{}, err + } + + mas, err := lminer.Load(adt.WrapStore(context.TODO(), v.ipldStore), actor) + if err != nil { + return big.Int{}, fmt.Errorf("failed to load miner actor state: %v", err) + } + + height := ts.Height() + vested, err := mas.VestedFunds(height) + if err != nil { + return big.Int{}, err + } + + abal, err := mas.AvailableBalance(actor.Balance) + if err != nil { + return big.Int{}, err + } + + return big.Add(abal, vested), nil +} + +// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor +func (v *View) StateListMiners(ctx context.Context, tsk types.TipSetKey) 
([]addr.Address, error) { + powState, err := v.LoadPowerActor(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load power actor state: %v", err) + } + + return powState.ListAllMiners() +} + +// StateMinerPower returns the power of the indicated miner +func (v *View) StateMinerPower(ctx context.Context, maddr addr.Address, tsk types.TipSetKey) (power.Claim, power.Claim, bool, error) { + pas, err := v.LoadPowerActor(ctx) + if err != nil { + return power.Claim{}, power.Claim{}, false, fmt.Errorf("(get sset) failed to load power actor state: %v", err) + } + + tpow, err := pas.TotalPower() + if err != nil { + return power.Claim{}, power.Claim{}, false, err + } + + var mpow power.Claim + var minpow bool + if maddr != addr.Undef { + var found bool + mpow, found, err = pas.MinerPower(maddr) + if err != nil || !found { + // TODO: return an error when not found? + return power.Claim{}, tpow, false, err + } + + minpow, err = pas.MinerNominalPowerMeetsConsensusMinimum(maddr) + if err != nil { + return power.Claim{}, power.Claim{}, false, err + } + } + + return mpow, tpow, minpow, nil +} + +// StateMarketDeals returns information about every deal in the Storage Market +func (v *View) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]*types.MarketDeal, error) { + out := map[string]*types.MarketDeal{} + + state, err := v.LoadMarketState(ctx) + if err != nil { + return nil, err + } + + da, err := state.Proposals() + if err != nil { + return nil, err + } + + sa, err := state.States() + if err != nil { + return nil, err + } + + if err := da.ForEach(func(dealID abi.DealID, d market.DealProposal) error { + s, found, err := sa.Get(dealID) + if err != nil { + return fmt.Errorf("failed to get state for deal in proposals array: %v", err) + } else if !found { + s = market.EmptyDealState() + } + out[strconv.FormatInt(int64(dealID), 10)] = &types.MarketDeal{ + Proposal: d, + State: *s, + } + return nil + }); err != nil { + return nil, err + } + return out, nil 
+} + +// StateMinerActiveSectors returns info about sectors that a given miner is actively proving. +func (v *View) StateMinerActiveSectors(ctx context.Context, maddr addr.Address, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) { + mas, err := v.LoadMinerState(ctx, maddr) + if err != nil { + return nil, fmt.Errorf("failed to load miner actor state: %v", err) + } + activeSectors, err := lminer.AllPartSectors(mas, lminer.Partition.ActiveSectors) + if err != nil { + return nil, fmt.Errorf("merge partition active sets: %v", err) + } + return mas.LoadSectors(&activeSectors) +} + +// GetFilLocked return all locked fil amount +func (v *View) GetFilLocked(ctx context.Context, st vmstate.Tree) (abi.TokenAmount, error) { + filMarketLocked, err := getFilMarketLocked(ctx, v.ipldStore, st) + if err != nil { + return big.Zero(), fmt.Errorf("failed to get filMarketLocked: %v", err) + } + + powerState, err := v.LoadPowerActor(ctx) + if err != nil { + return big.Zero(), fmt.Errorf("failed to get filPowerLocked: %v", err) + } + + filPowerLocked, err := powerState.TotalLocked() + if err != nil { + return big.Zero(), fmt.Errorf("failed to get filPowerLocked: %v", err) + } + + return big.Add(filMarketLocked, filPowerLocked), nil +} + +// LoadActor load actor from tree +func (v *View) LoadActor(ctx context.Context, address addr.Address) (*types.Actor, error) { + return v.loadActor(ctx, address) +} + +// ResolveToKeyAddress is similar to `vm.ResolveToKeyAddr` but does not allow `Actor` type of addresses. +// Uses the `TipSet` `ts` to generate the VM state. 
+func (v *View) ResolveToKeyAddr(ctx context.Context, address addr.Address) (addr.Address, error) { + if address.Protocol() == addr.BLS || address.Protocol() == addr.SECP256K1 { + return address, nil + } + + act, err := v.LoadActor(context.TODO(), address) + if err != nil { + return addr.Undef, fmt.Errorf("failed to find actor: %s", address) + } + + aast, err := account.Load(adt.WrapStore(context.TODO(), v.ipldStore), act) + if err != nil { + return addr.Undef, fmt.Errorf("failed to get account actor state for %s: %v", address, err) + } + + return aast.PubkeyAddress() +} + +func (v *View) LoadInitState(ctx context.Context) (notinit.State, error) { + actr, err := v.loadActor(ctx, notinit.Address) + if err != nil { + return nil, err + } + + return notinit.Load(adt.WrapStore(ctx, v.ipldStore), actr) +} + +// LoadPaychState get pay channel state for actor +func (v *View) LoadPaychState(ctx context.Context, actor *types.Actor) (paychActor.State, error) { + return paychActor.Load(adt.WrapStore(context.TODO(), v.ipldStore), actor) +} + +// LoadMinerState return miner state +func (v *View) LoadMinerState(ctx context.Context, maddr addr.Address) (lminer.State, error) { + resolvedAddr, err := v.InitResolveAddress(ctx, maddr) + if err != nil { + return nil, err + } + actr, err := v.loadActor(ctx, resolvedAddr) + if err != nil { + return nil, err + } + + return lminer.Load(adt.WrapStore(context.TODO(), v.ipldStore), actr) +} + +func (v *View) LoadPowerActor(ctx context.Context) (power.State, error) { + actr, err := v.loadActor(ctx, power.Address) + if err != nil { + return nil, err + } + + return power.Load(adt.WrapStore(ctx, v.ipldStore), actr) +} + +func (v *View) LoadVerifregActor(ctx context.Context) (verifreg.State, error) { + actr, err := v.loadActor(ctx, verifreg.Address) + if err != nil { + return nil, err + } + + return verifreg.Load(adt.WrapStore(ctx, v.ipldStore), actr) +} + +// nolint +func (v *View) LoadRewardState(ctx context.Context) (reward.State, error) { + 
actr, err := v.loadActor(ctx, reward.Address) + if err != nil { + return nil, err + } + + return reward.Load(adt.WrapStore(ctx, v.ipldStore), actr) +} + +// nolint +func (v *View) LoadPowerState(ctx context.Context) (power.State, error) { + actr, err := v.loadActor(ctx, power.Address) + if err != nil { + return nil, err + } + + return power.Load(adt.WrapStore(ctx, v.ipldStore), actr) +} + +func (v *View) LoadMarketState(ctx context.Context) (market.State, error) { + actr, err := v.loadActor(ctx, market.Address) + if err != nil { + return nil, err + } + + return market.Load(adt.WrapStore(ctx, v.ipldStore), actr) +} + +func (v *View) LoadDatacapState(ctx context.Context) (datacap.State, error) { + actr, err := v.loadActor(ctx, datacap.Address) + if err != nil { + return nil, err + } + + return datacap.Load(adt.WrapStore(ctx, v.ipldStore), actr) +} + +// nolint +func (v *View) LoadAccountState(ctx context.Context, a addr.Address) (account.State, error) { + resolvedAddr, err := v.InitResolveAddress(ctx, a) + if err != nil { + return nil, err + } + actr, err := v.loadActor(ctx, resolvedAddr) + if err != nil { + return nil, err + } + + return account.Load(adt.WrapStore(context.TODO(), v.ipldStore), actr) +} + +// loadActor load actor of address in db +func (v *View) loadActor(ctx context.Context, address addr.Address) (*types.Actor, error) { + tree, err := vmstate.LoadState(ctx, v.ipldStore, v.root) + if err != nil { + return nil, err + } + actor, found, err := tree.GetActor(ctx, address) + if err != nil { + return nil, err + } + if !found { + return nil, errors.Wrapf(types.ErrActorNotFound, "address is :%s", address) + } + + return actor, err +} + +func getFilMarketLocked(ctx context.Context, ipldStore cbor.IpldStore, st vmstate.Tree) (abi.TokenAmount, error) { + mactor, found, err := st.GetActor(ctx, market.Address) + if !found || err != nil { + return big.Zero(), fmt.Errorf("failed to load market actor: %v", err) + } + + mst, err := market.Load(adt.WrapStore(ctx, 
ipldStore), mactor) + if err != nil { + return big.Zero(), fmt.Errorf("failed to load market state: %v", err) + } + + return mst.TotalLocked() +} + +// LookupID retrieves the ID address of the given address +func (v *View) LookupID(ctx context.Context, address addr.Address) (addr.Address, error) { + sTree, err := vmstate.LoadState(ctx, v.ipldStore, v.root) + if err != nil { + return addr.Address{}, err + } + + return sTree.LookupID(address) +} diff --git a/pkg/state/view_test.go b/pkg/state/view_test.go new file mode 100644 index 0000000000..2b14a3fe29 --- /dev/null +++ b/pkg/state/view_test.go @@ -0,0 +1,84 @@ +// stm: #unit +package state_test + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/testhelpers" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/stretchr/testify/assert" +) + +func setupTestMinerView(t *testing.T, numMiners int) (*state.View, map[address.Address]*crypto.KeyInfo) { + tf.UnitTest(t) + ctx := context.Background() + numCommittedSectors := uint64(19) + kis := testhelpers.MustGenerateBLSKeyInfo(numMiners, 0) + + kiMap := make(map[address.Address]*crypto.KeyInfo) + for _, k := range kis { + addr, err := k.Address() + assert.NoError(t, err) + kiMap[addr] = &k + } + + store, _, root := requireMinerWithNumCommittedSectors(ctx, t, numCommittedSectors, kis) + return state.NewView(store, root), kiMap +} + +func TestView(t *testing.T) { + numMiners := 2 + view, keyMap := setupTestMinerView(t, numMiners) + ctx := context.Background() + + miners, err := view.StateListMiners(ctx, types.EmptyTSK) + assert.NoError(t, err) + assert.Equal(t, len(miners), numMiners) + + for _, m := range miners { + 
// stm: @STATE_VIEW_MINER_EXISTS_001 + exist, err := view.MinerExists(ctx, m) + assert.NoError(t, err) + assert.True(t, exist) + + // stm: @STATE_VIEW_GET_MINER_INFO_001 + minerInfo, err := view.MinerInfo(ctx, m, network.Version17) + assert.NoError(t, err) + + ownerPkAddress, err := view.ResolveToKeyAddr(ctx, minerInfo.Owner) + assert.NoError(t, err) + _, find := keyMap[ownerPkAddress] + assert.True(t, find) + + // stm: @STATE_VIEW_GET_MINER_SECTOR_INFO_001 + sectorInfo, err := view.MinerSectorInfo(ctx, m, 0) + assert.NoError(t, err) + assert.Equal(t, sectorInfo.SectorNumber, abi.SectorNumber(0)) + + // stm: @STATE_VIEW_SECTOR_PRE_COMMIT_INFO_001 + _, err = view.SectorPreCommitInfo(ctx, m, 0) + assert.NoError(t, err) + + // stm: @STATE_VIEW_MINER_GET_PRECOMMITED_SECTOR + _, find, err = view.MinerGetPrecommittedSector(ctx, m, abi.SectorNumber(0)) + assert.NoError(t, err) + assert.True(t, find) + + // stm: @STATE_VIEW_STATE_SECTOR_PARTITION_001 + _, err = view.StateSectorPartition(ctx, m, 0) + assert.NoError(t, err) + + // stm: @STATE_VIEW_DEADLINE_INFO_001 + _, _, _, _, err = view.MinerDeadlineInfo(ctx, m, abi.ChainEpoch(1)) + assert.NoError(t, err) + } +} diff --git a/pkg/statemanger/call.go b/pkg/statemanger/call.go new file mode 100644 index 0000000000..8b9d1e81b3 --- /dev/null +++ b/pkg/statemanger/call.go @@ -0,0 +1,276 @@ +package statemanger + +import ( + "context" + "errors" + "fmt" + + "github.com/filecoin-project/venus/pkg/fvm" + + "github.com/filecoin-project/venus/pkg/consensus" + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + acrypto "github.com/filecoin-project/go-state-types/crypto" + 
"github.com/filecoin-project/venus/pkg/crypto" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "go.opencensus.io/trace" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm" +) + +// CallWithGas used to estimate message gaslimit, for each incoming message ,should execute after priorMsg in mpool +func (s *Stmgr) CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet) (*vm.Ret, error) { + var ( + err error + stateRoot cid.Cid + view *state.View + ) + + // Copy the message as we'll be modifying the nonce. + msgCopy := *msg + msg = &msgCopy + + if ts == nil { + ts = s.cs.GetHead() + // Search back till we find a height with no fork, or we reach the beginning. + // We need the _previous_ height to have no fork, because we'll + // run the fork logic in `sm.TipSetState`. We need the _current_ + // height to have no fork, because we'll run it inside this + // function before executing the given message. 
+ for ts.Height() > 0 { + pts, err := s.cs.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, fmt.Errorf("failed to find a non-forking epoch: %w", err) + } + if !s.fork.HasExpensiveForkBetween(pts.Height(), ts.Height()+1) { + break + } + + ts = pts + } + } else if ts.Height() > 0 { + pts, err := s.cs.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, fmt.Errorf("failed to find a non-forking epoch: %w", err) + } + if s.fork.HasExpensiveForkBetween(pts.Height(), ts.Height()+1) { + return nil, fork.ErrExpensiveFork + } + } + + // Since we're simulating a future message, pretend we're applying it in the "next" tipset + vmHeight := ts.Height() + 1 + + if stateRoot, view, err = s.StateView(ctx, ts); err != nil { + return nil, err + } + + // Technically, the tipset we're passing in here should be ts+1, but that may not exist + stateRoot, err = s.fork.HandleStateForks(ctx, stateRoot, ts.Height(), ts) + if err != nil { + return nil, fmt.Errorf("failed to handle fork: %w", err) + } + + buffStore := blockstoreutil.NewTieredBstore(s.cs.Blockstore(), blockstoreutil.NewTemporarySync()) + vmOption := vm.VmOption{ + CircSupplyCalculator: func(ctx context.Context, epoch abi.ChainEpoch, tree tree.Tree) (abi.TokenAmount, error) { + cs, err := s.cs.GetCirculatingSupplyDetailed(ctx, epoch, tree) + if err != nil { + return abi.TokenAmount{}, err + } + return cs.FilCirculating, nil + }, + LookbackStateGetter: vmcontext.LookbackStateGetterForTipset(ctx, s.cs, s.fork, ts), + NetworkVersion: s.fork.GetNetworkVersion(ctx, ts.Height()+1), + Rnd: consensus.NewHeadRandomness(s.rnd, ts.Key()), + BaseFee: ts.At(0).ParentBaseFee, + Epoch: vmHeight, + GasPriceSchedule: s.gasSchedule, + PRoot: stateRoot, + Bsstore: buffStore, + SysCallsImpl: s.syscallsImpl, + Fork: s.fork, + Tracing: true, + } + + vmi, err := fvm.NewVM(ctx, vmOption) + if err != nil { + return nil, err + } + + for i, m := range priorMsgs { + _, err := vmi.ApplyMessage(ctx, m) + if err != nil { + return nil, 
fmt.Errorf("applying prior message (%d): %v", i, err) + } + } + + // We flush to get the VM's view of the state tree after applying the above messages + // This is needed to get the correct nonce from the actor state to match the VM + stateRoot, err = vmi.Flush(ctx) + if err != nil { + return nil, fmt.Errorf("flushing vm: %w", err) + } + + stTree, err := tree.LoadState(ctx, cbor.NewCborStore(buffStore), stateRoot) + if err != nil { + return nil, fmt.Errorf("loading state tree: %w", err) + } + + fromActor, found, err := stTree.GetActor(ctx, msg.VMMessage().From) + if err != nil { + return nil, fmt.Errorf("get actor failed: %s", err) + } + if !found { + return nil, errors.New("actor not found") + } + msg.Nonce = fromActor.Nonce + + fromKey, err := view.ResolveToKeyAddr(ctx, msg.VMMessage().From) + if err != nil { + return nil, fmt.Errorf("could not resolve key: %v", err) + } + + var msgApply types.ChainMsg + switch fromKey.Protocol() { + case address.BLS: + msgApply = msg + case address.SECP256K1: + msgApply = &types.SignedMessage{ + Message: *msg, + Signature: acrypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: make([]byte, 65), + }, + } + } + + // If the fee cap is set to zero, make gas free. + if msg.GasFeeCap.NilOrZero() { + // Now estimate with a new VM with no base fee. + vmOption.BaseFee = big.Zero() + vmOption.PRoot = stateRoot + + vmi, err = fvm.NewVM(ctx, vmOption) + if err != nil { + return nil, fmt.Errorf("failed to set up estimation vm: %w", err) + } + } + + return vmi.ApplyMessage(ctx, msgApply) +} + +// Call used for api invoke to compute a msg base on specify tipset, if the tipset is null, use latest tipset in db +func (s *Stmgr) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*vm.Ret, error) { + ctx, span := trace.StartSpan(ctx, "statemanager.Call") + defer span.End() + + var pheight abi.ChainEpoch = -1 + + // If no tipset is provided, try to find one without a fork. 
+ var err error + if ts == nil { + ts = s.cs.GetHead() + + // Search back till we find a height with no fork, or we reach the beginning. + for ts.Height() > 0 { + pts, err := s.cs.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, fmt.Errorf("failed to find a non-forking epoch: %w", err) + } + if !s.fork.HasExpensiveFork(ctx, pts.Height()) { + pheight = pts.Height() + break + } + ts = pts + } + } else if ts.Height() > 0 { + pts, err := s.cs.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, fmt.Errorf("failed to load parent tipset: %w", err) + } + pheight = pts.Height() + if s.fork.HasExpensiveFork(ctx, pheight) { + return nil, fork.ErrExpensiveFork + } + } else { + // We can't get the parent tipset in this case. + pheight = ts.Height() - 1 + } + + // Since we're simulating a future message, pretend we're applying it in the "next" tipset + vmHeight := pheight + 1 + bstate := ts.At(0).ParentStateRoot + + // Run the (not expensive) migration. + bstate, err = s.fork.HandleStateForks(ctx, bstate, pheight, ts) + if err != nil { + return nil, fmt.Errorf("failed to handle fork: %v", err) + } + + vmOption := vm.VmOption{ + CircSupplyCalculator: func(ctx context.Context, epoch abi.ChainEpoch, tree tree.Tree) (abi.TokenAmount, error) { + dertail, err := s.cs.GetCirculatingSupplyDetailed(ctx, epoch, tree) + if err != nil { + return abi.TokenAmount{}, err + } + return dertail.FilCirculating, nil + }, + LookbackStateGetter: vmcontext.LookbackStateGetterForTipset(ctx, s.cs, s.fork, ts), + NetworkVersion: s.fork.GetNetworkVersion(ctx, pheight+1), + Rnd: consensus.NewHeadRandomness(s.rnd, ts.Key()), + BaseFee: types.NewInt(0), + Epoch: vmHeight, + GasPriceSchedule: s.gasSchedule, + Fork: s.fork, + PRoot: ts.At(0).ParentStateRoot, + Bsstore: s.cs.Blockstore(), + SysCallsImpl: s.syscallsImpl, + Tracing: true, + } + + v, err := fvm.NewVM(ctx, vmOption) + if err != nil { + return nil, err + } + + if msg.GasLimit == 0 { + msg.GasLimit = constants.BlockGasLimit + } + 
+ if msg.GasFeeCap == types.EmptyTokenAmount { + msg.GasFeeCap = abi.NewTokenAmount(0) + } + + if msg.GasPremium == types.EmptyTokenAmount { + msg.GasPremium = abi.NewTokenAmount(0) + } + + if msg.Value == types.EmptyTokenAmount { + msg.Value = abi.NewTokenAmount(0) + } + + st, err := tree.LoadState(ctx, cbor.NewCborStore(s.cs.Blockstore()), bstate) + if err != nil { + return nil, fmt.Errorf("loading state: %v", err) + } + + fromActor, found, err := st.GetActor(ctx, msg.From) + if err != nil || !found { + return nil, fmt.Errorf("call raw get actor: %s", err) + } + + msg.Nonce = fromActor.Nonce + + return v.ApplyImplicitMessage(ctx, msg) +} diff --git a/pkg/statemanger/state_manger.go b/pkg/statemanger/state_manger.go new file mode 100644 index 0000000000..de8b99bc5b --- /dev/null +++ b/pkg/statemanger/state_manger.go @@ -0,0 +1,453 @@ +package statemanger + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/consensus" + "github.com/filecoin-project/venus/pkg/fork" + appstate "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/market" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "go.opencensus.io/trace" +) + +// stateManagerAPI defines the methods needed from StateManager +// todo remove this code and add private interface in market and paychanel package +type IStateManager interface { + ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) 
(address.Address, error) + GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error) + Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*vm.Ret, error) + GetMarketState(ctx context.Context, ts *types.TipSet) (market.State, error) +} + +type stateComputeResult struct { + stateRoot, receipt cid.Cid +} + +var _ IStateManager = &Stmgr{} + +type Stmgr struct { + cs *chain.Store + cp consensus.StateTransformer + rnd consensus.ChainRandomness + + fork fork.IFork + gasSchedule *gas.PricesSchedule + syscallsImpl vm.SyscallsImpl + + // Compute StateRoot parallel safe + stCache map[types.TipSetKey]stateComputeResult + chsWorkingOn map[types.TipSetKey]chan struct{} + stLk sync.Mutex + + fStop chan struct{} + fStopLk sync.Mutex + + log *logging.ZapEventLogger +} + +func NewStateManger(cs *chain.Store, cp consensus.StateTransformer, + rnd consensus.ChainRandomness, fork fork.IFork, gasSchedule *gas.PricesSchedule, + syscallsImpl vm.SyscallsImpl, +) *Stmgr { + logName := "statemanager" + + defer func() { + _ = logging.SetLogLevel(logName, "info") + }() + + return &Stmgr{ + cs: cs, fork: fork, cp: cp, rnd: rnd, + gasSchedule: gasSchedule, + syscallsImpl: syscallsImpl, + log: logging.Logger(logName), + stCache: make(map[types.TipSetKey]stateComputeResult), + chsWorkingOn: make(map[types.TipSetKey]chan struct{}, 1), + } +} + +func (s *Stmgr) ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + switch addr.Protocol() { + case address.BLS, address.SECP256K1: + return addr, nil + case address.Actor: + return address.Undef, errors.New("cannot resolve actor address to key address") + default: + } + if ts == nil { + ts = s.cs.GetHead() + } + _, view, err := s.ParentStateView(ctx, ts) + if err != nil { + return address.Undef, err + } + return view.ResolveToKeyAddr(ctx, addr) +} + +func (s *Stmgr) GetPaychState(ctx context.Context, addr address.Address, ts 
*types.TipSet) (*types.Actor, paych.State, error) { + _, view, err := s.ParentStateView(ctx, ts) + if err != nil { + return nil, nil, err + } + act, err := view.LoadActor(ctx, addr) + if err != nil { + return nil, nil, err + } + actState, err := view.LoadPaychState(ctx, act) + if err != nil { + return nil, nil, err + } + return act, actState, nil +} + +func (s *Stmgr) GetMarketState(ctx context.Context, ts *types.TipSet) (market.State, error) { + _, view, err := s.ParentStateView(ctx, ts) + if err != nil { + return nil, err + } + actState, err := view.LoadMarketState(ctx) + if err != nil { + return nil, err + } + return actState, nil +} + +func (s *Stmgr) ParentStateTsk(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, *tree.State, error) { + ts, err := s.cs.GetTipSet(ctx, tsk) + if err != nil { + return nil, nil, fmt.Errorf("loading tipset %s: %w", tsk, err) + } + return s.ParentState(ctx, ts) +} + +func (s *Stmgr) ParentState(ctx context.Context, ts *types.TipSet) (*types.TipSet, *tree.State, error) { + if ts == nil { + ts = s.cs.GetHead() + } + parent, err := s.cs.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, nil, fmt.Errorf("find tipset(%s) parent failed:%w", + ts.Key().String(), err) + } + + if stateRoot, _, err := s.RunStateTransition(ctx, parent); err != nil { + return nil, nil, fmt.Errorf("runstateTransition failed:%w", err) + } else if !stateRoot.Equals(ts.At(0).ParentStateRoot) { + return nil, nil, fmt.Errorf("runstateTransition error, %w", consensus.ErrStateRootMismatch) + } + + state, err := tree.LoadState(ctx, s.cs.Store(ctx), ts.At(0).ParentStateRoot) + return parent, state, err +} + +func (s *Stmgr) TipsetStateTsk(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, *tree.State, error) { + ts, err := s.cs.GetTipSet(ctx, tsk) + if err != nil { + return nil, nil, fmt.Errorf("load tipset(%s) failed:%v", + tsk.String(), err) + } + stat, err := s.TipsetState(ctx, ts) + if err != nil { + return nil, nil, fmt.Errorf("load 
tipset(%s, %d) state failed:%v", + ts.String(), ts.Height(), err) + } + return ts, stat, nil +} + +func (s *Stmgr) TipsetState(ctx context.Context, ts *types.TipSet) (*tree.State, error) { + root, _, err := s.RunStateTransition(ctx, ts) + if err != nil { + return nil, err + } + return tree.LoadState(ctx, s.cs.Store(ctx), root) +} + +// deprecated: this implementation needs more considerations +func (s *Stmgr) Rollback(ctx context.Context, pts, cts *types.TipSet) error { + s.log.Infof("rollback chain head from(%d) to a valid tipset", pts.Height()) +redo: + s.stLk.Lock() + if err := s.cs.DeleteTipSetMetadata(ctx, pts); err != nil { + s.stLk.Unlock() + return err + } + if err := s.cs.SetHead(ctx, pts); err != nil { + s.stLk.Unlock() + return err + } + s.stLk.Unlock() + + if root, _, err := s.RunStateTransition(ctx, pts); err != nil { + return err + } else if !root.Equals(cts.At(0).ParentStateRoot) { + cts = pts + if pts, err = s.cs.GetTipSet(ctx, cts.Parents()); err != nil { + return err + } + goto redo + } + return nil +} + +func (s *Stmgr) RunStateTransition(ctx context.Context, ts *types.TipSet) (root cid.Cid, receipts cid.Cid, err error) { + if nil != s.stopFlag(false) { + return cid.Undef, cid.Undef, fmt.Errorf("state manager is stopping") + } + ctx, span := trace.StartSpan(ctx, "Exected.RunStateTransition") + defer span.End() + + key := ts.Key() + s.stLk.Lock() + + workingCh, exist := s.chsWorkingOn[key] + + if exist { + s.stLk.Unlock() + waitDur := time.Second * 10 + i := 0 + longTimeWait: + select { + case <-workingCh: + s.stLk.Lock() + case <-ctx.Done(): + return cid.Undef, cid.Undef, ctx.Err() + case <-time.After(waitDur): + i++ + s.log.Warnf("waiting runstatetransition(%d, %s) for %s", ts.Height(), ts.Key().String(), (waitDur * time.Duration(i)).String()) + goto longTimeWait + } + } + + if meta, _ := s.cs.GetTipsetMetadata(ctx, ts); meta != nil { + s.stLk.Unlock() + return meta.TipSetStateRoot, meta.TipSetReceipts, nil + } + + workingCh = make(chan 
struct{}) + s.chsWorkingOn[key] = workingCh + s.stLk.Unlock() + + defer func() { + s.stLk.Lock() + delete(s.chsWorkingOn, key) + if f := s.stopFlag(false); f != nil && len(s.chsWorkingOn) == 0 { + f <- struct{}{} + } + if err == nil { + err = s.cs.PutTipSetMetadata(ctx, &chain.TipSetMetadata{ + TipSetStateRoot: root, TipSet: ts, TipSetReceipts: receipts, + }) + } + s.stLk.Unlock() + close(workingCh) + }() + + if ts.Height() == 0 { + return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil + } + + if root, receipts, err = s.cp.RunStateTransition(ctx, ts); err != nil { + return cid.Undef, cid.Undef, err + } + + return root, receipts, nil +} + +// ctx context.Context, ts *types.TipSet, addr address.Address +func (s *Stmgr) GetActorAtTsk(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { + ts, err := s.cs.GetTipSet(ctx, tsk) + if err != nil { + return nil, err + } + return s.GetActorAt(ctx, addr, ts) +} + +func (s *Stmgr) GetActorAt(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, error) { + if addr.Empty() { + return nil, types.ErrActorNotFound + } + + _, state, err := s.ParentState(ctx, ts) + if err != nil { + return nil, err + } + + actor, find, err := state.GetActor(ctx, addr) + if err != nil { + return nil, err + } + + if !find { + return nil, types.ErrActorNotFound + } + return actor, nil +} + +// deprecated: in future use. 
+func (s *Stmgr) RunStateTransitionV2(ctx context.Context, ts *types.TipSet) (cid.Cid, cid.Cid, error) { + ctx, span := trace.StartSpan(ctx, "Exected.RunStateTransition") + defer span.End() + + var state stateComputeResult + var err error + key := ts.Key() + s.stLk.Lock() + + cmptCh, exist := s.chsWorkingOn[key] + + if exist { + s.stLk.Unlock() + select { + case <-cmptCh: + s.stLk.Lock() + case <-ctx.Done(): + return cid.Undef, cid.Undef, ctx.Err() + } + } + + if state, exist = s.stCache[key]; exist { + s.stLk.Unlock() + return state.stateRoot, state.receipt, nil + } + + if meta, _ := s.cs.GetTipsetMetadata(ctx, ts); meta != nil { + s.stLk.Unlock() + return meta.TipSetStateRoot, meta.TipSetReceipts, nil + } + + cmptCh = make(chan struct{}) + s.chsWorkingOn[key] = cmptCh + s.stLk.Unlock() + + defer func() { + s.stLk.Lock() + delete(s.chsWorkingOn, key) + if !state.stateRoot.Equals(cid.Undef) { + s.stCache[key] = state + } + s.stLk.Unlock() + close(cmptCh) + }() + + if ts.Height() == 0 { + return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil + } + + if state.stateRoot, state.receipt, err = s.cp.RunStateTransition(ctx, ts); err != nil { + return cid.Undef, cid.Undef, err + } else if err = s.cs.PutTipSetMetadata(ctx, &chain.TipSetMetadata{ + TipSet: ts, + TipSetStateRoot: state.stateRoot, + TipSetReceipts: state.receipt, + }); err != nil { + return cid.Undef, cid.Undef, err + } + + return state.stateRoot, state.receipt, nil +} + +func (s *Stmgr) ParentStateViewTsk(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, *appstate.View, error) { + ts, err := s.cs.GetTipSet(ctx, tsk) + if err != nil { + return nil, nil, err + } + return s.ParentStateView(ctx, ts) +} + +func (s *Stmgr) ParentStateView(ctx context.Context, ts *types.TipSet) (*types.TipSet, *appstate.View, error) { + if ts == nil { + ts = s.cs.GetHead() + } + parent, err := s.cs.GetTipSet(ctx, ts.Parents()) + if err != nil { + return nil, nil, err + } + + _, view, err := 
s.StateView(ctx, parent) + if err != nil { + return nil, nil, fmt.Errorf("StateView failed:%w", err) + } + return parent, view, nil +} + +func (s *Stmgr) StateViewTsk(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, cid.Cid, *appstate.View, error) { + ts, err := s.cs.GetTipSet(ctx, tsk) + if err != nil { + return nil, cid.Undef, nil, err + } + root, view, err := s.StateView(ctx, ts) + return ts, root, view, err +} + +func (s *Stmgr) StateView(ctx context.Context, ts *types.TipSet) (cid.Cid, *appstate.View, error) { + stateCid, _, err := s.RunStateTransition(ctx, ts) + if err != nil { + return cid.Undef, nil, err + } + + view, err := s.cs.StateView(ctx, ts) + if err != nil { + return cid.Undef, nil, err + } + return stateCid, view, nil +} + +func (s *Stmgr) GetNetworkVersion(ctx context.Context, h abi.ChainEpoch) network.Version { + return s.fork.GetNetworkVersion(ctx, h) +} + +func (s *Stmgr) FlushChainHead() (*types.TipSet, error) { + head := s.cs.GetHead() + _, _, err := s.RunStateTransition(context.TODO(), head) + return head, err +} + +func (s *Stmgr) Close(ctx context.Context) { + s.log.Info("waiting state manager stop...") + + if _, err := s.FlushChainHead(); err != nil { + s.log.Errorf("state manager flush chain head failed:%s", err.Error()) + } else { + s.log.Infof("state manager flush chain head successfully...") + } + + s.log.Info("waiting state manager stopping...") + f := s.stopFlag(true) + select { + case <-f: + s.log.Info("state manager stopped...") + case <-time.After(time.Minute): + s.log.Info("waiting state manager stop timeout...") + } +} + +func (s *Stmgr) stopFlag(setFlag bool) chan struct{} { + s.fStopLk.Lock() + defer s.fStopLk.Unlock() + + if s.fStop == nil && setFlag { + s.fStop = make(chan struct{}, 1) + + s.stLk.Lock() + if len(s.chsWorkingOn) == 0 { + s.fStop <- struct{}{} + } + s.stLk.Unlock() + } + + return s.fStop +} diff --git a/pkg/testhelpers/address.go b/pkg/testhelpers/address.go new file mode 100644 index 
0000000000..0045415733 --- /dev/null +++ b/pkg/testhelpers/address.go @@ -0,0 +1,31 @@ +package testhelpers + +import ( + "fmt" + "testing" + + "github.com/filecoin-project/go-address" +) + +func RequireIDAddress(t *testing.T, i int) address.Address { + a, err := address.NewIDAddress(uint64(i)) + if err != nil { + t.Fatalf("failed to make address: %v", err) + } + return a +} + +// NewForTestGetter returns a closure that returns an address unique to that invocation. +// The address is unique wrt the closure returned, not globally. +func NewForTestGetter() func() address.Address { + i := 0 + return func() address.Address { + s := fmt.Sprintf("address%d", i) + i++ + newAddr, err := address.NewSecp256k1Address([]byte(s)) + if err != nil { + panic(err) + } + return newAddr + } +} diff --git a/pkg/testhelpers/cid.go b/pkg/testhelpers/cid.go new file mode 100644 index 0000000000..42e71960cf --- /dev/null +++ b/pkg/testhelpers/cid.go @@ -0,0 +1,68 @@ +package testhelpers + +import ( + "context" + "testing" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/venus/pkg/constants" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// EmptyMessagesCID is the cid of an empty collection of messages. +var EmptyMessagesCID cid.Cid + +// EmptyReceiptsCID is the cid of an empty collection of receipts. 
+var EmptyReceiptsCID cid.Cid + +// EmptyTxMetaCID is the cid of a TxMeta wrapping empty cids +var EmptyTxMetaCID cid.Cid + +func init() { + tmpCst := cbor.NewCborStore(blockstoreutil.NewBlockstore(datastore.NewMapDatastore())) + emptyAmt := adt.MakeEmptyArray(adt.WrapStore(context.Background(), tmpCst)) + emptyAMTCid, err := emptyAmt.Root() + if err != nil { + panic("could not create CID for empty AMT") + } + + EmptyMessagesCID = emptyAMTCid + EmptyReceiptsCID = emptyAMTCid + EmptyTxMetaCID, err = tmpCst.Put(context.Background(), &types.MessageRoot{SecpkRoot: EmptyMessagesCID, BlsRoot: EmptyMessagesCID}) + if err != nil { + panic("could not create CID for empty TxMeta") + } +} + +// CidFromString generates Cid from string input +func CidFromString(t *testing.T, input string) cid.Cid { + c, err := constants.DefaultCidBuilder.Sum([]byte(input)) + require.NoError(t, err) + return c +} + +// HasCid allows two values with CIDs to be compared. +type HasCid interface { + Cid() cid.Cid +} + +// AssertHaveSameCid asserts that two values have identical CIDs. +func AssertHaveSameCid(t *testing.T, m HasCid, n HasCid) { + if !m.Cid().Equals(n.Cid()) { + assert.Fail(t, "CIDs don't match", "not equal %v %v", m.Cid(), n.Cid()) + } +} + +// AssertCidsEqual asserts that two CIDS are identical. +func AssertCidsEqual(t *testing.T, m cid.Cid, n cid.Cid) { + if !m.Equals(n) { + assert.Fail(t, "CIDs don't match", "not equal %v %v", m, n) + } +} diff --git a/pkg/testhelpers/consensus.go b/pkg/testhelpers/consensus.go new file mode 100644 index 0000000000..082de7d688 --- /dev/null +++ b/pkg/testhelpers/consensus.go @@ -0,0 +1,70 @@ +package testhelpers + +import ( + "context" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +// FakeBlockValidator passes everything as valid +type FakeBlockValidator struct{} + +// NewFakeBlockValidator createas a FakeBlockValidator that passes everything as valid. 
+func NewFakeBlockValidator() *FakeBlockValidator { + return &FakeBlockValidator{} +} + +// ValidateHeaderSemantic does nothing. +func (fbv *FakeBlockValidator) ValidateHeaderSemantic(ctx context.Context, child *types.BlockHeader, parents types.TipSet) error { + return nil +} + +// ValidateSyntax does nothing. +func (fbv *FakeBlockValidator) ValidateSyntax(ctx context.Context, blk *types.BlockHeader) error { + return nil +} + +// ValidateMessagesSyntax does nothing +func (fbv *FakeBlockValidator) ValidateMessagesSyntax(ctx context.Context, messages []*types.SignedMessage) error { + return nil +} + +// ValidateUnsignedMessagesSyntax does nothing +func (fbv *FakeBlockValidator) ValidateUnsignedMessagesSyntax(ctx context.Context, messages []*types.Message) error { + return nil +} + +// ValidateReceiptsSyntax does nothing +func (fbv *FakeBlockValidator) ValidateReceiptsSyntax(ctx context.Context, receipts []types.MessageReceipt) error { + return nil +} + +// StubBlockValidator is a mockable block validator. +type StubBlockValidator struct { + syntaxStubs map[cid.Cid]error +} + +// NewStubBlockValidator creates a StubBlockValidator that allows errors to configured +// for blocks passed to the Validate* methods. +func NewStubBlockValidator() *StubBlockValidator { + return &StubBlockValidator{ + syntaxStubs: make(map[cid.Cid]error), + } +} + +// ValidateSyntax return nil or error for stubbed block `blk`. +func (mbv *StubBlockValidator) ValidateBlockMsg(ctx context.Context, blk *types.BlockMsg) pubsub.ValidationResult { + if mbv.syntaxStubs[blk.Header.Cid()] == nil { + return pubsub.ValidationAccept + } + return pubsub.ValidationReject +} + +// StubSyntaxValidationForBlock stubs an error when the ValidateSyntax is called +// on the with the given block. 
+func (mbv *StubBlockValidator) StubSyntaxValidationForBlock(blk *types.BlockHeader, err error) { + mbv.syntaxStubs[blk.Cid()] = err +} diff --git a/pkg/testhelpers/core.go b/pkg/testhelpers/core.go new file mode 100644 index 0000000000..26c8cd312b --- /dev/null +++ b/pkg/testhelpers/core.go @@ -0,0 +1,98 @@ +package testhelpers + +import ( + "context" + "errors" + "math/rand" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + fbig "github.com/filecoin-project/go-state-types/big" + acrypto "github.com/filecoin-project/go-state-types/crypto" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +// RequireMakeStateTree takes a map of addresses to actors and stores them on +// the state tree, requiring that all its steps succeed. +//func RequireMakeStateTree(t *testing.T, cst cbor.IpldStore, acts map[address.Address]*types.Actor) (cid.Cid, *tree.State) { +// ctx := context.Background() +// tree, err := tree.NewState(cst, tree.StateTreeVersion0) +// if err != nil { +// t.Fatal(err) +// } +// +// for addr, act := range acts { +// err := tree.SetActor(ctx, addr, act) +// require.NoError(t, err) +// } +// +// c, err := tree.Flush(ctx) +// require.NoError(t, err) +// +// return c, tree +//} + +// RequireRandomPeerID returns a new libp2p peer ID or panics. 
+func RequireRandomPeerID(t *testing.T) peer.ID { + pid, err := RandPeerID() + require.NoError(t, err) + return pid +} + +// MockMessagePoolValidator is a mock validator +type MockMessagePoolValidator struct { + Valid bool +} + +// NewMockMessagePoolValidator creates a MockMessagePoolValidator +func NewMockMessagePoolValidator() *MockMessagePoolValidator { + return &MockMessagePoolValidator{Valid: true} +} + +// Validate returns true if the mock validator is set to validate the message +func (v *MockMessagePoolValidator) ValidateSignedMessageSyntax(ctx context.Context, msg *types.SignedMessage) error { + if v.Valid { + return nil + } + return errors.New("mock validation error") +} + +// RequireTipset is a helper that constructs a tipset +func RequireTipset(t *testing.T) *types.TipSet { + return RequireTipsetWithHeight(t, abi.ChainEpoch(rand.Int())) +} + +func RequireTipsetWithHeight(t *testing.T, height abi.ChainEpoch) *types.TipSet { + newAddress := NewForTestGetter() + blk := &types.BlockHeader{ + Miner: newAddress(), + Ticket: &types.Ticket{VRFProof: []byte{0x03, 0x01, 0x02}}, + ElectionProof: &types.ElectionProof{VRFProof: []byte{0x0c, 0x0d}}, + BeaconEntries: []types.BeaconEntry{ + { + Round: 44, + Data: []byte{0xc0}, + }, + }, + Height: height, + Messages: CidFromString(t, "someothercid"), + ParentMessageReceipts: CidFromString(t, "someothercid"), + Parents: []cid.Cid{CidFromString(t, "someothercid")}, + ParentWeight: fbig.NewInt(1), + ForkSignaling: 2, + ParentStateRoot: CidFromString(t, "someothercid"), + Timestamp: 4, + ParentBaseFee: abi.NewTokenAmount(20), + BlockSig: &acrypto.Signature{ + Type: acrypto.SigTypeBLS, + Data: []byte{0x4}, + }, + } + b, _ := types.NewTipSet([]*types.BlockHeader{blk}) + return b +} diff --git a/pkg/testhelpers/message.go b/pkg/testhelpers/message.go new file mode 100644 index 0000000000..4c83fb02bc --- /dev/null +++ b/pkg/testhelpers/message.go @@ -0,0 +1,295 @@ +package testhelpers + +import ( + "context" + "fmt" + "testing" 
+ + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/crypto" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" +) + +// NewMessage creates a new message. +func NewMessage(from, to address.Address, nonce uint64, value abi.TokenAmount, method abi.MethodNum, params []byte) *types.Message { + return &types.Message{ + Version: 0, + To: to, + From: from, + Nonce: nonce, + Value: value, + Method: method, + Params: params, + } +} + +// NewMeteredMessage adds gas price and gas limit to the message +func NewMeteredMessage(from, to address.Address, nonce uint64, value abi.TokenAmount, method abi.MethodNum, params []byte, gasFeeCap, gasPremium abi.TokenAmount, limit int64) *types.Message { + return &types.Message{ + Version: 0, + To: to, + From: from, + Nonce: nonce, + Value: value, + GasFeeCap: gasFeeCap, + GasPremium: gasPremium, + GasLimit: limit, + Method: method, + Params: params, + } +} + +// NewSignedMessage accepts a message `msg` and a signer `s`. NewSignedMessage returns a `SignedMessage` containing +// a signature derived from the serialized `msg` and `msg.From` +// NOTE: this method can only sign message with From being a public-key type address, not an ID address. +// We should deprecate this and move to more explicit signing via an address resolver. +func NewSignedMessage(ctx context.Context, msg types.Message, s types.Signer) (*types.SignedMessage, error) { + msgCid := msg.Cid() + + sig, err := s.SignBytes(ctx, msgCid.Bytes(), msg.From) + if err != nil { + return nil, err + } + + return &types.SignedMessage{ + Message: msg, + Signature: *sig, + }, nil +} + +// NewSignedMessageForTestGetter returns a closure that returns a SignedMessage unique to that invocation. 
+// The message is unique wrt the closure returned, not globally. You can use this function +// in tests instead of manually creating messages -- it both reduces duplication and gives us +// exactly one place to create valid messages for tests if messages require validation in the +// future. +// TODO support chosing from address +func NewSignedMessageForTestGetter(ms MockSigner) func(uint64) *types.SignedMessage { + i := 0 + return func(nonce uint64) *types.SignedMessage { + s := fmt.Sprintf("smsg%d", i) + i++ + newAddr, err := address.NewSecp256k1Address([]byte(s + "-to")) + if err != nil { + panic(err) + } + msg := NewMeteredMessage( + ms.Addresses[0], // from needs to be an address from the signer + newAddr, + nonce, + types.ZeroFIL, + 0, + []byte("params"), + types.ZeroFIL, + types.ZeroFIL, + 0, + ) + smsg, err := NewSignedMessage(context.TODO(), *msg, &ms) + if err != nil { + panic(err) + } + return smsg + } +} + +// NewCidForTestGetter returns a closure that returns a Cid unique to that invocation. +// The Cid is unique wrt the closure returned, not globally. You can use this function +// in tests. +func NewCidForTestGetter() func() cid.Cid { + i := 31337 + return func() cid.Cid { + obj, err := cbor.WrapObject([]int{i}, constants.DefaultHashFunction, -1) + if err != nil { + panic(err) + } + i++ + return obj.Cid() + } +} + +// NewMessageForTestGetter returns a closure that returns a message unique to that invocation. +// The message is unique wrt the closure returned, not globally. You can use this function +// in tests instead of manually creating messages -- it both reduces duplication and gives us +// exactly one place to create valid messages for tests if messages require validation in the +// future. 
+func NewMessageForTestGetter() func() *types.Message { + i := 0 + return func() *types.Message { + s := fmt.Sprintf("msg%d", i) + i++ + from, err := address.NewSecp256k1Address([]byte(s + "-from")) + if err != nil { + panic(err) + } + to, err := address.NewSecp256k1Address([]byte(s + "-to")) + if err != nil { + panic(err) + } + return NewMessage( + from, + to, + 0, + types.ZeroFIL, + abi.MethodNum(10000+i), + nil) + } +} + +// NewMsgs returns n messages. The messages returned are unique to this invocation +// but are not unique globally (ie, a second call to NewMsgs will return the same +// set of messages). +func NewMsgs(n int) []*types.Message { + newMsg := NewMessageForTestGetter() + msgs := make([]*types.Message, n) + for i := 0; i < n; i++ { + msgs[i] = newMsg() + msgs[i].Nonce = uint64(i) + } + return msgs +} + +// NewSignedMsgs returns n signed messages. The messages returned are unique to this invocation +// but are not unique globally (ie, a second call to NewSignedMsgs will return the same +// set of messages). 
+func NewSignedMsgs(n uint, ms MockSigner) []*types.SignedMessage { + var err error + newMsg := NewMessageForTestGetter() + smsgs := make([]*types.SignedMessage, n) + for i := uint(0); i < n; i++ { + msg := newMsg() + msg.From = ms.Addresses[0] + msg.Nonce = uint64(i) + msg.GasFeeCap = types.ZeroFIL + msg.GasPremium = types.ZeroFIL + msg.GasLimit = 0 + smsgs[i], err = NewSignedMessage(context.TODO(), *msg, ms) + if err != nil { + panic(err) + } + } + return smsgs +} + +// SignMsgs returns a slice of signed messages where the original messages +// are `msgs`, if signing one of the `msgs` fails an error is returned +func SignMsgs(ms MockSigner, msgs []*types.Message) ([]*types.SignedMessage, error) { + var smsgs []*types.SignedMessage + for _, m := range msgs { + s, err := NewSignedMessage(context.TODO(), *m, ms) + if err != nil { + return nil, err + } + smsgs = append(smsgs, s) + } + return smsgs, nil +} + +// NewMsgsWithAddrs returns a slice of `n` messages who's `From` field's are pulled +// from `a`. This method should be used when the addresses returned are to be signed +// at a later point. +func NewMsgsWithAddrs(n int, a []address.Address) []*types.Message { + if n > len(a) { + panic("cannot create more messages than there are addresess for") + } + newMsg := NewMessageForTestGetter() + msgs := make([]*types.Message, n) + for i := 0; i < n; i++ { + msgs[i] = newMsg() + msgs[i].From = a[i] + } + return msgs +} + +// MessageMaker creates unique, signed messages for use in tests. +type MessageMaker struct { + DefaultGasFeeCap types.BigInt + DefaultGasPremium types.BigInt + DefaultGasUnits int64 + + signer *MockSigner + seq uint + t *testing.T +} + +// NewMessageMaker creates a new message maker with a set of signing keys. 
+func NewMessageMaker(t *testing.T, keys []crypto.KeyInfo) *MessageMaker { + addresses := make([]address.Address, len(keys)) + signer := NewMockSigner(keys) + + for i, key := range keys { + addr, _ := key.Address() + addresses[i] = addr + } + + return &MessageMaker{types.ZeroFIL, types.ZeroFIL, 0, &signer, 0, t} +} + +// Addresses returns the addresses for which this maker can sign messages. +func (mm *MessageMaker) Addresses() []address.Address { + return mm.signer.Addresses +} + +// Signer returns the signer with which this maker signs messages. +func (mm *MessageMaker) Signer() *MockSigner { + return mm.signer +} + +// NewUnsignedMessage creates a new message. +func (mm *MessageMaker) NewUnsignedMessage(from address.Address, nonce uint64) *types.Message { + seq := mm.seq + mm.seq++ + to, err := address.NewSecp256k1Address([]byte("destination")) + require.NoError(mm.t, err) + return NewMeteredMessage( + from, + to, + nonce, + types.ZeroFIL, + abi.MethodNum(9000+seq), + []byte("params"), + mm.DefaultGasFeeCap, + mm.DefaultGasPremium, + mm.DefaultGasUnits) +} + +// NewSignedMessage creates a new signed message. +func (mm *MessageMaker) NewSignedMessage(from address.Address, nonce uint64) *types.SignedMessage { + msg := mm.NewUnsignedMessage(from, nonce) + signed, err := NewSignedMessage(context.TODO(), *msg, mm.signer) + require.NoError(mm.t, err) + return signed +} + +// EmptyReceipts returns a slice of n empty receipts. +func EmptyReceipts(n int) []*types.MessageReceipt { + out := make([]*types.MessageReceipt, n) + for i := 0; i < n; i++ { + out[i] = &types.MessageReceipt{} + } + return out +} + +// ReceiptMaker generates unique receipts +type ReceiptMaker struct { + seq uint +} + +// NewReceiptMaker creates a new receipt maker +func NewReceiptMaker() *ReceiptMaker { + return &ReceiptMaker{0} +} + +// NewReceipt creates a new distinct receipt. 
+func (rm *ReceiptMaker) NewReceipt() types.MessageReceipt { + seq := rm.seq + rm.seq++ + return types.MessageReceipt{ + Return: []byte(fmt.Sprintf("%d", seq)), + } +} diff --git a/pkg/testhelpers/net.go b/pkg/testhelpers/net.go new file mode 100644 index 0000000000..c08f3d9c68 --- /dev/null +++ b/pkg/testhelpers/net.go @@ -0,0 +1,284 @@ +package testhelpers + +import ( + "context" + "crypto/rand" + "encoding/binary" + "fmt" + "testing" + "time" + + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/host" + inet "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + ma "github.com/multiformats/go-multiaddr" + mh "github.com/multiformats/go-multihash" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +var ( + _ host.Host = &FakeHost{} + _ connmgr.ConnManager = &FakeCMgr{} +) + +// FakeHost is a test host.Host +type FakeHost struct { + ConnectImpl func(context.Context, peer.AddrInfo) error +} + +// NewFakeHost constructs a FakeHost with no other parameters needed +func NewFakeHost() host.Host { + nopfunc := func(_ context.Context, _ peer.AddrInfo) error { return nil } + return &FakeHost{ + ConnectImpl: nopfunc, + } +} + +// minimal implementation of host.Host interface + +func (fh *FakeHost) Addrs() []ma.Multiaddr { panic("not implemented") } // nolint: golint +func (fh *FakeHost) Close() error { panic("not implemented") } // nolint: golint +func (fh *FakeHost) ConnManager() connmgr.ConnManager { + return &FakeCMgr{} +} // nolint: golint +func (fh *FakeHost) Connect(ctx context.Context, pi peer.AddrInfo) error { // nolint: golint + return fh.ConnectImpl(ctx, pi) +} +func (fh *FakeHost) EventBus() event.Bus 
{ panic("not implemented") } //nolint: golint +func (fh *FakeHost) ID() peer.ID { panic("not implemented") } // nolint: golint +func (fh *FakeHost) Network() inet.Network { panic("not implemented") } // nolint: golint +func (fh *FakeHost) Mux() protocol.Switch { panic("not implemented") } // nolint: golint +func (fh *FakeHost) Peerstore() peerstore.Peerstore { panic("not implemented") } // nolint: golint +func (fh *FakeHost) RemoveStreamHandler(protocol.ID) { panic("not implemented") } // nolint: golint +func (fh *FakeHost) SetStreamHandler(protocol.ID, inet.StreamHandler) { panic("not implemented") } // nolint: golint +func (fh *FakeHost) SetStreamHandlerMatch(protocol.ID, func(string) bool, inet.StreamHandler) { // nolint: golint + panic("not implemented") +} + +type FakeCMgr struct{} + +func (f FakeCMgr) TagPeer(id peer.ID, s string, i int) {} + +func (f FakeCMgr) UntagPeer(p peer.ID, tag string) {} + +func (f FakeCMgr) UpsertTag(p peer.ID, tag string, upsert func(int) int) {} + +func (f FakeCMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo { + panic("implement me") +} + +func (f FakeCMgr) TrimOpenConns(ctx context.Context) { + panic("implement me") +} + +func (f FakeCMgr) Notifee() inet.Notifiee { + panic("implement me") +} + +func (f FakeCMgr) Protect(id peer.ID, tag string) { + panic("implement me") +} + +func (f FakeCMgr) Unprotect(id peer.ID, tag string) (protected bool) { + panic("implement me") +} + +func (f FakeCMgr) IsProtected(id peer.ID, tag string) (protected bool) { + panic("implement me") +} + +func (f FakeCMgr) Close() error { + panic("implement me") +} + +// NewStream is required for the host.Host interface; returns a new FakeStream. 
+func (fh *FakeHost) NewStream(context.Context, peer.ID, ...protocol.ID) (inet.Stream, error) { // nolint: golint + return newFakeStream(), nil +} + +var _ inet.Dialer = &FakeDialer{} + +// FakeDialer is a test inet.Dialer +type FakeDialer struct { + PeersImpl func() []peer.ID +} + +// Minimal implementation of the inet.Dialer interface + +// Peers returns a fake inet.Dialer PeersImpl +func (fd *FakeDialer) Peers() []peer.ID { + return fd.PeersImpl() +} +func (fd *FakeDialer) Peerstore() peerstore.Peerstore { panic("not implemented") } +func (fd *FakeDialer) LocalPeer() peer.ID { panic("not implemented") } +func (fd *FakeDialer) DialPeer(context.Context, peer.ID) (inet.Conn, error) { panic("not implemented") } +func (fd *FakeDialer) ClosePeer(peer.ID) error { panic("not implemented") } +func (fd *FakeDialer) Connectedness(peer.ID) inet.Connectedness { panic("not implemented") } +func (fd *FakeDialer) Conns() []inet.Conn { panic("not implemented") } +func (fd *FakeDialer) ConnsToPeer(peer.ID) []inet.Conn { panic("not implemented") } +func (fd *FakeDialer) Notify(inet.Notifiee) { panic("not implemented") } +func (fd *FakeDialer) StopNotify(inet.Notifiee) { panic("not implemented") } + +// fakeStream is a test inet.Stream +type fakeStream struct { + _ inet.MuxedStream + pid protocol.ID +} + +var _ inet.Stream = &fakeStream{} + +func newFakeStream() *fakeStream { return &fakeStream{} } + +// Minimal implementation of the inet.Stream interface +func (fs *fakeStream) ID() string { return "" } +func (fs *fakeStream) Protocol() protocol.ID { return fs.pid } // nolint: golint +func (fs *fakeStream) SetProtocol(id protocol.ID) error { fs.pid = id; return nil } // nolint: golint +func (fs *fakeStream) Stat() inet.Stats { panic("not implemented") } // nolint: golint +func (fs *fakeStream) Conn() inet.Conn { panic("not implemented") } // nolint: golint +func (fs *fakeStream) Write(_ []byte) (int, error) { return 1, nil } // nolint: golint +func (fs *fakeStream) Read(_ []byte) 
(int, error) { return 1, nil } // nolint: golint +func (fs *fakeStream) Close() error { return nil } // nolint: golint +func (fs *fakeStream) Reset() error { return nil } // nolint: golint +func (fs *fakeStream) SetDeadline(_ time.Time) error { return nil } // nolint: golint +func (fs *fakeStream) SetReadDeadline(_ time.Time) error { return nil } // nolint: golint +func (fs *fakeStream) SetWriteDeadline(_ time.Time) error { return nil } // nolint: golint +func (fs *fakeStream) CloseWrite() error { panic("implement me") } +func (fs *fakeStream) CloseRead() error { panic("implement me") } +func (fs *fakeStream) Scope() inet.StreamScope { panic("implement me") } + +// RandPeerID is a libp2p random peer ID generator. +// These peer.ID generators were copied from libp2p/go-testutil. We didn't bring in the +// whole repo as a dependency because we only need this small bit. However if we find +// ourselves using more and more pieces we should just take a dependency on it. +func RandPeerID() (peer.ID, error) { + buf := make([]byte, 16) + if n, err := rand.Read(buf); n != 16 || err != nil { + if n != 16 && err == nil { + err = errors.New("couldnt read 16 random bytes") + } + panic(err) + } + h, _ := mh.Sum(buf, mh.SHA2_256, -1) + return peer.ID(h), nil +} + +// RequireIntPeerID takes in an integer and creates a unique peer id for it. +func RequireIntPeerID(t *testing.T, i int64) peer.ID { + buf := make([]byte, 16) + n := binary.PutVarint(buf, i) + h, err := mh.Sum(buf[:n], mh.IDENTITY, -1) + require.NoError(t, err) + pid, err := peer.IDFromBytes(h) + require.NoError(t, err) + return pid +} + +// TestFetcher is an object with the same method set as Fetcher plus a method +// for adding blocks to the source. It is used to implement an object that +// behaves like Fetcher but does not go to the network for use in tests. +type TestFetcher struct { + sourceBlocks map[string]*types.BlockHeader // sourceBlocks maps block cid strings to blocks. 
+} + +// NewTestFetcher returns a TestFetcher with no source blocks. +func NewTestFetcher() *TestFetcher { + return &TestFetcher{ + sourceBlocks: make(map[string]*types.BlockHeader), + } +} + +// AddSourceBlocks adds the input blocks to the fetcher source. +func (f *TestFetcher) AddSourceBlocks(blocks ...*types.BlockHeader) { + for _, block := range blocks { + f.sourceBlocks[block.Cid().String()] = block + } +} + +// FetchTipSets fetchs the tipset at `tsKey` from the network using the fetchers `sourceBlocks`. +func (f *TestFetcher) FetchTipSets(ctx context.Context, tsKey types.TipSetKey, from peer.ID, done func(t *types.TipSet) (bool, error)) ([]*types.TipSet, error) { + var out []*types.TipSet + cur := tsKey + for { + res, err := f.GetBlocks(ctx, cur.Cids()) + if err != nil { + return nil, err + } + + ts, err := types.NewTipSet(res) + if err != nil { + return nil, err + } + + out = append(out, ts) + ok, err := done(ts) + if err != nil { + return nil, err + } + if ok { + break + } + + cur = ts.Parents() + } + + return out, nil +} + +// FetchTipSetHeaders fetches the tipset at `tsKey` but not messages +func (f *TestFetcher) FetchTipSetHeaders(ctx context.Context, tsKey types.TipSetKey, from peer.ID, done func(t *types.TipSet) (bool, error)) ([]*types.TipSet, error) { + return f.FetchTipSets(ctx, tsKey, from, done) +} + +// GetBlocks returns any blocks in the source with matching cids. +func (f *TestFetcher) GetBlocks(ctx context.Context, cids []cid.Cid) ([]*types.BlockHeader, error) { + var ret []*types.BlockHeader + for _, c := range cids { + if block, ok := f.sourceBlocks[c.String()]; ok { + ret = append(ret, block) + } else { + return nil, fmt.Errorf("failed to fetch block: %s", c.String()) + } + } + return ret, nil +} + +func NewTestExchange() *TestExchange { + return &TestExchange{ + sourceBlocks: make(map[string]*types.BlockHeader), + } +} + +// AddSourceBlocks adds the input blocks to the fetcher source. 
+func (f *TestExchange) AddSourceBlocks(blocks ...*types.BlockHeader) { + for _, block := range blocks { + f.sourceBlocks[block.Cid().String()] = block + } +} + +type TestExchange struct { + sourceBlocks map[string]*types.BlockHeader // s +} + +func (f *TestExchange) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) { + panic("implement me") +} + +func (f *TestExchange) GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*exchange.CompactedMessages, error) { + panic("implement me") +} + +func (f *TestExchange) GetFullTipSet(ctx context.Context, peer []peer.ID, tsk types.TipSetKey) (*types.FullTipSet, error) { + panic("implement me") +} + +func (f *TestExchange) AddPeer(peer peer.ID) {} + +func (f *TestExchange) RemovePeer(peer peer.ID) {} diff --git a/internal/pkg/testhelpers/output.go b/pkg/testhelpers/output.go similarity index 98% rename from internal/pkg/testhelpers/output.go rename to pkg/testhelpers/output.go index 96db0104d6..43c024af85 100644 --- a/internal/pkg/testhelpers/output.go +++ b/pkg/testhelpers/output.go @@ -2,7 +2,6 @@ package testhelpers import ( "io" - "io/ioutil" "strings" "testing" @@ -131,7 +130,7 @@ func (o *CmdOutput) requireNoError() { func readAllAsync(tb testing.TB, r io.Reader) chan []byte { ch := make(chan []byte, 1) go func() { - bytes, err := ioutil.ReadAll(r) + bytes, err := io.ReadAll(r) require.NoError(tb, err) if err == nil { ch <- bytes diff --git a/pkg/testhelpers/singer.go b/pkg/testhelpers/singer.go new file mode 100644 index 0000000000..135e033c20 --- /dev/null +++ b/pkg/testhelpers/singer.go @@ -0,0 +1,153 @@ +package testhelpers + +import ( + "bytes" + "context" + "crypto/rand" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/pkg/crypto" +) + +// MockSigner implements the Signer interface +type MockSigner struct { + AddrKeyInfo map[address.Address]crypto.KeyInfo + Addresses []address.Address + PubKeys [][]byte +} + +// 
NewMockSigner returns a new mock signer, capable of signing data with +// keys (addresses derived from) in keyinfo +func NewMockSigner(kis []crypto.KeyInfo) MockSigner { + var ms MockSigner + ms.AddrKeyInfo = make(map[address.Address]crypto.KeyInfo) + for _, k := range kis { + // extract public key + pub, err := k.PublicKey() + if err != nil { + panic(err) + } + + var newAddr address.Address + if k.SigType == crypto.SigTypeSecp256k1 { + newAddr, err = address.NewSecp256k1Address(pub) + } else if k.SigType == crypto.SigTypeBLS { + newAddr, err = address.NewBLSAddress(pub) + } + if err != nil { + panic(err) + } + ms.Addresses = append(ms.Addresses, newAddr) + ms.AddrKeyInfo[newAddr] = k + ms.PubKeys = append(ms.PubKeys, pub) + } + return ms +} + +// NewMockSignersAndKeyInfo is a convenience function to generate a mock +// signers with some keys. +func NewMockSignersAndKeyInfo(numSigners int) (MockSigner, []crypto.KeyInfo) { + ki := MustGenerateKeyInfo(numSigners, 42) + signer := NewMockSigner(ki) + return signer, ki +} + +// MustGenerateMixedKeyInfo produces m bls keys and n secp keys. +// BLS and Secp will be interleaved. The keys will be valid, but not deterministic. +func MustGenerateMixedKeyInfo(m int, n int) []crypto.KeyInfo { + info := []crypto.KeyInfo{} + for m > 0 && n > 0 { + if m > 0 { + ki, err := crypto.NewBLSKeyFromSeed(rand.Reader) + if err != nil { + panic(err) + } + info = append(info, ki) + m-- + } + + if n > 0 { + ki, err := crypto.NewSecpKeyFromSeed(rand.Reader) + if err != nil { + panic(err) + } + info = append(info, ki) + n-- + } + } + return info +} + +// MustGenerateBLSKeyInfo produces n distinct BLS keyinfos. 
+func MustGenerateBLSKeyInfo(n int, seed byte) []crypto.KeyInfo { + token := bytes.Repeat([]byte{seed}, 512) + var keyinfos []crypto.KeyInfo + for i := 0; i < n; i++ { + token[0] = byte(i) + ki, err := crypto.NewBLSKeyFromSeed(bytes.NewReader(token)) + if err != nil { + panic(err) + } + keyinfos = append(keyinfos, ki) + } + return keyinfos +} + +// MustGenerateKeyInfo generates `n` distinct keyinfos using seed `seed`. +// The result is deterministic (for stable tests), don't use this for real keys! +func MustGenerateKeyInfo(n int, seed byte) []crypto.KeyInfo { + token := bytes.Repeat([]byte{seed}, 512) + var keyinfos []crypto.KeyInfo + for i := 0; i < n; i++ { + token[0] = byte(i) + ki, err := crypto.NewSecpKeyFromSeed(bytes.NewReader(token)) + if err != nil { + panic(err) + } + keyinfos = append(keyinfos, ki) + } + return keyinfos +} + +// SignBytes cryptographically signs `data` using the `addr`. +func (ms MockSigner) SignBytes(_ context.Context, data []byte, addr address.Address) (*crypto.Signature, error) { + ki, ok := ms.AddrKeyInfo[addr] + if !ok { + return nil, errors.New("unknown address") + } + var sig *crypto.Signature + err := ki.UsePrivateKey(func(privateKey []byte) error { + var err error + sig, err = crypto.Sign(data, privateKey, ki.SigType) + + return err + }) + return sig, err +} + +// HasAddress returns whether the signer can sign with this address +func (ms MockSigner) HasAddress(_ context.Context, addr address.Address) (bool, error) { + return true, nil +} + +// GetAddressForPubKey looks up a KeyInfo address associated with a given PublicKeyForSecpSecretKey for a MockSigner +func (ms MockSigner) GetAddressForPubKey(pk []byte) (address.Address, error) { + var addr address.Address + + for _, ki := range ms.AddrKeyInfo { + testPk, err := ki.PublicKey() + if err != nil { + return address.Undef, err + } + + if bytes.Equal(testPk, pk) { + addr, err := ki.Address() + if err != nil { + return addr, errors.New("could not fetch address") + } + return addr, 
nil + } + } + return addr, errors.New("public key not found in wallet") +} diff --git a/internal/pkg/testhelpers/test_daemon.go b/pkg/testhelpers/test_daemon.go similarity index 84% rename from internal/pkg/testhelpers/test_daemon.go rename to pkg/testhelpers/test_daemon.go index 2390da9285..5f05eff151 100644 --- a/internal/pkg/testhelpers/test_daemon.go +++ b/pkg/testhelpers/test_daemon.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "os" "os/exec" @@ -21,24 +20,22 @@ import ( "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr-net" + manet "github.com/multiformats/go-multiaddr/net" "github.com/pkg/errors" - "github.com/filecoin-project/go-filecoin/build/project" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/venus-shared/types" ) const ( // DefaultDaemonCmdTimeout is the default timeout for executing commands. DefaultDaemonCmdTimeout = 1 * time.Minute repoName = "repo" - sectorsName = "sectors" ) // RunSuccessFirstLine executes the given command, asserts success and returns @@ -60,9 +57,10 @@ type TestDaemon struct { containerDir string // Path to directory containing repo and sectors genesisFile string keyFiles []string - withMiner address.Address autoSealInterval string + networkName string isRelay bool + offline bool initArgs []string firstRun bool @@ -85,14 +83,9 @@ func (td *TestDaemon) RepoDir() string { return path.Join(td.containerDir, repoName) } -// SectorDir returns the sector root directory of the test daemon. 
-func (td *TestDaemon) SectorDir() string { - return path.Join(td.containerDir, sectorsName) -} - // CmdAddr returns the command address of the test daemon (if it is running). func (td *TestDaemon) CmdAddr() (ma.Multiaddr, error) { - str, err := ioutil.ReadFile(filepath.Join(td.RepoDir(), "api")) + str, err := os.ReadFile(filepath.Join(td.RepoDir(), "api")) if err != nil { return nil, err } @@ -100,6 +93,15 @@ func (td *TestDaemon) CmdAddr() (ma.Multiaddr, error) { return ma.NewMultiaddr(strings.TrimSpace(string(str))) } +// CmdToken returns the command token of the test daemon (if it is running). +func (td *TestDaemon) CmdToken() (string, error) { + str, err := os.ReadFile(filepath.Join(td.RepoDir(), "token")) + if err != nil { + return "", err + } + return strings.TrimSpace(string(str)), nil +} + // Config is a helper to read out the config of the daemon. func (td *TestDaemon) Config() *config.Config { cfg, err := config.ReadFile(filepath.Join(td.RepoDir(), "config.json")) @@ -109,7 +111,8 @@ func (td *TestDaemon) Config() *config.Config { // GetMinerAddress returns the miner address for this daemon. func (td *TestDaemon) GetMinerAddress() address.Address { - return td.Config().Mining.MinerAddress + return td.Config().Wallet.DefaultAddress + // return td.Config().Mining.MinerAddress } // Run executes the given command against the test daemon. @@ -135,7 +138,7 @@ func (td *TestDaemon) RunWithStdin(stdin io.Reader, args ...string) *CmdOutput { args = strings.Split(args[0], " ") } - finalArgs := append(args, "--repodir="+td.RepoDir(), "--cmdapiaddr="+addr.String()) + finalArgs := append(args, "--repo="+td.RepoDir(), "--cmdapiaddr="+addr.String()) td.logRun(finalArgs...) cmd := exec.CommandContext(ctx, bin, finalArgs...) @@ -191,7 +194,7 @@ func (td *TestDaemon) RunFail(err string, args ...string) *CmdOutput { // GetID returns the id of the daemon. 
func (td *TestDaemon) GetID() string { - out := td.RunSuccess("id") + out := td.RunSuccess("swarm", "id") var parsed map[string]interface{} require.NoError(td.test, json.Unmarshal([]byte(out.ReadStdout()), &parsed)) @@ -200,7 +203,7 @@ func (td *TestDaemon) GetID() string { // GetAddresses returns all of the addresses of the daemon. func (td *TestDaemon) GetAddresses() []string { - out := td.RunSuccess("id") + out := td.RunSuccess("swarm", "id") var parsed map[string]interface{} require.NoError(td.test, json.Unmarshal([]byte(out.ReadStdout()), &parsed)) adders := parsed["Addresses"].([]interface{}) @@ -279,7 +282,7 @@ Outer: func (td *TestDaemon) ReadStdout() string { td.lk.Lock() defer td.lk.Unlock() - out, err := ioutil.ReadAll(td.Stdout) + out, err := io.ReadAll(td.Stdout) if err != nil { panic(err) } @@ -290,7 +293,7 @@ func (td *TestDaemon) ReadStdout() string { func (td *TestDaemon) ReadStderr() string { td.lk.Lock() defer td.lk.Unlock() - out, err := ioutil.ReadAll(td.Stderr) + out, err := io.ReadAll(td.Stderr) if err != nil { panic(err) } @@ -304,12 +307,6 @@ func (td *TestDaemon) Start() *TestDaemon { require.NoError(td.test, td.process.Start()) err := td.WaitForAPI() - if err != nil { - stdErr, _ := ioutil.ReadAll(td.Stderr) - stdOut, _ := ioutil.ReadAll(td.Stdout) - td.test.Errorf("%s\n%s", stdErr, stdOut) - } - require.NoError(td.test, err, "Daemon failed to start") // on first startup import key pairs, if defined @@ -407,7 +404,8 @@ func (td *TestDaemon) WaitForAPI() error { // CreateStorageMinerAddr issues a new message to the network, mines the message // and returns the address of the new miner // equivalent to: -// `go-filecoin miner create --from $TEST_ACCOUNT 20` +// +// `venus miner create --from $TEST_ACCOUNT 20` func (td *TestDaemon) CreateStorageMinerAddr(peer *TestDaemon, fromAddr address.Address) address.Address { var wg sync.WaitGroup var minerAddr address.Address @@ -464,10 +462,10 @@ func (td *TestDaemon) UpdatePeerID() { // 
WaitForMessageRequireSuccess accepts a message cid and blocks until a message with matching cid is included in a // block. The receipt is then inspected to ensure that the corresponding message receipt had a 0 exit code. -func (td *TestDaemon) WaitForMessageRequireSuccess(msgCid cid.Cid) *vm.MessageReceipt { +func (td *TestDaemon) WaitForMessageRequireSuccess(msgCid cid.Cid) *types.MessageReceipt { args := []string{"message", "wait", msgCid.String(), "--receipt=true", "--message=false"} trim := strings.Trim(td.RunSuccess(args...).ReadStdout(), "\n") - rcpt := &vm.MessageReceipt{} + rcpt := &types.MessageReceipt{} require.NoError(td.test, json.Unmarshal([]byte(trim), rcpt)) require.Equal(td.test, 0, int(rcpt.ExitCode)) return rcpt @@ -476,10 +474,11 @@ func (td *TestDaemon) WaitForMessageRequireSuccess(msgCid cid.Cid) *vm.MessageRe // CreateAddress adds a new address to the daemons wallet and // returns it. // equivalent to: -// `go-filecoin address new` +// +// `venus address new` func (td *TestDaemon) CreateAddress() string { td.test.Helper() - outNew := td.RunSuccess("address", "new") + outNew := td.RunSuccess("wallet", "new") addr := strings.Trim(outNew.ReadStdout(), "\n") require.NotEmpty(td.test, addr) return addr @@ -509,7 +508,7 @@ func (td *TestDaemon) MustHaveChainHeadBy(wait time.Duration, peers []*TestDaemo for _, blk := range expHeadBlks { expHeadCids = append(expHeadCids, blk.Cid()) } - expHeadKey := block.NewTipSetKey(expHeadCids...) + expHeadKey := types.NewTipSetKey(expHeadCids...) for _, p := range peers { wg.Add(1) @@ -520,7 +519,7 @@ func (td *TestDaemon) MustHaveChainHeadBy(wait time.Duration, peers []*TestDaemo for _, blk := range actHeadBlks { actHeadCids = append(actHeadCids, blk.Cid()) } - actHeadKey := block.NewTipSetKey(actHeadCids...) + actHeadKey := types.NewTipSetKey(actHeadCids...) 
if expHeadKey.Equals(actHeadKey) { wg.Done() return @@ -544,19 +543,19 @@ func (td *TestDaemon) MustHaveChainHeadBy(wait time.Duration, peers []*TestDaemo } // GetChainHead returns the blocks in the head tipset from `td` -func (td *TestDaemon) GetChainHead() []block.Block { +func (td *TestDaemon) GetChainHead() []types.BlockHeader { out := td.RunSuccess("chain", "ls", "--enc=json") bc := td.MustUnmarshalChain(out.ReadStdout()) return bc[0] } // MustUnmarshalChain unmarshals the chain from `input` into a slice of blocks -func (td *TestDaemon) MustUnmarshalChain(input string) [][]block.Block { +func (td *TestDaemon) MustUnmarshalChain(input string) [][]types.BlockHeader { chain := strings.Trim(input, "\n") - var bs [][]block.Block + var bs [][]types.BlockHeader for _, line := range bytes.Split([]byte(chain), []byte{'\n'}) { - var b []block.Block + var b []types.BlockHeader if err := json.Unmarshal(line, &b); err != nil { td.test.Fatal(err) } @@ -575,7 +574,7 @@ func (td *TestDaemon) MakeMoney(rewards int, peers ...*TestDaemon) { // GetDefaultAddress returns the default sender address for this daemon. 
func (td *TestDaemon) GetDefaultAddress() string { - addrs := td.RunSuccess("address", "default") + addrs := td.RunSuccess("wallet", "default") return addrs.ReadStdout() } @@ -585,13 +584,25 @@ func tryAPICheck(td *TestDaemon) error { return err } - _, host, err := manet.DialArgs(maddr) + _, host, err := manet.DialArgs(maddr) //nolint + if err != nil { + return err + } + token, err := td.CmdToken() if err != nil { return err } - url := fmt.Sprintf("http://%s/api/id", host) - resp, err := http.Get(url) + url := fmt.Sprintf("http://%s/rpc/v0", host) + reqData := `{"method": "Filecoin.ID","params":[], "id": 0}` + req, err := http.NewRequest("POST", url, strings.NewReader(reqData)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+token) + + resp, err := (&http.Client{}).Do(req) if err != nil { return err } @@ -602,9 +613,13 @@ func tryAPICheck(td *TestDaemon) error { return fmt.Errorf("liveness check failed: %s", err) } - _, ok := out["ID"] + peerStr, ok := out["result"].(string) + _, err = peer.Decode(peerStr) + if err != nil { + return err + } if !ok { - return fmt.Errorf("liveness check failed: ID field not present in output") + return fmt.Errorf("liveness check failed") } return nil @@ -617,14 +632,6 @@ func ContainerDir(dir string) func(*TestDaemon) { } } -// ShouldInit allows setting the `init` config option on the daemon. If -// set, `go-filecoin init` is run before starting up the daemon. -func ShouldInit(i bool) func(*TestDaemon) { - return func(td *TestDaemon) { - td.init = i - } -} - // CmdTimeout allows setting the `cmdTimeout` config option on the daemon. func CmdTimeout(t time.Duration) func(*TestDaemon) { return func(td *TestDaemon) { @@ -667,13 +674,6 @@ func InitArgs(a ...string) func(*TestDaemon) { } } -// WithMiner allows setting the --with-miner flag on init. 
-func WithMiner(m address.Address) func(*TestDaemon) { - return func(td *TestDaemon) { - td.withMiner = m - } -} - // IsRelay starts the daemon with the --is-relay option. func IsRelay(td *TestDaemon) { td.isRelay = true @@ -689,6 +689,8 @@ func NewDaemon(t *testing.T, options ...func(*TestDaemon)) *TestDaemon { test: t, init: true, // we want to init unless told otherwise firstRun: true, + offline: false, + networkName: "integrationnet", cmdTimeout: DefaultDaemonCmdTimeout, genesisFile: GenesisFilePath(), // default file includes all test addresses, } @@ -700,43 +702,14 @@ func NewDaemon(t *testing.T, options ...func(*TestDaemon)) *TestDaemon { // Allocate directory for repo and sectors. If set already it is assumed to exist. if td.containerDir == "" { - newDir, err := ioutil.TempDir("", "daemon-test") + newDir, err := os.MkdirTemp("", "daemon-test") if err != nil { t.Fatal(err) } td.containerDir = newDir } - repoDirFlag := fmt.Sprintf("--repodir=%s", td.RepoDir()) - sectorDirFlag := fmt.Sprintf("--sectordir=%s", td.SectorDir()) - - // build command options - initopts := []string{repoDirFlag, sectorDirFlag} - - if td.genesisFile != "" { - initopts = append(initopts, fmt.Sprintf("--genesisfile=%s", td.genesisFile)) - } - - if td.withMiner != address.Undef { - initopts = append(initopts, fmt.Sprintf("--with-miner=%s", td.withMiner)) - } - - if td.autoSealInterval != "" { - initopts = append(initopts, fmt.Sprintf("--auto-seal-interval-seconds=%s", td.autoSealInterval)) - } - - for _, arg := range td.initArgs { - initopts = append(initopts, arg) - } - - if td.init { - t.Logf("run: go-filecoin init %s", initopts) - out, err := RunInit(td, initopts...) - if err != nil { - t.Log(string(out)) - t.Fatal(err) - } - } + repoDirFlag := fmt.Sprintf("--repo=%s", td.RepoDir()) // Defer allocation of a command API port until listening. The node will write the // listening address to the "api" file in the repo, from where we can read it when issuing commands. 
@@ -746,31 +719,30 @@ func NewDaemon(t *testing.T, options ...func(*TestDaemon)) *TestDaemon { swarmAddr := "/ip4/127.0.0.1/tcp/0" swarmListenFlag := fmt.Sprintf("--swarmlisten=%s", swarmAddr) - blockTimeFlag := fmt.Sprintf("--block-time=%s", BlockTimeTest) + td.daemonArgs = []string{filecoinBin, "daemon", repoDirFlag, cmdAPIAddrFlag, swarmListenFlag} - td.daemonArgs = []string{filecoinBin, "daemon", repoDirFlag, cmdAPIAddrFlag, swarmListenFlag, blockTimeFlag} + if len(td.genesisFile) != 0 { + td.daemonArgs = append(td.daemonArgs, fmt.Sprintf("--genesisfile=%s", td.genesisFile)) + } + + if len(td.networkName) != 0 { + td.daemonArgs = append(td.daemonArgs, fmt.Sprintf("--network=%s", td.networkName)) + } if td.isRelay { td.daemonArgs = append(td.daemonArgs, "--is-relay") } - return td -} - -// RunInit is the equivalent of executing `go-filecoin init`. -func RunInit(td *TestDaemon, opts ...string) ([]byte, error) { - filecoinBin := MustGetFilecoinBinary() - - finalArgs := append([]string{"init"}, opts...) - td.logRun(finalArgs...) + if td.offline { + td.daemonArgs = append(td.daemonArgs, "--offline") + } - process := exec.Command(filecoinBin, finalArgs...) - return process.CombinedOutput() + return td } // GenesisFilePath returns the path to the test genesis func GenesisFilePath() string { - return project.Root("/fixtures/test/genesis.car") + return Root("/fixtures/test/genesis.car") } func (td *TestDaemon) createNewProcess() { @@ -778,7 +750,7 @@ func (td *TestDaemon) createNewProcess() { td.process = exec.Command(td.daemonArgs[0], td.daemonArgs[1:]...) 
// disable REUSEPORT, it creates problems in tests - td.process.Env = append(os.Environ(), "IPFS_REUSEPORT=false") + td.process.Env = append(os.Environ(), "LIBP2P_TCP_REUSEPORT=false") // setup process pipes var err error @@ -787,7 +759,7 @@ func (td *TestDaemon) createNewProcess() { td.test.Fatal(err) } // uncomment this and comment out the following 4 lines to output daemon stderr to os stderr - //td.process.Stderr = os.Stderr + // td.process.Stderr = os.Stderr td.Stderr, err = td.process.StderrPipe() if err != nil { td.test.Fatal(err) diff --git a/pkg/testhelpers/testflags/flags.go b/pkg/testhelpers/testflags/flags.go new file mode 100644 index 0000000000..d7fbcd0e0d --- /dev/null +++ b/pkg/testhelpers/testflags/flags.go @@ -0,0 +1,90 @@ +package testflags + +import ( + "flag" + "testing" +) + +// Test enablement flags +// Only run unit and integration tests by default, all others require their flags to be set. +var ( + integrationTest = flag.Bool("integration", true, "Run the integration go tests") + unitTest = flag.Bool("unit", true, "Run the unit go tests") + functionalTest = flag.Bool("functional", false, "Run the functional go tests") + deploymentTest = flag.String("deployment", "", "Run the deployment tests against a network") + binaryPath = flag.String("binary-path", "", "Run forked processes tests using provided binary") +) + +// BinaryPath will return the path to the user provided binary. The call is expected to check if +// the return path points to an actual file. If the user did not provide a value an empty string +// will be returned along with a false for the second return value. +func BinaryPath() (string, bool) { + if len(*binaryPath) == 0 { + return "", false + } + + return *binaryPath, true +} + +// DeploymentTest will run the test its called from iff the `-deployment` flag +// is passed when calling `go test`. Otherwise the test will be skipped. DeploymentTest +// will run the test its called from in parallel. 
+// The network under test will be returned. +func DeploymentTest(t *testing.T) string { + if len(*deploymentTest) == 0 { + t.SkipNow() + } + t.Parallel() + + return *deploymentTest +} + +// FunctionalTest will run the test its called from iff the `-functional` flag +// is passed when calling `go test`. Otherwise the test will be skipped. FunctionalTest +// will run the test its called from in parallel. +func FunctionalTest(t *testing.T) { + if !*functionalTest { + t.SkipNow() + } + t.Parallel() +} + +// IntegrationTest will run the test its called from iff the `-integration` flag +// is passed when calling `go test`. Otherwise the test will be skipped. IntegrationTest +// will run the test its called from in parallel. +func IntegrationTest(t *testing.T) { + if !*integrationTest { + t.SkipNow() + } + // t.Parallel() +} + +// UnitTest will run the test its called from iff the `-unit` or `-short` flag +// is passed when calling `go test`. Otherwise the test will be skipped. UnitTest +// will run the test its called from in parallel. +func UnitTest(t *testing.T) { + if !*unitTest && !testing.Short() { + t.SkipNow() + } + // t.Parallel() +} + +// BenchUnitTest will run the test its called from iff the `-unit` or `-short` flag +// is passed when calling `go test`. Otherwise the test will be skipped. UnitTest +// will run the test its called from in parallel. +func BenchUnitTest(t *testing.B) { + if !*unitTest && !testing.Short() { + t.SkipNow() + } + // t.Parallel() +} + +// BadUnitTestWithSideEffects will run the test its called from iff the +// `-unit` or `-short` flag is passed when calling `go test`. Otherwise the test +// will be skipped. BadUnitTestWithSideEffects will run the test its called +// serially. Tests that use this flag are bad an should feel bad. 
+func BadUnitTestWithSideEffects(t *testing.T) { + if !*unitTest && !testing.Short() { + t.SkipNow() + } +} diff --git a/pkg/testhelpers/tipset.go b/pkg/testhelpers/tipset.go new file mode 100644 index 0000000000..040ced2f94 --- /dev/null +++ b/pkg/testhelpers/tipset.go @@ -0,0 +1,16 @@ +package testhelpers + +import ( + "testing" + + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/stretchr/testify/require" +) + +// RequireNewTipSet instantiates and returns a new tipset of the given blocks +// and requires that the setup validation succeed. +func RequireNewTipSet(t *testing.T, blks ...*types.BlockHeader) *types.TipSet { + ts, err := types.NewTipSet(blks) + require.NoError(t, err) + return ts +} diff --git a/pkg/testhelpers/util.go b/pkg/testhelpers/util.go new file mode 100644 index 0000000000..69f56335c0 --- /dev/null +++ b/pkg/testhelpers/util.go @@ -0,0 +1,114 @@ +package testhelpers + +import ( + "fmt" + "net" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +// GetFreePort gets a free port from the kernel +// Credit: https://github.com/phayes/freeport +func GetFreePort() (int, error) { + addr, err := net.ResolveTCPAddr("tcp", "0.0.0.0:0") + if err != nil { + return 0, err + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return 0, err + } + defer l.Close() // nolint: errcheck + return l.Addr().(*net.TCPAddr).Port, nil +} + +// MustGetFilecoinBinary returns the path where the filecoin binary will be if it has been built and panics otherwise. 
+func MustGetFilecoinBinary() string { + path, err := GetFilecoinBinary() + if err != nil { + panic(err) + } + + return path +} + +// GetFilecoinBinary returns the path where the filecoin binary will be if it has been built +func GetFilecoinBinary() (string, error) { + bin, provided := testflags.BinaryPath() + if !provided { + bin = Root("venus") + } + + _, err := os.Stat(bin) + if err != nil { + return "", err + } + + if os.IsNotExist(err) { + return "", err + } + + return bin, nil +} + +// WaitForIt waits until the given callback returns true. +func WaitForIt(count int, delay time.Duration, cb func() (bool, error)) error { + var done bool + var err error + for i := 0; i < count; i++ { + done, err = cb() + if err != nil { + return err + } + if done { + break + } + time.Sleep(delay) + } + + if !done { + return fmt.Errorf("timeout waiting for it") + } + + return nil +} + +// WaitTimeout waits for the waitgroup for the specified max timeout. +// Returns true if waiting timed out. +func WaitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { + c := make(chan struct{}) + go func() { + defer close(c) + wg.Wait() + }() + select { + case <-c: + return false // completed normally + case <-time.After(timeout): + return true // timed out + } +} + +// GetGitRoot return the project root joined with any path fragments +func GetGitRoot() string { + cmd := exec.Command("git", "rev-parse", "--show-toplevel") + out, err := cmd.CombinedOutput() + if err != nil { + panic("could not find git root") + } + + return strings.Trim(string(out), "\n") +} + +// Root return the project root joined with any path fragments +func Root(paths ...string) string { + allPaths := append([]string{GetGitRoot()}, paths...) + return filepath.Join(allPaths...) 
+} diff --git a/internal/pkg/util/convert/convert.go b/pkg/util/convert/convert.go similarity index 90% rename from internal/pkg/util/convert/convert.go rename to pkg/util/convert/convert.go index fd60fc5e61..2b2db3a397 100644 --- a/internal/pkg/util/convert/convert.go +++ b/pkg/util/convert/convert.go @@ -5,7 +5,7 @@ import ( cbor "github.com/ipfs/go-ipld-cbor" "github.com/pkg/errors" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" + "github.com/filecoin-project/venus/pkg/constants" ) // ToCid gets the Cid for the argument passed in diff --git a/pkg/util/dag/dag.go b/pkg/util/dag/dag.go new file mode 100644 index 0000000000..e695c1001b --- /dev/null +++ b/pkg/util/dag/dag.go @@ -0,0 +1,152 @@ +package dag + +import ( + "context" + "fmt" + "io" + + path "github.com/filecoin-project/venus/pkg/util/dag/oldpath" + resolver "github.com/filecoin-project/venus/pkg/util/dag/oldpath/oldresolver" + "github.com/ipfs/go-cid" + chunk "github.com/ipfs/go-ipfs-chunker" + ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + "github.com/ipfs/go-unixfs" + imp "github.com/ipfs/go-unixfs/importer" + uio "github.com/ipfs/go-unixfs/io" + "github.com/pkg/errors" +) + +// DAG is a service for accessing the merkledag +type DAG struct { + dserv ipld.DAGService // Provides access to state tree. +} + +// NewDAG creates a DAG with a given DAGService +func NewDAG(dserv ipld.DAGService) *DAG { + return &DAG{ + dserv: dserv, + } +} + +// GetNode returns the associated DAG node for the passed in CID. 
+func (dag *DAG) GetNode(ctx context.Context, ref string) (interface{}, error) { + parsedRef, err := path.ParsePath(ref) + if err != nil { + return nil, err + } + + resolver := resolver.NewBasicResolver(dag.dserv) + + objc, rem, err := resolver.ResolveToLastNode(ctx, parsedRef) + if err != nil { + return nil, err + } + + obj, err := dag.dserv.Get(ctx, objc) + if err != nil { + return nil, err + } + + var out interface{} = obj + if len(rem) > 0 { + final, _, err := obj.Resolve(rem) + if err != nil { + return nil, err + } + out = final + } + + return out, nil +} + +// GetFileSize returns the file size for a given Cid +func (dag *DAG) GetFileSize(ctx context.Context, c cid.Cid) (uint64, error) { + fnode, err := dag.dserv.Get(ctx, c) + if err != nil { + return 0, err + } + switch n := fnode.(type) { + case *merkledag.ProtoNode: + return unixfs.DataSize(n.Data()) + case *merkledag.RawNode: + return n.Size() + default: + return 0, fmt.Errorf("unrecognized node type: %T", fnode) + } +} + +// Cat returns an iostream with a piece of data stored on the merkeldag with +// the given cid. +// +// TODO: this goes back to 'how is data stored and referenced' +// For now, lets just do things the ipfs way. +// https://github.com/filecoin-project/specs/issues/136 +func (dag *DAG) Cat(ctx context.Context, c cid.Cid) (uio.DagReader, error) { + data, err := dag.dserv.Get(ctx, c) + if err != nil { + return nil, err + } + return uio.NewDagReader(ctx, data, dag.dserv) +} + +// ImportData adds data from an io stream to the merkledag and returns the Cid +// of the given data +func (dag *DAG) ImportData(ctx context.Context, data io.Reader) (ipld.Node, error) { + bufds := ipld.NewBufferedDAG(ctx, dag.dserv) + + spl := chunk.DefaultSplitter(data) + + nd, err := imp.BuildDagFromReader(bufds, spl) + if err != nil { + return nil, err + } + return nd, bufds.Commit() +} + +// RecursiveGet will walk the dag in order (depth first) starting at the given root `c`. 
+func (dag *DAG) RecursiveGet(ctx context.Context, c cid.Cid) ([]ipld.Node, error) { + collector := dagCollector{ + dagserv: dag.dserv, + } + return collector.collectState(ctx, c) +} + +// +// Helpers for recursive dag get. +// + +type dagCollector struct { + dagserv ipld.DAGService + state []ipld.Node +} + +// collectState recursively walks the state tree starting with `stateRoot` and returns it as a slice of IPLD nodes. +// Calling this method does not have any side effects. +func (dc *dagCollector) collectState(ctx context.Context, stateRoot cid.Cid) ([]ipld.Node, error) { + dagNd, err := dc.dagserv.Get(ctx, stateRoot) + if err != nil { + return nil, errors.Wrapf(err, "failed to load stateroot from dagservice %s", stateRoot) + } + dc.addState(dagNd) + seen := cid.NewSet() + for _, l := range dagNd.Links() { + if err := merkledag.Walk(ctx, dc.getLinks, l.Cid, seen.Visit); err != nil { + return nil, errors.Wrapf(err, "dag service failed walking stateroot %s", stateRoot) + } + } + return dc.state, nil +} + +func (dc *dagCollector) getLinks(ctx context.Context, c cid.Cid) ([]*ipld.Link, error) { + nd, err := dc.dagserv.Get(ctx, c) + if err != nil { + return nil, errors.Wrapf(err, "failed to load link from dagservice %s", c) + } + dc.addState(nd) + return nd.Links(), nil +} + +func (dc *dagCollector) addState(nd ipld.Node) { + dc.state = append(dc.state, nd) +} diff --git a/internal/app/go-filecoin/plumbing/dag/dag_test.go b/pkg/util/dag/dag_test.go similarity index 79% rename from internal/app/go-filecoin/plumbing/dag/dag_test.go rename to pkg/util/dag/dag_test.go index af13bbd6ee..3b314e3ca6 100644 --- a/internal/app/go-filecoin/plumbing/dag/dag_test.go +++ b/pkg/util/dag/dag_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/filecoin-project/venus/pkg/testhelpers" + "github.com/filecoin-project/go-address" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-datastore" @@ -14,9 +16,8 @@ import ( "github.com/ipfs/go-merkledag" 
"github.com/stretchr/testify/assert" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" + "github.com/filecoin-project/venus/pkg/chain" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" ) func TestDAGGet(t *testing.T) { @@ -47,10 +48,10 @@ func TestDAGGet(t *testing.T) { dserv := merkledag.NewDAGService(blkserv) dag := NewDAG(dserv) - someCid := types.CidFromString(t, "somecid") + someCid := testhelpers.CidFromString(t, "somecid") _, err := dag.GetNode(ctx, someCid.String()) - assert.EqualError(t, err, "merkledag: not found") + assert.True(t, format.IsNotFound(err)) }) t.Run("matching IPLD node is emitted", func(t *testing.T) { @@ -63,10 +64,10 @@ func TestDAGGet(t *testing.T) { dserv := merkledag.NewDAGService(blkserv) dag := NewDAG(dserv) - ipldnode := chain.NewBuilder(t, address.Undef).NewGenesis().At(0).ToNode() + ipldnode := chain.NewBuilder(t, address.Undef).Genesis().At(0).ToNode() - // put into out blockservice - assert.NoError(t, blkserv.AddBlock(ipldnode)) + // put into out dagservice + assert.NoError(t, blkserv.AddBlock(ctx, ipldnode)) res, err := dag.GetNode(ctx, ipldnode.Cid().String()) assert.NoError(t, err) diff --git a/pkg/util/dag/oldpath/oldresolver/resolver.go b/pkg/util/dag/oldpath/oldresolver/resolver.go new file mode 100644 index 0000000000..cacbb9de3a --- /dev/null +++ b/pkg/util/dag/oldpath/oldresolver/resolver.go @@ -0,0 +1,198 @@ +package oldresolver + +import ( + "context" + "errors" + "fmt" + "time" + + path "github.com/filecoin-project/venus/pkg/util/dag/oldpath" + + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log/v2" + dag "github.com/ipfs/go-merkledag" +) + +var log = logging.Logger("pathresolv") + +// ErrNoComponents is used when Paths after a protocol +// do not contain at least one component +var 
ErrNoComponents = errors.New( + "path must contain at least one component") + +// ErrNoLink is returned when a link is not found in a path +type ErrNoLink struct { + Name string + Node cid.Cid +} + +// Error implements the Error interface for ErrNoLink with a useful +// human readable message. +func (e ErrNoLink) Error() string { + return fmt.Sprintf("no link named %q under %s", e.Name, e.Node.String()) +} + +// ResolveOnce resolves path through a single node +type ResolveOnce func(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) + +// Resolver provides path resolution to IPFS +// It has a pointer to a DAGService, which is uses to resolve nodes. +// TODO: now that this is more modular, try to unify this code with the +// +// the resolvers in namesys +type Resolver struct { + DAG ipld.NodeGetter + + ResolveOnce ResolveOnce +} + +// NewBasicResolver constructs a new basic resolver. +func NewBasicResolver(ds ipld.DAGService) *Resolver { + return &Resolver{ + DAG: ds, + ResolveOnce: ResolveSingle, + } +} + +// ResolveToLastNode walks the given path and returns the cid of the last node +// referenced by the path +func (r *Resolver) ResolveToLastNode(ctx context.Context, fpath path.Path) (cid.Cid, []string, error) { + c, p, err := path.SplitAbsPath(fpath) + if err != nil { + return cid.Cid{}, nil, err + } + + if len(p) == 0 { + return c, nil, nil + } + + nd, err := r.DAG.Get(ctx, c) + if err != nil { + return cid.Cid{}, nil, err + } + + for len(p) > 0 { + lnk, rest, err := r.ResolveOnce(ctx, r.DAG, nd, p) + + // Note: have to drop the error here as `ResolveOnce` doesn't handle 'leaf' + // paths (so e.g. 
for `echo '{"foo":123}' | ipfs dag put` we wouldn't be + // able to resolve `zdpu[...]/foo`) + if lnk == nil { + break + } + + if err != nil { + if err == dag.ErrLinkNotFound { + err = ErrNoLink{Name: p[0], Node: nd.Cid()} + } + return cid.Cid{}, nil, err + } + + next, err := lnk.GetNode(ctx, r.DAG) + if err != nil { + return cid.Cid{}, nil, err + } + nd = next + p = rest + } + + if len(p) == 0 { + return nd.Cid(), nil, nil + } + + // Confirm the path exists within the object + val, rest, err := nd.Resolve(p) + if err != nil { + if err == dag.ErrLinkNotFound { + err = ErrNoLink{Name: p[0], Node: nd.Cid()} + } + return cid.Cid{}, nil, err + } + + if len(rest) > 0 { + return cid.Cid{}, nil, errors.New("path failed to resolve fully") + } + switch val.(type) { + case *ipld.Link: + return cid.Cid{}, nil, errors.New("inconsistent ResolveOnce / nd.Resolve") + default: + return nd.Cid(), p, nil + } +} + +// ResolvePath fetches the node for given path. It returns the last item +// returned by ResolvePathComponents. +func (r *Resolver) ResolvePath(ctx context.Context, fpath path.Path) (ipld.Node, error) { + // validate path + if err := fpath.IsValid(); err != nil { + return nil, err + } + + nodes, err := r.ResolvePathComponents(ctx, fpath) + if err != nil || nodes == nil { + return nil, err + } + return nodes[len(nodes)-1], err +} + +// ResolveSingle simply resolves one hop of a path through a graph with no +// extra context (does not opaquely resolve through sharded nodes) +func ResolveSingle(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) { + return nd.ResolveLink(names) +} + +// ResolvePathComponents fetches the nodes for each segment of the given path. +// It uses the first path component as a hash (key) of the first node, then +// resolves all other components walking the links, with ResolveLinks. 
+func (r *Resolver) ResolvePathComponents(ctx context.Context, fpath path.Path) ([]ipld.Node, error) { + h, parts, err := path.SplitAbsPath(fpath) + if err != nil { + return nil, err + } + + log.Debug("resolve dag get") + nd, err := r.DAG.Get(ctx, h) + if err != nil { + return nil, err + } + + return r.ResolveLinks(ctx, nd, parts) +} + +// ResolveLinks iteratively resolves names by walking the link hierarchy. +// Every node is fetched from the DAGService, resolving the next name. +// Returns the list of nodes forming the path, starting with ndd. This list is +// guaranteed never to be empty. +// +// ResolveLinks(nd, []string{"foo", "bar", "baz"}) +// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links +func (r *Resolver) ResolveLinks(ctx context.Context, ndd ipld.Node, names []string) ([]ipld.Node, error) { + result := make([]ipld.Node, 0, len(names)+1) + result = append(result, ndd) + nd := ndd // dup arg workaround + + // for each of the path components + for len(names) > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Minute) + defer cancel() + + lnk, rest, err := r.ResolveOnce(ctx, r.DAG, nd, names) + if err == dag.ErrLinkNotFound { + return result, ErrNoLink{Name: names[0], Node: nd.Cid()} + } else if err != nil { + return result, err + } + + nextnode, err := lnk.GetNode(ctx, r.DAG) + if err != nil { + return result, err + } + + nd = nextnode + result = append(result, nextnode) + names = rest + } + return result, nil +} diff --git a/pkg/util/dag/oldpath/path.go b/pkg/util/dag/oldpath/path.go new file mode 100644 index 0000000000..dae45824a7 --- /dev/null +++ b/pkg/util/dag/oldpath/path.go @@ -0,0 +1,198 @@ +package oldpath + +import ( + "fmt" + "path" + "strings" + + cid "github.com/ipfs/go-cid" +) + +// helper type so path parsing errors include the path +type pathError struct { + error error + path string +} + +func (e *pathError) Error() string { + return fmt.Sprintf("invalid path %q: %s", e.path, e.error) +} 
+ +func (e *pathError) Unwrap() error { + return e.error +} + +func (e *pathError) Path() string { + return e.path +} + +// A Path represents an ipfs content path: +// - //path/to/file +// - /ipfs/ +// - /ipns//path/to/folder +// - etc +type Path string + +// ^^^ +// TODO: debate making this a private struct wrapped in a public interface +// would allow us to control creation, and cache segments. + +// FromString safely converts a string type to a Path type. +func FromString(s string) Path { + return Path(s) +} + +// FromCid safely converts a cid.Cid type to a Path type. +func FromCid(c cid.Cid) Path { + return Path("/ipfs/" + c.String()) +} + +// Segments returns the different elements of a path +// (elements are delimited by a /). +func (p Path) Segments() []string { + cleaned := path.Clean(string(p)) + segments := strings.Split(cleaned, "/") + + // Ignore leading slash + if len(segments[0]) == 0 { + segments = segments[1:] + } + + return segments +} + +// String converts a path to string. +func (p Path) String() string { + return string(p) +} + +// IsJustAKey returns true if the path is of the form or /ipfs/, or +// /ipld/ +func (p Path) IsJustAKey() bool { + parts := p.Segments() + return len(parts) == 2 && (parts[0] == "ipfs" || parts[0] == "ipld") +} + +// PopLastSegment returns a new Path without its final segment, and the final +// segment, separately. If there is no more to pop (the path is just a key), +// the original path is returned. +func (p Path) PopLastSegment() (Path, string, error) { + if p.IsJustAKey() { + return p, "", nil + } + + segs := p.Segments() + newPath, err := ParsePath("/" + strings.Join(segs[:len(segs)-1], "/")) + if err != nil { + return "", "", err + } + + return newPath, segs[len(segs)-1], nil +} + +// FromSegments returns a path given its different segments. +func FromSegments(prefix string, seg ...string) (Path, error) { + return ParsePath(prefix + strings.Join(seg, "/")) +} + +// ParsePath returns a well-formed ipfs Path. 
+// The returned path will always be prefixed with /ipfs/ or /ipns/. +// The prefix will be added if not present in the given string. +// This function will return an error when the given string is +// not a valid ipfs path. +func ParsePath(txt string) (Path, error) { + parts := strings.Split(txt, "/") + if len(parts) == 1 { + kp, err := ParseCidToPath(txt) + if err == nil { + return kp, nil + } + } + + // if the path doesnt begin with a '/' + // we expect this to start with a hash, and be an 'ipfs' path + if parts[0] != "" { + if _, err := cid.Decode(parts[0]); err != nil { + return "", &pathError{error: err, path: txt} + } + // The case when the path starts with hash without a protocol prefix + return Path("/ipfs/" + txt), nil + } + + if len(parts) < 3 { + return "", &pathError{error: fmt.Errorf("path does not begin with '/'"), path: txt} + } + + // TODO: make this smarter + switch parts[1] { + case "ipfs", "ipld": + if parts[2] == "" { + return "", &pathError{error: fmt.Errorf("not enough path components"), path: txt} + } + // Validate Cid. + _, err := cid.Decode(parts[2]) + if err != nil { + return "", &pathError{error: fmt.Errorf("invalid CID: %s", err), path: txt} + } + case "ipns": + if parts[2] == "" { + return "", &pathError{error: fmt.Errorf("not enough path components"), path: txt} + } + default: + return "", &pathError{error: fmt.Errorf("unknown namespace %q", parts[1]), path: txt} + } + + return Path(txt), nil +} + +// ParseCidToPath takes a CID in string form and returns a valid ipfs Path. +func ParseCidToPath(txt string) (Path, error) { + if txt == "" { + return "", &pathError{error: fmt.Errorf("empty"), path: txt} + } + + c, err := cid.Decode(txt) + if err != nil { + return "", &pathError{error: err, path: txt} + } + + return FromCid(c), nil +} + +// IsValid checks if a path is a valid ipfs Path. 
+func (p *Path) IsValid() error { + _, err := ParsePath(p.String()) + return err +} + +// Join joins strings slices using / +func Join(pths []string) string { + return strings.Join(pths, "/") +} + +// SplitList splits strings usings / +func SplitList(pth string) []string { + return strings.Split(pth, "/") +} + +// SplitAbsPath clean up and split fpath. It extracts the first component (which +// must be a Multihash) and return it separately. +func SplitAbsPath(fpath Path) (cid.Cid, []string, error) { + parts := fpath.Segments() + if parts[0] == "ipfs" || parts[0] == "ipld" { + parts = parts[1:] + } + + // if nothing, bail. + if len(parts) == 0 { + return cid.Cid{}, nil, &pathError{error: fmt.Errorf("empty"), path: string(fpath)} + } + + c, err := cid.Decode(parts[0]) + // first element in the path is a cid + if err != nil { + return cid.Cid{}, nil, &pathError{error: fmt.Errorf("invalid CID: %s", err), path: string(fpath)} + } + + return c, parts[1:], nil +} diff --git a/pkg/util/ffiwrapper/basicfs/fs.go b/pkg/util/ffiwrapper/basicfs/fs.go new file mode 100644 index 0000000000..01240d992f --- /dev/null +++ b/pkg/util/ffiwrapper/basicfs/fs.go @@ -0,0 +1,86 @@ +package basicfs + +import ( + "context" + "os" + "path/filepath" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/venus/pkg/util/storiface" +) + +type sectorFile struct { + abi.SectorID + storiface.SectorFileType +} + +type Provider struct { + Root string + + lk sync.Mutex + waitSector map[sectorFile]chan struct{} +} + +func (b *Provider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { + if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0o755); err != nil && !os.IsExist(err) { // nolint + return storiface.SectorPaths{}, nil, err + } + if err := 
os.Mkdir(filepath.Join(b.Root, storiface.FTSealed.String()), 0o755); err != nil && !os.IsExist(err) { // nolint + return storiface.SectorPaths{}, nil, err + } + if err := os.Mkdir(filepath.Join(b.Root, storiface.FTCache.String()), 0o755); err != nil && !os.IsExist(err) { // nolint + return storiface.SectorPaths{}, nil, err + } + + done := func() {} + + out := storiface.SectorPaths{ + ID: id.ID, + } + + for _, fileType := range storiface.PathTypes { + if !existing.Has(fileType) && !allocate.Has(fileType) { + continue + } + + b.lk.Lock() + if b.waitSector == nil { + b.waitSector = map[sectorFile]chan struct{}{} + } + ch, found := b.waitSector[sectorFile{id.ID, fileType}] + if !found { + ch = make(chan struct{}, 1) + b.waitSector[sectorFile{id.ID, fileType}] = ch + } + b.lk.Unlock() + + select { + case ch <- struct{}{}: + case <-ctx.Done(): + done() + return storiface.SectorPaths{}, nil, ctx.Err() + } + + path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id.ID)) + + prevDone := done + done = func() { + prevDone() + <-ch + } + + if !allocate.Has(fileType) { + if _, err := os.Stat(path); os.IsNotExist(err) { + done() + return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound + } + } + + storiface.SetPathByType(&out, fileType, path) + } + + return out, done, nil +} diff --git a/pkg/util/ffiwrapper/impl/config.go b/pkg/util/ffiwrapper/impl/config.go new file mode 100644 index 0000000000..eb8d269075 --- /dev/null +++ b/pkg/util/ffiwrapper/impl/config.go @@ -0,0 +1,55 @@ +package impl + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("ffiwrapper") + +type Config struct { + SealProofType abi.RegisteredSealProof + + _ struct{} // guard against nameless init +} + +//nolint +func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) { + switch { + case nv < 
network.Version7: + switch ssize { + case 2 << 10: + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil + case 8 << 20: + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil + case 512 << 20: + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil + case 32 << 30: + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil + case 64 << 30: + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil + default: + return 0, fmt.Errorf("unsupported sector size for miner: %v", ssize) + } + case nv >= network.Version7: + switch ssize { + case 2 << 10: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case 8 << 20: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case 512 << 20: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case 32 << 30: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case 64 << 30: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return 0, fmt.Errorf("unsupported sector size for miner: %v", ssize) + } + } + + return 0, fmt.Errorf("unsupported network version") +} diff --git a/pkg/util/ffiwrapper/impl/partialfile.go b/pkg/util/ffiwrapper/impl/partialfile.go new file mode 100644 index 0000000000..7799e4cfaa --- /dev/null +++ b/pkg/util/ffiwrapper/impl/partialfile.go @@ -0,0 +1,316 @@ +package impl + +import ( + "encoding/binary" + "fmt" + "io" + "os" + "syscall" + + "github.com/detailyang/go-fallocate" + + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/pkg/util/fsutil" + "github.com/filecoin-project/venus/pkg/util/storiface" +) + +const veryLargeRle = 1 << 20 + +// Sectors can be partially unsealed. 
We support this by appending a small +// trailer to each unsealed sector file containing an RLE+ marking which bytes +// in a sector are unsealed, and which are not (holes) + +// unsealed sector files internally have this structure +// [unpadded (raw) data][rle+][4B LE length fo the rle+ field] + +type partialFile struct { + maxPiece abi.PaddedPieceSize + + path string + allocated rlepluslazy.RLE + + file *os.File +} + +func writeTrailer(maxPieceSize int64, w *os.File, r rlepluslazy.RunIterator) error { + trailer, err := rlepluslazy.EncodeRuns(r, nil) + if err != nil { + return fmt.Errorf("encoding trailer: %w", err) + } + + // maxPieceSize == unpadded(sectorSize) == trailer start + if _, err := w.Seek(maxPieceSize, io.SeekStart); err != nil { + return fmt.Errorf("seek to trailer start: %w", err) + } + + rb, err := w.Write(trailer) + if err != nil { + return fmt.Errorf("writing trailer data: %w", err) + } + + if err := binary.Write(w, binary.LittleEndian, uint32(len(trailer))); err != nil { + return fmt.Errorf("writing trailer length: %w", err) + } + + return w.Truncate(maxPieceSize + int64(rb) + 4) +} + +func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644) // nolint + if err != nil { + return nil, fmt.Errorf("openning partial file '%s': %w", path, err) + } + + err = func() error { + err := fallocate.Fallocate(f, 0, int64(maxPieceSize)) + if errno, ok := err.(syscall.Errno); ok { + if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS { + log.Warnf("could not allocated space, ignoring: %v", errno) + err = nil // log and ignore + } + } + if err != nil { + return fmt.Errorf("fallocate '%s': %w", path, err) + } + + if err := writeTrailer(int64(maxPieceSize), f, &rlepluslazy.RunSliceIterator{}); err != nil { + return fmt.Errorf("writing trailer: %w", err) + } + + return nil + }() + if err != nil { + _ = f.Close() + return nil, err + } + if err := f.Close(); err != 
nil { + return nil, fmt.Errorf("close empty partial file: %w", err) + } + + return openPartialFile(maxPieceSize, path) +} + +func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { + f, err := os.OpenFile(path, os.O_RDWR, 0o644) // nolint + if err != nil { + return nil, fmt.Errorf("openning partial file '%s': %w", path, err) + } + + var rle rlepluslazy.RLE + err = func() error { + st, err := f.Stat() + if err != nil { + return fmt.Errorf("stat '%s': %w", path, err) + } + if st.Size() < int64(maxPieceSize) { + return fmt.Errorf("sector file '%s' was smaller than the sector size %d < %d", path, st.Size(), maxPieceSize) + } + // read trailer + var tlen [4]byte + _, err = f.ReadAt(tlen[:], st.Size()-int64(len(tlen))) + if err != nil { + return fmt.Errorf("reading trailer length: %w", err) + } + + // sanity-check the length + trailerLen := binary.LittleEndian.Uint32(tlen[:]) + expectLen := int64(trailerLen) + int64(len(tlen)) + int64(maxPieceSize) + if expectLen != st.Size() { + return fmt.Errorf("file '%s' has inconsistent length; has %d bytes; expected %d (%d trailer, %d sector data)", path, st.Size(), expectLen, int64(trailerLen)+int64(len(tlen)), maxPieceSize) + } + if trailerLen > veryLargeRle { + log.Warnf("Partial file '%s' has a VERY large trailer with %d bytes", path, trailerLen) + } + + trailerStart := st.Size() - int64(len(tlen)) - int64(trailerLen) + if trailerStart != int64(maxPieceSize) { + return fmt.Errorf("expected sector size to equal trailer start index") + } + + trailerBytes := make([]byte, trailerLen) + _, err = f.ReadAt(trailerBytes, trailerStart) + if err != nil { + return fmt.Errorf("reading trailer: %w", err) + } + + rle, err = rlepluslazy.FromBuf(trailerBytes) + if err != nil { + return fmt.Errorf("decoding trailer: %w", err) + } + + it, err := rle.RunIterator() + if err != nil { + return fmt.Errorf("getting trailer run iterator: %w", err) + } + + f, err := rlepluslazy.Fill(it) + if err != nil { + return 
fmt.Errorf("filling bitfield: %w", err) + } + lastSet, err := rlepluslazy.Count(f) + if err != nil { + return fmt.Errorf("finding last set byte index: %w", err) + } + + if lastSet > uint64(maxPieceSize) { + return fmt.Errorf("last set byte at index higher than sector size: %d > %d", lastSet, maxPieceSize) + } + + return nil + }() + if err != nil { + _ = f.Close() + return nil, err + } + + return &partialFile{ + maxPiece: maxPieceSize, + path: path, + allocated: rle, + file: f, + }, nil +} + +func (pf *partialFile) Close() error { + return pf.file.Close() +} + +func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (io.Writer, error) { + if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { + return nil, fmt.Errorf("seek piece start: %w", err) + } + + { + have, err := pf.allocated.RunIterator() + if err != nil { + return nil, err + } + + and, err := rlepluslazy.And(have, pieceRun(offset, size)) + if err != nil { + return nil, err + } + + c, err := rlepluslazy.Count(and) + if err != nil { + return nil, err + } + + if c > 0 { + log.Warnf("getting partial file writer overwriting %d allocated bytes", c) + } + } + + return pf.file, nil +} + +func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { + have, err := pf.allocated.RunIterator() + if err != nil { + return err + } + + ored, err := rlepluslazy.Or(have, pieceRun(offset, size)) + if err != nil { + return err + } + + if err := writeTrailer(int64(pf.maxPiece), pf.file, ored); err != nil { + return fmt.Errorf("writing trailer: %w", err) + } + + return nil +} + +func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { + have, err := pf.allocated.RunIterator() + if err != nil { + return err + } + + if err := fsutil.Deallocate(pf.file, int64(offset), int64(size)); err != nil { + return fmt.Errorf("deallocating: %w", err) + } + + s, err := rlepluslazy.Subtract(have, pieceRun(offset, 
size)) + if err != nil { + return err + } + + if err := writeTrailer(int64(pf.maxPiece), pf.file, s); err != nil { + return fmt.Errorf("writing trailer: %w", err) + } + + return nil +} + +func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { + if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { + return nil, fmt.Errorf("seek piece start: %w", err) + } + + { + have, err := pf.allocated.RunIterator() + if err != nil { + return nil, err + } + + and, err := rlepluslazy.And(have, pieceRun(offset, size)) + if err != nil { + return nil, err + } + + c, err := rlepluslazy.Count(and) + if err != nil { + return nil, err + } + + if c != uint64(size) { + log.Warnf("getting partial file reader reading %d unallocated bytes", uint64(size)-c) + } + } + + return pf.file, nil +} + +func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) { + return pf.allocated.RunIterator() +} + +func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + have, err := pf.Allocated() + if err != nil { + return false, err + } + + u, err := rlepluslazy.And(have, pieceRun(offset.Padded(), size.Padded())) + if err != nil { + return false, err + } + + uc, err := rlepluslazy.Count(u) + if err != nil { + return false, err + } + + return abi.PaddedPieceSize(uc) == size.Padded(), nil +} + +func pieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { + var runs []rlepluslazy.Run + if offset > 0 { + runs = append(runs, rlepluslazy.Run{ + Val: false, + Len: uint64(offset), + }) + } + + runs = append(runs, rlepluslazy.Run{ + Val: true, + Len: uint64(size), + }) + + return &rlepluslazy.RunSliceIterator{Runs: runs} +} diff --git a/pkg/util/ffiwrapper/impl/sealer.go b/pkg/util/ffiwrapper/impl/sealer.go new file mode 100644 index 0000000000..83309d8de0 --- /dev/null +++ b/pkg/util/ffiwrapper/impl/sealer.go @@ -0,0 +1,10 @@ +package impl + 
+type Sealer struct { + sectors SectorProvider + stopping chan struct{} +} + +func (sb *Sealer) Stop() { + close(sb.stopping) +} diff --git a/pkg/util/ffiwrapper/impl/sealer_cgo.go b/pkg/util/ffiwrapper/impl/sealer_cgo.go new file mode 100644 index 0000000000..ad7b39ebcc --- /dev/null +++ b/pkg/util/ffiwrapper/impl/sealer_cgo.go @@ -0,0 +1,509 @@ +//go:build cgo +// +build cgo + +package impl + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math/bits" + "os" + "runtime" + + "github.com/filecoin-project/go-commp-utils/zerocomm" + + "github.com/ipfs/go-cid" + + ffi "github.com/filecoin-project/filecoin-ffi" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" + commcid "github.com/filecoin-project/go-fil-commcid" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/venus/pkg/util/fr32" + "github.com/filecoin-project/venus/pkg/util/storiface" +) + +var _ Storage = &Sealer{} + +func New(sectors SectorProvider) (*Sealer, error) { + sb := &Sealer{ + sectors: sectors, + + stopping: make(chan struct{}), + } + + return sb, nil +} + +func (sb *Sealer) NewSector(ctx context.Context, sector storage.SectorRef) error { + // TODO: Allocate the sector here instead of in addpiece + + return nil +} + +func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { + // TODO: allow tuning those: + chunk := abi.PaddedPieceSize(4 << 20) + parallel := runtime.NumCPU() + + var offset abi.UnpaddedPieceSize + for _, size := range existingPieceSizes { + offset += size + } + + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return abi.PieceInfo{}, err + } + + maxPieceSize := abi.PaddedPieceSize(ssize) + + if 
offset.Padded()+pieceSize.Padded() > maxPieceSize { + return abi.PieceInfo{}, fmt.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset) + } + + var done func() + var stagedFile *partialFile + + defer func() { + if done != nil { + done() + } + + if stagedFile != nil { + if err := stagedFile.Close(); err != nil { + log.Errorf("closing staged file: %+v", err) + } + } + }() + + var stagedPath storiface.SectorPaths + if len(existingPieceSizes) == 0 { + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, storiface.FTUnsealed, storiface.PathSealing) + if err != nil { + return abi.PieceInfo{}, fmt.Errorf("acquire unsealed sector: %w", err) + } + + stagedFile, err = createPartialFile(maxPieceSize, stagedPath.Unsealed) + if err != nil { + return abi.PieceInfo{}, fmt.Errorf("creating unsealed sector file: %w", err) + } + } else { + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0, storiface.PathSealing) + if err != nil { + return abi.PieceInfo{}, fmt.Errorf("acquire unsealed sector: %w", err) + } + + stagedFile, err = openPartialFile(maxPieceSize, stagedPath.Unsealed) + if err != nil { + return abi.PieceInfo{}, fmt.Errorf("opening unsealed sector file: %w", err) + } + } + + w, err := stagedFile.Writer(storiface.UnpaddedByteIndex(offset).Padded(), pieceSize.Padded()) + if err != nil { + return abi.PieceInfo{}, fmt.Errorf("getting partial file writer: %w", err) + } + + pw := fr32.NewPadWriter(w) + + pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw) + + throttle := make(chan []byte, parallel) + piecePromises := make([]func() (abi.PieceInfo, error), 0) + + buf := make([]byte, chunk.Unpadded()) + for i := 0; i < parallel; i++ { + if abi.UnpaddedPieceSize(i)*chunk.Unpadded() >= pieceSize { + break // won't use this many buffers + } + throttle <- make([]byte, chunk.Unpadded()) + } + + for { + var read int + for rbuf := buf; len(rbuf) > 0; { + n, err := pr.Read(rbuf) + 
if err != nil && err != io.EOF { + return abi.PieceInfo{}, fmt.Errorf("pr read error: %w", err) + } + + rbuf = rbuf[n:] + read += n + + if err == io.EOF { + break + } + } + if read == 0 { + break + } + + done := make(chan struct { + cid.Cid + error + }, 1) + pbuf := <-throttle + copy(pbuf, buf[:read]) + + go func(read int) { + defer func() { + throttle <- pbuf + }() + + c, err := sb.pieceCid(sector.ProofType, pbuf[:read]) + done <- struct { + cid.Cid + error + }{c, err} + }(read) + + piecePromises = append(piecePromises, func() (abi.PieceInfo, error) { + select { + case e := <-done: + if e.error != nil { + return abi.PieceInfo{}, e.error + } + + return abi.PieceInfo{ + Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(), + PieceCID: e.Cid, + }, nil + case <-ctx.Done(): + return abi.PieceInfo{}, ctx.Err() + } + }) + } + + if err := pw.Close(); err != nil { + return abi.PieceInfo{}, fmt.Errorf("closing padded writer: %w", err) + } + + if err := stagedFile.MarkAllocated(storiface.UnpaddedByteIndex(offset).Padded(), pieceSize.Padded()); err != nil { + return abi.PieceInfo{}, fmt.Errorf("marking data range as allocated: %w", err) + } + + if err := stagedFile.Close(); err != nil { + return abi.PieceInfo{}, err + } + stagedFile = nil + + if len(piecePromises) == 1 { + return piecePromises[0]() + } + + pieceCids := make([]abi.PieceInfo, len(piecePromises)) + for i, promise := range piecePromises { + pieceCids[i], err = promise() + if err != nil { + return abi.PieceInfo{}, err + } + } + + pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids) + if err != nil { + return abi.PieceInfo{}, fmt.Errorf("generate unsealed CID: %w", err) + } + + // validate that the pieceCID was properly formed + if _, err := commcid.CIDToPieceCommitmentV1(pieceCID); err != nil { + return abi.PieceInfo{}, err + } + + return abi.PieceInfo{ + Size: pieceSize.Padded(), + PieceCID: pieceCID, + }, nil +} + +func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, 
error) { + prf, werr, err := commpffi.ToReadableFile(bytes.NewReader(in), int64(len(in))) + if err != nil { + return cid.Undef, fmt.Errorf("getting tee reader pipe: %w", err) + } + + pieceCID, err := ffi.GeneratePieceCIDFromFile(spt, prf, abi.UnpaddedPieceSize(len(in))) + if err != nil { + return cid.Undef, fmt.Errorf("generating piece commitment: %w", err) + } + + _ = prf.Close() + + return pieceCID, werr() +} + +func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing) + if err != nil { + return nil, fmt.Errorf("acquiring sector paths: %w", err) + } + defer done() + + e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0o644) // nolint:gosec + if err != nil { + return nil, fmt.Errorf("ensuring sealed file exists: %w", err) + } + if err := e.Close(); err != nil { + return nil, err + } + + if err := os.Mkdir(paths.Cache, 0o755); err != nil { // nolint + if os.IsExist(err) { + log.Warnf("existing cache in %s; removing", paths.Cache) + + if err := os.RemoveAll(paths.Cache); err != nil { + return nil, fmt.Errorf("remove existing sector cache from %s (sector %d): %w", paths.Cache, sector, err) + } + + if err := os.Mkdir(paths.Cache, 0o755); err != nil { // nolint:gosec + return nil, fmt.Errorf("mkdir cache path after cleanup: %w", err) + } + } else { + return nil, err + } + } + + var sum abi.UnpaddedPieceSize + for _, piece := range pieces { + sum += piece.Size.Unpadded() + } + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return nil, err + } + ussize := abi.PaddedPieceSize(ssize).Unpadded() + if sum != ussize { + return nil, fmt.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum)) + } + + // TODO: context cancellation respect + p1o, err := 
ffi.SealPreCommitPhase1( + sector.ProofType, + paths.Cache, + paths.Unsealed, + paths.Sealed, + sector.ID.Number, + sector.ID.Miner, + ticket, + pieces, + ) + if err != nil { + return nil, fmt.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) + } + p1odec := map[string]interface{}{} + if err := json.Unmarshal(p1o, &p1odec); err != nil { + return nil, fmt.Errorf("unmarshaling pc1 output: %w", err) + } + + p1odec["_lotus_SealRandomness"] = ticket + + return json.Marshal(&p1odec) +} + +var PC2CheckRounds = 3 + +func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) + if err != nil { + return storage.SectorCids{}, fmt.Errorf("acquiring sector paths: %w", err) + } + defer done() + + sealedCID, unsealedCID, err := ffi.SealPreCommitPhase2(phase1Out, paths.Cache, paths.Sealed) + if err != nil { + return storage.SectorCids{}, fmt.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) + } + + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return storage.SectorCids{}, fmt.Errorf("get ssize: %w", err) + } + + p1odec := map[string]interface{}{} + if err := json.Unmarshal(phase1Out, &p1odec); err != nil { + return storage.SectorCids{}, fmt.Errorf("unmarshaling pc1 output: %w", err) + } + + var ticket abi.SealRandomness + ti, found := p1odec["_lotus_SealRandomness"] + + if found { + ticket, err = base64.StdEncoding.DecodeString(ti.(string)) + if err != nil { + return storage.SectorCids{}, fmt.Errorf("decoding ticket: %w", err) + } + + for i := 0; i < PC2CheckRounds; i++ { + var sd [32]byte + _, _ = rand.Read(sd[:]) + + _, err := ffi.SealCommitPhase1( + sector.ProofType, + sealedCID, + unsealedCID, + paths.Cache, + paths.Sealed, + sector.ID.Number, + sector.ID.Miner, + ticket, + sd[:], + 
[]abi.PieceInfo{{Size: abi.PaddedPieceSize(ssize), PieceCID: unsealedCID}}, + ) + if err != nil { + log.Warn("checking PreCommit failed: ", err) + log.Warnf("num:%d tkt:%v seed:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, sd[:], sealedCID, unsealedCID) + + return storage.SectorCids{}, fmt.Errorf("checking PreCommit failed: %w", err) + } + } + } + + return storage.SectorCids{ + Unsealed: unsealedCID, + Sealed: sealedCID, + }, nil +} + +func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return err + } + maxPieceSize := abi.PaddedPieceSize(ssize) + + if len(keepUnsealed) > 0 { + + sr := pieceRun(0, maxPieceSize) + + for _, s := range keepUnsealed { + si := &rlepluslazy.RunSliceIterator{} + if s.Offset != 0 { + si.Runs = append(si.Runs, rlepluslazy.Run{Val: false, Len: uint64(s.Offset)}) + } + si.Runs = append(si.Runs, rlepluslazy.Run{Val: true, Len: uint64(s.Size)}) + + var err error + sr, err = rlepluslazy.Subtract(sr, si) + if err != nil { + return err + } + } + + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0, storiface.PathStorage) + if err != nil { + return fmt.Errorf("acquiring sector cache path: %w", err) + } + defer done() + + pf, err := openPartialFile(maxPieceSize, paths.Unsealed) + if err == nil { + var at uint64 + for sr.HasNext() { + r, err := sr.NextRun() + if err != nil { + _ = pf.Close() + return err + } + + offset := at + at += r.Len + if !r.Val { + continue + } + + err = pf.Free(storiface.PaddedByteIndex(abi.UnpaddedPieceSize(offset).Padded()), abi.UnpaddedPieceSize(r.Len).Padded()) + if err != nil { + _ = pf.Close() + return fmt.Errorf("free partial file range: %w", err) + } + } + + if err := pf.Close(); err != nil { + return err + } + } else { + if !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("opening partial file: %w", err) + } + } + + } + + paths, done, err 
:= sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, 0, storiface.PathStorage) + if err != nil { + return fmt.Errorf("acquiring sector cache path: %w", err) + } + defer done() + + return ffi.ClearCache(uint64(ssize), paths.Cache) +} + +func GetRequiredPadding(oldLength abi.PaddedPieceSize, newPieceLength abi.PaddedPieceSize) ([]abi.PaddedPieceSize, abi.PaddedPieceSize) { + padPieces := make([]abi.PaddedPieceSize, 0) + + toFill := uint64(-oldLength % newPieceLength) + + n := bits.OnesCount64(toFill) + var sum abi.PaddedPieceSize + for i := 0; i < n; i++ { + next := bits.TrailingZeros64(toFill) + psize := uint64(1) << uint(next) + toFill ^= psize + + padded := abi.PaddedPieceSize(psize) + padPieces = append(padPieces, padded) + sum += padded + } + + return padPieces, sum +} + +func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { + ssize, err := proofType.SectorSize() + if err != nil { + return cid.Undef, err + } + + pssize := abi.PaddedPieceSize(ssize) + allPieces := make([]abi.PieceInfo, 0, len(pieces)) + if len(pieces) == 0 { + allPieces = append(allPieces, abi.PieceInfo{ + Size: pssize, + PieceCID: zerocomm.ZeroPieceCommitment(pssize.Unpadded()), + }) + } else { + var sum abi.PaddedPieceSize + + padTo := func(pads []abi.PaddedPieceSize) { + for _, p := range pads { + allPieces = append(allPieces, abi.PieceInfo{ + Size: p, + PieceCID: zerocomm.ZeroPieceCommitment(p.Unpadded()), + }) + + sum += p + } + } + + for _, p := range pieces { + ps, _ := GetRequiredPadding(sum, p.Size) + padTo(ps) + + allPieces = append(allPieces, p) + sum += p.Size + } + + ps, _ := GetRequiredPadding(sum, pssize) + padTo(ps) + } + + return ffi.GenerateUnsealedCID(proofType, allPieces) +} diff --git a/pkg/util/ffiwrapper/impl/testing.go b/pkg/util/ffiwrapper/impl/testing.go new file mode 100644 index 0000000000..ad31720bde --- /dev/null +++ b/pkg/util/ffiwrapper/impl/testing.go @@ -0,0 +1,40 @@ +package impl + +import ( + "context" + + 
"github.com/filecoin-project/go-state-types/abi" + + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" +) + +// FakeVerifier is a simple mock Verifier for testing. +type FakeVerifier struct{} + +var _ ffiwrapper.Verifier = (*FakeVerifier)(nil) + +func (f *FakeVerifier) VerifySeal(proof7.SealVerifyInfo) (bool, error) { + return true, nil +} + +func (f *FakeVerifier) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) { + return true, nil +} + +func (f *FakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + return true, nil +} + +func (f *FakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { + return true, nil +} + +func (f *FakeVerifier) VerifyWindowPoSt(context.Context, proof7.WindowPoStVerifyInfo) (bool, error) { + return true, nil +} + +func (f *FakeVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { + return []uint64{}, nil +} diff --git a/pkg/util/ffiwrapper/impl/types.go b/pkg/util/ffiwrapper/impl/types.go new file mode 100644 index 0000000000..454d9d7506 --- /dev/null +++ b/pkg/util/ffiwrapper/impl/types.go @@ -0,0 +1,26 @@ +package impl + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/basicfs" + "github.com/filecoin-project/venus/pkg/util/storiface" +) + +type Storage interface { + SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) + SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storage.SectorCids, error) + + FinalizeSector(ctx context.Context, 
sector storage.SectorRef, keepUnsealed []storage.Range) error +} + +type SectorProvider interface { + // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist + // * returns an error when allocate is set, and existing isn't, and the sector exists + AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) +} + +var _ SectorProvider = &basicfs.Provider{} diff --git a/pkg/util/ffiwrapper/impl/unseal_ranges.go b/pkg/util/ffiwrapper/impl/unseal_ranges.go new file mode 100644 index 0000000000..2e4041bbd1 --- /dev/null +++ b/pkg/util/ffiwrapper/impl/unseal_ranges.go @@ -0,0 +1,27 @@ +package impl + +import ( + "fmt" + + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/pkg/util/storiface" +) + +// merge gaps between ranges which are close to each other +// TODO: more benchmarking to come up with more optimal number +const mergeGaps = 32 << 20 + +// TODO const expandRuns = 16 << 20 // unseal more than requested for future requests + +// nolint +func computeUnsealRanges(unsealed rlepluslazy.RunIterator, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (rlepluslazy.RunIterator, error) { + todo := pieceRun(offset.Padded(), size.Padded()) + todo, err := rlepluslazy.Subtract(todo, unsealed) + if err != nil { + return nil, fmt.Errorf("compute todo-unsealed: %w", err) + } + + return rlepluslazy.JoinClose(todo, mergeGaps) +} diff --git a/pkg/util/ffiwrapper/impl/verifier_cgo.go b/pkg/util/ffiwrapper/impl/verifier_cgo.go new file mode 100644 index 0000000000..5a1d5287d3 --- /dev/null +++ b/pkg/util/ffiwrapper/impl/verifier_cgo.go @@ -0,0 +1,172 @@ +//go:build cgo +// +build cgo + +package impl + +import ( + "context" + "fmt" + + "go.opencensus.io/trace" + + ffi "github.com/filecoin-project/filecoin-ffi" + 
"github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + ffiproof "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + "github.com/filecoin-project/venus/pkg/util/storiface" +) + +func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { + randomness[31] &= 0x3f + privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? + if err != nil { + return nil, err + } + defer done() + if len(skipped) > 0 { + return nil, fmt.Errorf("pubSectorToPriv skipped sectors: %+v", skipped) + } + + return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) +} + +func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { + randomness[31] &= 0x3f + privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) + if err != nil { + return nil, nil, fmt.Errorf("gathering sector info: %w", err) + } + + defer done() + + if len(skipped) > 0 { + return nil, skipped, fmt.Errorf("pubSectorToPriv skipped some sectors") + } + + proof, faulty, err := ffi.GenerateWindowPoSt(minerID, privsectors, randomness) + + var faultyIDs []abi.SectorID + for _, f := range faulty { + faultyIDs = append(faultyIDs, abi.SectorID{ + Miner: minerID, + Number: f, + }) + } + return proof, faultyIDs, err +} + +func (sb *Sealer) pubExtendedSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) 
(abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { + fmap := map[abi.SectorNumber]struct{}{} + for _, fault := range faults { + fmap[fault] = struct{}{} + } + + var doneFuncs []func() + done := func() { + for _, df := range doneFuncs { + df() + } + } + + var skipped []abi.SectorID + var out []ffi.PrivateSectorInfo + for _, s := range sectorInfo { + if _, faulty := fmap[s.SectorNumber]; faulty { + continue + } + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: mid, Number: s.SectorNumber}, + ProofType: s.SealProof, + } + proveUpdate := s.SectorKey != nil + var cache string + var sealed string + if proveUpdate { + log.Debugf("Posting over updated sector for sector id: %d", s.SectorNumber) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTUpdateCache|storiface.FTUpdate, 0, storiface.PathStorage) + if err != nil { + log.Warnw("failed to acquire FTUpdateCache and FTUpdate of sector, skipping", "sector", sid.ID, "error", err) + skipped = append(skipped, sid.ID) + continue + } + doneFuncs = append(doneFuncs, d) + cache = paths.UpdateCache + sealed = paths.Update + } else { + log.Debugf("Posting over sector key sector for sector id: %d", s.SectorNumber) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage) + if err != nil { + log.Warnw("failed to acquire FTCache and FTSealed of sector, skipping", "sector", sid.ID, "error", err) + skipped = append(skipped, sid.ID) + continue + } + doneFuncs = append(doneFuncs, d) + cache = paths.Cache + sealed = paths.Sealed + } + + postProofType, err := rpt(s.SealProof) + if err != nil { + done() + return ffi.SortedPrivateSectorInfo{}, nil, nil, fmt.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) + } + + ffiInfo := ffiproof.SectorInfo{ + SealProof: s.SealProof, + SectorNumber: s.SectorNumber, + SealedCID: s.SealedCID, + } + out = append(out, ffi.PrivateSectorInfo{ + CacheDirPath: 
cache, + PoStProofType: postProofType, + SealedSectorPath: sealed, + SectorInfo: ffiInfo, + }) + } + + return ffi.NewSortedPrivateSectorInfo(out...), skipped, done, nil +} + +var _ ffiwrapper.Verifier = ProofVerifier + +type proofVerifier struct{} + +var ProofVerifier = proofVerifier{} + +func (proofVerifier) VerifySeal(info proof.SealVerifyInfo) (bool, error) { + return ffi.VerifySeal(info) +} + +func (proofVerifier) VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) { + return ffi.VerifyAggregateSeals(aggregate) +} + +func (proofVerifier) VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) { + return ffi.SectorUpdate.VerifyUpdateProof(update) +} + +func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { + info.Randomness[31] &= 0x3f + _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") + defer span.End() + + return ffi.VerifyWinningPoSt(info) +} + +func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { + info.Randomness[31] &= 0x3f + _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") + defer span.End() + + return ffi.VerifyWindowPoSt(info) +} + +func (proofVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { + randomness[31] &= 0x3f + return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount) +} diff --git a/pkg/util/ffiwrapper/verifier.go b/pkg/util/ffiwrapper/verifier.go new file mode 100644 index 0000000000..60bf1be9c6 --- /dev/null +++ b/pkg/util/ffiwrapper/verifier.go @@ -0,0 +1,19 @@ +package ffiwrapper + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" +) + +type Verifier interface { + 
VerifySeal(proof7.SealVerifyInfo) (bool, error) + VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) + VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) + VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) (bool, error) + + GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) +} diff --git a/pkg/util/fr32/fr32.go b/pkg/util/fr32/fr32.go new file mode 100644 index 0000000000..3b37013ddc --- /dev/null +++ b/pkg/util/fr32/fr32.go @@ -0,0 +1,157 @@ +package fr32 + +import ( + "math/bits" + "runtime" + "sync" + + "github.com/filecoin-project/go-state-types/abi" +) + +var MTTresh = uint64(32 << 20) + +func mtChunkCount(usz abi.PaddedPieceSize) uint64 { + threads := (uint64(usz)) / MTTresh + if threads > uint64(runtime.NumCPU()) { + threads = 1 << (bits.Len32(uint32(runtime.NumCPU()))) + } + if threads == 0 { + return 1 + } + if threads > 32 { + return 32 // avoid too large buffers + } + return threads +} + +func mt(in, out []byte, padLen int, op func(unpadded, padded []byte)) { + threads := mtChunkCount(abi.PaddedPieceSize(padLen)) + threadBytes := abi.PaddedPieceSize(padLen / int(threads)) + + var wg sync.WaitGroup + wg.Add(int(threads)) + + for i := 0; i < int(threads); i++ { + go func(thread int) { + defer wg.Done() + + start := threadBytes * abi.PaddedPieceSize(thread) + end := start + threadBytes + + op(in[start.Unpadded():end.Unpadded()], out[start:end]) + }(i) + } + wg.Wait() +} + +func Pad(in, out []byte) { + // Assumes len(in)%127==0 and len(out)%128==0 + if len(out) > int(MTTresh) { + mt(in, out, len(out), pad) + return + } + + pad(in, out) +} + +func pad(in, out []byte) { + chunks := len(out) / 128 + for chunk := 0; chunk < chunks; chunk++ { + inOff := chunk * 127 + 
outOff := chunk * 128 + + copy(out[outOff:outOff+31], in[inOff:inOff+31]) + + t := in[inOff+31] >> 6 + out[outOff+31] = in[inOff+31] & 0x3f + var v byte + + for i := 32; i < 64; i++ { + v = in[inOff+i] + out[outOff+i] = (v << 2) | t + t = v >> 6 + } + + t = v >> 4 + out[outOff+63] &= 0x3f + + for i := 64; i < 96; i++ { + v = in[inOff+i] + out[outOff+i] = (v << 4) | t + t = v >> 4 + } + + t = v >> 2 + out[outOff+95] &= 0x3f + + for i := 96; i < 127; i++ { + v = in[inOff+i] + out[outOff+i] = (v << 6) | t + t = v >> 2 + } + + out[outOff+127] = t & 0x3f + } +} + +func Unpad(in []byte, out []byte) { + // Assumes len(in)%128==0 and len(out)%127==0 + if len(in) > int(MTTresh) { + mt(out, in, len(in), unpad) + return + } + + unpad(out, in) +} + +func unpad(out, in []byte) { + chunks := len(in) / 128 + for chunk := 0; chunk < chunks; chunk++ { + inOffNext := chunk*128 + 1 + outOff := chunk * 127 + + at := in[chunk*128] + + for i := 0; i < 32; i++ { + next := in[i+inOffNext] + + out[outOff+i] = at + // out[i] |= next << 8 + + at = next + } + + out[outOff+31] |= at << 6 + + for i := 32; i < 64; i++ { + next := in[i+inOffNext] + + out[outOff+i] = at >> 2 + out[outOff+i] |= next << 6 + + at = next + } + + out[outOff+63] ^= (at << 6) ^ (at << 4) + + for i := 64; i < 96; i++ { + next := in[i+inOffNext] + + out[outOff+i] = at >> 4 + out[outOff+i] |= next << 4 + + at = next + } + + out[outOff+95] ^= (at << 4) ^ (at << 2) + + for i := 96; i < 127; i++ { + next := in[i+inOffNext] + + out[outOff+i] = at >> 6 + out[outOff+i] |= next << 2 + + at = next + } + } +} diff --git a/pkg/util/fr32/fr32_ffi_cmp_test.go b/pkg/util/fr32/fr32_ffi_cmp_test.go new file mode 100644 index 0000000000..71e3d12aca --- /dev/null +++ b/pkg/util/fr32/fr32_ffi_cmp_test.go @@ -0,0 +1,66 @@ +package fr32_test + +import ( + "bytes" + "io" + "os" + "testing" + + ffi "github.com/filecoin-project/filecoin-ffi" + commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" + 
"github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/util/fr32" +) + +func TestWriteTwoPcs(t *testing.T) { + tf.UnitTest(t) + tf, _ := os.CreateTemp("/tmp/", "scrb-") + + paddedSize := abi.PaddedPieceSize(16 << 20) + n := 2 + + var rawBytes []byte + + for i := 0; i < n; i++ { + buf := bytes.Repeat([]byte{0xab * byte(i)}, int(paddedSize.Unpadded())) + rawBytes = append(rawBytes, buf...) + + rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) + + _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) + if err != nil { + panic(err) + } + if err := w(); err != nil { + panic(err) + } + } + + if _, err := tf.Seek(io.SeekStart, 0); err != nil { // nolint:staticcheck + panic(err) + } + + ffiBytes, err := io.ReadAll(tf) + if err != nil { + panic(err) + } + + if err := tf.Close(); err != nil { + panic(err) + } + + if err := os.Remove(tf.Name()); err != nil { + panic(err) + } + + outBytes := make([]byte, int(paddedSize)*n) + fr32.Pad(rawBytes, outBytes) + require.Equal(t, ffiBytes, outBytes) + + unpadBytes := make([]byte, int(paddedSize.Unpadded())*n) + fr32.Unpad(ffiBytes, unpadBytes) + require.Equal(t, rawBytes, unpadBytes) +} diff --git a/pkg/util/fr32/fr32_test.go b/pkg/util/fr32/fr32_test.go new file mode 100644 index 0000000000..a222632bb3 --- /dev/null +++ b/pkg/util/fr32/fr32_test.go @@ -0,0 +1,262 @@ +package fr32_test + +import ( + "bytes" + "io" + "math/rand" + "os" + "testing" + + ffi "github.com/filecoin-project/filecoin-ffi" + commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/util/fr32" +) + +func padFFI(buf 
[]byte) []byte { + rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) + tf, _ := os.CreateTemp("/tmp/", "scrb-") + + _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) + if err != nil { + panic(err) + } + if err := w(); err != nil { + panic(err) + } + + if _, err := tf.Seek(io.SeekStart, 0); err != nil { // nolint:staticcheck + panic(err) + } + + padded, err := io.ReadAll(tf) + if err != nil { + panic(err) + } + + if err := tf.Close(); err != nil { + panic(err) + } + + if err := os.Remove(tf.Name()); err != nil { + panic(err) + } + + return padded +} + +func TestPadChunkFFI(t *testing.T) { + tf.UnitTest(t) + testByteChunk := func(b byte) func(*testing.T) { + return func(t *testing.T) { + var buf [128]byte + copy(buf[:], bytes.Repeat([]byte{b}, 127)) + + fr32.Pad(buf[:], buf[:]) + + expect := padFFI(bytes.Repeat([]byte{b}, 127)) + + require.Equal(t, expect, buf[:]) + } + } + + t.Run("ones", testByteChunk(0xff)) + t.Run("lsb1", testByteChunk(0x01)) + t.Run("msb1", testByteChunk(0x80)) + t.Run("zero", testByteChunk(0x0)) + t.Run("mid", testByteChunk(0x3c)) +} + +func TestPadChunkRandEqFFI(t *testing.T) { + tf.UnitTest(t) + for i := 0; i < 200; i++ { + var input [127]byte + rand.Read(input[:]) + + var buf [128]byte + + fr32.Pad(input[:], buf[:]) + + expect := padFFI(input[:]) + + require.Equal(t, expect, buf[:]) + } +} + +func TestRoundtrip(t *testing.T) { + tf.UnitTest(t) + testByteChunk := func(b byte) func(*testing.T) { + return func(t *testing.T) { + var buf [128]byte + input := bytes.Repeat([]byte{0x01}, 127) + + fr32.Pad(input, buf[:]) + + var out [127]byte + fr32.Unpad(buf[:], out[:]) + + require.Equal(t, input, out[:]) + } + } + + t.Run("ones", testByteChunk(0xff)) + t.Run("lsb1", testByteChunk(0x01)) + t.Run("msb1", testByteChunk(0x80)) + t.Run("zero", testByteChunk(0x0)) + t.Run("mid", testByteChunk(0x3c)) +} + +func TestRoundtripChunkRand(t *testing.T) { + 
tf.UnitTest(t) + for i := 0; i < 200; i++ { + var input [127]byte + rand.Read(input[:]) + + var buf [128]byte + copy(buf[:], input[:]) + + fr32.Pad(buf[:], buf[:]) + + var out [127]byte + fr32.Unpad(buf[:], out[:]) + + require.Equal(t, input[:], out[:]) + } +} + +func TestRoundtrip16MRand(t *testing.T) { + tf.UnitTest(t) + up := abi.PaddedPieceSize(16 << 20).Unpadded() + + input := make([]byte, up) + rand.Read(input[:]) + + buf := make([]byte, 16<<20) + + fr32.Pad(input, buf) + + out := make([]byte, up) + fr32.Unpad(buf, out) + + require.Equal(t, input, out) + + ffi := padFFI(input) + require.Equal(t, ffi, buf) +} + +func BenchmarkPadChunk(b *testing.B) { + tf.BenchUnitTest(b) + var buf [128]byte + in := bytes.Repeat([]byte{0xff}, 127) + + b.SetBytes(127) + + for i := 0; i < b.N; i++ { + fr32.Pad(in, buf[:]) + } +} + +func BenchmarkChunkRoundtrip(b *testing.B) { + tf.BenchUnitTest(b) + var buf [128]byte + copy(buf[:], bytes.Repeat([]byte{0xff}, 127)) + var out [127]byte + + b.SetBytes(127) + + for i := 0; i < b.N; i++ { + fr32.Pad(buf[:], buf[:]) + fr32.Unpad(buf[:], out[:]) + } +} + +func BenchmarkUnpadChunk(b *testing.B) { + tf.BenchUnitTest(b) + var buf [128]byte + copy(buf[:], bytes.Repeat([]byte{0xff}, 127)) + + fr32.Pad(buf[:], buf[:]) + var out [127]byte + + b.SetBytes(127) + b.ReportAllocs() + + bs := buf[:] + + for i := 0; i < b.N; i++ { + fr32.Unpad(bs, out[:]) + } +} + +func BenchmarkUnpad16MChunk(b *testing.B) { + tf.BenchUnitTest(b) + up := abi.PaddedPieceSize(16 << 20).Unpadded() + + var buf [16 << 20]byte + + fr32.Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:]) + var out [16 << 20]byte + + b.SetBytes(16 << 20) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + fr32.Unpad(buf[:], out[:]) + } +} + +func BenchmarkPad16MChunk(b *testing.B) { + tf.BenchUnitTest(b) + up := abi.PaddedPieceSize(16 << 20).Unpadded() + + var buf [16 << 20]byte + + in := bytes.Repeat([]byte{0xff}, int(up)) + + b.SetBytes(16 << 20) + b.ReportAllocs() + 
b.ResetTimer() + + for i := 0; i < b.N; i++ { + fr32.Pad(in, buf[:]) + } +} + +func BenchmarkPad1GChunk(b *testing.B) { + tf.BenchUnitTest(b) + up := abi.PaddedPieceSize(1 << 30).Unpadded() + + var buf [1 << 30]byte + + in := bytes.Repeat([]byte{0xff}, int(up)) + + b.SetBytes(1 << 30) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + fr32.Pad(in, buf[:]) + } +} + +func BenchmarkUnpad1GChunk(b *testing.B) { + tf.BenchUnitTest(b) + up := abi.PaddedPieceSize(1 << 30).Unpadded() + + var buf [1 << 30]byte + + fr32.Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:]) + var out [1 << 30]byte + + b.SetBytes(1 << 30) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + fr32.Unpad(buf[:], out[:]) + } +} diff --git a/pkg/util/fr32/readers.go b/pkg/util/fr32/readers.go new file mode 100644 index 0000000000..d46b6494a4 --- /dev/null +++ b/pkg/util/fr32/readers.go @@ -0,0 +1,132 @@ +package fr32 + +import ( + "fmt" + "io" + "math/bits" + + "github.com/filecoin-project/go-state-types/abi" +) + +type unpadReader struct { + src io.Reader + + left uint64 + work []byte +} + +func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) { + if err := sz.Validate(); err != nil { + return nil, fmt.Errorf("bad piece size: %w", err) + } + + buf := make([]byte, MTTresh*mtChunkCount(sz)) + + return &unpadReader{ + src: src, + + left: uint64(sz), + work: buf, + }, nil +} + +func (r *unpadReader) Read(out []byte) (int, error) { + if r.left == 0 { + return 0, io.EOF + } + + chunks := len(out) / 127 + + outTwoPow := 1 << (63 - bits.LeadingZeros64(uint64(chunks*128))) + + if err := abi.PaddedPieceSize(outTwoPow).Validate(); err != nil { + return 0, fmt.Errorf("output must be of valid padded piece size: %w", err) + } + + todo := abi.PaddedPieceSize(outTwoPow) + if r.left < uint64(todo) { + todo = abi.PaddedPieceSize(1 << (63 - bits.LeadingZeros64(r.left))) + } + + r.left -= uint64(todo) + + n, err := r.src.Read(r.work[:todo]) + if err != nil && err 
!= io.EOF { + return n, err + } + + if n != int(todo) { + return 0, fmt.Errorf("didn't read enough: %w", err) + } + + Unpad(r.work[:todo], out[:todo.Unpadded()]) + + return int(todo.Unpadded()), err +} + +type padWriter struct { + dst io.Writer + + stash []byte + work []byte +} + +func NewPadWriter(dst io.Writer) io.WriteCloser { + return &padWriter{ + dst: dst, + } +} + +func (w *padWriter) Write(p []byte) (int, error) { + in := p + + if len(p)+len(w.stash) < 127 { + w.stash = append(w.stash, p...) + return len(p), nil + } + + if len(w.stash) != 0 { + in = append(w.stash, in...) + } + + for { + pieces := subPieces(abi.UnpaddedPieceSize(len(in))) + biggest := pieces[len(pieces)-1] + + if abi.PaddedPieceSize(cap(w.work)) < biggest.Padded() { + w.work = make([]byte, 0, biggest.Padded()) + } + + Pad(in[:int(biggest)], w.work[:int(biggest.Padded())]) + + n, err := w.dst.Write(w.work[:int(biggest.Padded())]) + if err != nil { + return int(abi.PaddedPieceSize(n).Unpadded()), err + } + + in = in[biggest:] + + if len(in) < 127 { + if cap(w.stash) < len(in) { + w.stash = make([]byte, 0, len(in)) + } + w.stash = w.stash[:len(in)] + copy(w.stash, in) + + return len(p), nil + } + } +} + +func (w *padWriter) Close() error { + if len(w.stash) > 0 { + return fmt.Errorf("still have %d unprocessed bytes", len(w.stash)) + } + + // allow gc + w.stash = nil + w.work = nil + w.dst = nil + + return nil +} diff --git a/pkg/util/fr32/readers_test.go b/pkg/util/fr32/readers_test.go new file mode 100644 index 0000000000..8be2d38886 --- /dev/null +++ b/pkg/util/fr32/readers_test.go @@ -0,0 +1,38 @@ +package fr32_test + +import ( + "bufio" + "bytes" + "io" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/util/fr32" +) + +func TestUnpadReader(t *testing.T) { + tf.UnitTest(t) + ps := abi.PaddedPieceSize(64 << 20).Unpadded() + + 
raw := bytes.Repeat([]byte{0x77}, int(ps)) + + padOut := make([]byte, ps.Padded()) + fr32.Pad(raw, padOut) + + r, err := fr32.NewUnpadReader(bytes.NewReader(padOut), ps.Padded()) + if err != nil { + t.Fatal(err) + } + + // using bufio reader to make sure reads are big enough for the padreader - it can't handle small reads right now + readered, err := io.ReadAll(bufio.NewReaderSize(r, 512)) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, raw, readered) +} diff --git a/pkg/util/fr32/utils.go b/pkg/util/fr32/utils.go new file mode 100644 index 0000000000..26c348f4f9 --- /dev/null +++ b/pkg/util/fr32/utils.go @@ -0,0 +1,31 @@ +package fr32 + +import ( + "math/bits" + + "github.com/filecoin-project/go-state-types/abi" +) + +func subPieces(in abi.UnpaddedPieceSize) []abi.UnpaddedPieceSize { + // Convert to in-sector bytes for easier math: + // + // (we convert to sector bytes as they are nice round binary numbers) + + w := uint64(in.Padded()) + + out := make([]abi.UnpaddedPieceSize, bits.OnesCount64(w)) + for i := range out { + // Extract the next lowest non-zero bit + next := bits.TrailingZeros64(w) + psize := uint64(1) << next + // e.g: if the number is 0b010100, psize will be 0b000100 + + // set that bit to 0 by XORing it, so the next iteration looks at the + // next bit + w ^= psize + + // Add the piece size to the list of pieces we need to create + out[i] = abi.PaddedPieceSize(psize).Unpadded() + } + return out +} diff --git a/pkg/util/fsm/events.go b/pkg/util/fsm/events.go new file mode 100644 index 0000000000..6932c65740 --- /dev/null +++ b/pkg/util/fsm/events.go @@ -0,0 +1,24 @@ +package fsm + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" +) + +// Chain a interface used to get chain head and net version +type Chain interface { + ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) + StateNetworkVersion(ctx context.Context, tok TipSetToken) 
(network.Version, error) +} + +// `curH`-`ts.Height` = `confidence` +type ( + HeightHandler func(ctx context.Context, tok TipSetToken, curH abi.ChainEpoch) error + RevertHandler func(ctx context.Context, tok TipSetToken) error +) + +type Events interface { + ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error +} diff --git a/pkg/util/fsm/types.go b/pkg/util/fsm/types.go new file mode 100644 index 0000000000..fbd681e67d --- /dev/null +++ b/pkg/util/fsm/types.go @@ -0,0 +1,67 @@ +package fsm + +import ( + "bytes" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/ipfs/go-cid" +) + +// Piece is a tuple of piece and deal info +type PieceWithDealInfo struct { + Piece abi.PieceInfo + DealInfo DealInfo +} + +// Piece is a tuple of piece info and optional deal +type Piece struct { + Piece abi.PieceInfo + DealInfo *DealInfo // nil for pieces which do not appear in deals (e.g. filler pieces) +} + +// DealInfo is a tuple of deal identity and its schedule +type DealInfo struct { + PublishCid *cid.Cid + DealID abi.DealID + DealSchedule DealSchedule + KeepUnsealed bool +} + +// DealSchedule communicates the time interval of a storage deal. The deal must +// appear in a sealed (proven) sector no later than StartEpoch, otherwise it +// is invalid. 
+type DealSchedule struct { + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch +} + +type Log struct { + Timestamp uint64 + Trace string // for errors + + Message string + + // additional data (Event info) + Kind string +} + +type ReturnState string + +type TipSetToken []byte + +type MsgLookup struct { + Receipt MessageReceipt + TipSetTok TipSetToken + Height abi.ChainEpoch +} + +type MessageReceipt struct { + ExitCode exitcode.ExitCode + Return []byte + GasUsed int64 +} + +func (mr *MessageReceipt) Equals(o *MessageReceipt) bool { + return mr.ExitCode == o.ExitCode && bytes.Equal(mr.Return, o.Return) && mr.GasUsed == o.GasUsed +} diff --git a/pkg/util/fsutil/dealloc_linux.go b/pkg/util/fsutil/dealloc_linux.go new file mode 100644 index 0000000000..0b20c568db --- /dev/null +++ b/pkg/util/fsutil/dealloc_linux.go @@ -0,0 +1,28 @@ +package fsutil + +import ( + "os" + "syscall" + + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("fsutil") + +const FallocFlPunchHole = 0x02 // linux/falloc.h + +func Deallocate(file *os.File, offset int64, length int64) error { + if length == 0 { + return nil + } + + err := syscall.Fallocate(int(file.Fd()), FallocFlPunchHole, offset, length) + if errno, ok := err.(syscall.Errno); ok { + if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS { + log.Warnf("could not deallocate space, ignoring: %v", errno) + err = nil // log and ignore + } + } + + return err +} diff --git a/pkg/util/fsutil/dealloc_other.go b/pkg/util/fsutil/dealloc_other.go new file mode 100644 index 0000000000..ff373d97d2 --- /dev/null +++ b/pkg/util/fsutil/dealloc_other.go @@ -0,0 +1,18 @@ +//go:build !linux +// +build !linux + +package fsutil + +import ( + "os" + + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("fsutil") + +func Deallocate(file *os.File, offset int64, length int64) error { + log.Warnf("deallocating space not supported") + + return nil +} diff --git a/pkg/util/fsutil/filesize_unix.go 
b/pkg/util/fsutil/filesize_unix.go new file mode 100644 index 0000000000..33227391da --- /dev/null +++ b/pkg/util/fsutil/filesize_unix.go @@ -0,0 +1,28 @@ +package fsutil + +import ( + "fmt" + "os" + "syscall" +) + +type SizeInfo struct { + OnDisk int64 +} + +// FileSize returns bytes used by a file on disk +func FileSize(path string) (SizeInfo, error) { + var stat syscall.Stat_t + if err := syscall.Stat(path, &stat); err != nil { + if err == syscall.ENOENT { + return SizeInfo{}, os.ErrNotExist + } + return SizeInfo{}, fmt.Errorf("stat: %w", err) + } + + // NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize + // See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html + return SizeInfo{ + int64(stat.Blocks) * 512, // nolint NOTE: int64 cast is needed on osx + }, nil +} diff --git a/pkg/util/fsutil/statfs.go b/pkg/util/fsutil/statfs.go new file mode 100644 index 0000000000..2a00ccb9ab --- /dev/null +++ b/pkg/util/fsutil/statfs.go @@ -0,0 +1,7 @@ +package fsutil + +type FsStat struct { + Capacity int64 + Available int64 // Available to use for sector storage + Reserved int64 +} diff --git a/pkg/util/fsutil/statfs_unix.go b/pkg/util/fsutil/statfs_unix.go new file mode 100644 index 0000000000..bd93016397 --- /dev/null +++ b/pkg/util/fsutil/statfs_unix.go @@ -0,0 +1,20 @@ +package fsutil + +import ( + "fmt" + "syscall" +) + +func Statfs(path string) (FsStat, error) { + var stat syscall.Statfs_t + if err := syscall.Statfs(path, &stat); err != nil { + return FsStat{}, fmt.Errorf("statfs: %w", err) + } + + // force int64 to handle platform specific differences + //nolint:unconvert + return FsStat{ + Capacity: int64(stat.Blocks) * int64(stat.Bsize), + Available: int64(stat.Bavail) * int64(stat.Bsize), + }, nil +} diff --git a/pkg/util/fsutil/statfs_windows.go b/pkg/util/fsutil/statfs_windows.go new file mode 100644 index 0000000000..d785651826 --- /dev/null +++ b/pkg/util/fsutil/statfs_windows.go @@ -0,0 +1,28 @@ +package fsutil + +import ( + 
"syscall" + "unsafe" +) + +func Statfs(volumePath string) (FsStat, error) { + // From https://github.com/ricochet2200/go-disk-usage/blob/master/du/diskusage_windows.go + + h := syscall.MustLoadDLL("kernel32.dll") + c := h.MustFindProc("GetDiskFreeSpaceExW") + + var freeBytes int64 + var totalBytes int64 + var availBytes int64 + + c.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(volumePath))), + uintptr(unsafe.Pointer(&freeBytes)), + uintptr(unsafe.Pointer(&totalBytes)), + uintptr(unsafe.Pointer(&availBytes))) + + return FsStat{ + Capacity: totalBytes, + Available: availBytes, + }, nil +} diff --git a/internal/pkg/util/hasher/hasher.go b/pkg/util/hasher/hasher.go similarity index 100% rename from internal/pkg/util/hasher/hasher.go rename to pkg/util/hasher/hasher.go diff --git a/internal/pkg/cborutil/ipld.go b/pkg/util/ipld.go similarity index 95% rename from internal/pkg/cborutil/ipld.go rename to pkg/util/ipld.go index 61506ee1af..450136e1ef 100644 --- a/internal/pkg/cborutil/ipld.go +++ b/pkg/util/ipld.go @@ -1,4 +1,4 @@ -package cborutil +package util import ( "context" diff --git a/pkg/util/makecid.go b/pkg/util/makecid.go new file mode 100644 index 0000000000..a5d984fe7f --- /dev/null +++ b/pkg/util/makecid.go @@ -0,0 +1,15 @@ +package util + +import ( + "github.com/filecoin-project/venus/pkg/constants" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" +) + +func MakeCid(i interface{}) (cid.Cid, error) { + node, err := cbor.WrapObject(i, constants.DefaultHashFunction, -1) + if err != nil { + return cid.Undef, err + } + return constants.DefaultCidBuilder.Sum(node.RawData()) +} diff --git a/internal/pkg/util/moresync/latch.go b/pkg/util/moresync/latch.go similarity index 100% rename from internal/pkg/util/moresync/latch.go rename to pkg/util/moresync/latch.go diff --git a/internal/pkg/util/moresync/latch_test.go b/pkg/util/moresync/latch_test.go similarity index 86% rename from internal/pkg/util/moresync/latch_test.go rename to 
pkg/util/moresync/latch_test.go index 824f597150..bd60e8af2f 100644 --- a/internal/pkg/util/moresync/latch_test.go +++ b/pkg/util/moresync/latch_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/internal/pkg/util/moresync" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/util/moresync" ) func TestLatch(t *testing.T) { diff --git a/pkg/util/paralle/paralle.go b/pkg/util/paralle/paralle.go new file mode 100644 index 0000000000..82524f5d9e --- /dev/null +++ b/pkg/util/paralle/paralle.go @@ -0,0 +1,31 @@ +package paralle + +import "sync" + +type DoWorkPieceFunc func(piece int) + +// Parallelize is a very simple framework that allow for parallelizing +// N independent pieces of work. +func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) { + toProcess := make(chan int, pieces) + for i := 0; i < pieces; i++ { + toProcess <- i + } + close(toProcess) + + if pieces < workers { + workers = pieces + } + + wg := sync.WaitGroup{} + wg.Add(workers) + for i := 0; i < workers; i++ { + go func() { + defer wg.Done() + for piece := range toProcess { + doWorkPiece(piece) + } + }() + } + wg.Wait() +} diff --git a/pkg/util/storiface/ffi.go b/pkg/util/storiface/ffi.go new file mode 100644 index 0000000000..c7813053ef --- /dev/null +++ b/pkg/util/storiface/ffi.go @@ -0,0 +1,18 @@ +package storiface + +import ( + "errors" + + "github.com/filecoin-project/go-state-types/abi" +) + +// ErrSectorNotFound sector not found error +var ErrSectorNotFound = errors.New("sector not found") + +type UnpaddedByteIndex uint64 + +func (i UnpaddedByteIndex) Padded() PaddedByteIndex { + return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded()) +} + +type PaddedByteIndex uint64 diff --git a/pkg/util/storiface/filetype.go b/pkg/util/storiface/filetype.go new file mode 100644 index 
0000000000..140595eba4 --- /dev/null +++ b/pkg/util/storiface/filetype.go @@ -0,0 +1,176 @@ +package storiface + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" +) + +const ( + FTUnsealed SectorFileType = 1 << iota + FTSealed + FTCache + FTUpdate + FTUpdateCache + + FileTypes = iota +) + +var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTUpdate, FTUpdateCache} + +const ( + FTNone SectorFileType = 0 +) + +const FSOverheadDen = 10 + +var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, + FTUpdate: FSOverheadDen, + FTUpdateCache: FSOverheadDen * 2, + FTCache: 141, // 11 layers + D(2x ssize) + C + R' +} + +// sector size * disk / fs overhead. FSOverheadDen is like the unit of sector size + +var FsOverheadFinalized = map[SectorFileType]int{ + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, + FTUpdate: FSOverheadDen * 2, // XXX: we should clear the update cache on Finalize??? + FTUpdateCache: FSOverheadDen, + FTCache: 2, +} + +type SectorFileType int + +func (t SectorFileType) String() string { + switch t { + case FTUnsealed: + return "unsealed" + case FTSealed: + return "sealed" + case FTCache: + return "cache" + case FTUpdate: + return "update" + case FTUpdateCache: + return "update-cache" + default: + return fmt.Sprintf("", t) + } +} + +func (t SectorFileType) Has(singleType SectorFileType) bool { + return t&singleType == singleType +} + +func (t SectorFileType) SealSpaceUse(ssize abi.SectorSize) (uint64, error) { + var need uint64 + for _, pathType := range PathTypes { + if !t.Has(pathType) { + continue + } + + oh, ok := FSOverheadSeal[pathType] + if !ok { + return 0, fmt.Errorf("no seal overhead info for %s", pathType) + } + + need += uint64(oh) * uint64(ssize) / FSOverheadDen + } + + return need, nil +} + +func (t SectorFileType) StoreSpaceUse(ssize abi.SectorSize) (uint64, error) { + var need uint64 + for _, pathType := range PathTypes { + if 
!t.Has(pathType) { + continue + } + + oh, ok := FsOverheadFinalized[pathType] + if !ok { + return 0, fmt.Errorf("no finalized overhead info for %s", pathType) + } + + need += uint64(oh) * uint64(ssize) / FSOverheadDen + } + + return need, nil +} + +func (t SectorFileType) All() [FileTypes]bool { + var out [FileTypes]bool + + for i := range out { + out[i] = t&(1< 0 + } + + return out +} + +type SectorPaths struct { + ID abi.SectorID + + Unsealed string + Sealed string + Cache string + Update string + UpdateCache string +} + +func ParseSectorID(baseName string) (abi.SectorID, error) { + var n abi.SectorNumber + var mid abi.ActorID + read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n) + if err != nil { + return abi.SectorID{}, fmt.Errorf("sscanf sector name ('%s'): %w", baseName, err) + } + + if read != 2 { + return abi.SectorID{}, fmt.Errorf("parseSectorID expected to scan 2 values, got %d", read) + } + + return abi.SectorID{ + Miner: mid, + Number: n, + }, nil +} + +func SectorName(sid abi.SectorID) string { + return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number) +} + +func PathByType(sps SectorPaths, fileType SectorFileType) string { + switch fileType { + case FTUnsealed: + return sps.Unsealed + case FTSealed: + return sps.Sealed + case FTCache: + return sps.Cache + case FTUpdate: + return sps.Update + case FTUpdateCache: + return sps.UpdateCache + } + + panic("requested unknown path type") +} + +func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { + switch fileType { + case FTUnsealed: + sps.Unsealed = p + case FTSealed: + sps.Sealed = p + case FTCache: + sps.Cache = p + case FTUpdate: + sps.Update = p + case FTUpdateCache: + sps.UpdateCache = p + } +} diff --git a/pkg/util/storiface/storage.go b/pkg/util/storiface/storage.go new file mode 100644 index 0000000000..e1324bb977 --- /dev/null +++ b/pkg/util/storiface/storage.go @@ -0,0 +1,10 @@ +package storiface + +type PathType string + +const ( + PathStorage PathType = "storage" + 
PathSealing PathType = "sealing" +) + +type AcquireMode string diff --git a/pkg/util/test/equal.go b/pkg/util/test/equal.go new file mode 100644 index 0000000000..57568ced3d --- /dev/null +++ b/pkg/util/test/equal.go @@ -0,0 +1,35 @@ +package test + +import ( + "bytes" + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equal(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) bool { + expectRaw, err := json.Marshal(expected) + if err != nil { + return false + } + actualRaw, err := json.Marshal(actual) + if err != nil { + return false + } + + if !bytes.Equal(expectRaw, actualRaw) { + return assert.Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s", string(expectRaw), string(actualRaw)), msgAndArgs...) + } + return true +} diff --git a/pkg/util/ulimit/ulimit.go b/pkg/util/ulimit/ulimit.go new file mode 100644 index 0000000000..6206eb2ced --- /dev/null +++ b/pkg/util/ulimit/ulimit.go @@ -0,0 +1,123 @@ +package ulimit + +// from go-ipfs + +import ( + "fmt" + "os" + "strconv" + "syscall" + + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("ulimit") + +var ( + supportsFDManagement = false + + // getlimit returns the soft and hard limits of file descriptors counts + getLimit func() (uint64, uint64, error) + // set limit sets the soft and hard limits of file descriptors counts + setLimit func(uint64, uint64) error +) + +// minimum file descriptor limit before we complain +const minFds = 2048 + +// default max file descriptor limit. 
+const maxFds = 16 << 10 + +// userMaxFDs returns the value of LOTUS_FD_MAX +func userMaxFDs() uint64 { + // check if the LOTUS_FD_MAX is set up and if it does + // not have a valid fds number notify the user + val := os.Getenv("LOTUS_FD_MAX") + if val == "" { + val = os.Getenv("IPFS_FD_MAX") + } + if val == "" { + val = os.Getenv("VENUS_FD_MAX") + } + if val != "" { + fds, err := strconv.ParseUint(val, 10, 64) + if err != nil { + log.Errorf("bad value for LOTUS_FD_MAX: %s", err) + return 0 + } + return fds + } + return 0 +} + +// ManageFdLimit raise the current max file descriptor count +// of the process based on the LOTUS_FD_MAX value +func ManageFdLimit() (changed bool, newLimit uint64, err error) { + if !supportsFDManagement { + return false, 0, nil + } + + targetLimit := uint64(maxFds) + userLimit := userMaxFDs() + if userLimit > 0 { + targetLimit = userLimit + } + + soft, hard, err := getLimit() + if err != nil { + return false, 0, err + } + + if targetLimit <= soft { + return false, 0, nil + } + + // the soft limit is the value that the kernel enforces for the + // corresponding resource + // the hard limit acts as a ceiling for the soft limit + // an unprivileged process may only set it's soft limit to a + // alue in the range from 0 up to the hard limit + err = setLimit(targetLimit, targetLimit) + switch err { + case nil: + newLimit = targetLimit + case syscall.EPERM: + // lower limit if necessary. + if targetLimit > hard { + targetLimit = hard + } + + // the process does not have permission so we should only + // set the soft value + err = setLimit(targetLimit, hard) + if err != nil { + err = fmt.Errorf("error setting ulimit wihout hard limit: %s", err) + break + } + newLimit = targetLimit + + // Warn on lowered limit. 
+ + if newLimit < userLimit { + err = fmt.Errorf( + "failed to raise ulimit to LOTUS_FD_MAX (%d): set to %d", + userLimit, + newLimit, + ) + break + } + + if userLimit == 0 && newLimit < minFds { + err = fmt.Errorf( + "failed to raise ulimit to minimum %d: set to %d", + minFds, + newLimit, + ) + break + } + default: + err = fmt.Errorf("error setting: ulimit: %s", err) + } + + return newLimit > 0, newLimit, err +} diff --git a/pkg/util/ulimit/ulimit_freebsd.go b/pkg/util/ulimit/ulimit_freebsd.go new file mode 100644 index 0000000000..a5ff35707b --- /dev/null +++ b/pkg/util/ulimit/ulimit_freebsd.go @@ -0,0 +1,37 @@ +//go:build freebsd +// +build freebsd + +package ulimit + +import ( + "errors" + "math" + + unix "golang.org/x/sys/unix" +) + +func init() { + supportsFDManagement = true + getLimit = freebsdGetLimit + setLimit = freebsdSetLimit +} + +func freebsdGetLimit() (uint64, uint64, error) { + rlimit := unix.Rlimit{} + err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit) + if (rlimit.Cur < 0) || (rlimit.Max < 0) { + return 0, 0, errors.New("invalid rlimits") + } + return uint64(rlimit.Cur), uint64(rlimit.Max), err +} + +func freebsdSetLimit(soft uint64, max uint64) error { + if (soft > math.MaxInt64) || (max > math.MaxInt64) { + return errors.New("invalid rlimits") + } + rlimit := unix.Rlimit{ + Cur: int64(soft), + Max: int64(max), + } + return unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit) +} diff --git a/pkg/util/ulimit/ulimit_test.go b/pkg/util/ulimit/ulimit_test.go new file mode 100644 index 0000000000..e22a9bd49e --- /dev/null +++ b/pkg/util/ulimit/ulimit_test.go @@ -0,0 +1,90 @@ +//go:build !windows +// +build !windows + +package ulimit + +import ( + "fmt" + "os" + "strings" + "syscall" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestManageFdLimit(t *testing.T) { + tf.UnitTest(t) + t.Log("Testing file descriptor count") + if _, _, err := ManageFdLimit(); err != nil { + t.Errorf("Cannot manage file descriptors") + } 
+ + if maxFds != uint64(16<<10) { + t.Errorf("Maximum file descriptors default value changed") + } +} + +func TestManageInvalidNFds(t *testing.T) { + tf.UnitTest(t) + t.Logf("Testing file descriptor invalidity") + var err error + if err = os.Unsetenv("IPFS_FD_MAX"); err != nil { + t.Fatal("Cannot unset the IPFS_FD_MAX env variable") + } + + rlimit := syscall.Rlimit{} + if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil { + t.Fatal("Cannot get the file descriptor count") + } + + value := rlimit.Max + rlimit.Cur + if err = os.Setenv("IPFS_FD_MAX", fmt.Sprintf("%d", value)); err != nil { + t.Fatal("Cannot set the IPFS_FD_MAX env variable") + } + + t.Logf("setting ulimit to %d, max %d, cur %d", value, rlimit.Max, rlimit.Cur) + + if changed, new, err := ManageFdLimit(); err == nil { + t.Errorf("ManageFdLimit should return an error: changed %t, new: %d", changed, new) + } else if err != nil { + flag := strings.Contains(err.Error(), + "failed to raise ulimit to LOTUS_FD_MAX") + if !flag { + t.Error("ManageFdLimit returned unexpected error", err) + } + } + + // unset all previous operations + if err = os.Unsetenv("IPFS_FD_MAX"); err != nil { + t.Fatal("Cannot unset the IPFS_FD_MAX env variable") + } +} + +func TestManageFdLimitWithEnvSet(t *testing.T) { + tf.UnitTest(t) + t.Logf("Testing file descriptor manager with IPFS_FD_MAX set") + var err error + if err = os.Unsetenv("IPFS_FD_MAX"); err != nil { + t.Fatal("Cannot unset the IPFS_FD_MAX env variable") + } + + rlimit := syscall.Rlimit{} + if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil { + t.Fatal("Cannot get the file descriptor count") + } + + value := rlimit.Max - rlimit.Cur + 1 + if err = os.Setenv("IPFS_FD_MAX", fmt.Sprintf("%d", value)); err != nil { + t.Fatal("Cannot set the IPFS_FD_MAX env variable") + } + + if _, _, err = ManageFdLimit(); err != nil { + t.Errorf("Cannot manage file descriptor count") + } + + // unset all previous operations + if err = 
os.Unsetenv("IPFS_FD_MAX"); err != nil { + t.Fatal("Cannot unset the IPFS_FD_MAX env variable") + } +} diff --git a/pkg/util/ulimit/ulimit_unix.go b/pkg/util/ulimit/ulimit_unix.go new file mode 100644 index 0000000000..ac9f4ca908 --- /dev/null +++ b/pkg/util/ulimit/ulimit_unix.go @@ -0,0 +1,28 @@ +//go:build darwin || linux || netbsd || openbsd +// +build darwin linux netbsd openbsd + +package ulimit + +import ( + unix "golang.org/x/sys/unix" +) + +func init() { + supportsFDManagement = true + getLimit = unixGetLimit + setLimit = unixSetLimit +} + +func unixGetLimit() (uint64, uint64, error) { + rlimit := unix.Rlimit{} + err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit) + return rlimit.Cur, rlimit.Max, err +} + +func unixSetLimit(soft uint64, max uint64) error { + rlimit := unix.Rlimit{ + Cur: soft, + Max: max, + } + return unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit) +} diff --git a/pkg/vm/dispatch/cbor_gen.go b/pkg/vm/dispatch/cbor_gen.go new file mode 100644 index 0000000000..74d364721f --- /dev/null +++ b/pkg/vm/dispatch/cbor_gen.go @@ -0,0 +1,83 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package dispatch + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufSimpleParams = []byte{129} + +func (t *SimpleParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufSimpleParams); err != nil { + return err + } + + // t.Name (string) (string) + if len(t.Name) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Name was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Name)); err != nil { + return err + } + return nil +} + +func (t *SimpleParams) UnmarshalCBOR(r io.Reader) (err error) { + *t = SimpleParams{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Name (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Name = string(sval) + } + return nil +} diff --git a/pkg/vm/dispatch/dispatch.go b/pkg/vm/dispatch/dispatch.go new file mode 100644 index 0000000000..4c5f7cdfb1 --- /dev/null +++ b/pkg/vm/dispatch/dispatch.go @@ -0,0 +1,167 @@ +package dispatch + +import ( + "bytes" + "fmt" + "reflect" + + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" +) + +type SimpleParams struct { + Name string +} + +// Actor is the interface all actors have to implement. +type Actor interface { + // Exports has a list of method available on the actor. + Exports() []interface{} + // Code returns the code ID for this actor. + Code() cid.Cid + + // State returns a new State object for this actor. This can be used to + // decode the actor's state. + State() cbor.Er +} + +// Dispatcher allows for dynamic method dispatching on an actor. +type Dispatcher interface { + // Dispatch will call the given method on the actor and pass the arguments. + // + // - The `ctx` argument will be coerced to the type the method expects in its first argument. + // - If arg1 is `[]byte`, it will attempt to decode the value based on second argument in the target method. + Dispatch(method abi.MethodNum, nvk network.Version, ctx interface{}, arg1 interface{}) ([]byte, *ExcuteError) + // Signature is a helper function that returns the signature for a given method. + // + // Note: This is intended to be used by tests and tools. + Signature(method abi.MethodNum) (MethodSignature, *ExcuteError) +} + +type actorDispatcher struct { + code cid.Cid + actor builtin.RegistryEntry +} + +type method interface { + Call(in []reflect.Value) []reflect.Value + Type() reflect.Type +} + +var _ Dispatcher = (*actorDispatcher)(nil) + +// Dispatch implements `Dispatcher`. 
+func (d *actorDispatcher) Dispatch(methodNum abi.MethodNum, nvk network.Version, ctx interface{}, arg1 interface{}) ([]byte, *ExcuteError) { + // get method signature + m, err := d.signature(methodNum) + if err != nil { + return []byte{}, err + } + + // build args to pass to the method + args := []reflect.Value{ + // the ctx will be automatically coerced + reflect.ValueOf(ctx), + } + // err code + ec := exitcode.ErrSerialization + if nvk < network.Version7 { + ec = 1 + } + + parserByte := func(raw []byte) *ExcuteError { + obj, err := m.ArgInterface(raw) + if err != nil { + return NewExcuteError(ec, "fail to decode params") + } + args = append(args, reflect.ValueOf(obj)) + return nil + } + + switch t := arg1.(type) { + case nil: + args = append(args, m.ArgNil()) + case []byte: + err := parserByte(t) + if err != nil { + return []byte{}, err + } + case cbor.Marshaler: + buf := new(bytes.Buffer) + if err := t.MarshalCBOR(buf); err != nil { + return []byte{}, NewExcuteError(ec, fmt.Sprintf("fail to marshal argument %v", err)) + } + err := parserByte(buf.Bytes()) + if err != nil { + return []byte{}, err + } + default: + args = append(args, reflect.ValueOf(arg1)) + } + + // invoke the method + out := m.method.Call(args) + + // method returns unit + // Note: we need to check for `IsNill()` here because Go doesnt work if you do `== nil` on the interface + if len(out) == 0 || (out[0].Kind() != reflect.Struct && out[0].IsNil()) { + return nil, nil + } + + switch ret := out[0].Interface().(type) { + case []byte: + return ret, nil + case *abi.EmptyValue: // todo remove this code abi.EmptyValue is cbor.Marshaler + return []byte{}, nil + case cbor.Marshaler: + buf := new(bytes.Buffer) + if err := ret.MarshalCBOR(buf); err != nil { + return []byte{}, NewExcuteError(exitcode.SysErrSenderStateInvalid, "failed to marshal response to cbor err:%v", err) + } + return buf.Bytes(), nil + case nil: + return []byte{}, nil + default: + return []byte{}, 
NewExcuteError(exitcode.SysErrInvalidMethod, "could not determine type for response from call") + } +} + +func (d *actorDispatcher) signature(methodID abi.MethodNum) (*methodSignature, *ExcuteError) { + exports := d.actor.Exports() + + // get method + method := exports[(uint64)(methodID)].Method + if method == nil { + return nil, NewExcuteError(exitcode.SysErrInvalidMethod, "Method undefined. method: %d, code: %s", methodID, d.code) + } + + return &methodSignature{method: reflect.ValueOf(method)}, nil +} + +// Signature implements `Dispatcher`. +func (d *actorDispatcher) Signature(methodNum abi.MethodNum) (MethodSignature, *ExcuteError) { + return d.signature(methodNum) +} + +// ExcuteError error in vm excute +type ExcuteError struct { + code exitcode.ExitCode + msg string +} + +func NewExcuteError(code exitcode.ExitCode, msg string, args ...interface{}) *ExcuteError { + return &ExcuteError{code: code, msg: fmt.Sprint(msg, args)} +} + +func (err *ExcuteError) ExitCode() exitcode.ExitCode { + return err.code +} + +func (err *ExcuteError) Error() string { + return err.msg +} diff --git a/pkg/vm/dispatch/loader.go b/pkg/vm/dispatch/loader.go new file mode 100644 index 0000000000..4710fa041a --- /dev/null +++ b/pkg/vm/dispatch/loader.go @@ -0,0 +1,129 @@ +package dispatch + +import ( + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/ipfs/go-cid" + + vmr "github.com/filecoin-project/venus/pkg/vm/runtime" + "github.com/filecoin-project/venus/venus-shared/actors" +) + +// CodeLoader allows you to load an actor's code based on its id an epoch. +type CodeLoader struct { + actors map[cid.Cid]ActorInfo +} + +// ActorInfo vm contract actor +type ActorInfo struct { + vmActor builtin.RegistryEntry + // TODO: consider making this a network version range? 
+ predicate ActorPredicate +} + +// GetActorImpl returns executable code for an actor by code cid at a specific network version +func (cl CodeLoader) GetActorImpl(code cid.Cid, rt vmr.Runtime) (Dispatcher, *ExcuteError) { + // todo version check + actor, ok := cl.actors[code] + if !ok { + return nil, NewExcuteError(exitcode.SysErrorIllegalActor, "Actor code not found. code: %s", code) + } + if err := actor.predicate(rt, code); err != nil { + return nil, NewExcuteError(exitcode.SysErrorIllegalActor, "unsupport actor. code: %s", code) + } + + return &actorDispatcher{code: code, actor: actor.vmActor}, nil +} + +// GetActorImpl returns executable code for an actor by code cid at a specific protocol version +func (cl CodeLoader) GetUnsafeActorImpl(code cid.Cid) (Dispatcher, error) { + // todo version check + actor, ok := cl.actors[code] + if !ok { + return nil, fmt.Errorf("unable to get actor for code %s", code) + } + return &actorDispatcher{code: code, actor: actor.vmActor}, nil +} + +func (cl CodeLoader) GetVMActor(code cid.Cid) (builtin.RegistryEntry, error) { + // todo version check + actor, ok := cl.actors[code] + if !ok { + return builtin.RegistryEntry{}, fmt.Errorf("unable to get actor for code %s", code) + } + + return actor.vmActor, nil +} + +// CodeLoaderBuilder helps you build a CodeLoader. +type CodeLoaderBuilder struct { + actors map[cid.Cid]ActorInfo +} + +// NewBuilder creates a builder to generate a builtin.Actor data structure +func NewBuilder() *CodeLoaderBuilder { + return &CodeLoaderBuilder{actors: map[cid.Cid]ActorInfo{}} +} + +// Add lets you add an actor dispatch table for a given version. 
+func (b *CodeLoaderBuilder) Add(av actorstypes.Version, predict ActorPredicate, actor builtin.RegistryEntry) *CodeLoaderBuilder { + if predict == nil { + predict = func(vmr.Runtime, cid.Cid) error { return nil } + } + + ai := ActorInfo{ + vmActor: actor, + predicate: predict, + } + + ac := actor.Code() + b.actors[ac] = ai + + // necessary to make stuff work + var realCode cid.Cid + if av >= actorstypes.Version8 { + name := actors.CanonicalName(builtin.ActorNameByCode(ac)) + + var ok bool + realCode, ok = actors.GetActorCodeID(av, name) + if ok { + b.actors[realCode] = ai + } + } + + return b +} + +// Add lets you add an actor dispatch table for a given version. +func (b *CodeLoaderBuilder) AddMany(av actorstypes.Version, predict ActorPredicate, actors []builtin.RegistryEntry) *CodeLoaderBuilder { + for _, actor := range actors { + b.Add(av, predict, actor) + } + return b +} + +// Build builds the code loader. +func (b *CodeLoaderBuilder) Build() CodeLoader { + return CodeLoader{actors: b.actors} +} + +// An ActorPredicate returns an error if the given actor is not valid for the given runtime environment (e.g., chain height, version, etc.). 
+type ActorPredicate func(vmr.Runtime, cid.Cid) error + +// ActorsVersionPredicate get actor predicate base on actor version and network version +func ActorsVersionPredicate(ver actorstypes.Version) ActorPredicate { + return func(rt vmr.Runtime, codeCid cid.Cid) error { + nver, err := actorstypes.VersionForNetwork(rt.NetworkVersion()) + if err != nil { + return fmt.Errorf("version for network %w", err) + } + if nver != ver { + return fmt.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d and nver %d", + codeCid, ver, nver, rt.CurrentEpoch(), rt.NetworkVersion()) + } + return nil + } +} diff --git a/pkg/vm/dispatch/signature.go b/pkg/vm/dispatch/signature.go new file mode 100644 index 0000000000..32c9c67a3d --- /dev/null +++ b/pkg/vm/dispatch/signature.go @@ -0,0 +1,45 @@ +package dispatch + +import ( + "bytes" + "fmt" + "reflect" + + cbg "github.com/whyrusleeping/cbor-gen" +) + +// MethodSignature wraps a specific method and allows you to encode/decodes input/output bytes into concrete types. +type MethodSignature interface { + // ArgNil returns a nil interface for the typed argument expected by the actor method. + ArgNil() reflect.Value + // ArgInterface returns the typed argument expected by the actor method. 
+ ArgInterface(argBytes []byte) (interface{}, error) +} + +type methodSignature struct { + method method +} + +var _ MethodSignature = (*methodSignature)(nil) + +func (ms *methodSignature) ArgNil() reflect.Value { + t := ms.method.Type().In(1) + v := reflect.New(t) + return v.Elem() +} + +func (ms *methodSignature) ArgInterface(argBytes []byte) (interface{}, error) { + // decode arg1 (this is the payload for the actor method) + t := ms.method.Type().In(1) + v := reflect.New(t.Elem()) + obj := v.Interface() + + if val, ok := obj.(cbg.CBORUnmarshaler); ok { + buf := bytes.NewReader(argBytes) + if err := val.UnmarshalCBOR(buf); err != nil { + return nil, err + } + return val, nil + } + return nil, fmt.Errorf("type %T does not implement UnmarshalCBOR", obj) +} diff --git a/pkg/vm/dispatch/signature_test.go b/pkg/vm/dispatch/signature_test.go new file mode 100644 index 0000000000..df1185ca34 --- /dev/null +++ b/pkg/vm/dispatch/signature_test.go @@ -0,0 +1,58 @@ +package dispatch + +import ( + "bytes" + "reflect" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/stretchr/testify/assert" +) + +type fakeActor struct{} + +type SimpleReturn struct { + someValue uint64 +} + +func (*fakeActor) pointerParam(ctx interface{}, params *SimpleParams) SimpleReturn { + return SimpleReturn{someValue: 3} +} + +func TestArgInterface(t *testing.T) { + tf.UnitTest(t) + + fa := fakeActor{} + + params := SimpleParams{Name: "tester"} + setup := func(method interface{}) (methodSignature, []byte) { + s := methodSignature{method: reflect.ValueOf(method)} + + buf := new(bytes.Buffer) + err := params.MarshalCBOR(buf) + assert.NoError(t, err) + + return s, buf.Bytes() + } + + assertArgInterface := func(s methodSignature, encodedParams []byte) interface{} { + ret, err := s.ArgInterface(encodedParams) + assert.NoError(t, err) + assert.NotNil(t, ret) + return ret + } + + t.Run("pointerParam", func(t *testing.T) { + s, encodedParams := setup(fa.pointerParam) 
+ + ret := assertArgInterface(s, encodedParams) + + v, ok := ret.(*SimpleParams) + assert.True(t, ok) + assert.Equal(t, params.Name, v.Name) + }) + + t.Run("noParams", func(t *testing.T) { + // Dragons: not supported, must panic + }) +} diff --git a/pkg/vm/gas/burn.go b/pkg/vm/gas/burn.go new file mode 100644 index 0000000000..7cf80af1ce --- /dev/null +++ b/pkg/vm/gas/burn.go @@ -0,0 +1,109 @@ +package gas + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" +) + +const ( + gasOveruseNum = 11 + gasOveruseDenom = 10 +) + +// GasOutputs detail of gas after message executed +type GasOutputs struct { //nolint + BaseFeeBurn abi.TokenAmount + OverEstimationBurn abi.TokenAmount + + MinerPenalty abi.TokenAmount + MinerTip abi.TokenAmount + Refund abi.TokenAmount + + GasRefund int64 + GasBurned int64 +} + +// ZeroGasOutputs returns a logically zeroed GasOutputs. +func ZeroGasOutputs() GasOutputs { + return GasOutputs{ + BaseFeeBurn: big.Zero(), + OverEstimationBurn: big.Zero(), + MinerPenalty: big.Zero(), + MinerTip: big.Zero(), + Refund: big.Zero(), + } +} + +// ComputeGasOverestimationBurn computes amount of gas to be refunded and amount of gas to be burned +// Result is (refund, burn) +func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) { + if gasUsed == 0 { + return 0, gasLimit + } + + // over = gasLimit/GasUsed - 1 - 0.1 + // over = min(over, 1) + // gasToBurn = (gasLimit - GasUsed) * over + + // so to factor out division from `over` + // over*GasUsed = min(gasLimit - (11*GasUsed)/10, GasUsed) + // gasToBurn = ((gasLimit - GasUsed)*over*GasUsed) / GasUsed + over := gasLimit - (gasOveruseNum*gasUsed)/gasOveruseDenom + if over < 0 { + return gasLimit - gasUsed, 0 + } + + // if we want sharper scaling it goes here: + // over *= 2 + + if over > gasUsed { + over = gasUsed + } + + // needs bigint, as it overflows in pathological case gasLimit > 2^32 GasUsed = gasLimit / 2 + gasToBurn := 
big.NewInt(gasLimit - gasUsed) + gasToBurn = big.Mul(gasToBurn, big.NewInt(over)) + gasToBurn = big.Div(gasToBurn, big.NewInt(gasUsed)) + + return gasLimit - gasUsed - gasToBurn.Int64(), gasToBurn.Int64() +} + +// ComputeGasOutputs compute gas outputs base on message gas parameters and gasUsed after executed +func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount, chargeNetworkFee bool) GasOutputs { + gasUsedBig := big.NewInt(gasUsed) + out := ZeroGasOutputs() + + baseFeeToPay := baseFee + if baseFee.Cmp(feeCap.Int) > 0 { + baseFeeToPay = feeCap + out.MinerPenalty = big.Mul(big.Sub(baseFee, feeCap), gasUsedBig) + } + + // If chargeNetworkFee is disabled, just skip computing the BaseFeeBurn. However, + // we charge all the other fees regardless. + if chargeNetworkFee { + out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig) + } + + minerTip := gasPremium + if big.Cmp(big.Add(baseFeeToPay, minerTip), feeCap) > 0 { + minerTip = big.Sub(feeCap, baseFeeToPay) + } + out.MinerTip = big.Mul(minerTip, big.NewInt(gasLimit)) + + out.GasRefund, out.GasBurned = ComputeGasOverestimationBurn(gasUsed, gasLimit) + + if out.GasBurned != 0 { + gasBurnedBig := big.NewInt(out.GasBurned) + out.OverEstimationBurn = big.Mul(baseFeeToPay, gasBurnedBig) + minerPenalty := big.Mul(big.Sub(baseFee, baseFeeToPay), gasBurnedBig) + out.MinerPenalty = big.Add(out.MinerPenalty, minerPenalty) + } + + requiredFunds := big.Mul(big.NewInt(gasLimit), feeCap) + refund := big.Sub(requiredFunds, out.BaseFeeBurn) + refund = big.Sub(refund, out.MinerTip) + refund = big.Sub(refund, out.OverEstimationBurn) + out.Refund = refund + return out +} diff --git a/pkg/vm/gas/gas_tracker.go b/pkg/vm/gas/gas_tracker.go new file mode 100644 index 0000000000..5dd381923a --- /dev/null +++ b/pkg/vm/gas/gas_tracker.go @@ -0,0 +1,102 @@ +package gas + +import ( + "fmt" + "os" + "time" + + "github.com/filecoin-project/venus/venus-shared/types" + + 
"github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/venus/pkg/vm/runtime" +) + +// GasTracker maintains the stateView of gas usage throughout the execution of a message. +type GasTracker struct { //nolint + GasAvailable int64 + GasUsed int64 + + ExecutionTrace types.ExecutionTrace + NumActorsCreated uint64 //nolint + AllowInternal bool //nolint + CallerValidated bool //nolint + LastGasChargeTime time.Time //nolint + LastGasCharge *types.GasTrace +} + +// NewGasTracker initializes a new empty gas tracker +func NewGasTracker(limit int64) *GasTracker { + return &GasTracker{ + GasUsed: 0, + GasAvailable: limit, + } +} + +// Charge will add the gas charge To the current Method gas context. +// +// WARNING: this Method will panic if there is no sufficient gas left. +func (t *GasTracker) Charge(gas GasCharge, msg string, args ...interface{}) { + if ok := t.TryCharge(gas); !ok { + fmsg := fmt.Sprintf(msg, args...) + runtime.Abortf(exitcode.SysErrOutOfGas, "gas limit %d exceeded with charge of %d: %s", t.GasAvailable, gas.Total(), fmsg) + } +} + +// EnableDetailedTracing has different behaviour in the LegacyVM and FVM. +// In the LegacyVM, it enables detailed gas tracing, slowing down execution. +// In the FVM, it enables execution traces, which are primarily used to observe subcalls. +var EnableDetailedTracing = os.Getenv("VENUS_VM_ENABLE_TRACING") == "1" + +// TryCharge charges `amount` or `RemainingGas()`, whichever is smaller. +// +// Returns `True` if the there was enough gas To pay for `amount`. 
+func (t *GasTracker) TryCharge(gasCharge GasCharge) bool { + toUse := gasCharge.Total() + // code for https://github.com/filecoin-project/venus/issues/4610 + if EnableDetailedTracing { + var callers [10]uintptr + cout := 0 // gruntime.Callers(2+skip, callers[:]) + + now := time.Now() + if t.LastGasCharge != nil { + t.LastGasCharge.TimeTaken = now.Sub(t.LastGasChargeTime) + } + + gasTrace := types.GasTrace{ + Name: gasCharge.Name, + Extra: gasCharge.Extra, + + TotalGas: toUse, + ComputeGas: gasCharge.ComputeGas, + StorageGas: gasCharge.StorageGas, + + // TotalVirtualGas: gasCharge.VirtualCompute*GasComputeMulti + gasCharge.VirtualStorage*GasStorageMulti, + TotalVirtualGas: gasCharge.VirtualCompute + gasCharge.VirtualStorage, + VirtualComputeGas: gasCharge.VirtualCompute, + VirtualStorageGas: gasCharge.VirtualStorage, + + Callers: callers[:cout], + } + + if gasTrace.VirtualStorageGas == 0 { + gasTrace.VirtualStorageGas = gasTrace.StorageGas + } + if gasTrace.VirtualComputeGas == 0 { + gasTrace.VirtualComputeGas = gasTrace.ComputeGas + } + + t.ExecutionTrace.GasCharges = append(t.ExecutionTrace.GasCharges, &gasTrace) + t.LastGasChargeTime = now + t.LastGasCharge = &gasTrace + } + + // overflow safe + if t.GasUsed > t.GasAvailable-toUse { + t.GasUsed = t.GasAvailable + // return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gasCharge: used=%d, available=%d", t.GasUsed, t.GasAvailable) + return false + } + t.GasUsed += toUse + return true +} diff --git a/pkg/vm/gas/gascost.go b/pkg/vm/gas/gascost.go new file mode 100644 index 0000000000..f6ad0f5e31 --- /dev/null +++ b/pkg/vm/gas/gascost.go @@ -0,0 +1,239 @@ +package gas + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/crypto" +) + +// GasCharge amount of gas consumed at one time +type GasCharge struct { //nolint + Name 
string + Extra interface{} + + ComputeGas int64 + StorageGas int64 + + VirtualCompute int64 + VirtualStorage int64 +} + +// Total return all gas in one time +func (g GasCharge) Total() int64 { + return g.ComputeGas + g.StorageGas +} + +func (g GasCharge) WithVirtual(compute, storage int64) GasCharge { + out := g + out.VirtualCompute = compute + out.VirtualStorage = storage + return out +} + +func (g GasCharge) WithExtra(extra interface{}) GasCharge { + out := g + out.Extra = extra + return out +} + +func NewGasCharge(name string, computeGas int64, storageGas int64) GasCharge { + return GasCharge{ + Name: name, + ComputeGas: computeGas, + StorageGas: storageGas, + } +} + +// Pricelist provides prices for operations in the LegacyVM. +// +// Note: this interface should be APPEND ONLY since last chain checkpoint +type Pricelist interface { + // OnChainMessage returns the gas used for storing a message of a given size in the chain. + OnChainMessage(msgSize int) GasCharge + // OnChainReturnValue returns the gas used for storing the response of a message in the chain. + OnChainReturnValue(dataSize int) GasCharge + + // OnMethodInvocation returns the gas used when invoking a method. 
+ OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) GasCharge + + // OnIpldGet returns the gas used for storing an object + OnIpldGet() GasCharge + // OnIpldPut returns the gas used for storing an object + OnIpldPut(dataSize int) GasCharge + + // OnCreateActor returns the gas used for creating an actor + OnCreateActor() GasCharge + // OnDeleteActor returns the gas used for deleting an actor + OnDeleteActor() GasCharge + + OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) + OnHashing(dataSize int) GasCharge + OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge + OnVerifySeal(info proof7.SealVerifyInfo) GasCharge + OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge + OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge + OnVerifyPost(info proof7.WindowPoStVerifyInfo) GasCharge + OnVerifyConsensusFault() GasCharge +} + +// PricesSchedule schedule gas prices for different network version +type PricesSchedule struct { + prices map[abi.ChainEpoch]Pricelist +} + +// NewPricesSchedule new gasprice schedule from forkParams parameters +func NewPricesSchedule(forkParams *config.ForkUpgradeConfig) *PricesSchedule { + prices := map[abi.ChainEpoch]Pricelist{ + abi.ChainEpoch(0): &pricelistV0{ + computeGasMulti: 1, + storageGasMulti: 1000, + + onChainMessageComputeBase: 38863, + onChainMessageStorageBase: 36, + onChainMessageStoragePerByte: 1, + + onChainReturnValuePerByte: 1, + + sendBase: 29233, + sendTransferFunds: 27500, + sendTransferOnlyPremium: 159672, + sendInvokeMethod: -5377, + + ipldGetBase: 75242, + ipldPutBase: 84070, + ipldPutPerByte: 1, + + createActorCompute: 1108454, + createActorStorage: 36 + 40, + deleteActor: -(36 + 40), // -createActorStorage + + verifySignature: map[crypto.SigType]int64{ + crypto.SigTypeBLS: 16598605, + crypto.SigTypeSecp256k1: 1637292, + }, + + hashingBase: 31355, + computeUnsealedSectorCidBase: 98647, + 
verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used + verifyAggregateSealBase: 0, + verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{ + abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { + flat: 123861062, + scale: 9226981, + }, + abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: { + flat: 748593537, + scale: 85639, + }, + abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: { + flat: 748593537, + scale: 85639, + }, + }, + verifyPostDiscount: true, + verifyConsensusFault: 495422, + }, + forkParams.UpgradeCalicoHeight: &pricelistV0{ + computeGasMulti: 1, + storageGasMulti: 1300, + + onChainMessageComputeBase: 38863, + onChainMessageStorageBase: 36, + onChainMessageStoragePerByte: 1, + + onChainReturnValuePerByte: 1, + + sendBase: 29233, + sendTransferFunds: 27500, + sendTransferOnlyPremium: 159672, + sendInvokeMethod: -5377, + + ipldGetBase: 114617, + ipldPutBase: 353640, + ipldPutPerByte: 1, + + createActorCompute: 1108454, + createActorStorage: 36 + 40, + deleteActor: -(36 + 40), // -createActorStorage + + verifySignature: map[crypto.SigType]int64{ + crypto.SigTypeBLS: 16598605, + crypto.SigTypeSecp256k1: 1637292, + }, + + hashingBase: 31355, + computeUnsealedSectorCidBase: 98647, + verifySealBase: 2000, // TODO gas, it VerifySeal syscall is not used + + verifyAggregateSealPer: map[abi.RegisteredSealProof]int64{ + abi.RegisteredSealProof_StackedDrg32GiBV1_1: 449900, + abi.RegisteredSealProof_StackedDrg64GiBV1_1: 359272, + }, + verifyAggregateSealSteps: map[abi.RegisteredSealProof]stepCost{ + abi.RegisteredSealProof_StackedDrg32GiBV1_1: { + {4, 103994170}, + {7, 112356810}, + {13, 122912610}, + {26, 137559930}, + {52, 162039100}, + {103, 210960780}, + {205, 318351180}, + {410, 528274980}, + }, + abi.RegisteredSealProof_StackedDrg64GiBV1_1: { + {4, 102581240}, + {7, 110803030}, + {13, 120803700}, + {26, 134642130}, + {52, 157357890}, + {103, 203017690}, + {205, 304253590}, + {410, 509880640}, + }, + }, + + verifyPostLookup: 
map[abi.RegisteredPoStProof]scalingCost{ + abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { + flat: 117680921, + scale: 43780, + }, + abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: { + flat: 117680921, + scale: 43780, + }, + abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: { + flat: 117680921, + scale: 43780, + }, + }, + verifyPostDiscount: false, + verifyConsensusFault: 495422, + verifyReplicaUpdate: 36316136, + }, + } + return &PricesSchedule{prices: prices} +} + +// PricelistByEpoch finds the latest prices for the given epoch +func (schedule *PricesSchedule) PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { + // since we are storing the prices as map or epoch to price + // we need to get the price with the highest epoch that is lower or equal to the `epoch` arg + bestEpoch := abi.ChainEpoch(0) + bestPrice := schedule.prices[bestEpoch] + for e, pl := range schedule.prices { + // if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch` + if e > bestEpoch && e <= epoch { + bestEpoch = e + bestPrice = pl + } + } + if bestPrice == nil { + panic(fmt.Sprintf("bad setup: no gas prices available for epoch %d", epoch)) + } + return bestPrice +} diff --git a/pkg/vm/gas/pricelistV0.go b/pkg/vm/gas/pricelistV0.go new file mode 100644 index 0000000000..64789f4450 --- /dev/null +++ b/pkg/vm/gas/pricelistV0.go @@ -0,0 +1,270 @@ +package gas + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" +) + +type scalingCost struct { + flat int64 + scale int64 +} + +type stepCost []step + +type step struct { + start int64 + cost int64 +} + +func (sc stepCost) Lookup(x int64) int64 { + i := 0 + for ; i < len(sc); i++ { + if sc[i].start > x { + break + } + } + i-- // look at previous item + 
if i < 0 { + return 0 + } + + return sc[i].cost +} + +type pricelistV0 struct { + computeGasMulti int64 + storageGasMulti int64 + /////////////////////////////////////////////////////////////////////////// + // System operations + /////////////////////////////////////////////////////////////////////////// + + // Gas cost charged to the originator of an on-chain message (regardless of + // whether it succeeds or fails in application) is given by: + // OnChainMessageBase + len(serialized message)*OnChainMessagePerByte + // Together, these account for the cost of message propagation and validation, + // up to but excluding any actual processing by the LegacyVM. + // This is the cost a block producer burns when including an invalid message. + onChainMessageComputeBase int64 + onChainMessageStorageBase int64 + onChainMessageStoragePerByte int64 + + // Gas cost charged to the originator of a non-nil return value produced + // by an on-chain message is given by: + // len(return value)*OnChainReturnValuePerByte + onChainReturnValuePerByte int64 + + // Gas cost for any message send execution(including the top-level one + // initiated by an on-chain message). + // This accounts for the cost of loading sender and receiver actors and + // (for top-level messages) incrementing the sender's sequence number. + // Load and store of actor sub-state is charged separately. + sendBase int64 + + // Gas cost charged, in addition to SendBase, if a message send + // is accompanied by any nonzero currency amount. + // Accounts for writing receiver's new balance (the sender's state is + // already accounted for). + sendTransferFunds int64 + + // Gsa cost charged, in addition to SendBase, if message only transfers funds. + sendTransferOnlyPremium int64 + + // Gas cost charged, in addition to SendBase, if a message invokes + // a method on the receiver. + // Accounts for the cost of loading receiver code and method dispatch. 
+ sendInvokeMethod int64 + + // Gas cost for any Get operation to the IPLD store + // in the runtime LegacyVM context. + ipldGetBase int64 + + // Gas cost (Base + len*PerByte) for any Put operation to the IPLD store + // in the runtime LegacyVM context. + // + // Note: these costs should be significantly higher than the costs for Get + // operations, since they reflect not only serialization/deserialization + // but also persistent storage of chain data. + ipldPutBase int64 + ipldPutPerByte int64 + + // Gas cost for creating a new actor (via InitActor's Exec method). + // + // Note: this costs assume that the extra will be partially or totally refunded while + // the base is covering for the put. + createActorCompute int64 + createActorStorage int64 + + // Gas cost for deleting an actor. + // + // Note: this partially refunds the create cost to incentivise the deletion of the actors. + deleteActor int64 + + verifySignature map[crypto.SigType]int64 + + hashingBase int64 + + computeUnsealedSectorCidBase int64 + verifySealBase int64 + verifyAggregateSealBase int64 + verifyAggregateSealPer map[abi.RegisteredSealProof]int64 + verifyAggregateSealSteps map[abi.RegisteredSealProof]stepCost + + verifyPostLookup map[abi.RegisteredPoStProof]scalingCost + verifyPostDiscount bool + verifyConsensusFault int64 + verifyReplicaUpdate int64 +} + +var _ Pricelist = (*pricelistV0)(nil) + +// OnChainMessage returns the gas used for storing a message of a given size in the chain. +func (pl *pricelistV0) OnChainMessage(msgSize int) GasCharge { + return NewGasCharge("OnChainMessage", pl.onChainMessageComputeBase, + (pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize))*pl.storageGasMulti) +} + +// OnChainReturnValue returns the gas used for storing the response of a message in the chain. 
+func (pl *pricelistV0) OnChainReturnValue(dataSize int) GasCharge { + return NewGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte*pl.storageGasMulti) +} + +// OnMethodInvocation returns the gas used when invoking a method. +func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) GasCharge { + ret := pl.sendBase + extra := "" + + if big.Cmp(value, abi.NewTokenAmount(0)) != 0 { + ret += pl.sendTransferFunds + if methodNum == builtin.MethodSend { + // transfer only + ret += pl.sendTransferOnlyPremium + } + extra += "t" + } + + if methodNum != builtin.MethodSend { + extra += "i" + // running actors is cheaper becase we hand over to actors + ret += pl.sendInvokeMethod + } + return NewGasCharge("OnMethodInvocation", ret, 0).WithExtra(extra) +} + +// OnIpldGet returns the gas used for storing an object +func (pl *pricelistV0) OnIpldGet() GasCharge { + return NewGasCharge("OnIpldGet", pl.ipldGetBase, 0).WithVirtual(114617, 0) +} + +// OnIpldPut returns the gas used for storing an object +func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge { + return NewGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte*pl.storageGasMulti). 
+ WithExtra(dataSize).WithVirtual(400000, int64(dataSize)*1300) +} + +// OnCreateActor returns the gas used for creating an actor +func (pl *pricelistV0) OnCreateActor() GasCharge { + return NewGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage*pl.storageGasMulti) +} + +// OnDeleteActor returns the gas used for deleting an actor +func (pl *pricelistV0) OnDeleteActor() GasCharge { + return NewGasCharge("OnDeleteActor", 0, pl.deleteActor*pl.storageGasMulti) +} + +// OnVerifySignature + +func (pl *pricelistV0) OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) { + cost, ok := pl.verifySignature[sigType] + if !ok { + return GasCharge{}, fmt.Errorf("cost function for signature type %d not supported", sigType) + } + + sigName, _ := sigType.Name() + return NewGasCharge("OnVerifySignature", cost, 0). + WithExtra(map[string]interface{}{ + "type": sigName, + "size": planTextSize, + }), nil +} + +// OnHashing +func (pl *pricelistV0) OnHashing(dataSize int) GasCharge { + return NewGasCharge("OnHashing", pl.hashingBase, 0).WithExtra(dataSize) +} + +// OnComputeUnsealedSectorCid +func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge { + return NewGasCharge("OnComputeUnsealedSectorCid", pl.computeUnsealedSectorCidBase, 0) +} + +// OnVerifySeal +func (pl *pricelistV0) OnVerifySeal(info proof7.SealVerifyInfo) GasCharge { + // TODO: this needs more cost tunning, check with @lotus + // this is not used + return NewGasCharge("OnVerifySeal", pl.verifySealBase, 0) +} + +// OnVerifyAggregateSeals +func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge { + proofType := aggregate.SealProof + perProof, ok := pl.verifyAggregateSealPer[proofType] + if !ok { + perProof = pl.verifyAggregateSealPer[abi.RegisteredSealProof_StackedDrg32GiBV1_1] + } + + step, ok := pl.verifyAggregateSealSteps[proofType] + if !ok { + step = 
pl.verifyAggregateSealSteps[abi.RegisteredSealProof_StackedDrg32GiBV1_1] + } + num := int64(len(aggregate.Infos)) + return NewGasCharge("OnVerifyAggregateSeals", perProof*num+step.Lookup(num), 0) +} + +// OnVerifyReplicaUpdate +func (pl *pricelistV0) OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge { + return NewGasCharge("OnVerifyReplicaUpdate", pl.verifyReplicaUpdate, 0) +} + +// OnVerifyPost +func (pl *pricelistV0) OnVerifyPost(info proof7.WindowPoStVerifyInfo) GasCharge { + sectorSize := "unknown" + var proofType abi.RegisteredPoStProof + + if len(info.Proofs) != 0 { + proofType = info.Proofs[0].PoStProof + ss, err := info.Proofs[0].PoStProof.SectorSize() + if err == nil { + sectorSize = ss.ShortString() + } + } + + cost, ok := pl.verifyPostLookup[proofType] + if !ok { + cost = pl.verifyPostLookup[abi.RegisteredPoStProof_StackedDrgWindow512MiBV1] + } + + gasUsed := cost.flat + int64(len(info.ChallengedSectors))*cost.scale + if pl.verifyPostDiscount { + gasUsed /= 2 // XXX: this is an artificial discount + } + + return NewGasCharge("OnVerifyPost", gasUsed, 0). + WithVirtual(117680921+43780*int64(len(info.ChallengedSectors)), 0). 
+ WithExtra(map[string]interface{}{ + "type": sectorSize, + "size": len(info.ChallengedSectors), + }) +} + +// OnVerifyConsensusFault +func (pl *pricelistV0) OnVerifyConsensusFault() GasCharge { + return NewGasCharge("OnVerifyConsensusFault", pl.verifyConsensusFault, 0) +} diff --git a/pkg/vm/register/default.go b/pkg/vm/register/default.go new file mode 100644 index 0000000000..59d37012ed --- /dev/null +++ b/pkg/vm/register/default.go @@ -0,0 +1,63 @@ +package register + +import ( + "bytes" + "fmt" + "sync" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" + exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" + exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" + exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" + exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" + exported6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/exported" + exported7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/exported" + "github.com/filecoin-project/venus/pkg/vm/dispatch" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" +) + +// defaultActors is list of all actors that ship with Filecoin. +// They are indexed by their CID. 
+// Dragons: add the rest of the actors +var ( + DefaultActorBuilder = dispatch.NewBuilder() + loadOnce sync.Once + defaultActors dispatch.CodeLoader +) + +func GetDefaultActros() *dispatch.CodeLoader { + loadOnce.Do(func() { + DefaultActorBuilder.AddMany(actorstypes.Version0, dispatch.ActorsVersionPredicate(actorstypes.Version0), builtin.MakeRegistryLegacy(exported0.BuiltinActors())) + DefaultActorBuilder.AddMany(actorstypes.Version2, dispatch.ActorsVersionPredicate(actorstypes.Version2), builtin.MakeRegistryLegacy(exported2.BuiltinActors())) + DefaultActorBuilder.AddMany(actorstypes.Version3, dispatch.ActorsVersionPredicate(actorstypes.Version3), builtin.MakeRegistryLegacy(exported3.BuiltinActors())) + DefaultActorBuilder.AddMany(actorstypes.Version4, dispatch.ActorsVersionPredicate(actorstypes.Version4), builtin.MakeRegistryLegacy(exported4.BuiltinActors())) + DefaultActorBuilder.AddMany(actorstypes.Version5, dispatch.ActorsVersionPredicate(actorstypes.Version5), builtin.MakeRegistryLegacy(exported5.BuiltinActors())) + DefaultActorBuilder.AddMany(actorstypes.Version6, dispatch.ActorsVersionPredicate(actorstypes.Version6), builtin.MakeRegistryLegacy(exported6.BuiltinActors())) + DefaultActorBuilder.AddMany(actorstypes.Version7, dispatch.ActorsVersionPredicate(actorstypes.Version7), builtin.MakeRegistryLegacy(exported7.BuiltinActors())) + DefaultActorBuilder.AddMany(actorstypes.Version8, dispatch.ActorsVersionPredicate(actorstypes.Version8), builtin.MakeRegistry(actorstypes.Version8)) + DefaultActorBuilder.AddMany(actorstypes.Version9, dispatch.ActorsVersionPredicate(actorstypes.Version9), builtin.MakeRegistry(actorstypes.Version9)) + defaultActors = DefaultActorBuilder.Build() + }) + + return &defaultActors +} + +func DumpActorState(codeLoader *dispatch.CodeLoader, act *types.Actor, b []byte) (interface{}, error) { + if builtin.IsAccountActor(act.Code) { // Account code special case + return nil, nil + } + + vmActor, err := codeLoader.GetVMActor(act.Code) + if 
err != nil { + return nil, fmt.Errorf("state type for actor %s not found", act.Code) + } + + um := vmActor.State() + if err := um.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("unmarshaling actor state: %w", err) + } + + return um, nil +} diff --git a/internal/pkg/vm/internal/runtime/runtime.go b/pkg/vm/runtime/runtime.go similarity index 78% rename from internal/pkg/vm/internal/runtime/runtime.go rename to pkg/vm/runtime/runtime.go index 27cb3ecefc..f2cb556366 100644 --- a/internal/pkg/vm/internal/runtime/runtime.go +++ b/pkg/vm/runtime/runtime.go @@ -4,17 +4,21 @@ import ( "fmt" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/network" "github.com/ipfs/go-cid" - "github.com/filecoin-project/specs-actors/actors/abi" - specsruntime "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" ) -// Runtime has operations in the VM that are exposed to all actors. +// Runtime has operations in the LegacyVM that are exposed to all actors. type Runtime interface { // CurrentEpoch is the current chain epoch. CurrentEpoch() abi.ChainEpoch + + NetworkVersion() network.Version } // InvocationContext is passed to the actors on each method call. @@ -24,17 +28,17 @@ type InvocationContext interface { // Store is the raw store for IPLD objects. // // Note: this is required for custom data structures. - Store() specsruntime.Store + Store() rt5.Store // Message contains information available to the actor about the executing message. - Message() specsruntime.Message + Message() rt5.Message // ValidateCaller validates the caller against a patter. // // All actor methods MUST call this method before returning. 
ValidateCaller(CallerPattern) // StateHandle handles access to the actor state. - State() specsruntime.StateHandle + State() rt5.StateHandle // Send allows actors to invoke methods on other actors - Send(toAddr address.Address, methodNum abi.MethodNum, params specsruntime.CBORMarshaler, value abi.TokenAmount) (specsruntime.SendReturn, exitcode.ExitCode) + Send(toAddr address.Address, methodNum abi.MethodNum, params cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode // Balance is the current balance on the current actors account. // // Note: the value received for this invocation is already reflected on the balance. @@ -92,12 +96,12 @@ func (p ExecutionPanic) String() string { return fmt.Sprintf("ExitCode(%d)", p.Code()) } -// Abort aborts the VM execution and sets the executing message return to the given `code`. +// Abort aborts the LegacyVM execution and sets the executing message return to the given `code`. func Abort(code exitcode.ExitCode) { panic(ExecutionPanic{code: code}) } -// Abortf will stop the VM execution and return an the error to the caller. +// Abortf will stop the LegacyVM execution and return an the error to the caller. func Abortf(code exitcode.ExitCode, msg string, args ...interface{}) { panic(ExecutionPanic{code: code, msg: fmt.Sprintf(msg, args...)}) } diff --git a/pkg/vm/vm.go b/pkg/vm/vm.go new file mode 100644 index 0000000000..e3ef57bc7c --- /dev/null +++ b/pkg/vm/vm.go @@ -0,0 +1,62 @@ +package vm + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/vm/dispatch" + "github.com/filecoin-project/venus/pkg/vm/register" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" +) + +// Re-exports + +type VmOption = vmcontext.VmOption //nolint + +type Ret = vmcontext.Ret + +// Interpreter is the LegacyVM. 
+type Interpreter = vmcontext.VMInterpreter + +type ( + SyscallsImpl = vmcontext.SyscallsImpl + SyscallsStateView = vmcontext.SyscallsStateView +) + +type ( + ExecCallBack = vmcontext.ExecCallBack + VmMessage = vmcontext.VmMessage //nolint + FakeSyscalls = vmcontext.FakeSyscalls + ChainRandomness = vmcontext.HeadChainRandomness +) + +type Interface = vmcontext.Interface // nolint + +// NewLegacyVM creates a new LegacyVM interpreter. +func NewLegacyVM(ctx context.Context, option VmOption) (Interpreter, error) { + if option.NetworkVersion >= network.Version16 { + return nil, fmt.Errorf("the legacy VM does not support network versions 16+") + } + if option.ActorCodeLoader == nil { + option.ActorCodeLoader = GetDefaultActors() + } + + return vmcontext.NewLegacyVM(ctx, option.ActorCodeLoader, option) +} + +// GetDefaultActors return a code loader with the built-in actors that come with the system. +var GetDefaultActors = register.GetDefaultActros + +// ActorCodeLoader allows yo to load an actor's code based on its id an epoch. +type ActorCodeLoader = dispatch.CodeLoader + +// ActorMethodSignature wraps a specific method and allows you to encode/decodes input/output bytes into concrete types. 
+type ActorMethodSignature = dispatch.MethodSignature + +type ( + ILookBack = vmcontext.ILookBack + LookbackStateGetter = vmcontext.LookbackStateGetter +) + +// type LookbackStateGetterForTipset = vmcontext.LookbackStateGetterForTipset diff --git a/pkg/vm/vmcontext/actor_state_handle.go b/pkg/vm/vmcontext/actor_state_handle.go new file mode 100644 index 0000000000..997bb3a65f --- /dev/null +++ b/pkg/vm/vmcontext/actor_state_handle.go @@ -0,0 +1,83 @@ +package vmcontext + +import ( + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + "github.com/filecoin-project/venus/pkg/vm/runtime" + "github.com/ipfs/go-cid" +) + +type actorStateHandle struct { + ctx actorStateHandleContext + // validations is a list of validations that the vm will execute after the actor code finishes. + // + // Any validation failure will result in the execution getting aborted. + validations []validateFn + // used_objs holds the pointers To objs that have been used with this handle and their expected stateView cid. + usedObjs map[interface{}]cid.Cid +} + +// validateFn returns True if it's valid. +type validateFn = func() bool + +type actorStateHandleContext interface { + AllowSideEffects(bool) + Create(obj cbor.Marshaler) cid.Cid + Load(obj cbor.Unmarshaler) cid.Cid + Replace(expected cid.Cid, obj cbor.Marshaler) cid.Cid +} + +// NewActorStateHandle returns a new `ActorStateHandle` +// +// Note: just visible for testing. 
+func NewActorStateHandle(ctx actorStateHandleContext) rt5.StateHandle { + aux := newActorStateHandle(ctx) + return &aux +} + +func newActorStateHandle(ctx actorStateHandleContext) actorStateHandle { + return actorStateHandle{ + ctx: ctx, + validations: []validateFn{}, + usedObjs: map[interface{}]cid.Cid{}, + } +} + +var _ rt5.StateHandle = (*actorStateHandle)(nil) + +func (h *actorStateHandle) StateCreate(obj cbor.Marshaler) { + // Store the new stateView. + c := h.ctx.Create(obj) + // Store the expected CID of obj. + h.usedObjs[obj] = c +} + +// Readonly is the implementation of the ActorStateHandle interface. +func (h *actorStateHandle) StateReadonly(obj cbor.Unmarshaler) { + // Load stateView To obj. + c := h.ctx.Load(obj) + // Track the stateView and expected CID used by the caller. + h.usedObjs[obj] = c +} + +// Transaction is the implementation of the ActorStateHandle interface. +func (h *actorStateHandle) StateTransaction(obj cbor.Er, f func()) { + if obj == nil { + runtime.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil To Transaction()") + } + + // Load stateView To obj. 
+ prior := h.ctx.Load(obj) + + // Call user code allowing mutation but not side-effects + h.ctx.AllowSideEffects(false) + f() + h.ctx.AllowSideEffects(true) + + // Store the new stateView + newCid := h.ctx.Replace(prior, obj) + + // Record the expected stateView of obj + h.usedObjs[obj] = newCid +} diff --git a/pkg/vm/vmcontext/actor_state_handle_test.go b/pkg/vm/vmcontext/actor_state_handle_test.go new file mode 100644 index 0000000000..0d8e6c469c --- /dev/null +++ b/pkg/vm/vmcontext/actor_state_handle_test.go @@ -0,0 +1,274 @@ +package vmcontext_test + +import ( + "fmt" + "io" + "testing" + + "github.com/filecoin-project/venus/pkg/util" + + "github.com/filecoin-project/go-state-types/cbor" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" +) + +type testActorStateHandleState struct { + FieldA string +} + +func (t *testActorStateHandleState) Clone(b interface{}) error { //nolint + newBoj := &testActorStateHandleState{} + newBoj.FieldA = t.FieldA + b = newBoj //nolint:staticcheck + return nil +} + +func (t *testActorStateHandleState) MarshalCBOR(w io.Writer) error { + if _, err := w.Write([]byte(t.FieldA)); err != nil { + return err + } + return nil +} + +func (t *testActorStateHandleState) UnmarshalCBOR(r io.Reader) error { + bs := make([]byte, 1024) + n, err := r.Read(bs) + if err != nil { + return err + } + t.FieldA = string(bs[:n]) + return nil +} + +func setup() testSetup { + initialstate := testActorStateHandleState{FieldA: "fakestate"} + + store := vmcontext.NewTestStorage(&initialstate) + initialhead, _ := util.MakeCid(&initialstate) + ctx := fakeActorStateHandleContext{ + head: initialhead, + store: store, + allowSideEffects: true, + } + h := vmcontext.NewActorStateHandle(&ctx) + + cleanup := func() {} + + return testSetup{ + initialstate: initialstate, 
+ h: h, + cleanup: cleanup, + } +} + +func TestActorStateHandle(t *testing.T) { + tf.UnitTest(t) + + // this test case verifies that the `Validate` works when nothing was done with the stateView + t.Run("noop", func(t *testing.T) { + ts := setup() + defer ts.cleanup() + }) + + t.Run("readonly", func(t *testing.T) { + ts := setup() + defer ts.cleanup() + + var out testActorStateHandleState + ts.h.StateReadonly(&out) + + assert.Equal(t, out, ts.initialstate) + }) + + t.Run("readonly multiple times", func(t *testing.T) { + ts := setup() + defer ts.cleanup() + + var out testActorStateHandleState + ts.h.StateReadonly(&out) + ts.h.StateReadonly(&out) + + assert.Equal(t, out, ts.initialstate) + }) + + t.Run("readonly promotion", func(t *testing.T) { + ts := setup() + defer ts.cleanup() + + var out testActorStateHandleState + ts.h.StateReadonly(&out) + + ts.h.StateTransaction(&out, func() { + out.FieldA = "changed!" + }) + }) + + t.Run("transaction", func(t *testing.T) { + ts := setup() + defer ts.cleanup() + + var out testActorStateHandleState + expected := "new stateView" + + ts.h.StateTransaction(&out, func() { + // check stateView is not what we are going To use + assert.NotEqual(t, out.FieldA, expected) + out.FieldA = expected + }) + // check that it changed + assert.Equal(t, out.FieldA, expected) + + ts.h.StateReadonly(&out) + // really check by loading it again + assert.Equal(t, out.FieldA, expected) + }) + + t.Run("transaction but no mutation", func(t *testing.T) { + ts := setup() + defer ts.cleanup() + + var out testActorStateHandleState + + // should work, mutating is not compulsory + ts.h.StateTransaction(&out, func() {}) + + assert.Equal(t, out, ts.initialstate) + }) + + t.Run("transaction returning Value", func(t *testing.T) { + ts := setup() + defer ts.cleanup() + + var out testActorStateHandleState + + lastResult := "" + ts.h.StateTransaction(&out, func() { + lastResult = out.FieldA + }) + + assert.Equal(t, lastResult, ts.initialstate.FieldA) + }) + + 
t.Run("transaction double whammy", func(t *testing.T) { + ts := setup() + defer ts.cleanup() + + var out testActorStateHandleState + + lastResult := "" + ts.h.StateTransaction(&out, func() { + lastResult = "changed!" + out.FieldA = lastResult + }) + + ts.h.StateTransaction(&out, func() { + lastResult = "again!" + out.FieldA = lastResult + }) + + ts.h.StateReadonly(&out) + // really check by loading it again + assert.Equal(t, out.FieldA, lastResult) + }) +} + +func TestActorStateHandleNilState(t *testing.T) { + tf.UnitTest(t) + + setup := func() (rt5.StateHandle, func()) { + store := vmcontext.NewTestStorage(nil) + ctx := fakeActorStateHandleContext{ + store: store, + allowSideEffects: true, + } + + h := vmcontext.NewActorStateHandle(&ctx) + + cleanup := func() {} + + return h, cleanup + } + + t.Run("transaction on nil stateView", func(t *testing.T) { + h, cleanup := setup() + defer cleanup() + + var out testActorStateHandleState + h.StateTransaction(&out, func() {}) + }) + + t.Run("stateView initialized after transaction", func(t *testing.T) { + h, cleanup := setup() + defer cleanup() + + var out testActorStateHandleState + h.StateTransaction(&out, func() {}) + + h.StateReadonly(&out) // should not fail + }) + + t.Run("readonly nil pointer To stateView", func(t *testing.T) { + defer mustPanic(t) + + h, cleanup := setup() + defer cleanup() + + h.StateReadonly(nil) + }) + + t.Run("transaction nil pointer To stateView", func(t *testing.T) { + defer mustPanic(t) + + h, cleanup := setup() + defer cleanup() + + h.StateTransaction(nil, func() {}) + }) +} + +type fakeActorStateHandleContext struct { + store rt5.Store + head cid.Cid + allowSideEffects bool +} + +func (ctx *fakeActorStateHandleContext) AllowSideEffects(allow bool) { + ctx.allowSideEffects = allow +} + +func (ctx *fakeActorStateHandleContext) Create(obj cbor.Marshaler) cid.Cid { + ctx.head = ctx.store.StorePut(obj) + return ctx.head +} + +func (ctx *fakeActorStateHandleContext) Load(obj cbor.Unmarshaler) 
cid.Cid { + found := ctx.store.StoreGet(ctx.head, obj) + if !found { + panic("inconsistent stateView") + } + return ctx.head +} + +func (ctx *fakeActorStateHandleContext) Replace(expected cid.Cid, obj cbor.Marshaler) cid.Cid { + if !ctx.head.Equals(expected) { + panic(fmt.Errorf("unexpected prior stateView %s expected %s", ctx.head, expected)) + } + ctx.head = ctx.store.StorePut(obj) + return ctx.head +} + +type testSetup struct { + initialstate testActorStateHandleState + h rt5.StateHandle + cleanup func() +} + +func mustPanic(t *testing.T) { + if r := recover(); r == nil { + t.Fail() + } +} diff --git a/pkg/vm/vmcontext/actor_store.go b/pkg/vm/vmcontext/actor_store.go new file mode 100644 index 0000000000..492b46f3a9 --- /dev/null +++ b/pkg/vm/vmcontext/actor_store.go @@ -0,0 +1,80 @@ +package vmcontext + +import ( + "context" + "fmt" + "reflect" + + "github.com/filecoin-project/go-state-types/exitcode" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + "github.com/ipfs/go-cid" + cbornode "github.com/ipfs/go-ipld-cbor" + xerrors "github.com/pkg/errors" + + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/pkg/vm/runtime" +) + +// ActorStorage hides the storage methods From the actors and turns the errors into runtime panics. +type ActorStorage struct { + context context.Context + inner cbornode.IpldStore + pricelist gas.Pricelist + gasTank *gas.GasTracker +} + +func NewActorStorage(ctx context.Context, inner cbornode.IpldStore, gasTank *gas.GasTracker, pricelist gas.Pricelist) *ActorStorage { + return &ActorStorage{ + context: ctx, + inner: inner, + pricelist: pricelist, + gasTank: gasTank, + } +} + +// +// implement runtime.Store for ActorStorage +// + +var _ rt5.Store = (*ActorStorage)(nil) + +// Serialization technically belongs in the actor code, rather than inside the VM. 
+// The true VM storage interface is in terms of raw bytes and, when we have user-defined, +// serialization code will be directly in those contracts. +// Our present runtime interface is at a slightly higher level for convenience, but the exit code here is the +// actor, rather than system-level, error code. +const serializationErr = exitcode.ErrSerialization + +func (s *ActorStorage) StorePut(obj cbor.Marshaler) cid.Cid { + cid, err := s.inner.Put(s.context, obj) + if err != nil { + msg := fmt.Sprintf("failed To put object %s in store: %s", reflect.TypeOf(obj), err) + if xerrors.As(err, new(cbornode.SerializationError)) { + runtime.Abortf(serializationErr, msg) + } else { + panic(msg) + } + } + return cid +} + +type notFoundErr interface { + IsNotFound() bool +} + +func (s *ActorStorage) StoreGet(cid cid.Cid, obj cbor.Unmarshaler) bool { + // gas charge must check first + if err := s.inner.Get(s.context, cid, obj); err != nil { + msg := fmt.Sprintf("failed To get object %s %s From store: %s", reflect.TypeOf(obj), cid, err) + var nfe notFoundErr + if xerrors.As(err, &nfe) && nfe.IsNotFound() { + if xerrors.As(err, new(cbornode.SerializationError)) { + runtime.Abortf(serializationErr, msg) + } + return false + } + panic(msg) + } + return true +} diff --git a/pkg/vm/vmcontext/actor_store_test.go b/pkg/vm/vmcontext/actor_store_test.go new file mode 100644 index 0000000000..521de716c7 --- /dev/null +++ b/pkg/vm/vmcontext/actor_store_test.go @@ -0,0 +1,82 @@ +package vmcontext_test + +import ( + "context" + "fmt" + "io" + "testing" + + "github.com/filecoin-project/venus/pkg/config" + cbor "github.com/ipfs/go-ipld-cbor" + + cbor2 "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/venus/pkg/vm/gas" + vmr "github.com/filecoin-project/venus/pkg/vm/runtime" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + blockstore 
"github.com/ipfs/go-ipfs-blockstore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + typegen "github.com/whyrusleeping/cbor-gen" +) + +func TestActorStore(t *testing.T) { + ctx := context.Background() + raw := cbor.NewCborStore(blockstore.NewBlockstore(datastore.NewMapDatastore())) + gasTank := gas.NewGasTracker(1e6) + priceSchedule := gas.NewPricesSchedule(config.DefaultForkUpgradeParam) + t.Run("abort on put serialization failure", func(t *testing.T) { + store := vmcontext.NewActorStorage(ctx, raw, gasTank, priceSchedule.PricelistByEpoch(0)) + _, thrown := tryPut(store, cannotCBOR{}) + abort, ok := thrown.(vmr.ExecutionPanic) + assert.NotNil(t, thrown) + assert.True(t, ok, "expected abort") + assert.Equal(t, exitcode.ErrSerialization, abort.Code()) + }) + + t.Run("abort on get serialization failure", func(t *testing.T) { + store := vmcontext.NewActorStorage(ctx, raw, gasTank, priceSchedule.PricelistByEpoch(0)) + v := typegen.CborInt(0) + + c, thrown := tryPut(store, &v) + assert.True(t, c.Defined()) + require.Nil(t, thrown) + + var v2 typegen.CborCid + thrown = tryGet(store, c, &v2) // Attempt decode into wrong type + assert.Contains(t, thrown.(string), "failed To get object *typegen.CborCid ") + }) + + t.Run("panic on get storage failure", func(t *testing.T) { + store := vmcontext.NewActorStorage(ctx, raw, gasTank, priceSchedule.PricelistByEpoch(0)) + var v typegen.CborInt + thrown := tryGet(store, cid.Undef, &v) + _, ok := thrown.(vmr.ExecutionPanic) + assert.NotNil(t, thrown) + assert.False(t, ok, "expected non-abort panic") + }) +} + +func tryPut(s *vmcontext.ActorStorage, v cbor2.Marshaler) (c cid.Cid, thrown interface{}) { + defer func() { + thrown = recover() + }() + c = s.StorePut(v) + return +} + +func tryGet(s *vmcontext.ActorStorage, c cid.Cid, v cbor2.Unmarshaler) (thrown interface{}) { + defer func() { + thrown = recover() + }() + s.StoreGet(c, v) + return +} + +type cannotCBOR struct{} + +func (c cannotCBOR) 
MarshalCBOR(w io.Writer) error { + return fmt.Errorf("no") +} diff --git a/pkg/vm/vmcontext/gas_charge_ipld.go b/pkg/vm/vmcontext/gas_charge_ipld.go new file mode 100644 index 0000000000..94e92c4247 --- /dev/null +++ b/pkg/vm/vmcontext/gas_charge_ipld.go @@ -0,0 +1,49 @@ +package vmcontext + +import ( + "context" + + "github.com/filecoin-project/venus/pkg/vm/gas" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + xerrors "github.com/pkg/errors" +) + +var _ cbor.IpldBlockstore = (*GasChargeBlockStore)(nil) + +// GasChargeBlockStore in addition to the basic blockstore read and write capabilities, a certain amount of gas consumption will be deducted for each operation +type GasChargeBlockStore struct { + gasTank *gas.GasTracker + pricelist gas.Pricelist + inner cbor.IpldBlockstore +} + +func NewGasChargeBlockStore(gasTank *gas.GasTracker, pricelist gas.Pricelist, inner cbor.IpldBlockstore) *GasChargeBlockStore { + return &GasChargeBlockStore{ + gasTank: gasTank, + pricelist: pricelist, + inner: inner, + } +} + +// Get charge gas and than get the value of cid +func (bs *GasChargeBlockStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + bs.gasTank.Charge(bs.pricelist.OnIpldGet(), "storage get %s", c) + + blk, err := bs.inner.Get(ctx, c) + if err != nil { + panic(xerrors.WithMessage(err, "failed to get block from blockstore")) + } + return blk, nil +} + +// Put first charge gas and than save block +func (bs *GasChargeBlockStore) Put(ctx context.Context, blk blocks.Block) error { + bs.gasTank.Charge(bs.pricelist.OnIpldPut(len(blk.RawData())), "%s storage put %d bytes", blk.Cid(), len(blk.RawData())) + + if err := bs.inner.Put(ctx, blk); err != nil { + panic(xerrors.WithMessage(err, "failed to write data to disk")) + } + return nil +} diff --git a/pkg/vm/vmcontext/interpreter.go b/pkg/vm/vmcontext/interpreter.go new file mode 100644 index 0000000000..1ab67726c8 --- /dev/null +++ 
b/pkg/vm/vmcontext/interpreter.go @@ -0,0 +1,12 @@ +package vmcontext + +import ( + "github.com/filecoin-project/venus/pkg/state/tree" +) + +// VMInterpreter orchestrates the execution of messages from a tipset on that tipset’s parent State. +type VMInterpreter interface { + Interface + + StateTree() tree.Tree +} diff --git a/pkg/vm/vmcontext/invocation_context.go b/pkg/vm/vmcontext/invocation_context.go new file mode 100644 index 0000000000..a47206697f --- /dev/null +++ b/pkg/vm/vmcontext/invocation_context.go @@ -0,0 +1,612 @@ +package vmcontext + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + "github.com/ipfs/go-cid" + ipfscbor "github.com/ipfs/go-ipld-cbor" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/pkg/vm/dispatch" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/pkg/vm/runtime" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/aerrors" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/account" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var gasOnActorExec = gas.NewGasCharge("OnActorExec", 0, 0) + +// Context for a top-level invocation sequence. +type topLevelContext struct { + originatorStableAddress address.Address // Stable (public key) address of the top-level message sender. 
+ originatorCallSeq uint64 // Call sequence number of the top-level message. + newActorAddressCount uint64 // Count of calls To NewActorAddress (mutable). +} + +// Context for an individual message invocation, including inter-actor sends. +type invocationContext struct { + vm *LegacyVM + topLevel *topLevelContext + originMsg VmMessage // msg not trasfer from and to address + msg VmMessage // The message being processed + gasTank *gas.GasTracker + randSource HeadChainRandomness + isCallerValidated bool + depth uint64 + allowSideEffects bool + stateHandle internalActorStateHandle + gasIpld ipfscbor.IpldStore +} + +type internalActorStateHandle interface { + rt5.StateHandle +} + +func newInvocationContext(rt *LegacyVM, gasIpld ipfscbor.IpldStore, topLevel *topLevelContext, msg VmMessage, + gasTank *gas.GasTracker, randSource HeadChainRandomness, parent *invocationContext, +) invocationContext { + orginMsg := msg + ctx := invocationContext{ + vm: rt, + topLevel: topLevel, + originMsg: orginMsg, + gasTank: gasTank, + randSource: randSource, + isCallerValidated: false, + depth: 0, + allowSideEffects: true, + stateHandle: nil, + gasIpld: gasIpld, + } + + if parent != nil { + // TODO: The version check here should be unnecessary, but we can wait to take it out + if !parent.allowSideEffects && rt.NetworkVersion() >= network.Version7 { + runtime.Abortf(exitcode.SysErrForbidden, "internal calls currently disabled") + } + // ctx.gasUsed = parent.gasUsed + // ctx.origin = parent.origin + // ctx.originNonce = parent.originNonce + // ctx.numActorsCreated = parent.numActorsCreated + ctx.depth = parent.depth + 1 + } + + if ctx.depth > MaxCallDepth && rt.NetworkVersion() >= network.Version6 { + runtime.Abortf(exitcode.SysErrForbidden, "message execution exceeds call depth") + } + + // Note: the toActor and stateHandle are loaded during the `invoke()` + resF, ok := rt.normalizeAddress(msg.From) + if !ok { + runtime.Abortf(exitcode.SysErrInvalidReceiver, "resolve msg.From [%s] address 
failed", msg.From) + } + msg.From = resF + + if rt.NetworkVersion() > network.Version3 { + resT, _ := rt.normalizeAddress(msg.To) + // may be set to undef if recipient doesn't exist yet + msg.To = resT + } + ctx.msg = msg + + return ctx +} + +type stateHandleContext invocationContext + +func (shc *stateHandleContext) AllowSideEffects(allow bool) { + shc.allowSideEffects = allow +} + +func (shc *stateHandleContext) Create(obj cbor.Marshaler) cid.Cid { + actr := shc.loadActor() + if actr.Head.Defined() { + runtime.Abortf(exitcode.SysErrorIllegalActor, "failed To construct actor stateView: already initialized") + } + c := shc.store().StorePut(obj) + actr.Head = c + shc.storeActor(actr) + return c +} + +func (shc *stateHandleContext) Load(obj cbor.Unmarshaler) cid.Cid { + // The actor must be loaded From store every time since the stateView may have changed via a different stateView handle + // (e.g. in a recursive call). + actr := shc.loadActor() + c := actr.Head + if !c.Defined() { + runtime.Abortf(exitcode.SysErrorIllegalActor, "failed To load undefined stateView, must construct first") + } + found := shc.store().StoreGet(c, obj) + if !found { + panic(fmt.Errorf("failed To load stateView for actor %s, CID %s", shc.msg.To, c)) + } + return c +} + +func (shc *stateHandleContext) Replace(expected cid.Cid, obj cbor.Marshaler) cid.Cid { + actr := shc.loadActor() + if !actr.Head.Equals(expected) { + panic(fmt.Errorf("unexpected prior stateView %s for actor %s, expected %s", actr.Head, shc.msg.To, expected)) + } + c := shc.store().StorePut(obj) + actr.Head = c + shc.storeActor(actr) + return c +} + +func (shc *stateHandleContext) store() rt5.Store { + return ((*invocationContext)(shc)).Store() +} + +func (shc *stateHandleContext) loadActor() *types.Actor { + entry, found, err := shc.vm.State.GetActor(shc.vm.context, shc.originMsg.To) + if err != nil { + panic(err) + } + if !found { + panic(fmt.Errorf("failed To find actor %s for stateView", shc.originMsg.To)) + } + return 
entry +} + +func (shc *stateHandleContext) storeActor(actr *types.Actor) { + err := shc.vm.State.SetActor(shc.vm.context, shc.originMsg.To, actr) + if err != nil { + panic(err) + } +} + +// runtime aborts are trapped by invoke, it will always return an exit code. +func (ctx *invocationContext) invoke() (ret []byte, errcode exitcode.ExitCode) { + // Checkpoint stateView, for restoration on revert + // Note that changes prior To invocation (sequence number bump and gas prepayment) persist even if invocation fails. + err := ctx.vm.snapshot() + if err != nil { + panic(err) + } + defer ctx.vm.clearSnapshot() + + // Install handler for abort, which rolls back all stateView changes From this and any nested invocations. + // This is the only path by which a non-OK exit code may be returned. + defer func() { + if r := recover(); r != nil { + + if err := ctx.vm.revert(); err != nil { + panic(err) + } + switch e := r.(type) { + case runtime.ExecutionPanic: + p := e + + vmlog.Warnw("Abort during actor execution.", + "errorMessage", p, + "exitCode", p.Code(), + "sender", ctx.originMsg.From, + "receiver", ctx.originMsg.To, + "methodNum", ctx.originMsg.Method, + "Value", ctx.originMsg.Value, + "gasLimit", ctx.gasTank.GasAvailable) + ret = []byte{} // The Empty here should never be used, but slightly safer than zero Value. + errcode = p.Code() + default: + errcode = 1 + ret = []byte{} + // do not trap unknown panics + vmlog.Errorf("spec actors failure: %s", r) + // debug.PrintStack() + } + } + }() + + // pre-dispatch + // 1. charge gas for message invocation + // 2. load target actor + // 3. transfer optional funds + // 4. short-circuit _Send_ Method + // 5. create target stateView handle + // assert From address is an ID address. + if ctx.msg.From.Protocol() != address.ID { + panic("bad code: sender address MUST be an ID address at invocation time") + } + + // 1. 
load target actor + // Note: we replace the "To" address with the normalized version + toActor, toIDAddr := ctx.resolveTarget(ctx.originMsg.To) + if ctx.vm.NetworkVersion() > network.Version3 { + ctx.msg.To = toIDAddr + } + + // 2. charge gas for msg + ctx.gasTank.Charge(ctx.vm.pricelist.OnMethodInvocation(ctx.originMsg.Value, ctx.originMsg.Method), "Method invocation") + + // 3. transfer funds carried by the msg + if !ctx.originMsg.Value.Nil() && !ctx.originMsg.Value.IsZero() { + ctx.vm.transfer(ctx.msg.From, toIDAddr, ctx.originMsg.Value, ctx.vm.NetworkVersion()) + } + + // 4. if we are just sending funds, there is nothing else To do. + if ctx.originMsg.Method == builtin.MethodSend { + return nil, exitcode.Ok + } + + actorImpl := ctx.vm.getActorImpl(toActor.Code, ctx.Runtime()) + + // 5. create target stateView handle + stateHandle := newActorStateHandle((*stateHandleContext)(ctx)) + ctx.stateHandle = &stateHandle + + // dispatch + adapter := newRuntimeAdapter(ctx) // runtimeAdapter{ctx: ctx} + var extErr *dispatch.ExcuteError + ret, extErr = actorImpl.Dispatch(ctx.originMsg.Method, ctx.vm.NetworkVersion(), adapter, ctx.originMsg.Params) + if extErr != nil { + runtime.Abortf(extErr.ExitCode(), extErr.Error()) + } + + // post-dispatch + // 1. check caller was validated + // 2. check stateView manipulation was valid + // 4. success! + + // 1. check caller was validated + if !ctx.isCallerValidated { + runtime.Abortf(exitcode.SysErrorIllegalActor, "Caller MUST be validated during Method execution") + } + + // Reset To pre-invocation stateView + ctx.stateHandle = nil + + // 3. success! + return ret, exitcode.Ok +} + +// resolveTarget loads and actor and returns its ActorID address. +// +// If the target actor does not exist, and the target address is a pub-key address, +// a new account actor will be created. +// Otherwise, this Method will abort execution. 
+func (ctx *invocationContext) resolveTarget(target address.Address) (*types.Actor, address.Address) { + // resolve the target address via the InitActor, and attempt To load stateView. + initActorEntry, found, err := ctx.vm.State.GetActor(ctx.vm.context, init_.Address) + if err != nil { + panic(err) + } + if !found { + runtime.Abort(exitcode.SysErrSenderInvalid) + } + + if target == init_.Address { + return initActorEntry, target + } + + // get init State + state, err := init_.Load(ctx.vm.ContextStore(), initActorEntry) + if err != nil { + panic(err) + } + + // lookup the ActorID based on the address + + _, found, err = ctx.vm.State.GetActor(ctx.vm.context, target) + if err != nil { + panic(err) + } + //nolint + if !found { + // Charge gas now that easy checks are done + + ctx.gasTank.Charge(ctx.vm.pricelist.OnCreateActor(), "CreateActor address %s", target) + // actor does not exist, create an account actor + // - precond: address must be a pub-key + // - sent init actor a msg To create the new account + targetIDAddr, err := ctx.vm.State.RegisterNewAddress(target) + if err != nil { + panic(err) + } + + if target.Protocol() != address.SECP256K1 && target.Protocol() != address.BLS { + // Don't implicitly create an account actor for an address without an associated key. 
+ runtime.Abort(exitcode.SysErrInvalidReceiver) + } + ver, err := actorstypes.VersionForNetwork(ctx.vm.NetworkVersion()) + if err != nil { + panic(err) + } + actorCode, found := actors.GetActorCodeID(ver, actors.AccountKey) + if !found { + panic(fmt.Errorf("failed to get account actor code ID for actors version %d", ver)) + } + ctx.CreateActor(actorCode, targetIDAddr) + + // call constructor on account + newMsg := VmMessage{ + From: builtin.SystemActorAddr, + To: targetIDAddr, + Value: big.Zero(), + Method: account.Methods.Constructor, + // use original address as constructor Params + // Note: constructor takes a pointer + Params: &target, + } + + newCtx := newInvocationContext(ctx.vm, ctx.gasIpld, ctx.topLevel, newMsg, ctx.gasTank, ctx.randSource, ctx) + _, code := newCtx.invoke() + if code.IsError() { + // we failed To construct an account actor.. + runtime.Abort(code) + } + + // load actor + targetActor, _, err := ctx.vm.State.GetActor(ctx.vm.context, target) + if err != nil { + panic(err) + } + return targetActor, targetIDAddr + } else { + // load id address + targetIDAddr, found, err := state.ResolveAddress(target) + if err != nil { + panic(err) + } + + if !found { + panic(fmt.Errorf("unreachable: actor is supposed To exist but it does not. 
addr: %s, idAddr: %s", target, targetIDAddr)) + } + + // load actor + targetActor, found, err := ctx.vm.State.GetActor(ctx.vm.context, targetIDAddr) + if err != nil { + panic(err) + } + + if !found { + runtime.Abort(exitcode.SysErrInvalidReceiver) + } + + return targetActor, targetIDAddr + } +} + +func (ctx *invocationContext) resolveToKeyAddr(addr address.Address) (address.Address, error) { + if addr.Protocol() == address.BLS || addr.Protocol() == address.SECP256K1 { + return addr, nil + } + + act, found, err := ctx.vm.State.GetActor(ctx.vm.context, addr) + if !found || err != nil { + return address.Undef, fmt.Errorf("failed to find actor: %s", addr) + } + + aast, err := account.Load(adt.WrapStore(ctx.vm.context, ctx.vm.store), act) + if err != nil { + return address.Undef, fmt.Errorf("failed to get account actor State for %s: %v", addr, err) + } + + return aast.PubkeyAddress() +} + +// implement runtime.InvocationContext for invocationContext +var _ runtime.InvocationContext = (*invocationContext)(nil) + +// Runtime implements runtime.InvocationContext. +func (ctx *invocationContext) Runtime() runtime.Runtime { + return ctx.vm +} + +// Store implements runtime.Runtime. +func (ctx *invocationContext) Store() rt5.Store { + return NewActorStorage(ctx.vm.context, ctx.gasIpld, ctx.gasTank, ctx.vm.pricelist) +} + +// Message implements runtime.InvocationContext. +func (ctx *invocationContext) Message() rt5.Message { + return ctx.msg +} + +// ValidateCaller implements runtime.InvocationContext. +func (ctx *invocationContext) ValidateCaller(pattern runtime.CallerPattern) { + if ctx.isCallerValidated { + runtime.Abortf(exitcode.SysErrorIllegalActor, "Method must validate caller identity exactly once") + } + if !pattern.IsMatch((*patternContext2)(ctx)) { + runtime.Abortf(exitcode.SysErrForbidden, "Method invoked by incorrect caller") + } + ctx.isCallerValidated = true +} + +// State implements runtime.InvocationContext. 
+func (ctx *invocationContext) State() rt5.StateHandle { + return ctx.stateHandle +} + +// Send implements runtime.InvocationContext. +func (ctx *invocationContext) Send(toAddr address.Address, methodNum abi.MethodNum, params cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode { + // check if side-effects are allowed + if !ctx.allowSideEffects { + runtime.Abortf(exitcode.SysErrorIllegalActor, "Calling Send() is not allowed during side-effect lock") + } + // prepare + // 1. alias fromActor + from := ctx.msg.To + + // 2. build internal message + newMsg := VmMessage{ + From: from, + To: toAddr, + Value: value, + Method: methodNum, + Params: params, + } + + // 3. build new context + newCtx := newInvocationContext(ctx.vm, ctx.gasIpld, ctx.topLevel, newMsg, ctx.gasTank, ctx.randSource, ctx) + // 4. invoke + ret, code := newCtx.invoke() + if code == 0 { + _ = ctx.gasTank.TryCharge(gasOnActorExec) + if err := out.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { + runtime.Abortf(exitcode.ErrSerialization, "failed To unmarshal return Value: %s", err) + } + } + return code +} + +// / Balance implements runtime.InvocationContext. +func (ctx *invocationContext) Balance() abi.TokenAmount { + toActor, found, err := ctx.vm.State.GetActor(ctx.vm.context, ctx.originMsg.To) + if err != nil { + panic(fmt.Errorf("cannot find to actor %v", err)) + } + if !found { + return abi.NewTokenAmount(0) + } + return toActor.Balance +} + +// implement runtime.InvocationContext for invocationContext +var _ runtime.ExtendedInvocationContext = (*invocationContext)(nil) + +// NextActorAddress predicts the address of the next actor created by this address. 
+// +// Code is adapted from vm.Runtime#NewActorAddress() +func (ctx *invocationContext) NewActorAddress() address.Address { + buf := new(bytes.Buffer) + origin, err := ctx.resolveToKeyAddr(ctx.topLevel.originatorStableAddress) + if err != nil { + panic(err) + } + + err = origin.MarshalCBOR(buf) + if err != nil { + panic(err) + } + + err = binary.Write(buf, binary.BigEndian, ctx.topLevel.originatorCallSeq) + if err != nil { + panic(err) + } + + err = binary.Write(buf, binary.BigEndian, ctx.topLevel.newActorAddressCount) + if err != nil { + panic(err) + } + + actorAddress, err := address.NewActorAddress(buf.Bytes()) + if err != nil { + panic(err) + } + return actorAddress +} + +// CreateActor implements runtime.ExtendedInvocationContext. +func (ctx *invocationContext) CreateActor(codeID cid.Cid, addr address.Address) { + if addr == address.Undef && ctx.vm.NetworkVersion() >= network.Version7 { + runtime.Abortf(exitcode.SysErrorIllegalArgument, "CreateActor with Undef address") + } + + vmlog.Debugf("creating actor, friendly-name: %s, code: %s, addr: %s\n", builtin.ActorNameByCode(codeID), codeID, addr) + + // Check existing address. If nothing there, create empty actor. + // Note: we are storing the actors by ActorID *address* + _, found, err := ctx.vm.State.GetActor(ctx.vm.context, addr) + if err != nil { + panic(err) + } + if found { + runtime.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists") + } + + newActor := &types.Actor{ + // make this the right 'type' of actor + Code: codeID, + Balance: abi.NewTokenAmount(0), + Head: EmptyObjectCid, + Nonce: 0, + } + if err := ctx.vm.State.SetActor(ctx.vm.context, addr, newActor); err != nil { + panic(err) + } + + _ = ctx.gasTank.TryCharge(gasOnActorExec) +} + +// DeleteActor implements runtime.ExtendedInvocationContext. 
+func (ctx *invocationContext) DeleteActor(beneficiary address.Address) { + receiver := ctx.originMsg.To + ctx.gasTank.Charge(ctx.vm.pricelist.OnDeleteActor(), "DeleteActor %s", receiver) + receiverActor, found, err := ctx.vm.State.GetActor(ctx.vm.context, receiver) + if err != nil { + if errors.Is(err, types.ErrActorNotFound) { + runtime.Abortf(exitcode.SysErrorIllegalActor, "failed to load actor in delete actor: %s", err) + } + panic(aerrors.Fatalf("failed to get actor: %s", err)) + } + + if !found { + runtime.Abortf(exitcode.SysErrorIllegalActor, "delete non-existent actor %v", receiverActor) + } + + if !receiverActor.Balance.IsZero() { + // TODO: Should be safe to drop the version-check, + // since only the paych actor called this pre-version 7, but let's leave it for now + if ctx.vm.NetworkVersion() >= network.Version7 { + beneficiaryID, found := ctx.vm.normalizeAddress(beneficiary) + if !found { + runtime.Abortf(exitcode.SysErrorIllegalArgument, "beneficiary doesn't exist") + } + + if beneficiaryID == receiver { + runtime.Abortf(exitcode.SysErrorIllegalArgument, "benefactor cannot be beneficiary") + } + } + + // Transfer the executing actor's balance to the beneficiary + ctx.vm.transfer(receiver, beneficiary, receiverActor.Balance, ctx.vm.NetworkVersion()) + } + + if err := ctx.vm.State.DeleteActor(ctx.vm.context, receiver); err != nil { + panic(aerrors.Fatalf("failed to delete actor: %s", err)) + } + + _ = ctx.gasTank.TryCharge(gasOnActorExec) +} + +func (ctx *invocationContext) stateView() SyscallsStateView { + // The stateView tree's root is not committed until the end of a tipset, so we can't use the external stateView view + // type for this implementation. + // Maybe we could re-work it To use a root HAMT node rather than root CID. 
+ return newSyscallsStateView(ctx, ctx.vm) +} + +// patternContext implements the PatternContext +type patternContext2 invocationContext + +var _ runtime.PatternContext = (*patternContext2)(nil) + +func (ctx *patternContext2) CallerCode() cid.Cid { + toActor, found, err := ctx.vm.State.GetActor(ctx.vm.context, ctx.originMsg.From) + if err != nil || !found { + panic(fmt.Errorf("cannt find to actor %v", err)) + } + return toActor.Code +} + +func (ctx *patternContext2) CallerAddr() address.Address { + return ctx.msg.From +} diff --git a/internal/pkg/vm/internal/pattern/pattern.go b/pkg/vm/vmcontext/pattern.go similarity index 79% rename from internal/pkg/vm/internal/pattern/pattern.go rename to pkg/vm/vmcontext/pattern.go index 9016c572fe..27063d533d 100644 --- a/internal/pkg/vm/internal/pattern/pattern.go +++ b/pkg/vm/vmcontext/pattern.go @@ -1,10 +1,11 @@ -package pattern +package vmcontext import ( "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime" - "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/pkg/vm/runtime" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" ) // IsAccountActor pattern checks if the caller is an account actor. @@ -13,17 +14,17 @@ type IsAccountActor struct{} // IsMatch returns "True" if the patterns matches func (IsAccountActor) IsMatch(ctx runtime.PatternContext) bool { - return builtin.AccountActorCodeID.Equals(ctx.CallerCode()) + return builtin.IsAccountActor(ctx.CallerCode()) } // IsAInitActor pattern checks if the caller is the init actor. 
// Dragons: delete after switching to new actors -type IsAInitActor struct{} +// type IsAInitActor struct{} // IsMatch returns "True" if the patterns matches -func (IsAInitActor) IsMatch(ctx runtime.PatternContext) bool { - return builtin.InitActorCodeID.Equals(ctx.CallerCode()) -} +//func (IsAInitActor) IsMatch(ctx runtime.PatternContext) bool { +// return builtin.IsInitActor(ctx.CallerCode()) +//} // Any patterns always passses. type Any struct{} diff --git a/pkg/vm/vmcontext/runtime_adapter.go b/pkg/vm/vmcontext/runtime_adapter.go new file mode 100644 index 0000000000..26c5d3dba1 --- /dev/null +++ b/pkg/vm/vmcontext/runtime_adapter.go @@ -0,0 +1,334 @@ +package vmcontext + +import ( + "context" + "fmt" + + "github.com/ipfs/go-cid" + cbor2 "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-state-types/rt" + rt0 "github.com/filecoin-project/specs-actors/actors/runtime" + rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + rt3 "github.com/filecoin-project/specs-actors/v3/actors/runtime" + rt4 "github.com/filecoin-project/specs-actors/v4/actors/runtime" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + rt6 "github.com/filecoin-project/specs-actors/v6/actors/runtime" + rt7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/pkg/vm/runtime" + "github.com/filecoin-project/venus/venus-shared/actors/aerrors" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var EmptyObjectCid cid.Cid + +func init() { + cst := 
cbor2.NewMemCborStore() + emptyobject, err := cst.Put(context.TODO(), []struct{}{}) + if err != nil { + panic(err) + } + + EmptyObjectCid = emptyobject +} + +var actorLog = logging.Logger("vm.actors") + +var ( + _ rt0.Runtime = (*runtimeAdapter)(nil) + _ rt2.Runtime = (*runtimeAdapter)(nil) + _ rt3.Runtime = (*runtimeAdapter)(nil) + _ rt4.Runtime = (*runtimeAdapter)(nil) + _ rt5.Runtime = (*runtimeAdapter)(nil) + _ rt6.Runtime = (*runtimeAdapter)(nil) + _ rt7.Runtime = (*runtimeAdapter)(nil) +) + +type runtimeAdapter struct { + ctx *invocationContext + syscalls +} + +func newRuntimeAdapter(ctx *invocationContext) *runtimeAdapter { + return &runtimeAdapter{ctx: ctx, syscalls: syscalls{ + impl: ctx.vm.vmOption.SysCallsImpl, + vm: ctx.vm, + gasBlockStore: ctx.gasIpld, + vmMsg: ctx.msg, + gasTank: ctx.gasTank, + pricelist: ctx.vm.pricelist, + stateView: ctx.stateView(), + }} +} + +func (a *runtimeAdapter) Caller() address.Address { + /* if a.ctx.Message().Caller().Protocol() != address.ID { + panic("runtime message has a non-ID caller") + }*/ + return a.ctx.Message().Caller() +} + +func (a *runtimeAdapter) Receiver() address.Address { + //todo refer lotus imple + /* if a.ctx.Message().Receiver() != address.Undef && a.ctx.Message().Receiver().Protocol() != address.ID { + panic("runtime message has a non-ID receiver") + }*/ + return a.ctx.Message().Receiver() +} + +func (a *runtimeAdapter) ValueReceived() abi.TokenAmount { + return a.ctx.Message().ValueReceived() +} + +func (a *runtimeAdapter) StateCreate(obj cbor.Marshaler) { + c := a.StorePut(obj) + err := a.stateCommit(EmptyObjectCid, c) + if err != nil { + panic(fmt.Errorf("failed To commit stateView after creating object: %w", err)) + } +} + +func (a *runtimeAdapter) stateCommit(oldh, newh cid.Cid) error { + // TODO: we can make this more efficient in the future... 
+ act, found, err := a.ctx.vm.State.GetActor(a.Context(), a.Receiver()) + if !found || err != nil { + return fmt.Errorf("failed To get actor To commit stateView, %s", err) + } + + if act.Head != oldh { + return fmt.Errorf("failed To update, inconsistent base reference, %s", err) + } + + act.Head = newh + if err := a.ctx.vm.State.SetActor(a.Context(), a.Receiver(), act); err != nil { + return fmt.Errorf("failed To set actor in commit stateView, %s", err) + } + + return nil +} + +func (a *runtimeAdapter) StateReadonly(obj cbor.Unmarshaler) { + act, found, err := a.ctx.vm.State.GetActor(a.Context(), a.Receiver()) + if !found || err != nil { + a.Abortf(exitcode.SysErrorIllegalArgument, "failed To get actor for Readonly stateView: %s", err) + } + a.StoreGet(act.Head, obj) +} + +func (a *runtimeAdapter) StateTransaction(obj cbor.Er, f func()) { + if obj == nil { + a.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil To Transaction()") + } + + act, found, err := a.ctx.vm.State.GetActor(a.Context(), a.Receiver()) + if !found || err != nil { + a.Abortf(exitcode.SysErrorIllegalActor, "failed To get actor for Transaction: %s", err) + } + baseState := act.Head + a.StoreGet(baseState, obj) + + a.ctx.allowSideEffects = false + f() + a.ctx.allowSideEffects = true + + c := a.StorePut(obj) + + err = a.stateCommit(baseState, c) + if err != nil { + panic(fmt.Errorf("failed To commit stateView after transaction: %w", err)) + } +} + +func (a *runtimeAdapter) StoreGet(c cid.Cid, o cbor.Unmarshaler) bool { + return a.ctx.Store().StoreGet(c, o) +} + +func (a *runtimeAdapter) StorePut(x cbor.Marshaler) cid.Cid { + return a.ctx.Store().StorePut(x) +} + +func (a *runtimeAdapter) NetworkVersion() network.Version { + return a.stateView.GetNetworkVersion(a.Context(), a.CurrEpoch()) +} + +func (a *runtimeAdapter) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { + opt := a.ctx.vm.vmOption + res, err := 
opt.Rnd.ChainGetRandomnessFromBeacon(a.Context(), personalization, randEpoch, entropy) + if err != nil { + panic(aerrors.Fatalf("could not get beacon randomness: %s", err)) + } + return res +} + +func (a *runtimeAdapter) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { + opt := a.ctx.vm.vmOption + res, err := opt.Rnd.ChainGetRandomnessFromTickets(a.Context(), personalization, randEpoch, entropy) + if err != nil { + panic(aerrors.Fatalf("could not get ticket randomness: %s", err)) + } + return res +} + +func (a *runtimeAdapter) Send(toAddr address.Address, methodNum abi.MethodNum, params cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode { + return a.ctx.Send(toAddr, methodNum, params, value, out) +} + +func (a *runtimeAdapter) ChargeGas(name string, compute int64, virtual int64) { + a.gasTank.Charge(gas.NewGasCharge(name, compute, 0).WithVirtual(virtual, 0), "runtimeAdapter charge gas") +} + +func (a *runtimeAdapter) Log(level rt.LogLevel, msg string, args ...interface{}) { + switch level { + case rt.DEBUG: + actorLog.Debugf(msg, args...) + case rt.INFO: + actorLog.Infof(msg, args...) + case rt.WARN: + actorLog.Warnf(msg, args...) + case rt.ERROR: + actorLog.Errorf(msg, args...) + } +} + +// Message implements Runtime. +func (a *runtimeAdapter) Message() rt5.Message { + return a.ctx.Message() +} + +// CurrEpoch implements Runtime. +func (a *runtimeAdapter) CurrEpoch() abi.ChainEpoch { + return a.ctx.Runtime().CurrentEpoch() +} + +// ImmediateCaller implements Runtime. +func (a *runtimeAdapter) ImmediateCaller() address.Address { + return a.ctx.Message().Caller() +} + +// ValidateImmediateCallerAcceptAny implements Runtime. +func (a *runtimeAdapter) ValidateImmediateCallerAcceptAny() { + a.ctx.ValidateCaller(Any{}) +} + +// ValidateImmediateCallerIs implements Runtime. 
+func (a *runtimeAdapter) ValidateImmediateCallerIs(addrs ...address.Address) { + a.ctx.ValidateCaller(AddressIn{Addresses: addrs}) +} + +// ValidateImmediateCallerType implements Runtime. +func (a *runtimeAdapter) ValidateImmediateCallerType(codes ...cid.Cid) { + a.ctx.ValidateCaller(CodeIn{Codes: codes}) +} + +// CurrentBalance implements Runtime. +func (a *runtimeAdapter) CurrentBalance() abi.TokenAmount { + return a.ctx.Balance() +} + +// ResolveAddress implements Runtime. +func (a *runtimeAdapter) ResolveAddress(addr address.Address) (address.Address, bool) { + return a.ctx.vm.normalizeAddress(addr) +} + +// GetActorCodeCID implements Runtime. +func (a *runtimeAdapter) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) { + entry, found, err := a.ctx.vm.State.GetActor(a.Context(), addr) + if !found { + return cid.Undef, false + } + if err != nil { + panic(err) + } + return entry.Code, true +} + +// Abortf implements Runtime. +func (a *runtimeAdapter) Abortf(errExitCode exitcode.ExitCode, msg string, args ...interface{}) { + runtime.Abortf(errExitCode, msg, args...) +} + +// NewActorAddress implements Runtime. +func (a *runtimeAdapter) NewActorAddress() address.Address { + return a.ctx.NewActorAddress() +} + +// CreateActor implements Runtime. +func (a *runtimeAdapter) CreateActor(codeID cid.Cid, addr address.Address) { + if !builtin.IsBuiltinActor(codeID) { + runtime.Abortf(exitcode.SysErrorIllegalArgument, "Can only create built-in actors.") + } + + vmlog.Debugf("creating actor, friendly-name: %s, code: %s, addr: %s\n", builtin.ActorNameByCode(codeID), codeID, addr) + + // Check existing address. If nothing there, create empty actor. 
+ // + // Note: we are storing the actors by ActorID *address* + _, found, err := a.ctx.vm.State.GetActor(a.ctx.vm.context, addr) + if err != nil { + panic(err) + } + if found { + runtime.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists") + } + + // Charge gas now that easy checks are done + + a.ctx.gasTank.Charge(a.ctx.vm.pricelist.OnCreateActor(), "CreateActor code %s, address %s", codeID, addr) + + newActor := &types.Actor{ + // make this the right 'type' of actor + Code: codeID, + Balance: abi.NewTokenAmount(0), + Head: EmptyObjectCid, + Nonce: 0, + } + if err := a.ctx.vm.State.SetActor(a.ctx.vm.context, addr, newActor); err != nil { + panic(err) + } + + _ = a.ctx.gasTank.TryCharge(gasOnActorExec) +} + +// DeleteActor implements Runtime. +func (a *runtimeAdapter) DeleteActor(beneficiary address.Address) { + a.ctx.DeleteActor(beneficiary) +} + +func (a *runtimeAdapter) TotalFilCircSupply() abi.TokenAmount { + circSupply, err := a.stateView.TotalFilCircSupply(a.CurrEpoch(), a.ctx.vm.State) + if err != nil { + runtime.Abortf(exitcode.ErrIllegalState, "failed To get total circ supply: %s", err) + } + return circSupply +} + +// Context implements Runtime. +// Dragons: this can disappear once we have the storage abstraction +func (a *runtimeAdapter) Context() context.Context { + return a.ctx.vm.context +} + +var nullTraceSpan = func() {} + +// StartSpan implements Runtime. 
+func (a *runtimeAdapter) StartSpan(name string) func() { + // Dragons: leeave empty for now, add TODO To add this into gfc + return nullTraceSpan +} + +func (a *runtimeAdapter) AbortStateMsg(msg string) { + runtime.Abortf(101, msg) +} + +func (a *runtimeAdapter) BaseFee() abi.TokenAmount { + return a.ctx.vm.vmOption.BaseFee +} diff --git a/pkg/vm/vmcontext/syscalls.go b/pkg/vm/vmcontext/syscalls.go new file mode 100644 index 0000000000..4e1831fd2d --- /dev/null +++ b/pkg/vm/vmcontext/syscalls.go @@ -0,0 +1,147 @@ +package vmcontext + +import ( + "context" + goruntime "runtime" + "sync" + + cbornode "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/ipfs/go-cid" + + rt0 "github.com/filecoin-project/specs-actors/actors/runtime" + rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + rt3 "github.com/filecoin-project/specs-actors/v3/actors/runtime" + rt4 "github.com/filecoin-project/specs-actors/v4/actors/runtime" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + rt6 "github.com/filecoin-project/specs-actors/v6/actors/runtime" + rt7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + + "github.com/filecoin-project/venus/pkg/crypto" + vmState "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" +) + +type SyscallsStateView interface { + ResolveToKeyAddr(ctx context.Context, address address.Address) (address.Address, error) + MinerInfo(ctx context.Context, maddr address.Address, nv network.Version) (*miner.MinerInfo, error) + TotalFilCircSupply(height abi.ChainEpoch, st vmState.Tree) (abi.TokenAmount, error) + GetNetworkVersion(ctx context.Context, ce abi.ChainEpoch) network.Version +} + +// 
Syscall implementation interface. +// These methods take the chain epoch and other context that is implicit in the runtime as explicit parameters. +type SyscallsImpl interface { + VerifySignature(ctx context.Context, view SyscallsStateView, signature crypto.Signature, signer address.Address, plaintext []byte) error + HashBlake2b(data []byte) [32]byte + ComputeUnsealedSectorCID(ctx context.Context, proof7 abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) + VerifySeal(ctx context.Context, info proof7.SealVerifyInfo) error + BatchVerifySeals(ctx context.Context, vis map[address.Address][]proof7.SealVerifyInfo) (map[address.Address][]bool, error) + VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) error + VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error + VerifyPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) error + VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, curEpoch abi.ChainEpoch, msg VmMessage, gasIpld cbornode.IpldStore, view SyscallsStateView, getter LookbackStateGetter) (*rt7.ConsensusFault, error) +} + +type syscalls struct { + impl SyscallsImpl + vm *LegacyVM + gasBlockStore cbornode.IpldStore + vmMsg VmMessage + gasTank *gas.GasTracker + pricelist gas.Pricelist + stateView SyscallsStateView +} + +var ( + _ rt0.Syscalls = (*syscalls)(nil) + _ rt2.Syscalls = (*syscalls)(nil) + _ rt3.Syscalls = (*syscalls)(nil) + _ rt4.Syscalls = (*syscalls)(nil) + _ rt5.Syscalls = (*syscalls)(nil) + _ rt6.Syscalls = (*syscalls)(nil) + _ rt7.Syscalls = (*syscalls)(nil) +) + +func (sys syscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { + charge, err := sys.pricelist.OnVerifySignature(signature.Type, len(plaintext)) + if err != nil { + return err + } + sys.gasTank.Charge(charge, "VerifySignature") + return sys.impl.VerifySignature(sys.vm.context, sys.stateView, signature, signer, plaintext) +} + +func (sys syscalls) HashBlake2b(data []byte) [32]byte 
{ + sys.gasTank.Charge(sys.pricelist.OnHashing(len(data)), "HashBlake2b") + return sys.impl.HashBlake2b(data) +} + +func (sys syscalls) ComputeUnsealedSectorCID(proof abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { + sys.gasTank.Charge(sys.pricelist.OnComputeUnsealedSectorCid(proof, pieces), "ComputeUnsealedSectorCID") + return sys.impl.ComputeUnsealedSectorCID(sys.vm.context, proof, pieces) +} + +func (sys syscalls) VerifySeal(info proof7.SealVerifyInfo) error { + sys.gasTank.Charge(sys.pricelist.OnVerifySeal(info), "VerifySeal") + return sys.impl.VerifySeal(sys.vm.context, info) +} + +func (sys syscalls) VerifyPoSt(info proof7.WindowPoStVerifyInfo) error { + sys.gasTank.Charge(sys.pricelist.OnVerifyPost(info), "VerifyWindowPoSt") + return sys.impl.VerifyPoSt(sys.vm.context, info) +} + +func (sys syscalls) VerifyConsensusFault(h1, h2, extra []byte) (*rt7.ConsensusFault, error) { + sys.gasTank.Charge(sys.pricelist.OnVerifyConsensusFault(), "VerifyConsensusFault") + return sys.impl.VerifyConsensusFault(sys.vm.context, h1, h2, extra, sys.vm.currentEpoch, sys.vmMsg, sys.gasBlockStore, sys.stateView, sys.vm.vmOption.LookbackStateGetter) +} + +var BatchSealVerifyParallelism = 2 * goruntime.NumCPU() + +func (sys syscalls) BatchVerifySeals(vis map[address.Address][]proof7.SealVerifyInfo) (map[address.Address][]bool, error) { + out := make(map[address.Address][]bool) + + sema := make(chan struct{}, BatchSealVerifyParallelism) + vmlog.Info("BatchVerifySeals miners:", len(vis)) + var wg sync.WaitGroup + for addr, seals := range vis { + results := make([]bool, len(seals)) + out[addr] = results + + for i, s := range seals { + wg.Add(1) + go func(ma address.Address, ix int, svi proof7.SealVerifyInfo, res []bool) { + defer wg.Done() + sema <- struct{}{} + + if err := sys.VerifySeal(svi); err != nil { + vmlog.Warnw("seal verify in batch failed", "miner", ma, "index", ix, "err", err) + res[ix] = false + } else { + res[ix] = true + } + + <-sema + }(addr, i, s, 
results) + } + } + wg.Wait() + vmlog.Info("BatchVerifySeals Result miners:", len(out)) + return out, nil +} + +func (sys *syscalls) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) error { + sys.gasTank.Charge(sys.pricelist.OnVerifyAggregateSeals(aggregate), "VerifyAggregateSeals") + return sys.impl.VerifyAggregateSeals(aggregate) +} + +func (sys *syscalls) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error { + sys.gasTank.Charge(sys.pricelist.OnVerifyReplicaUpdate(update), "OnVerifyReplicaUpdate") + return sys.impl.VerifyReplicaUpdate(update) +} diff --git a/pkg/vm/vmcontext/syscallsStateView.go b/pkg/vm/vmcontext/syscallsStateView.go new file mode 100644 index 0000000000..17b86735d3 --- /dev/null +++ b/pkg/vm/vmcontext/syscallsStateView.go @@ -0,0 +1,80 @@ +package vmcontext + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/account" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/pkg/errors" +) + +// implement syscalls stateView view +type syscallsStateView struct { + ctx *invocationContext + *LegacyVM +} + +func newSyscallsStateView(ctx *invocationContext, VM *LegacyVM) *syscallsStateView { + return &syscallsStateView{ctx: ctx, LegacyVM: VM} +} + +// ResolveToKeyAddr returns the public key type of address (`BLS`/`SECP256K1`) of an account actor identified by `addr`. +func (vm *syscallsStateView) ResolveToKeyAddr(ctx context.Context, accountAddr address.Address) (address.Address, error) { + // Short-circuit when given a pubkey address. 
+ if accountAddr.Protocol() == address.SECP256K1 || accountAddr.Protocol() == address.BLS { + return accountAddr, nil + } + accountActor, found, err := vm.State.GetActor(vm.context, accountAddr) + if err != nil { + return address.Undef, errors.Wrapf(err, "signer resolution failed To find actor %s", accountAddr) + } + if !found { + return address.Undef, fmt.Errorf("signer resolution found no such actor %s", accountAddr) + } + accountState, err := account.Load(adt.WrapStore(vm.context, vm.ctx.gasIpld), accountActor) + if err != nil { + // This error is internal, shouldn't propagate as on-chain failure + panic(fmt.Errorf("signer resolution failed To lost stateView for %s ", accountAddr)) + } + + return accountState.PubkeyAddress() +} + +// MinerInfo get miner info +func (vm *syscallsStateView) MinerInfo(ctx context.Context, maddr address.Address, nv network.Version) (*miner.MinerInfo, error) { + accountActor, found, err := vm.State.GetActor(vm.context, maddr) + if err != nil { + return nil, errors.Wrapf(err, "miner resolution failed To find actor %s", maddr) + } + if !found { + return nil, fmt.Errorf("miner resolution found no such actor %s", maddr) + } + + accountState, err := miner.Load(adt.WrapStore(vm.context, vm.ctx.gasIpld), accountActor) + if err != nil { + panic(fmt.Errorf("signer resolution failed To lost stateView for %s ", maddr)) + } + + minerInfo, err := accountState.Info() + if err != nil { + panic(fmt.Errorf("failed To get miner info %s ", maddr)) + } + + return &minerInfo, nil +} + +// GetNetworkVersion get network version +func (vm *syscallsStateView) GetNetworkVersion(ctx context.Context, ce abi.ChainEpoch) network.Version { + return vm.vmOption.NetworkVersion +} + +// GetNetworkVersion get network version +func (vm *syscallsStateView) TotalFilCircSupply(height abi.ChainEpoch, st tree.Tree) (abi.TokenAmount, error) { + return vm.GetCircSupply(context.TODO()) +} diff --git a/pkg/vm/vmcontext/testing_syscalls.go b/pkg/vm/vmcontext/testing_syscalls.go 
new file mode 100644 index 0000000000..ad6955b0fe --- /dev/null +++ b/pkg/vm/vmcontext/testing_syscalls.go @@ -0,0 +1,51 @@ +package vmcontext + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + rt7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + + "github.com/ipfs/go-cid" + "github.com/minio/blake2b-simd" + + "github.com/filecoin-project/venus/pkg/crypto" +) + +type FakeSyscalls struct{} + +func (f FakeSyscalls) VerifySignature(ctx context.Context, view SyscallsStateView, signature crypto.Signature, signer address.Address, plaintext []byte) error { + // The signer is assumed To be already resolved To a pubkey address. + return crypto.Verify(&signature, signer, plaintext) +} + +func (f FakeSyscalls) HashBlake2b(data []byte) [32]byte { + return blake2b.Sum256(data) +} + +func (f FakeSyscalls) ComputeUnsealedSectorCID(context.Context, abi.RegisteredSealProof, []abi.PieceInfo) (cid.Cid, error) { + panic("implement me") +} + +func (f FakeSyscalls) VerifySeal(ctx context.Context, info proof7.SealVerifyInfo) error { + panic("implement me") +} + +func (f FakeSyscalls) BatchVerifySeals(ctx context.Context, vis map[address.Address][]proof7.SealVerifyInfo) (map[address.Address][]bool, error) { + panic("implement me") +} + +func (f FakeSyscalls) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) error { + panic("implement me") +} + +func (f FakeSyscalls) VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) error { + panic("implement me") +} + +func (f FakeSyscalls) VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, view SyscallsStateView) (*rt7.ConsensusFault, error) { + panic("implement me") +} diff --git a/pkg/vm/vmcontext/teststoreage.go b/pkg/vm/vmcontext/teststoreage.go new file mode 100644 index 0000000000..e78767d094 --- /dev/null +++ 
b/pkg/vm/vmcontext/teststoreage.go @@ -0,0 +1,48 @@ +package vmcontext + +import ( + "bytes" + + "github.com/filecoin-project/go-state-types/cbor" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + "github.com/filecoin-project/venus/pkg/constants" + "github.com/ipfs/go-cid" + cborUtil "github.com/ipfs/go-ipld-cbor" +) + +// TestStorage is a fake storage used for testing. +type TestStorage struct { + state interface{} +} + +// NewTestStorage returns a new "TestStorage" +func NewTestStorage(state interface{}) *TestStorage { + return &TestStorage{ + state: state, + } +} + +var _ rt5.Store = (*TestStorage)(nil) + +// Put implements runtime.Store. +func (ts *TestStorage) StorePut(v cbor.Marshaler) cid.Cid { + ts.state = v + buf := new(bytes.Buffer) + err := v.MarshalCBOR(buf) + if err == nil { + return cid.NewCidV1(cid.Raw, buf.Bytes()) + } + panic("failed to encode") +} + +// Get implements runtime.Store. +func (ts *TestStorage) StoreGet(cid cid.Cid, obj cbor.Unmarshaler) bool { + node, err := cborUtil.WrapObject(ts.state, constants.DefaultHashFunction, -1) + if err != nil { + return false + } + + err = obj.UnmarshalCBOR(bytes.NewReader(node.RawData())) + + return err == nil +} diff --git a/pkg/vm/vmcontext/types.go b/pkg/vm/vmcontext/types.go new file mode 100644 index 0000000000..4798de18c2 --- /dev/null +++ b/pkg/vm/vmcontext/types.go @@ -0,0 +1,89 @@ +package vmcontext + +import ( + "context" + "time" + + acrypto "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/venus/pkg/state" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/state/tree" + 
"github.com/filecoin-project/venus/pkg/vm/dispatch" + "github.com/filecoin-project/venus/pkg/vm/gas" +) + +type ( + ExecCallBack func(cid.Cid, *types.Message, *Ret) error + CircSupplyCalculator func(context.Context, abi.ChainEpoch, tree.Tree) (abi.TokenAmount, error) + LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.View, error) +) + +type VmOption struct { //nolint + CircSupplyCalculator CircSupplyCalculator + LookbackStateGetter LookbackStateGetter + NetworkVersion network.Version + Rnd HeadChainRandomness + BaseFee abi.TokenAmount + Fork fork.IFork + ActorCodeLoader *dispatch.CodeLoader + Epoch abi.ChainEpoch + GasPriceSchedule *gas.PricesSchedule + PRoot cid.Cid + Bsstore blockstoreutil.Blockstore + SysCallsImpl SyscallsImpl + Tracing bool +} + +// ChainRandomness define randomness method in filecoin +type ILookBack interface { + StateView(ctx context.Context, ts *types.TipSet) (*state.View, error) + GetLookbackTipSetForRound(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch, version network.Version) (*types.TipSet, cid.Cid, error) +} + +func LookbackStateGetterForTipset(ctx context.Context, backer ILookBack, fork fork.IFork, ts *types.TipSet) LookbackStateGetter { + return func(ctx context.Context, round abi.ChainEpoch) (*state.View, error) { + ver := fork.GetNetworkVersion(ctx, round) + ts, _, err := backer.GetLookbackTipSetForRound(ctx, ts, round, ver) + if err != nil { + return nil, err + } + return backer.StateView(ctx, ts) + } +} + +// ChainRandomness define randomness method in filecoin +type HeadChainRandomness interface { + ChainGetRandomnessFromBeacon(ctx context.Context, personalization acrypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) + ChainGetRandomnessFromTickets(ctx context.Context, personalization acrypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) +} + +type Ret struct { + GasTracker *gas.GasTracker + OutPuts gas.GasOutputs + 
Receipt types.MessageReceipt + ActorErr error + Duration time.Duration +} + +// Failure returns with a non-zero exit code. +func Failure(exitCode exitcode.ExitCode, gasAmount int64) types.MessageReceipt { + return types.MessageReceipt{ + ExitCode: exitCode, + Return: []byte{}, + GasUsed: gasAmount, + } +} + +type Interface interface { + ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*Ret, error) + ApplyImplicitMessage(ctx context.Context, msg types.ChainMsg) (*Ret, error) + Flush(ctx context.Context) (cid.Cid, error) +} diff --git a/pkg/vm/vmcontext/vmcontext.go b/pkg/vm/vmcontext/vmcontext.go new file mode 100644 index 0000000000..7bf07caca5 --- /dev/null +++ b/pkg/vm/vmcontext/vmcontext.go @@ -0,0 +1,743 @@ +package vmcontext + +import ( + "context" + "fmt" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/venus/pkg/constants" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/pkg/errors" + + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm/dispatch" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/pkg/vm/runtime" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + initActor "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/reward" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" +) + +const 
MaxCallDepth = 4096 + +var vmlog = logging.Logger("vm.context") + +// LegacyVM holds the stateView and executes messages over the stateView. +type LegacyVM struct { + context context.Context + actorImpls ActorImplLookup + bsstore *blockstoreutil.BufferedBS + store cbor.IpldStore + + currentEpoch abi.ChainEpoch + pricelist gas.Pricelist + + debugger *VMDebugMsg // nolint + vmOption VmOption + + baseCircSupply abi.TokenAmount + + State tree.Tree +} + +func (vm *LegacyVM) ApplyImplicitMessage(ctx context.Context, msg types.ChainMsg) (*Ret, error) { + unsignedMsg := msg.VMMessage() + + imsg := VmMessage{ + From: unsignedMsg.From, + To: unsignedMsg.To, + Value: unsignedMsg.Value, + Method: unsignedMsg.Method, + Params: unsignedMsg.Params, + } + + return vm.applyImplicitMessage(imsg) +} + +// ActorImplLookup provides access To upgradeable actor code. +type ActorImplLookup interface { + GetActorImpl(code cid.Cid, rt runtime.Runtime) (dispatch.Dispatcher, *dispatch.ExcuteError) +} + +// implement VMInterpreter for LegacyVM +var _ VMInterpreter = (*LegacyVM)(nil) + +var _ Interface = (*LegacyVM)(nil) + +// NewLegacyVM creates a new runtime for executing messages. 
+// Dragons: change To take a root and the store, build the tree internally +func NewLegacyVM(ctx context.Context, actorImpls ActorImplLookup, vmOption VmOption) (*LegacyVM, error) { + buf := blockstoreutil.NewBufferedBstore(vmOption.Bsstore) + cst := cbor.NewCborStore(buf) + var st tree.Tree + var err error + if vmOption.PRoot == cid.Undef { + // just for chain gen + st, err = tree.NewState(cst, tree.StateTreeVersion1) + if err != nil { + panic(fmt.Errorf("create state error, should never come here")) + } + } else { + st, err = tree.LoadState(context.Background(), cst, vmOption.PRoot) + if err != nil { + return nil, err + } + } + + baseCirc, err := vmOption.CircSupplyCalculator(ctx, vmOption.Epoch, st) + if err != nil { + return nil, err + } + + return &LegacyVM{ + context: ctx, + actorImpls: actorImpls, + bsstore: buf, + store: cst, + State: st, + vmOption: vmOption, + baseCircSupply: baseCirc, + pricelist: vmOption.GasPriceSchedule.PricelistByEpoch(vmOption.Epoch), + currentEpoch: vmOption.Epoch, + }, nil +} + +// nolint +func (vm *LegacyVM) setDebugger() { + vm.debugger = NewVMDebugMsg() +} + +// ApplyGenesisMessage forces the execution of a message in the vm actor. +// +// This Method is intended To be used in the generation of the genesis block only. +func (vm *LegacyVM) ApplyGenesisMessage(from address.Address, to address.Address, method abi.MethodNum, value abi.TokenAmount, params interface{}) (*Ret, error) { + // normalize From addr + var ok bool + if from, ok = vm.normalizeAddress(from); !ok { + runtime.Abort(exitcode.SysErrSenderInvalid) + } + + // build internal message + imsg := VmMessage{ + From: from, + To: to, + Value: value, + Method: method, + Params: params, + } + + ret, err := vm.applyImplicitMessage(imsg) + if err != nil { + return ret, err + } + + // commit + if _, err := vm.Flush(vm.context); err != nil { + return nil, err + } + + return ret, nil +} + +// ContextStore provides access To specs-actors adt library. 
+// +// This type of store is used To access some internal actor stateView. +func (vm *LegacyVM) ContextStore() adt.Store { + return adt.WrapStore(vm.context, vm.store) +} + +func (vm *LegacyVM) normalizeAddress(addr address.Address) (address.Address, bool) { + // short-circuit if the address is already an ID address + if addr.Protocol() == address.ID { + return addr, true + } + + // resolve the target address via the InitActor, and attempt To load stateView. + initActorEntry, found, err := vm.State.GetActor(vm.context, initActor.Address) + if err != nil { + panic(errors.Wrapf(err, "failed To load init actor")) + } + if !found { + panic(errors.Wrapf(err, "no init actor")) + } + + // get a view into the actor stateView + initActorState, err := initActor.Load(adt.WrapStore(vm.context, vm.store), initActorEntry) + if err != nil { + panic(err) + } + + idAddr, found, err := initActorState.ResolveAddress(addr) + if !found { + return address.Undef, false + } + if err != nil { + panic(err) + } + return idAddr, true +} + +// applyImplicitMessage applies messages automatically generated by the vm itself. +// +// This messages do not consume client gas and must not fail. +func (vm *LegacyVM) applyImplicitMessage(imsg VmMessage) (*Ret, error) { + // implicit messages gas is tracked separatly and not paid by the miner + gasTank := gas.NewGasTracker(constants.BlockGasLimit * 10000) + + // the execution of the implicit messages is simpler than full external/actor-actor messages + // execution: + // 1. load From actor + // 2. build new context + // 3. invoke message + + // 1. load From actor + fromActor, found, err := vm.State.GetActor(vm.context, imsg.From) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("implicit message `From` field actor not found, addr: %s", imsg.From) + } + // 2. 
build context + topLevel := topLevelContext{ + originatorStableAddress: imsg.From, + originatorCallSeq: fromActor.Nonce, // Implied Nonce is that of the actor before incrementing. + newActorAddressCount: 0, + } + + gasBsstore := &GasChargeBlockStore{ + inner: vm.bsstore, + pricelist: vm.pricelist, + gasTank: gasTank, + } + cst := cbor.NewCborStore(gasBsstore) + ctx := newInvocationContext(vm, cst, &topLevel, imsg, gasTank, vm.vmOption.Rnd, nil) + + // 3. invoke message + ret, code := ctx.invoke() + if code.IsError() { + return nil, fmt.Errorf("invalid exit code %d during implicit message execution: From %s, To %s, Method %d, Value %s, Params %v", + code, imsg.From, imsg.To, imsg.Method, imsg.Value, imsg.Params) + } + return &Ret{ + GasTracker: gasTank, + OutPuts: gas.GasOutputs{}, + Receipt: types.MessageReceipt{ + ExitCode: code, + Return: ret, + GasUsed: 0, + }, + }, nil +} + +// Get the buffered blockstore associated with the LegacyVM. This includes any temporary blocks produced +// during this LegacyVM's execution. +func (vm *LegacyVM) ActorStore(ctx context.Context) adt.Store { + return adt.WrapStore(ctx, vm.store) +} + +// todo estimate gasLimit +func (vm *LegacyVM) ApplyMessage(ctx context.Context, msg types.ChainMsg) (*Ret, error) { + return vm.applyMessage(msg.VMMessage(), msg.ChainLength()) +} + +// applyMessage applies the message To the current stateView. +func (vm *LegacyVM) applyMessage(msg *types.Message, onChainMsgSize int) (*Ret, error) { + // This Method does not actually execute the message itself, + // but rather deals with the pre/post processing of a message. + // (see: `invocationContext.invoke()` for the dispatch and execution) + // initiate gas tracking + gasTank := gas.NewGasTracker(msg.GasLimit) + // pre-send + // 1. charge for message existence + // 2. load sender actor + // 3. check message seq number + // 4. check sender gas fee is enough + // 5. increment message seq number + // 6. withheld maximum gas From _sender_ + // 7. 
snapshot stateView + + // 1. charge for bytes used in chain + msgGasCost := vm.pricelist.OnChainMessage(onChainMsgSize) // todo get price list by height + ok := gasTank.TryCharge(msgGasCost) + if !ok { + gasOutputs := gas.ZeroGasOutputs() + gasOutputs.MinerPenalty = big.Mul(vm.vmOption.BaseFee, big.NewInt(msgGasCost.Total())) + // Invalid message; insufficient gas limit To pay for the on-chain message size. + // Note: the miner needs To pay the full msg cost, not what might have been partially consumed + return &Ret{ + GasTracker: gasTank, + OutPuts: gasOutputs, + Receipt: Failure(exitcode.SysErrOutOfGas, 0), + }, nil + } + + minerPenaltyAmount := big.Mul(vm.vmOption.BaseFee, big.NewInt(msg.GasLimit)) + + // 2. load sender actor and check send whether to be an account + fromActor, found, err := vm.State.GetActor(vm.context, msg.From) + if err != nil { + return nil, err + } + if !found { + // Execution error; sender does not exist at time of message execution. + gasOutputs := gas.ZeroGasOutputs() + gasOutputs.MinerPenalty = minerPenaltyAmount + return &Ret{ + GasTracker: gasTank, + OutPuts: gasOutputs, + Receipt: Failure(exitcode.SysErrSenderInvalid, 0), + }, nil + } + + if !builtin.IsAccountActor(fromActor.Code) /*!fromActor.Code.Equals(builtin.AccountActorCodeID)*/ { + // Execution error; sender is not an account. + gasOutputs := gas.ZeroGasOutputs() + gasOutputs.MinerPenalty = minerPenaltyAmount + return &Ret{ + GasTracker: gasTank, + OutPuts: gasOutputs, + Receipt: Failure(exitcode.SysErrSenderInvalid, 0), + }, nil + } + + // 3. make sure this is the right message order for fromActor + if msg.Nonce != fromActor.Nonce { + // Execution error; invalid seq number. + gasOutputs := gas.ZeroGasOutputs() + gasOutputs.MinerPenalty = minerPenaltyAmount + return &Ret{ + GasTracker: gasTank, + OutPuts: gasOutputs, + Receipt: Failure(exitcode.SysErrSenderStateInvalid, 0), + }, nil + } + + // 4. 
Check sender gas fee is enough + gasLimitCost := big.Mul(big.NewIntUnsigned(uint64(msg.GasLimit)), msg.GasFeeCap) + if fromActor.Balance.LessThan(gasLimitCost) { + // Execution error; sender does not have sufficient funds To pay for the gas limit. + gasOutputs := gas.ZeroGasOutputs() + gasOutputs.MinerPenalty = minerPenaltyAmount + return &Ret{ + GasTracker: gasTank, + OutPuts: gasOutputs, + Receipt: Failure(exitcode.SysErrSenderStateInvalid, 0), + }, nil + } + + gasHolder := &types.Actor{Balance: big.NewInt(0)} + if err := vm.transferToGasHolder(msg.From, gasHolder, gasLimitCost); err != nil { + return nil, fmt.Errorf("failed To withdraw gas funds: %w", err) + } + + // 5. increment sender Nonce + if err = vm.State.MutateActor(msg.From, func(msgFromActor *types.Actor) error { + msgFromActor.IncrementSeqNum() + return nil + }); err != nil { + return nil, err + } + + // 7. snapshot stateView + // Even if the message fails, the following accumulated changes will be applied: + // - CallSeqNumber increment + // - sender balance withheld + err = vm.snapshot() + if err != nil { + return nil, err + } + defer vm.clearSnapshot() + + // send + // 1. build internal message + // 2. build invocation context + // 3. process the msg + topLevel := topLevelContext{ + originatorStableAddress: msg.From, + originatorCallSeq: msg.Nonce, + newActorAddressCount: 0, + } + + // 1. build internal msg + imsg := VmMessage{ + From: msg.From, + To: msg.To, + Value: msg.Value, + Method: msg.Method, + Params: msg.Params, + } + + // 2. build invocation context + gasBsstore := &GasChargeBlockStore{ + inner: vm.bsstore, + pricelist: vm.pricelist, + gasTank: gasTank, + } + cst := cbor.NewCborStore(gasBsstore) + // cst.Atlas = vm.store.Atlas // associate the atlas. //todo + + // Note replace from and to address here + ctx := newInvocationContext(vm, cst, &topLevel, imsg, gasTank, vm.vmOption.Rnd, nil) + + // 3. invoke + ret, code := ctx.invoke() + // post-send + // 1. 
charge gas for putting the return Value on the chain + // 2. settle gas money around (unused_gas -> sender) + // 3. success! + + // 1. charge for the space used by the return Value + // Note: the GasUsed in the message receipt does not + ok = gasTank.TryCharge(vm.pricelist.OnChainReturnValue(len(ret))) + if !ok { + // Insufficient gas remaining To cover the on-chain return Value; proceed as in the case + // of Method execution failure. + code = exitcode.SysErrOutOfGas + ret = []byte{} + } + + // Roll back all stateView if the receipt's exit code is not ok. + // This is required in addition To revert within the invocation context since top level messages can fail for + // more reasons than internal ones. Invocation context still needs its own revert so actors can recover and + // proceed From a nested call failure. + if code != exitcode.Ok { + if err := vm.revert(); err != nil { + return nil, err + } + } + + // 2. settle gas money around (unused_gas -> sender) + gasUsed := gasTank.GasUsed + if gasUsed < 0 { + gasUsed = 0 + } + + burn, err := vm.shouldBurn(vm.context, msg, code) + if err != nil { + return nil, fmt.Errorf("deciding whether should burn failed: %w", err) + } + + gasOutputs := gas.ComputeGasOutputs(gasUsed, msg.GasLimit, vm.vmOption.BaseFee, msg.GasFeeCap, msg.GasPremium, burn) + + if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder, gasOutputs.BaseFeeBurn); err != nil { + return nil, fmt.Errorf("failed To burn base fee: %w", err) + } + + if err := vm.transferFromGasHolder(reward.Address, gasHolder, gasOutputs.MinerTip); err != nil { + return nil, fmt.Errorf("failed To give miner gas reward: %w", err) + } + + if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder, gasOutputs.OverEstimationBurn); err != nil { + return nil, fmt.Errorf("failed To burn overestimation fee: %w", err) + } + + // refund unused gas + if err := vm.transferFromGasHolder(msg.From, gasHolder, gasOutputs.Refund); err != nil { + return nil, 
fmt.Errorf("failed To refund gas: %w", err) + } + + if big.Cmp(big.NewInt(0), gasHolder.Balance) != 0 { + return nil, fmt.Errorf("gas handling math is wrong") + } + + // 3. Success! + return &Ret{ + GasTracker: gasTank, + OutPuts: gasOutputs, + Receipt: types.MessageReceipt{ + ExitCode: code, + Return: ret, + GasUsed: gasUsed, + }, + }, nil +} + +func (vm *LegacyVM) shouldBurn(ctx context.Context, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { + if vm.NetworkVersion() <= network.Version12 { + // Check to see if we should burn funds. We avoid burning on successful + // window post. This won't catch _indirect_ window post calls, but this + // is the best we can get for now. + if vm.currentEpoch > vm.vmOption.Fork.GetForkUpgrade().UpgradeClausHeight && errcode == exitcode.Ok && msg.Method == builtintypes.MethodsMiner.SubmitWindowedPoSt { + // Ok, we've checked the _method_, but we still need to check + // the target actor. It would be nice if we could just look at + // the trace, but I'm not sure if that's safe? + if toActor, _, err := vm.State.GetActor(vm.context, msg.To); err != nil { + // If the actor wasn't found, we probably deleted it or something. Move on. + if !errors.Is(err, types.ErrActorNotFound) { + // Otherwise, this should never fail and something is very wrong. + return false, fmt.Errorf("failed to lookup target actor: %w", err) + } + } else if builtin.IsStorageMinerActor(toActor.Code) { + // Ok, this is a storage miner and we've processed a window post. Remove the burn. + return false, nil + } + } + + return true, nil + } + + // Any "don't burn" rules from Network v13 onwards go here, for now we always return true + return true, nil +} + +// transfer debits money From one account and credits it To another. +// avoid calling this Method with a zero amount else it will perform unnecessary actor loading. +// +// WARNING: this Method will panic if the the amount is negative, accounts dont exist, or have inssuficient funds. 
+// +// Note: this is not idiomatic, it follows the Spec expectations for this Method. +func (vm *LegacyVM) transfer(from address.Address, to address.Address, amount abi.TokenAmount, networkVersion network.Version) { + var fromActor *types.Actor + var fromID, toID address.Address + var err error + var found bool + // switching the order around so that transactions for more than the balance sent to self fail + if networkVersion >= network.Version15 { + if amount.LessThan(big.Zero()) { + runtime.Abortf(exitcode.SysErrForbidden, "attempt To transfer negative Value %s From %s To %s", amount, from, to) + } + + fromID, err = vm.State.LookupID(from) + if err != nil { + panic(fmt.Errorf("transfer failed when resolving sender address: %s", err)) + } + + // retrieve sender account + fromActor, found, err = vm.State.GetActor(vm.context, fromID) + if err != nil { + panic(err) + } + if !found { + panic(fmt.Errorf("unreachable: sender account not found. %s", err)) + } + + // check that account has enough balance for transfer + if fromActor.Balance.LessThan(amount) { + runtime.Abortf(exitcode.SysErrInsufficientFunds, "sender %s insufficient balance %s To transfer %s To %s", amount, fromActor.Balance, from, to) + } + + if from == to { + vmlog.Infow("sending to same address: noop", "from/to addr", from) + return + } + + toID, err = vm.State.LookupID(to) + if err != nil { + panic(fmt.Errorf("transfer failed when resolving receiver address: %s", err)) + } + + if fromID == toID { + vmlog.Infow("sending to same actor ID: noop", "from/to actor", fromID) + return + } + } else { + if from == to { + return + } + + fromID, err = vm.State.LookupID(from) + if err != nil { + panic(fmt.Errorf("transfer failed when resolving sender address: %s", err)) + } + + toID, err = vm.State.LookupID(to) + if err != nil { + panic(fmt.Errorf("transfer failed when resolving receiver address: %s", err)) + } + + if fromID == toID { + return + } + + if amount.LessThan(types.NewInt(0)) { + 
runtime.Abortf(exitcode.SysErrForbidden, "attempt To transfer negative Value %s From %s To %s", amount, from, to) + } + + // retrieve sender account + fromActor, found, err = vm.State.GetActor(vm.context, fromID) + if err != nil { + panic(err) + } + if !found { + panic(fmt.Errorf("unreachable: sender account not found. %s", err)) + } + } + + // retrieve receiver account + toActor, found, err := vm.State.GetActor(vm.context, toID) + if err != nil { + panic(err) + } + if !found { + panic(fmt.Errorf("unreachable: credit account not found. %s", err)) + } + + // check that account has enough balance for transfer + if fromActor.Balance.LessThan(amount) { + runtime.Abortf(exitcode.SysErrInsufficientFunds, "sender %s insufficient balance %s To transfer %s To %s", amount, fromActor.Balance, from, to) + } + + // deduct funds + fromActor.Balance = big.Sub(fromActor.Balance, amount) + if err := vm.State.SetActor(vm.context, from, fromActor); err != nil { + panic(err) + } + + // deposit funds + toActor.Balance = big.Add(toActor.Balance, amount) + if err := vm.State.SetActor(vm.context, to, toActor); err != nil { + panic(err) + } +} + +func (vm *LegacyVM) getActorImpl(code cid.Cid, runtime2 runtime.Runtime) dispatch.Dispatcher { + actorImpl, err := vm.actorImpls.GetActorImpl(code, runtime2) + if err != nil { + runtime.Abort(exitcode.SysErrInvalidReceiver) + } + return actorImpl +} + +// +// implement runtime.Runtime for LegacyVM +// + +var _ runtime.Runtime = (*LegacyVM)(nil) + +// CurrentEpoch implements runtime.Runtime. 
+func (vm *LegacyVM) CurrentEpoch() abi.ChainEpoch { + return vm.currentEpoch +} + +func (vm *LegacyVM) NetworkVersion() network.Version { + return vm.vmOption.NetworkVersion +} + +func (vm *LegacyVM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt abi.TokenAmount) error { + if amt.LessThan(big.NewInt(0)) { + return fmt.Errorf("attempted To transfer negative Value To gas holder") + } + return vm.State.MutateActor(addr, func(a *types.Actor) error { + if err := deductFunds(a, amt); err != nil { + return err + } + depositFunds(gasHolder, amt) + return nil + }) +} + +func (vm *LegacyVM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt abi.TokenAmount) error { + if amt.LessThan(big.NewInt(0)) { + return fmt.Errorf("attempted To transfer negative Value From gas holder") + } + + if amt.Equals(big.NewInt(0)) { + return nil + } + + return vm.State.MutateActor(addr, func(a *types.Actor) error { + if err := deductFunds(gasHolder, amt); err != nil { + return err + } + depositFunds(a, amt) + return nil + }) +} + +func (vm *LegacyVM) StateTree() tree.Tree { + return vm.State +} + +func (vm *LegacyVM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { + // Before v15, this was recalculated on each invocation as the state tree was mutated + if vm.vmOption.NetworkVersion <= network.Version14 { + return vm.vmOption.CircSupplyCalculator(ctx, vm.currentEpoch, vm.State) + } + + return vm.baseCircSupply, nil +} + +func deductFunds(act *types.Actor, amt abi.TokenAmount) error { + if act.Balance.LessThan(amt) { + return fmt.Errorf("not enough funds") + } + + act.Balance = big.Sub(act.Balance, amt) + return nil +} + +func depositFunds(act *types.Actor, amt abi.TokenAmount) { + act.Balance = big.Add(act.Balance, amt) +} + +// +// implement runtime.MessageInfo for VmMessage +// + +var _ rt5.Message = (*VmMessage)(nil) + +type VmMessage struct { //nolint + From address.Address + To address.Address + Value abi.TokenAmount + Method 
abi.MethodNum + Params interface{} +} + +// ValueReceived implements runtime.MessageInfo. +func (msg VmMessage) ValueReceived() abi.TokenAmount { + return msg.Value +} + +// Caller implements runtime.MessageInfo. +func (msg VmMessage) Caller() address.Address { + return msg.From +} + +// Receiver implements runtime.MessageInfo. +func (msg VmMessage) Receiver() address.Address { + return msg.To +} + +func (vm *LegacyVM) revert() error { + return vm.State.Revert() +} + +func (vm *LegacyVM) snapshot() error { + err := vm.State.Snapshot(vm.context) + if err != nil { + return err + } + return nil +} + +func (vm *LegacyVM) clearSnapshot() { + vm.State.ClearSnapshot() +} + +// nolint +func (vm *LegacyVM) Flush(ctx context.Context) (tree.Root, error) { + // Flush all blocks out of the store + if root, err := vm.State.Flush(vm.context); err != nil { + return cid.Undef, err + } else { + if err := blockstoreutil.CopyBlockstore(context.TODO(), vm.bsstore.Write(), vm.bsstore.Read()); err != nil { + return cid.Undef, fmt.Errorf("copying tree: %w", err) + } + return root, nil + } +} diff --git a/pkg/vm/vmcontext/vmdebugger.go b/pkg/vm/vmcontext/vmdebugger.go new file mode 100644 index 0000000000..58d1e3b16d --- /dev/null +++ b/pkg/vm/vmcontext/vmdebugger.go @@ -0,0 +1,36 @@ +package vmcontext + +import ( + "fmt" + "os" + "strings" +) + +// VMDebugMsg for vm debug +type VMDebugMsg struct { + buf *strings.Builder +} + +func NewVMDebugMsg() *VMDebugMsg { + return &VMDebugMsg{buf: &strings.Builder{}} +} + +func (debug *VMDebugMsg) Printfln(msg string, args ...interface{}) { + debug.buf.WriteString(fmt.Sprintf(msg, args...)) + debug.buf.WriteString("\n") +} + +func (debug *VMDebugMsg) Println(args ...interface{}) { + debug.buf.WriteString(fmt.Sprint(args...)) + debug.buf.WriteString("\n") +} + +// WriteToTerminal write debug message to terminal +func (debug *VMDebugMsg) WriteToTerminal() { + fmt.Println(debug.buf.String()) +} + +// WriteToFile write debug message to file +func (debug 
*VMDebugMsg) WriteToFile(fileName string) error { + return os.WriteFile(fileName, []byte(debug.buf.String()), 0o777) +} diff --git a/pkg/vmsupport/syscalls.go b/pkg/vmsupport/syscalls.go new file mode 100644 index 0000000000..a6bc8cbfc8 --- /dev/null +++ b/pkg/vmsupport/syscalls.go @@ -0,0 +1,165 @@ +package vmsupport + +import ( + "context" + "errors" + "fmt" + goruntime "runtime" + "sync" + + "github.com/ipfs/go-cid" + cbornode "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + "github.com/minio/blake2b-simd" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/impl" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" +) + +var log = logging.Logger("vmsupport") + +type faultChecker interface { + VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, curEpoch abi.ChainEpoch, msg vm.VmMessage, gasIpld cbornode.IpldStore, view vm.SyscallsStateView, getter vmcontext.LookbackStateGetter) (*vmr.ConsensusFault, error) +} + +// Syscalls contains the concrete implementation of LegacyVM system calls, including connection to +// proof verification and blockchain inspection. +// Errors returned by these methods are intended to be returned to the actor code to respond to: they must be +// entirely deterministic and repeatable by other implementations. +// Any non-deterministic error will instead trigger a panic. +// TODO: determine a more robust mechanism for distinguishing transient runtime failures from deterministic errors +// in LegacyVM and supporting code. 
https://github.com/filecoin-project/venus/issues/3844 +type Syscalls struct { + faultChecker faultChecker + verifier ffiwrapper.Verifier +} + +func NewSyscalls(faultChecker faultChecker, verifier ffiwrapper.Verifier) *Syscalls { + return &Syscalls{ + faultChecker: faultChecker, + verifier: verifier, + } +} + +func (s *Syscalls) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error { + ok, err := s.verifier.VerifyReplicaUpdate(update) + if err != nil { + return fmt.Errorf("failed to verify replica update: %w", err) + } + + if !ok { + return fmt.Errorf("invalid replica update") + } + + return nil +} + +// VerifySignature Verifies that a signature is valid for an address and plaintext. +func (s *Syscalls) VerifySignature(ctx context.Context, view vm.SyscallsStateView, signature crypto.Signature, signer address.Address, plaintext []byte) error { + return state.NewSignatureValidator(view).ValidateSignature(ctx, plaintext, signer, signature) +} + +// HashBlake2b Hashes input data using blake2b with 256 bit output. +func (s *Syscalls) HashBlake2b(data []byte) [32]byte { + return blake2b.Sum256(data) +} + +// ComputeUnsealedSectorCID Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes. +func (s *Syscalls) ComputeUnsealedSectorCID(_ context.Context, proof abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { + return impl.GenerateUnsealedCID(proof, pieces) +} + +// VerifySeal returns true if the sealing operation from which its inputs were +// derived was valid, and false if not. 
+func (s *Syscalls) VerifySeal(_ context.Context, info proof7.SealVerifyInfo) error { + ok, err := s.verifier.VerifySeal(info) + if err != nil { + return err + } else if !ok { + return fmt.Errorf("seal invalid") + } + return nil +} + +var BatchSealVerifyParallelism = 2 * goruntime.NumCPU() + +// BatchVerifySeals batch verify windows post +func (s *Syscalls) BatchVerifySeals(ctx context.Context, vis map[address.Address][]proof7.SealVerifyInfo) (map[address.Address][]bool, error) { + out := make(map[address.Address][]bool) + + sema := make(chan struct{}, BatchSealVerifyParallelism) + + var wg sync.WaitGroup + for addr, seals := range vis { + results := make([]bool, len(seals)) + out[addr] = results + + for i, seal := range seals { + wg.Add(1) + go func(ma address.Address, ix int, svi proof7.SealVerifyInfo, res []bool) { + defer wg.Done() + sema <- struct{}{} + + if err := s.VerifySeal(ctx, svi); err != nil { + log.Warnw("seal verify in batch failed", "miner", ma, "index", ix, "err", err) + res[ix] = false + } else { + res[ix] = true + } + + <-sema + }(addr, i, seal, results) + } + } + wg.Wait() + + return out, nil +} + +func (s *Syscalls) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) error { + ok, err := s.verifier.VerifyAggregateSeals(aggregate) + if err != nil { + return fmt.Errorf("failed to verify aggregated PoRep: %w", err) + } + if !ok { + return fmt.Errorf("invalid aggregate proof") + } + + return nil +} + +// VerifyPoSt verify windows post +func (s *Syscalls) VerifyPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) error { + ok, err := s.verifier.VerifyWindowPoSt(ctx, info) + if err != nil { + return err + } + if !ok { + return errors.New("window PoSt verification failed") + } + return nil +} + +// Verifies that two block headers provide proof of a consensus fault: +// - both headers mined by the same actor +// - headers are different +// - first header is of the same or lower epoch as the second +// - at least one of the 
headers appears in the current chain at or after epoch `earliest` +// - the headers provide evidence of a fault (see the spec for the different fault types). +// The parameters are all serialized block headers. The third "extra" parameter is consulted only for +// the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the +// blocks in the parent of h2 (i.e. h2's grandparent). +// Returns nil and an error if the headers don't prove a fault. +func (s *Syscalls) VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte, curEpoch abi.ChainEpoch, msg vm.VmMessage, gasIpld cbornode.IpldStore, view vm.SyscallsStateView, getter vmcontext.LookbackStateGetter) (*vmr.ConsensusFault, error) { + return s.faultChecker.VerifyConsensusFault(ctx, h1, h2, extra, curEpoch, msg, gasIpld, view, getter) +} diff --git a/pkg/vmsupport/util.go b/pkg/vmsupport/util.go new file mode 100644 index 0000000000..7002e414a8 --- /dev/null +++ b/pkg/vmsupport/util.go @@ -0,0 +1,19 @@ +package vmsupport + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + cbornode "github.com/ipfs/go-ipld-cbor" + + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" +) + +type NilFaultChecker struct{} + +func (n *NilFaultChecker) VerifyConsensusFault(_ context.Context, _, _, _ []byte, _ abi.ChainEpoch, _ vm.VmMessage, _ cbornode.IpldStore, _ vm.SyscallsStateView, _ vmcontext.LookbackStateGetter) (*rt5.ConsensusFault, error) { + return nil, fmt.Errorf("empty chain cannot have consensus fault") +} diff --git a/pkg/wallet/backend.go b/pkg/wallet/backend.go new file mode 100644 index 0000000000..2f0ea756ca --- /dev/null +++ b/pkg/wallet/backend.go @@ -0,0 +1,43 @@ +package wallet + +import ( + "context" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/venus/pkg/crypto" +) + +// Backend is the 
interface to represent different storage backends +// that can contain many addresses. +type Backend interface { + // Addresses returns a list of all accounts currently stored in this backend. + Addresses(ctx context.Context) []address.Address + + // Contains returns true if this backend stores the passed in address. + HasAddress(context.Context, address.Address) bool + + DeleteAddress(context.Context, address.Address) error + + // Sign cryptographically signs data with the private key associated with an address. + SignBytes(context.Context, []byte, address.Address) (*crypto.Signature, error) + + // GetKeyInfo will return the keyinfo associated with address `addr` + // iff backend contains the addr. + GetKeyInfo(context.Context, address.Address) (*crypto.KeyInfo, error) + + GetKeyInfoPassphrase(context.Context, address.Address, []byte) (*crypto.KeyInfo, error) + + LockWallet(context.Context) error + UnLockWallet(context.Context, []byte) error + WalletState(context.Context) int +} + +// Importer is a specialization of a wallet backend that can import +// new keys into its permanent storage. Disk backed wallets can do this, +// hardware wallets generally cannot. 
+type Importer interface { + // ImportKey imports the key described by the given keyinfo + // into the backend + ImportKey(context.Context, *crypto.KeyInfo) error +} diff --git a/pkg/wallet/dsbackend.go b/pkg/wallet/dsbackend.go new file mode 100644 index 0000000000..0e638f2918 --- /dev/null +++ b/pkg/wallet/dsbackend.go @@ -0,0 +1,379 @@ +package wallet + +import ( + "context" + "crypto/rand" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/awnumar/memguard" + "github.com/filecoin-project/go-address" + ds "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" + "github.com/pborman/uuid" + "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/repo" +) + +const ( + undetermined = iota + + Lock + Unlock +) + +var ( + ErrInvalidPassword = errors.New("password matching failed") + ErrRepeatPassword = errors.New("set password more than once") +) + +// DSBackendType is the reflect type of the DSBackend. +var DSBackendType = reflect.TypeOf(&DSBackend{}) + +// DSBackend is a wallet backend implementation for storing addresses in a datastore. +type DSBackend struct { + lk sync.RWMutex + + // TODO: use a better interface that supports time locks, encryption, etc. + ds repo.Datastore + + cache map[address.Address]struct{} + + PassphraseConf config.PassphraseConfig + + password *memguard.Enclave + unLocked map[address.Address]*crypto.KeyInfo + + state int +} + +var _ Backend = (*DSBackend)(nil) + +// NewDSBackend constructs a new backend using the passed in datastore. 
+func NewDSBackend(ctx context.Context, ds repo.Datastore, passphraseCfg config.PassphraseConfig, password []byte) (*DSBackend, error) { + result, err := ds.Query(ctx, dsq.Query{ + KeysOnly: true, + }) + if err != nil { + return nil, errors.Wrap(err, "failed to query datastore") + } + + list, err := result.Rest() + if err != nil { + return nil, errors.Wrap(err, "failed to read query results") + } + + addrCache := make(map[address.Address]struct{}, len(list)) + for _, el := range list { + parsedAddr, err := address.NewFromString(strings.Trim(el.Key, "/")) + if err != nil { + return nil, errors.Wrapf(err, "trying to restore invalid address: %s", el.Key) + } + addrCache[parsedAddr] = struct{}{} + } + + backend := &DSBackend{ + ds: ds, + cache: addrCache, + PassphraseConf: passphraseCfg, + unLocked: make(map[address.Address]*crypto.KeyInfo, len(addrCache)), + } + + if len(password) != 0 { + if err := backend.SetPassword(ctx, password); err != nil { + return nil, err + } + } + + return backend, nil +} + +// ImportKey loads the address in `ai` and KeyInfo `ki` into the backend +func (backend *DSBackend) ImportKey(ctx context.Context, ki *crypto.KeyInfo) error { + return backend.putKeyInfo(ctx, ki) +} + +// Addresses returns a list of all addresses that are stored in this backend. +func (backend *DSBackend) Addresses(ctx context.Context) []address.Address { + backend.lk.RLock() + defer backend.lk.RUnlock() + + var cpy []address.Address + for addr := range backend.cache { + cpy = append(cpy, addr) + } + return cpy +} + +// HasAddress checks if the passed in address is stored in this backend. +// Safe for concurrent access. +func (backend *DSBackend) HasAddress(ctx context.Context, addr address.Address) bool { + backend.lk.RLock() + defer backend.lk.RUnlock() + + _, ok := backend.cache[addr] + return ok +} + +// NewAddress creates a new address and stores it. +// Safe for concurrent access. 
+func (backend *DSBackend) NewAddress(ctx context.Context, protocol address.Protocol) (address.Address, error) { + backend.lk.Lock() + defer backend.lk.Unlock() + + switch protocol { + case address.BLS: + return backend.newBLSAddress(ctx) + case address.SECP256K1: + return backend.newSecpAddress(ctx) + default: + return address.Undef, errors.Errorf("Unknown address protocol %d", protocol) + } +} + +func (backend *DSBackend) newSecpAddress(ctx context.Context) (address.Address, error) { + ki, err := crypto.NewSecpKeyFromSeed(rand.Reader) + if err != nil { + return address.Undef, err + } + + if err := backend.putKeyInfo(ctx, &ki); err != nil { + return address.Undef, err + } + return ki.Address() +} + +func (backend *DSBackend) newBLSAddress(ctx context.Context) (address.Address, error) { + ki, err := crypto.NewBLSKeyFromSeed(rand.Reader) + if err != nil { + return address.Undef, err + } + + if err := backend.putKeyInfo(ctx, &ki); err != nil { + return address.Undef, err + } + return ki.Address() +} + +func (backend *DSBackend) putKeyInfo(ctx context.Context, ki *crypto.KeyInfo) error { + addr, err := ki.Address() + if err != nil { + return err + } + + key := &Key{ + ID: uuid.NewRandom(), + Address: addr, + KeyInfo: ki, + } + + var keyJSON []byte + err = backend.UsePassword(func(password []byte) error { + var err error + keyJSON, err = encryptKey(key, password, backend.PassphraseConf.ScryptN, backend.PassphraseConf.ScryptP) + return err + }) + if err != nil { + return err + } + + if err := backend.ds.Put(ctx, ds.NewKey(key.Address.String()), keyJSON); err != nil { + return errors.Wrapf(err, "failed to store new address: %s", key.Address.String()) + } + backend.cache[addr] = struct{}{} + backend.unLocked[addr] = ki + return nil +} + +func (backend *DSBackend) DeleteAddress(ctx context.Context, addr address.Address) error { + backend.lk.RLock() + defer backend.lk.RUnlock() + + if _, ok := backend.cache[addr]; ok { + err := backend.ds.Delete(ctx, 
ds.NewKey(addr.String())) + if err != nil { + return err + } + delete(backend.cache, addr) + return nil + } + + return errors.New("backend does not contain address") +} + +// SignBytes cryptographically signs `data` using the private key `priv`. +func (backend *DSBackend) SignBytes(ctx context.Context, data []byte, addr address.Address) (*crypto.Signature, error) { + backend.lk.Lock() + ki, found := backend.unLocked[addr] + backend.lk.Unlock() + if !found { + return nil, errors.Errorf("%s is locked", addr.String()) + } + + var signature *crypto.Signature + err := ki.UsePrivateKey(func(privateKey []byte) error { + var err error + signature, err = crypto.Sign(data, privateKey, ki.SigType) + return err + }) + return signature, err +} + +// GetKeyInfo will return the private & public keys associated with address `addr` +// iff backend contains the addr. +func (backend *DSBackend) GetKeyInfo(ctx context.Context, addr address.Address) (*crypto.KeyInfo, error) { + if !backend.HasAddress(ctx, addr) { + return nil, errors.New("backend does not contain address") + } + + var key *Key + err := backend.UsePassword(func(password []byte) error { + var err error + key, err = backend.getKey(ctx, addr, password) + + return err + }) + if err != nil { + return nil, err + } + + return key.KeyInfo, nil +} + +// GetKeyInfoPassphrase get private private key from wallet, get encrypt byte from db and decrypto it with password +func (backend *DSBackend) GetKeyInfoPassphrase(ctx context.Context, addr address.Address, password []byte) (*crypto.KeyInfo, error) { + if !backend.HasAddress(ctx, addr) { + return nil, errors.New("backend does not contain address") + } + + key, err := backend.getKey(ctx, addr, password) + if err != nil { + return nil, err + } + + return key.KeyInfo, nil +} + +func (backend *DSBackend) getKey(ctx context.Context, addr address.Address, password []byte) (*Key, error) { + b, err := backend.ds.Get(ctx, ds.NewKey(addr.String())) + if err != nil { + return nil, 
errors.Wrap(err, "failed to fetch private key from backend") + } + + return decryptKey(b, password) +} + +func (backend *DSBackend) LockWallet(ctx context.Context) error { + if backend.state == Lock { + return fmt.Errorf("already locked") + } + + if len(backend.Addresses(ctx)) == 0 { + return fmt.Errorf("no address need lock") + } + + for _, addr := range backend.Addresses(ctx) { + backend.lk.Lock() + delete(backend.unLocked, addr) + backend.lk.Unlock() + } + backend.cleanPassword() + backend.state = Lock + + return nil +} + +// UnLockWallet unlock wallet with password, decrypt local key in db and save to protected memory +func (backend *DSBackend) UnLockWallet(ctx context.Context, password []byte) error { + defer func() { + for i := range password { + password[i] = 0 + } + }() + if backend.state == Unlock { + return fmt.Errorf("already unlocked") + } + + if len(backend.Addresses(ctx)) == 0 { + return fmt.Errorf("no address need unlock") + } + + for _, addr := range backend.Addresses(ctx) { + ki, err := backend.GetKeyInfoPassphrase(ctx, addr, password) + if err != nil { + return err + } + + backend.lk.Lock() + backend.unLocked[addr] = ki + backend.lk.Unlock() + } + backend.state = Unlock + + return nil +} + +// SetPassword set password for wallet , and wallet used this password to encrypt private key +func (backend *DSBackend) SetPassword(ctx context.Context, password []byte) error { + if backend.password != nil { + return ErrRepeatPassword + } + + for _, addr := range backend.Addresses(ctx) { + ki, err := backend.GetKeyInfoPassphrase(ctx, addr, password) + if err != nil { + return err + } + backend.lk.Lock() + backend.unLocked[addr] = ki + backend.lk.Unlock() + } + if backend.state == undetermined { + backend.state = Unlock + } + + backend.setPassword(password) + + return nil +} + +// HasPassword return whether the password has been set in the wallet +func (backend *DSBackend) HasPassword() bool { + return backend.password != nil +} + +// WalletState return wallet 
state(lock/unlock) +func (backend *DSBackend) WalletState(ctx context.Context) int { + return backend.state +} + +func (backend *DSBackend) setPassword(password []byte) { + backend.lk.Lock() + defer backend.lk.Unlock() + + backend.password = memguard.NewEnclave(password) +} + +func (backend *DSBackend) UsePassword(f func(password []byte) error) error { + if backend.password == nil { + return f([]byte{}) + } + buf, err := backend.password.Open() + if err != nil { + return err + } + defer buf.Destroy() + + return f(buf.Bytes()) +} + +func (backend *DSBackend) cleanPassword() { + backend.lk.Lock() + defer backend.lk.Unlock() + backend.password = nil +} diff --git a/pkg/wallet/dsbackend_test.go b/pkg/wallet/dsbackend_test.go new file mode 100644 index 0000000000..886179fdbe --- /dev/null +++ b/pkg/wallet/dsbackend_test.go @@ -0,0 +1,182 @@ +package wallet + +import ( + "context" + "crypto/rand" + "io" + "sync" + "testing" + + "github.com/filecoin-project/venus/pkg/crypto" + + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-datastore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/venus/pkg/config" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ "github.com/filecoin-project/venus/pkg/crypto/secp" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestDSBackendSimple(t *testing.T) { + tf.UnitTest(t) + + ds := datastore.NewMapDatastore() + defer func() { + require.NoError(t, ds.Close()) + }() + + ctx := context.Background() + fs, err := NewDSBackend(ctx, ds, config.TestPassphraseConfig(), TestPassword) + assert.NoError(t, err) + + t.Log("empty address list on empty datastore") + assert.Len(t, fs.Addresses(ctx), 0) + + t.Log("can create new address") + addr, err := fs.NewAddress(ctx, address.SECP256K1) + assert.NoError(t, err) + + t.Log("address is stored") + assert.True(t, fs.HasAddress(ctx, addr)) + + t.Log("address is stored in repo, and back when 
loading fresh in a new backend") + fs2, err := NewDSBackend(ctx, ds, config.TestPassphraseConfig(), []byte("test-password")) + assert.NoError(t, err) + + assert.True(t, fs2.HasAddress(ctx, addr)) + + t.Log("delete the address") + err = fs2.DeleteAddress(ctx, addr) + assert.NoError(t, err) + assert.False(t, fs2.HasAddress(ctx, addr)) +} + +func TestDSBackendKeyPairMatchAddress(t *testing.T) { + tf.UnitTest(t) + + ds := datastore.NewMapDatastore() + defer func() { + require.NoError(t, ds.Close()) + }() + + ctx := context.Background() + fs, err := NewDSBackend(ctx, ds, config.TestPassphraseConfig(), TestPassword) + assert.NoError(t, err) + + t.Log("can create new address") + addr, err := fs.NewAddress(ctx, address.SECP256K1) + assert.NoError(t, err) + + t.Log("address is stored") + assert.True(t, fs.HasAddress(ctx, addr)) + + t.Log("address references to a secret key") + ki, err := fs.GetKeyInfo(ctx, addr) + assert.NoError(t, err) + + dAddr, err := ki.Address() + assert.NoError(t, err) + + t.Log("generated address and stored address should match") + assert.Equal(t, addr, dAddr) +} + +func TestDSBackendErrorsForUnknownAddress(t *testing.T) { + tf.UnitTest(t) + + // create 2 backends + ds1 := datastore.NewMapDatastore() + defer func() { + require.NoError(t, ds1.Close()) + }() + + ctx := context.Background() + fs1, err := NewDSBackend(ctx, ds1, config.TestPassphraseConfig(), TestPassword) + assert.NoError(t, err) + + ds2 := datastore.NewMapDatastore() + defer func() { + require.NoError(t, ds2.Close()) + }() + fs2, err := NewDSBackend(ctx, ds2, config.TestPassphraseConfig(), TestPassword) + assert.NoError(t, err) + + t.Log("can create new address in fs1") + addr, err := fs1.NewAddress(ctx, address.SECP256K1) + assert.NoError(t, err) + + t.Log("address is stored fs1") + assert.True(t, fs1.HasAddress(ctx, addr)) + + t.Log("address is not stored fs2") + assert.False(t, fs2.HasAddress(ctx, addr)) + + t.Log("address references to a secret key in fs1") + _, err = 
fs1.GetKeyInfo(ctx, addr) + assert.NoError(t, err) + + t.Log("address does not references to a secret key in fs2") + _, err = fs2.GetKeyInfo(ctx, addr) + assert.Error(t, err) + assert.Contains(t, "backend does not contain address", err.Error()) +} + +func TestDSBackendParallel(t *testing.T) { + tf.UnitTest(t) + + ds := datastore.NewMapDatastore() + defer func() { + require.NoError(t, ds.Close()) + }() + + ctx := context.Background() + fs, err := NewDSBackend(ctx, ds, config.TestPassphraseConfig(), TestPassword) + assert.NoError(t, err) + + var wg sync.WaitGroup + count := 10 + wg.Add(count) + for i := 0; i < count; i++ { + go func() { + _, err := fs.NewAddress(ctx, address.SECP256K1) + assert.NoError(t, err) + wg.Done() + }() + } + + wg.Wait() + assert.Len(t, fs.Addresses(ctx), 10) +} + +func BenchmarkDSBackendSimple(b *testing.B) { + ds := datastore.NewMapDatastore() + defer func() { + require.NoError(b, ds.Close()) + }() + + ctx := context.Background() + fs, err := NewDSBackend(ctx, ds, config.TestPassphraseConfig(), TestPassword) + assert.NoError(b, err) + + corruptData := make([]byte, 32) + for i := 0; i < b.N; i++ { + addr, err := fs.NewAddress(ctx, address.SECP256K1) + assert.NoError(b, err) + + data := make([]byte, 32) + _, err = io.ReadFull(rand.Reader, data) + assert.NoError(b, err) + copy(corruptData, data) + + signature, err := fs.SignBytes(ctx, data, addr) + if err != nil { + b.Log(len(signature.Data), signature) + } + assert.NoError(b, err) + + assert.NoError(b, crypto.Verify(signature, addr, corruptData)) + } +} diff --git a/pkg/wallet/key.go b/pkg/wallet/key.go new file mode 100644 index 0000000000..786939774b --- /dev/null +++ b/pkg/wallet/key.go @@ -0,0 +1,99 @@ +package wallet + +import ( + "encoding/hex" + "encoding/json" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/pkg/crypto" + + "github.com/pborman/uuid" +) + +const ( + version = 3 +) + +// Key private key info +type Key struct { + ID uuid.UUID // Version 4 
"random" for unique id not derived from key data + // to simplify lookups we also store the address + Address address.Address + KeyInfo *crypto.KeyInfo +} + +type plainKey struct { + Address string `json:"address"` + KeyInfo string `json:"privatekey"` + ID string `json:"id"` + Version int `json:"version"` +} + +type encryptedKey struct { + Address string `json:"address"` + Crypto CryptoJSON `json:"crypto"` + ID string `json:"id"` + Version int `json:"version"` +} + +type CryptoJSON struct { + Cipher string `json:"cipher"` + CipherText string `json:"ciphertext"` + CipherParams cipherParams `json:"cipherparams"` + KDF string `json:"kdf"` + KDFParams map[string]interface{} `json:"kdfparams"` + MAC string `json:"mac"` +} + +type cipherParams struct { + IV string `json:"iv"` +} + +func (k *Key) MarshalJSON() (j []byte, err error) { + kiBytes, err := json.Marshal(k.KeyInfo) + if err != nil { + return nil, err + } + + jStruct := plainKey{ + hex.EncodeToString([]byte(k.Address.String())), + hex.EncodeToString(kiBytes), + k.ID.String(), + version, + } + j, err = json.Marshal(jStruct) + return j, err +} + +func (k *Key) UnmarshalJSON(j []byte) (err error) { + plainKey := new(plainKey) + err = json.Unmarshal(j, &plainKey) + if err != nil { + return err + } + + u := new(uuid.UUID) + *u = uuid.Parse(plainKey.ID) + k.ID = *u + + addr, err := hex.DecodeString(plainKey.Address) + if err != nil { + return err + } + k.Address, err = address.NewFromString(string(addr)) + if err != nil { + return err + } + + k.KeyInfo = new(crypto.KeyInfo) + kiBytes, err := hex.DecodeString(plainKey.KeyInfo) + if err != nil { + return err + } + err = json.Unmarshal(kiBytes, k.KeyInfo) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/wallet/msgtypes.go b/pkg/wallet/msgtypes.go new file mode 100644 index 0000000000..4b5ee8e54c --- /dev/null +++ b/pkg/wallet/msgtypes.go @@ -0,0 +1,31 @@ +// Code from github.com/filecoin-project/venus-wallet/core/msgtypes.go. DO NOT EDIT. 
+ +package wallet + +const ( + MTUnknown = MsgType("unknown") + + // Signing message CID. MsgMeta.Extra contains raw cbor message bytes + MTChainMsg = MsgType("message") + + // Signing a blockheader. signing raw cbor block bytes (MsgMeta.Extra is empty) + MTBlock = MsgType("block") + + // Signing a deal proposal. signing raw cbor proposal bytes (MsgMeta.Extra is empty) + MTDealProposal = MsgType("dealproposal") + // extra is nil, 'toSign' is cbor raw bytes of 'DrawRandomParams' + // following types follow above rule + MTDrawRandomParam = MsgType("drawrandomparam") + MTSignedVoucher = MsgType("signedvoucher") + MTStorageAsk = MsgType("storageask") + MTAskResponse = MsgType("askresponse") + MTNetWorkResponse = MsgType("networkresposne") + + // reference : storagemarket/impl/remotecli.go:330 + // sign storagemarket.ClientDeal.ProposalCid, + // MsgMeta.Extra is nil, 'toSign' is market.ClientDealProposal + // storagemarket.ClientDeal.ProposalCid equals cborutil.AsIpld(market.ClientDealProposal).Cid() + MTClientDeal = MsgType("clientdeal") + + MTProviderDealState = MsgType("providerdealstate") +) diff --git a/pkg/wallet/passphrase.go b/pkg/wallet/passphrase.go new file mode 100644 index 0000000000..502d2badc1 --- /dev/null +++ b/pkg/wallet/passphrase.go @@ -0,0 +1,220 @@ +package wallet + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "hash" + "io" + + "golang.org/x/crypto/pbkdf2" + "golang.org/x/crypto/scrypt" + "golang.org/x/crypto/sha3" +) + +const ( + keyHeaderKDF = "scrypt" + + scryptR = 8 + scryptDKLen = 32 +) + +var ErrDecrypt = errors.New("could not decrypt key with given password") + +// EncryptKey encrypts a key using the specified scrypt parameters into a json +// blob that can be decrypted later on. 
+func encryptKey(key *Key, password []byte, scryptN, scryptP int) ([]byte, error) { + keyBytes, err := json.Marshal(key) + if err != nil { + return nil, err + } + cryptoStruct, err := encryptData(keyBytes, password, scryptN, scryptP) + if err != nil { + return nil, err + } + encryptedKey := encryptedKey{ + hex.EncodeToString([]byte(key.Address.String())), + cryptoStruct, + key.ID.String(), + version, + } + return json.Marshal(encryptedKey) +} + +// encryptData encrypts the data given as 'data' with the password 'venusauth'. +func encryptData(data, password []byte, scryptN, scryptP int) (CryptoJSON, error) { + salt := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + return CryptoJSON{}, fmt.Errorf("reading from crypto/rand failed: " + err.Error()) + } + derivedKey, err := scrypt.Key(password, salt, scryptN, scryptR, scryptP, scryptDKLen) + if err != nil { + return CryptoJSON{}, err + } + encryptKey := derivedKey[:16] + + iv := make([]byte, aes.BlockSize) // 16 + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return CryptoJSON{}, fmt.Errorf("reading from crypto/rand failed: " + err.Error()) + } + cipherText, err := aesCTRXOR(encryptKey, data, iv) + if err != nil { + return CryptoJSON{}, err + } + mac := keccak256(derivedKey[16:32], cipherText) + + scryptParamsJSON := make(map[string]interface{}, 5) + scryptParamsJSON["n"] = scryptN + scryptParamsJSON["r"] = scryptR + scryptParamsJSON["p"] = scryptP + scryptParamsJSON["dklen"] = scryptDKLen + scryptParamsJSON["salt"] = hex.EncodeToString(salt) + cipherParams := cipherParams{ + IV: hex.EncodeToString(iv), + } + + cryptoStruct := CryptoJSON{ + Cipher: "aes-128-ctr", + CipherText: hex.EncodeToString(cipherText), + CipherParams: cipherParams, + KDF: keyHeaderKDF, + KDFParams: scryptParamsJSON, + MAC: hex.EncodeToString(mac), + } + return cryptoStruct, nil +} + +func aesCTRXOR(key, inText, iv []byte) ([]byte, error) { + // AES-128 is selected due to size of encryptKey. 
+ aesBlock, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + stream := cipher.NewCTR(aesBlock, iv) + outText := make([]byte, len(inText)) + stream.XORKeyStream(outText, inText) + return outText, err +} + +// decryptKey decrypts a key from a json blob, returning the Key. +func decryptKey(keyjson, password []byte) (*Key, error) { + var ( + keyBytes []byte + err error + ) + + k := new(encryptedKey) + if err := json.Unmarshal(keyjson, k); err != nil { + return nil, err + } + if k.Version != version { + return nil, fmt.Errorf("version not supported: %v", k.Version) + } + + keyBytes, err = decryptData(k.Crypto, password) + if err != nil { + return nil, err + } + + key := &Key{} + if err = json.Unmarshal(keyBytes, key); err != nil { + return nil, err + } + return key, nil +} + +func decryptData(cryptoJSON CryptoJSON, password []byte) ([]byte, error) { + if cryptoJSON.Cipher != "aes-128-ctr" { + return nil, fmt.Errorf("cipher not supported: %v", cryptoJSON.Cipher) + } + mac, err := hex.DecodeString(cryptoJSON.MAC) + if err != nil { + return nil, err + } + + iv, err := hex.DecodeString(cryptoJSON.CipherParams.IV) + if err != nil { + return nil, err + } + + cipherText, err := hex.DecodeString(cryptoJSON.CipherText) + if err != nil { + return nil, err + } + + derivedKey, err := getKDFKey(cryptoJSON, password) + if err != nil { + return nil, err + } + + calculatedMAC := keccak256(derivedKey[16:32], cipherText) + if !bytes.Equal(calculatedMAC, mac) { + return nil, ErrDecrypt + } + + plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv) + if err != nil { + return nil, err + } + + return plainText, err +} + +func getKDFKey(cryptoJSON CryptoJSON, password []byte) ([]byte, error) { + salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string)) + if err != nil { + return nil, err + } + dkLen := ensureInt(cryptoJSON.KDFParams["dklen"]) + + if cryptoJSON.KDF == keyHeaderKDF { + n := ensureInt(cryptoJSON.KDFParams["n"]) + r := 
ensureInt(cryptoJSON.KDFParams["r"]) + p := ensureInt(cryptoJSON.KDFParams["p"]) + return scrypt.Key(password, salt, n, r, p, dkLen) + + } else if cryptoJSON.KDF == "pbkdf2" { + c := ensureInt(cryptoJSON.KDFParams["c"]) + prf := cryptoJSON.KDFParams["prf"].(string) + if prf != "hmac-sha256" { + return nil, fmt.Errorf("unsupported PBKDF2 PRF: %s", prf) + } + key := pbkdf2.Key(password, salt, c, dkLen, sha256.New) + return key, nil + } + + return nil, fmt.Errorf("unsupported KDF: %s", cryptoJSON.KDF) +} + +// KeccakState wraps sha3.state. In addition to the usual hash methods, it also supports +// Read to get a variable amount of data from the hash state. Read is faster than Sum +// because it doesn't copy the internal state, but also modifies the internal state. +type KeccakState interface { + hash.Hash + Read([]byte) (int, error) +} + +// keccak256 calculates and returns the Keccak256 hash of the input data. +func keccak256(data ...[]byte) []byte { + b := make([]byte, 32) + d := sha3.NewLegacyKeccak256().(KeccakState) + for _, b := range data { + _, _ = d.Write(b) + } + _, _ = d.Read(b) + return b +} + +func ensureInt(x interface{}) int { + res, ok := x.(int) + if !ok { + res = int(x.(float64)) + } + return res +} diff --git a/pkg/wallet/passphrase_test.go b/pkg/wallet/passphrase_test.go new file mode 100644 index 0000000000..48e8dae507 --- /dev/null +++ b/pkg/wallet/passphrase_test.go @@ -0,0 +1,65 @@ +package wallet + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + "github.com/pborman/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/scrypt" + + "github.com/filecoin-project/venus/pkg/config" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestEncrypKeyAndDecryptKey(t *testing.T) { + tf.UnitTest(t) + + ds := datastore.NewMapDatastore() + defer func() { + require.NoError(t, ds.Close()) + }() + + fs, err := NewDSBackend(context.Background(), ds, 
config.TestPassphraseConfig(), TestPassword) + assert.NoError(t, err) + + w := New(fs) + ctx := context.Background() + ki, err := w.NewKeyInfo(ctx) + assert.NoError(t, err) + + addr, err := ki.Address() + assert.NoError(t, err) + + key := &Key{ + ID: uuid.NewRandom(), + Address: addr, + KeyInfo: ki, + } + + b, err := encryptKey(key, TestPassword, config.TestPassphraseConfig().ScryptN, config.TestPassphraseConfig().ScryptP) + assert.NoError(t, err) + + key2, err := decryptKey(b, TestPassword) + assert.NoError(t, err) + + assert.Equal(t, key.ID, key2.ID) + assert.Equal(t, key.Address, key2.Address) + assert.Equal(t, key.KeyInfo.Key(), key2.KeyInfo.Key()) +} + +func TestScrypt(t *testing.T) { + t.Skipf("had test this too much, ignore this time!") + for n := uint8(14); n < 24; n++ { + b := testing.Benchmark(func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = scrypt.Key([]byte("password"), []byte("salt"), 1< cbg.MaxLength { + return xerrors.Errorf("Value in field t.Value was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Value))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Value)); err != nil { + return err + } + + // t.Unmarshallable ([]*chaos.UnmarshallableCBOR) (slice) + if len(t.Unmarshallable) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Unmarshallable was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Unmarshallable))); err != nil { + return err + } + for _, v := range t.Unmarshallable { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *State) UnmarshalCBOR(r io.Reader) error { + *t = State{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return 
fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Value (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Value = string(sval) + } + // t.Unmarshallable ([]*chaos.UnmarshallableCBOR) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Unmarshallable: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Unmarshallable = make([]*UnmarshallableCBOR, extra) + } + + for i := 0; i < int(extra); i++ { + + var v UnmarshallableCBOR + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Unmarshallable[i] = &v + } + + return nil +} + +var lengthBufCallerValidationArgs = []byte{131} + +func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCallerValidationArgs); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Branch (chaos.CallerValidationBranch) (int64) + if t.Branch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Branch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Branch-1)); err != nil { + return err + } + } + + // t.Addrs ([]address.Address) (slice) + if len(t.Addrs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Addrs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Addrs))); err != nil { + return err + } + for _, v := range t.Addrs { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.Types ([]cid.Cid) (slice) + if len(t.Types) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Types was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, 
w, cbg.MajArray, uint64(len(t.Types))); err != nil { + return err + } + for _, v := range t.Types { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Types: %w", err) + } + } + return nil +} + +func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) error { + *t = CallerValidationArgs{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Branch (chaos.CallerValidationBranch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Branch = CallerValidationBranch(extraI) + } + // t.Addrs ([]address.APIAddress) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Addrs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Addrs = make([]address.Address, extra) + } + + for i := 0; i < int(extra); i++ { + + var v address.Address + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Addrs[i] = v + } + + // t.Types ([]cid.Cid) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Types: array too large (%d)", extra) + } + + if maj 
!= cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Types = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.Types failed: %w", err) + } + t.Types[i] = c + } + + return nil +} + +var lengthBufCreateActorArgs = []byte{132} + +func (t *CreateActorArgs) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCreateActorArgs); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.UndefActorCID (bool) (bool) + if err := cbg.WriteBool(w, t.UndefActorCID); err != nil { + return err + } + + // t.ActorCID (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.ActorCID); err != nil { + return xerrors.Errorf("failed to write cid field t.ActorCID: %w", err) + } + + // t.UndefAddress (bool) (bool) + if err := cbg.WriteBool(w, t.UndefAddress); err != nil { + return err + } + + // t.APIAddress (address.APIAddress) (struct) + if err := t.Address.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *CreateActorArgs) UnmarshalCBOR(r io.Reader) error { + *t = CreateActorArgs{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.UndefActorCID (bool) (bool) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.UndefActorCID = false + case 21: + t.UndefActorCID = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.ActorCID (cid.Cid) 
(struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ActorCID: %w", err) + } + + t.ActorCID = c + + } + // t.UndefAddress (bool) (bool) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.UndefAddress = false + case 21: + t.UndefAddress = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.APIAddress (address.APIAddress) (struct) + + { + + if err := t.Address.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.APIAddress: %w", err) + } + + } + return nil +} + +var lengthBufResolveAddressResponse = []byte{130} + +func (t *ResolveAddressResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufResolveAddressResponse); err != nil { + return err + } + + // t.APIAddress (address.APIAddress) (struct) + if err := t.Address.MarshalCBOR(w); err != nil { + return err + } + + // t.Success (bool) (bool) + if err := cbg.WriteBool(w, t.Success); err != nil { + return err + } + return nil +} + +func (t *ResolveAddressResponse) UnmarshalCBOR(r io.Reader) error { + *t = ResolveAddressResponse{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.APIAddress (address.APIAddress) (struct) + + { + + if err := t.Address.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.APIAddress: %w", err) + } + + } + // t.Success (bool) (bool) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return 
err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Success = false + case 21: + t.Success = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + return nil +} + +var lengthBufSendArgs = []byte{132} + +func (t *SendArgs) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSendArgs); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.To (address.APIAddress) (struct) + if err := t.To.MarshalCBOR(w); err != nil { + return err + } + + // t.Value (big.Int) (struct) + if err := t.Value.MarshalCBOR(w); err != nil { + return err + } + + // t.Method (abi.MethodNum) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Method)); err != nil { + return err + } + + // t.Params ([]uint8) (slice) + if len(t.Params) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Params was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Params))); err != nil { + return err + } + + if _, err := w.Write(t.Params[:]); err != nil { + return err + } + return nil +} + +func (t *SendArgs) UnmarshalCBOR(r io.Reader) error { + *t = SendArgs{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.To (address.APIAddress) (struct) + + { + + if err := t.To.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.To: %w", err) + } + + } + // t.Value (big.Int) (struct) + + { + + if err := t.Value.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Value: %w", err) + } 
+ + } + // t.Method (abi.MethodNum) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Method = abi.MethodNum(extra) + + } + // t.Params ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Params: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Params = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Params[:]); err != nil { + return err + } + return nil +} + +var lengthBufSendReturn = []byte{130} + +func (t *SendReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSendReturn); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Return (builtin.CBORBytes) (slice) + if len(t.Return) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Return was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Return))); err != nil { + return err + } + + if _, err := w.Write(t.Return[:]); err != nil { + return err + } + + // t.Code (exitcode.ExitCode) (int64) + if t.Code >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Code)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil { + return err + } + } + return nil +} + +func (t *SendReturn) UnmarshalCBOR(r io.Reader) error { + *t = SendReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type 
array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Return (builtin.CBORBytes) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Return: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Return = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Return[:]); err != nil { + return err + } + // t.Code (exitcode.ExitCode) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Code = exitcode.ExitCode(extraI) + } + return nil +} + +var lengthBufMutateStateArgs = []byte{130} + +func (t *MutateStateArgs) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMutateStateArgs); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Value (string) (string) + if len(t.Value) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Value was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Value))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Value)); err != nil { + return err + } + + // t.Branch (chaos.MutateStateBranch) (int64) + if t.Branch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Branch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, 
w, cbg.MajNegativeInt, uint64(-t.Branch-1)); err != nil { + return err + } + } + return nil +} + +func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) error { + *t = MutateStateArgs{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Value (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Value = string(sval) + } + // t.Branch (chaos.MutateStateBranch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Branch = MutateStateBranch(extraI) + } + return nil +} + +var lengthBufAbortWithArgs = []byte{131} + +func (t *AbortWithArgs) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufAbortWithArgs); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Code (exitcode.ExitCode) (int64) + if t.Code >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Code)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil { + return err + } + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.Uncontrolled (bool) (bool) + if err := cbg.WriteBool(w, t.Uncontrolled); err != nil { + return err + } + return nil +} + +func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) error { + *t = AbortWithArgs{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Code (exitcode.ExitCode) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Code = exitcode.ExitCode(extraI) + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Uncontrolled (bool) (bool) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Uncontrolled = false + case 21: + t.Uncontrolled = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + return nil +} + +var lengthBufInspectRuntimeReturn = []byte{134} + +func (t *InspectRuntimeReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := 
w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufInspectRuntimeReturn); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Caller (address.APIAddress) (struct) + if err := t.Caller.MarshalCBOR(w); err != nil { + return err + } + + // t.Receiver (address.APIAddress) (struct) + if err := t.Receiver.MarshalCBOR(w); err != nil { + return err + } + + // t.ValueReceived (big.Int) (struct) + if err := t.ValueReceived.MarshalCBOR(w); err != nil { + return err + } + + // t.CurrEpoch (abi.ChainEpoch) (int64) + if t.CurrEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CurrEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.CurrEpoch-1)); err != nil { + return err + } + } + + // t.CurrentBalance (big.Int) (struct) + if err := t.CurrentBalance.MarshalCBOR(w); err != nil { + return err + } + + // t.State (chaos.State) (struct) + if err := t.State.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) error { + *t = InspectRuntimeReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 6 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Caller (address.APIAddress) (struct) + + { + + if err := t.Caller.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Caller: %w", err) + } + + } + // t.Receiver (address.APIAddress) (struct) + + { + + if err := t.Receiver.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Receiver: %w", err) + } + + } + // t.ValueReceived (big.Int) (struct) + + { + + if err := t.ValueReceived.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling 
t.ValueReceived: %w", err) + } + + } + // t.CurrEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.CurrEpoch = abi.ChainEpoch(extraI) + } + // t.CurrentBalance (big.Int) (struct) + + { + + if err := t.CurrentBalance.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CurrentBalance: %w", err) + } + + } + // t.State (chaos.State) (struct) + + { + + if err := t.State.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.State: %w", err) + } + + } + return nil +} diff --git a/tools/conformance/chaos/gen/gen.go b/tools/conformance/chaos/gen/gen.go new file mode 100644 index 0000000000..c5b49c8ec6 --- /dev/null +++ b/tools/conformance/chaos/gen/gen.go @@ -0,0 +1,23 @@ +package main + +import ( + "github.com/filecoin-project/venus/tools/conformance/chaos" + + gen "github.com/whyrusleeping/cbor-gen" +) + +func main() { + if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "chaos", + chaos.State{}, + chaos.CallerValidationArgs{}, + chaos.CreateActorArgs{}, + chaos.ResolveAddressResponse{}, + chaos.SendArgs{}, + chaos.SendReturn{}, + chaos.MutateStateArgs{}, + chaos.AbortWithArgs{}, + chaos.InspectRuntimeReturn{}, + ); err != nil { + panic(err) + } +} diff --git a/tools/conformance/chaos/ids.go b/tools/conformance/chaos/ids.go new file mode 100644 index 0000000000..6b0ad86a74 --- /dev/null +++ b/tools/conformance/chaos/ids.go @@ -0,0 +1,29 @@ +package chaos + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" +) + +// 
ChaosActorCodeCID is the CID by which this kind of actor will be identified. +var ChaosActorCodeCID = func() cid.Cid { + builder := cid.V1Builder{Codec: cid.Raw, MhType: multihash.IDENTITY} + c, err := builder.Sum([]byte("fil/1/chaos")) + if err != nil { + panic(err) + } + return c +}() + +// Address is the singleton address of this actor. Its value is 98 +// (builtin.FirstNonSingletonActorId - 2), as 99 is reserved for the burnt funds +// singleton. +var Address = func() address.Address { + // the address before the burnt funds address (99) + addr, err := address.NewIDAddress(98) + if err != nil { + panic(err) + } + return addr +}() diff --git a/tools/conformance/chaos/state.go b/tools/conformance/chaos/state.go new file mode 100644 index 0000000000..b036a08fde --- /dev/null +++ b/tools/conformance/chaos/state.go @@ -0,0 +1,32 @@ +package chaos + +import ( + "fmt" + "io" +) + +// State is the state for the chaos actor used by some methods to invoke +// behaviours in the vm or runtime. +type State struct { + // Value can be updated by chaos actor methods to test illegal state + // mutations when the state is in readonly mode for example. + Value string + // Unmarshallable is a sentinel value. If the slice contains no values, the + // State struct will encode as CBOR without issue. If the slice is non-nil, + // CBOR encoding will fail. + Unmarshallable []*UnmarshallableCBOR +} + +// UnmarshallableCBOR is a type that cannot be marshalled or unmarshalled to +// CBOR despite implementing the CBORMarshaler and CBORUnmarshaler interface. +type UnmarshallableCBOR struct{} + +// UnmarshalCBOR will fail to unmarshal the value from CBOR. +func (t *UnmarshallableCBOR) UnmarshalCBOR(io.Reader) error { //nolint:staticcheck + return fmt.Errorf("failed to unmarshal cbor") +} + +// MarshalCBOR will fail to marshal the value to CBOR. 
+func (t *UnmarshallableCBOR) MarshalCBOR(io.Writer) error { //nolint:staticcheck + return fmt.Errorf("failed to marshal cbor") +} diff --git a/tools/conformance/corpus_test.go b/tools/conformance/corpus_test.go new file mode 100644 index 0000000000..6f304f0141 --- /dev/null +++ b/tools/conformance/corpus_test.go @@ -0,0 +1,143 @@ +package conformance + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/filecoin-project/test-vectors/schema" +) + +var invokees = map[schema.Class]func(Reporter, string, *schema.TestVector, *schema.Variant){ + schema.ClassMessage: ExecuteMessageVector, + schema.ClassTipset: ExecuteTipsetVector, +} + +const ( + // EnvSkipConformance, if 1, skips the conformance test suite. + EnvSkipConformance = "SKIP_CONFORMANCE" + + // EnvCorpusRootDir is the name of the environment variable where the path + // to an alternative corpus location can be provided. + // + // The default is defaultCorpusRoot. + EnvCorpusRootDir = "CORPUS_DIR" + + // defaultCorpusRoot is the directory where the test vector corpus is hosted. + // It is mounted on the Lotus repo as a git submodule. + // + // When running this test, the corpus root can be overridden through the + // -conformance.corpus CLI flag to run an alternate corpus. + defaultCorpusRoot = "../../extern/test-vectors/corpus" +) + +// ignore is a set of paths relative to root to skip. +var ignore = map[string]struct{}{ + ".git": {}, + "schema.json": {}, +} + +// TestConformance is the entrypoint test that runs all test vectors found +// in the corpus root directory. +// +// It locates all json files via a recursive walk, skipping over the ignore set, +// as well as files beginning with _. It parses each file as a test vector, and +// runs it via the Driver. 
+func TestConformance(t *testing.T) { + tf.UnitTest(t) + if skip := strings.TrimSpace(os.Getenv(EnvSkipConformance)); skip == "1" { + t.SkipNow() + } + // corpusRoot is the effective corpus root path, taken from the `-conformance.corpus` CLI flag, + // falling back to defaultCorpusRoot if not provided. + corpusRoot := defaultCorpusRoot + if dir := strings.TrimSpace(os.Getenv(EnvCorpusRootDir)); dir != "" { + corpusRoot = dir + } + + var vectors []string + err := filepath.Walk(corpusRoot+"/", func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Fatal(err) + } + + filename := filepath.Base(path) + rel, err := filepath.Rel(corpusRoot, path) + if err != nil { + t.Fatal(err) + } + + if _, ok := ignore[rel]; ok { + // skip over using the right error. + if info.IsDir() { + return filepath.SkipDir + } + return nil + } + if info.IsDir() { + // dive into directories. + return nil + } + if filepath.Ext(path) != ".json" { + // skip if not .json. + return nil + } + if ignored := strings.HasPrefix(filename, "_"); ignored { + // ignore files starting with _. + t.Logf("ignoring: %s", rel) + return nil + } + vectors = append(vectors, rel) + return nil + }) + if err != nil { + t.Fatal(err) + } + + if len(vectors) == 0 { + t.Fatalf("no test vectors found") + } + + // Run a test for each vector. + for _, v := range vectors { + path := filepath.Join(corpusRoot, v) + raw, err := os.ReadFile(path) + if err != nil { + t.Fatalf("failed to read test raw file: %s", path) + } + + var vector schema.TestVector + err = json.Unmarshal(raw, &vector) + if err != nil { + t.Errorf("failed to parse test vector %s: %s; skipping", path, err) + continue + } + + t.Run(v, func(t *testing.T) { + for _, h := range vector.Hints { + if h == schema.HintIncorrect { + t.Logf("skipping vector marked as incorrect: %s", vector.Meta.ID) + t.SkipNow() + } + } + + // dispatch the execution depending on the vector class. 
+ invokee, ok := invokees[vector.Class] + if !ok { + return + // t.Fatalf("unsupported test vector class: %s", vector.Class) + } + + for _, variant := range vector.Pre.Variants { + variant := variant + t.Run(variant.ID, func(t *testing.T) { + invokee(t, v, &vector, &variant) + }) + } + }) + } +} diff --git a/tools/conformance/driver.go b/tools/conformance/driver.go new file mode 100644 index 0000000000..df2114baa1 --- /dev/null +++ b/tools/conformance/driver.go @@ -0,0 +1,359 @@ +package conformance + +import ( + "context" + gobig "math/big" + "os" + + "github.com/filecoin-project/venus/pkg/consensus" + "github.com/filecoin-project/venus/pkg/fvm" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/impl" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/venus/app/node" + "github.com/filecoin-project/venus/fixtures/networks" + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/consensusfault" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" // enable bls signatures + _ "github.com/filecoin-project/venus/pkg/crypto/secp" // enable secp signatures + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/pkg/vm/register" + "github.com/filecoin-project/venus/pkg/vmsupport" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + rtt "github.com/filecoin-project/go-state-types/rt" + "github.com/filecoin-project/test-vectors/schema" + 
"github.com/filecoin-project/venus/tools/conformance/chaos" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" +) + +var ( + // DefaultCirculatingSupply is the fallback circulating supply returned by + // the driver's CircSupplyCalculator function, used if the vector specifies + // no circulating supply. + DefaultCirculatingSupply = types.TotalFilecoinInt + + // DefaultBaseFee to use in the VM, if one is not supplied in the vector. + DefaultBaseFee = abi.NewTokenAmount(100) +) + +type Driver struct { + ctx context.Context + selector schema.Selector + vmFlush bool +} + +type DriverOpts struct { + // DisableVMFlush, when true, avoids calling LegacyVM.Flush(), forces a blockstore + // recursive copy, from the temporary buffer blockstore, to the real + // system's blockstore. Disabling LegacyVM flushing is useful when extracting test + // vectors and trimming state, as we don't want to force an accidental + // deep copy of the state tree. + // + // Disabling LegacyVM flushing almost always should go hand-in-hand with + // LOTUS_DISABLE_VM_BUF=iknowitsabadidea. That way, state tree writes are + // immediately committed to the blockstore. + DisableVMFlush bool +} + +func NewDriver(ctx context.Context, selector schema.Selector, opts DriverOpts) *Driver { + return &Driver{ctx: ctx, selector: selector, vmFlush: !opts.DisableVMFlush} +} + +type ExecuteTipsetResult struct { + ReceiptsRoot cid.Cid + PostStateRoot cid.Cid + + // AppliedMessages stores the messages that were applied, in the order they + // were applied. It includes implicit messages (cron, rewards). + AppliedMessages []*types.Message + // AppliedResults stores the results of AppliedMessages, in the same order. + AppliedResults []*vm.Ret +} + +// ExecuteTipset executes the supplied tipset on top of the state represented +// by the preroot CID. 
+// +// parentEpoch is the last epoch in which an actual tipset was processed. This +// is used by Lotus for null block counting and cron firing. +// +// This method returns the the receipts root, the poststate root, and the LegacyVM +// message results. The latter _include_ implicit messages, such as cron ticks +// and reward withdrawal per miner. +func (d *Driver) ExecuteTipset(bs blockstoreutil.Blockstore, chainDs ds.Batching, preroot cid.Cid, parentEpoch abi.ChainEpoch, tipset *schema.Tipset, execEpoch abi.ChainEpoch) (*ExecuteTipsetResult, error) { + ipldStore := cbor.NewCborStore(bs) + mainNetParams := networks.Mainnet() + node.SetNetParams(&mainNetParams.Network) + // chainstore + chainStore := chain.NewStore(chainDs, bs, cid.Undef, chain.NewMockCirculatingSupplyCalculator()) // load genesis from car + + //drand + /*genBlk, err := chainStore.GetGenesisBlock(context.TODO()) + if err != nil { + return nil, err + } + + drand, err := beacon.DrandConfigSchedule(genBlk.Timestamp, mainNetParams.Network.BlockDelay, mainNetParams.Network.DrandSchedule) + if err != nil { + return nil, err + }*/ + + // chain fork + chainFork, err := fork.NewChainFork(context.TODO(), chainStore, ipldStore, bs, &mainNetParams.Network) + faultChecker := consensusfault.NewFaultChecker(chainStore, chainFork) + syscalls := vmsupport.NewSyscalls(faultChecker, impl.ProofVerifier) + if err != nil { + return nil, err + } + + var ( + ctx = context.Background() + vmOption = vm.VmOption{ + CircSupplyCalculator: func(context.Context, abi.ChainEpoch, tree.Tree) (abi.TokenAmount, error) { + return big.Zero(), nil + }, + LookbackStateGetter: vmcontext.LookbackStateGetterForTipset(ctx, chainStore, chainFork, nil), + NetworkVersion: chainFork.GetNetworkVersion(ctx, execEpoch), + Rnd: NewFixedRand(), + BaseFee: big.NewFromGo(&tipset.BaseFee), + Fork: chainFork, + Epoch: execEpoch, + GasPriceSchedule: gas.NewPricesSchedule(mainNetParams.Network.ForkUpgradeParam), + PRoot: preroot, + Bsstore: bs, + 
SysCallsImpl: syscalls, + Tracing: true, + } + ) + + blocks := make([]types.BlockMessagesInfo, 0, len(tipset.Blocks)) + for _, b := range tipset.Blocks { + sb := types.BlockMessagesInfo{ + Block: &types.BlockHeader{ + Miner: b.MinerAddr, + ElectionProof: &types.ElectionProof{ + WinCount: b.WinCount, + }, + }, + } + for _, m := range b.Messages { + msg, err := types.DecodeMessage(m) + if err != nil { + return nil, err + } + switch msg.From.Protocol() { + case address.SECP256K1: + sb.SecpkMessages = append(sb.SecpkMessages, &types.SignedMessage{ + Message: *msg, + Signature: crypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: make([]byte, 65), + }, + }) + case address.BLS: + sb.BlsMessages = append(sb.BlsMessages, msg) + default: + // sneak in messages originating from other addresses as both kinds. + // these should fail, as they are actually invalid senders. + /*sb.SECPMessages = append(sb.SECPMessages, &types.SignedMessage{ + Message: *msg, + Signature: crypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: make([]byte, 65), + }, + })*/ + sb.BlsMessages = append(sb.BlsMessages, msg) // todo use interface for message + sb.BlsMessages = append(sb.BlsMessages, msg) + } + } + blocks = append(blocks, sb) + } + + var ( + messages []*types.Message + results []*vm.Ret + ) + + circulatingSupplyCalculator := chain.NewCirculatingSupplyCalculator(bs, preroot, mainNetParams.Network.ForkUpgradeParam) + processor := consensus.NewDefaultProcessor(syscalls, circulatingSupplyCalculator) + + postcid, receipt, err := processor.ApplyBlocks(ctx, blocks, nil, preroot, parentEpoch, execEpoch, vmOption, func(_ cid.Cid, msg *types.Message, ret *vm.Ret) error { + messages = append(messages, msg) + results = append(results, ret) + return nil + }) + if err != nil { + return nil, err + } + receiptsroot, err := chain.GetReceiptRoot(receipt) + if err != nil { + return nil, err + } + + ret := &ExecuteTipsetResult{ + ReceiptsRoot: receiptsroot, + PostStateRoot: postcid, + 
AppliedMessages: messages, + AppliedResults: results, + } + return ret, nil +} + +type ExecuteMessageParams struct { + Preroot cid.Cid + Epoch abi.ChainEpoch + Message *types.Message + CircSupply abi.TokenAmount + BaseFee abi.TokenAmount + NetworkVersion network.Version + + Rand vmcontext.HeadChainRandomness +} + +// ExecuteMessage executes a conformance test vector message in a temporary LegacyVM. +func (d *Driver) ExecuteMessage(bs blockstoreutil.Blockstore, params ExecuteMessageParams) (*vm.Ret, cid.Cid, error) { + if !d.vmFlush { + // do not flush the LegacyVM, just the state tree; this should be used with + // LOTUS_DISABLE_VM_BUF enabled, so writes will anyway be visible. + _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea") + } + actorBuilder := register.DefaultActorBuilder + register.GetDefaultActros() + coderLoader := actorBuilder.Build() + + if params.Rand == nil { + params.Rand = NewFixedRand() + } + mainNetParams := networks.Mainnet() + node.SetNetParams(&mainNetParams.Network) + ipldStore := cbor.NewCborStore(bs) + chainDs := ds.NewMapDatastore() // just mock one + // chainstore + chainStore := chain.NewStore(chainDs, bs, cid.Undef, chain.NewMockCirculatingSupplyCalculator()) // load genesis from car + + // chain fork + chainFork, err := fork.NewChainFork(context.TODO(), chainStore, ipldStore, bs, &mainNetParams.Network) + faultChecker := consensusfault.NewFaultChecker(chainStore, chainFork) + syscalls := vmsupport.NewSyscalls(faultChecker, impl.ProofVerifier) + if err != nil { + return nil, cid.Undef, err + } + var ( + ctx = context.Background() + vmOption = vm.VmOption{ + CircSupplyCalculator: func(ctx context.Context, epoch abi.ChainEpoch, tree tree.Tree) (abi.TokenAmount, error) { + return params.CircSupply, nil + }, + LookbackStateGetter: vmcontext.LookbackStateGetterForTipset(ctx, chainStore, chainFork, nil), + NetworkVersion: params.NetworkVersion, + Rnd: params.Rand, + BaseFee: params.BaseFee, + Fork: chainFork, + ActorCodeLoader: 
&coderLoader, + Epoch: params.Epoch, + GasPriceSchedule: gas.NewPricesSchedule(mainNetParams.Network.ForkUpgradeParam), + PRoot: params.Preroot, + Bsstore: bs, + SysCallsImpl: syscalls, + } + ) + + var vmi vm.Interface + // register the chaos actor if required by the vector. + if chaosOn, ok := d.selector["chaos_actor"]; ok && chaosOn == "true" { + av, _ := actorstypes.VersionForNetwork(params.NetworkVersion) + actorBuilder.AddMany(av, nil, builtin.MakeRegistryLegacy([]rtt.VMActor{chaos.Actor{}})) + coderLoader = actorBuilder.Build() + vmOption.ActorCodeLoader = &coderLoader + vmi, err = vm.NewLegacyVM(ctx, vmOption) + if err != nil { + return nil, cid.Undef, err + } + } else { + if params.NetworkVersion >= network.Version16 { + vmi, err = fvm.NewFVM(ctx, &vmOption) + if err != nil { + return nil, cid.Undef, err + } + } else { + vmi, err = vm.NewLegacyVM(ctx, vmOption) + if err != nil { + return nil, cid.Undef, err + } + } + } + + ret, err := vmi.ApplyMessage(ctx, toChainMsg(params.Message)) + if err != nil { + return nil, cid.Undef, err + } + + var root cid.Cid + if d.vmFlush { + // flush the LegacyVM, committing the state tree changes and forcing a + // recursive copy from the temporary blcokstore to the real blockstore. + root, err = vmi.Flush(ctx) + if err != nil { + return nil, cid.Undef, err + } + } else { + root, err = vmi.(vm.Interpreter).StateTree().Flush(d.ctx) + if err != nil { + return nil, cid.Undef, err + } + } + + return ret, root, err +} + +// toChainMsg injects a synthetic 0-filled signature of the right length to +// messages that originate from secp256k senders, leaving all +// others untouched. +// TODO: generate a signature in the DSL so that it's encoded in +// +// the test vector. 
+func toChainMsg(msg *types.Message) (ret types.ChainMsg) { + ret = msg + if msg.From.Protocol() == address.SECP256K1 { + ret = &types.SignedMessage{ + Message: *msg, + Signature: crypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: make([]byte, 65), + }, + } + } + return ret +} + +// BaseFeeOrDefault converts a basefee as passed in a test vector (go *big.Int +// type) to an abi.TokenAmount, or if nil it returns the DefaultBaseFee. +func BaseFeeOrDefault(basefee *gobig.Int) abi.TokenAmount { + if basefee == nil { + return DefaultBaseFee + } + return big.NewFromGo(basefee) +} + +// CircSupplyOrDefault converts a circulating supply as passed in a test vector +// (go *big.Int type) to an abi.TokenAmount, or if nil it returns the +// DefaultCirculatingSupply. +func CircSupplyOrDefault(circSupply *gobig.Int) abi.TokenAmount { + if circSupply == nil { + return DefaultCirculatingSupply + } + return big.NewFromGo(circSupply) +} diff --git a/tools/conformance/rand_fixed.go b/tools/conformance/rand_fixed.go new file mode 100644 index 0000000000..4bb6bc569c --- /dev/null +++ b/tools/conformance/rand_fixed.go @@ -0,0 +1,28 @@ +package conformance + +import ( + "context" + + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" +) + +type fixedRand struct{} + +var _ vmcontext.HeadChainRandomness = (*fixedRand)(nil) + +// NewFixedRand creates a test vm.Rand that always returns fixed bytes value +// of utf-8 string 'i_am_random_____i_am_random_____'. +func NewFixedRand() vmcontext.HeadChainRandomness { + return &fixedRand{} +} + +func (r *fixedRand) ChainGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. 
+} + +func (r *fixedRand) ChainGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. +} diff --git a/tools/conformance/rand_record.go b/tools/conformance/rand_record.go new file mode 100644 index 0000000000..4f46e3ab47 --- /dev/null +++ b/tools/conformance/rand_record.go @@ -0,0 +1,131 @@ +package conformance + +import ( + "context" + "fmt" + "sync" + + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + v1api "github.com/filecoin-project/venus/venus-shared/api/chain/v1" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/test-vectors/schema" +) + +type RecordingRand struct { + reporter Reporter + api v1api.FullNode + // once guards the loading of the head tipset. + // can be removed when https://github.com/filecoin-project/lotus/issues/4223 + // is fixed. + once sync.Once + head types.TipSetKey + lk sync.Mutex + recorded schema.Randomness +} + +func (r *RecordingRand) ChainGetRandomnessFromBeacon(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return r.getChainRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) ChainGetRandomnessFromTickets(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return r.getChainRandomness(ctx, pers, round, entropy) +} + +var _ vmcontext.HeadChainRandomness = (*RecordingRand)(nil) + +// NewRecordingRand returns a vm.Rand implementation that proxies calls to a +// full Lotus node via JSON-RPC, and records matching rules and responses so +// they can later be embedded in test vectors. 
+func NewRecordingRand(reporter Reporter, api v1api.FullNode) *RecordingRand { + return &RecordingRand{reporter: reporter, api: api} +} + +func (r *RecordingRand) loadHead() { + head, err := r.api.ChainHead(context.TODO()) + if err != nil { + panic(fmt.Sprintf("could not fetch chain head while fetching randomness: %s", err)) + } + r.head = head.Key() +} + +func (r *RecordingRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + r.once.Do(r.loadHead) + // FullNode's StateGetRandomnessFromTickets handles whether we should be looking forward or back + ret, err := r.api.StateGetRandomnessFromTickets(ctx, pers, round, entropy, r.head) + if err != nil { + return ret, err + } + + r.reporter.Logf("fetched and recorded chain randomness for: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret) + + match := schema.RandomnessMatch{ + On: schema.RandomnessRule{ + Kind: schema.RandomnessChain, + DomainSeparationTag: int64(pers), + Epoch: int64(round), + Entropy: entropy, + }, + Return: []byte(ret), + } + r.lk.Lock() + r.recorded = append(r.recorded, match) + r.lk.Unlock() + + return ret, err +} + +func (r *RecordingRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) 
([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + r.once.Do(r.loadHead) + ret, err := r.api.StateGetRandomnessFromBeacon(ctx, pers, round, entropy, r.head) + if err != nil { + return ret, err + } + + r.reporter.Logf("fetched and recorded beacon randomness for: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret) + + match := schema.RandomnessMatch{ + On: schema.RandomnessRule{ + Kind: schema.RandomnessBeacon, + DomainSeparationTag: int64(pers), + Epoch: int64(round), + Entropy: entropy, + }, + Return: []byte(ret), + } + r.lk.Lock() + r.recorded = append(r.recorded, match) + r.lk.Unlock() + + return ret, err +} + +func (r *RecordingRand) Recorded() schema.Randomness { + r.lk.Lock() + defer r.lk.Unlock() + + return r.recorded +} diff --git a/tools/conformance/rand_replay.go b/tools/conformance/rand_replay.go new file mode 100644 index 0000000000..c60c4d5af7 --- /dev/null +++ b/tools/conformance/rand_replay.go @@ -0,0 +1,88 @@ +package conformance + +import ( + "bytes" + "context" + + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/test-vectors/schema" +) + +type ReplayingRand struct { + reporter Reporter + recorded schema.Randomness + fallback vmcontext.HeadChainRandomness +} + +var _ vmcontext.HeadChainRandomness = (*ReplayingRand)(nil) + +// NewReplayingRand replays recorded randomness when requested, falling back to +// fixed randomness if the value cannot be found; hence this is a safe +// 
backwards-compatible replacement for fixedRand. +func NewReplayingRand(reporter Reporter, recorded schema.Randomness) *ReplayingRand { + return &ReplayingRand{ + reporter: reporter, + recorded: recorded, + fallback: NewFixedRand(), + } +} + +func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) { + for _, other := range r.recorded { + if other.On.Kind == requested.Kind && + other.On.Epoch == requested.Epoch && + other.On.DomainSeparationTag == requested.DomainSeparationTag && + bytes.Equal(other.On.Entropy, requested.Entropy) { + return other.Return, true + } + } + return nil, false +} + +func (r *ReplayingRand) ChainGetRandomnessFromBeacon(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy, false) +} + +func (r *ReplayingRand) ChainGetRandomnessFromTickets(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return r.getChainRandomness(ctx, pers, round, entropy, false) +} + +func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { + rule := schema.RandomnessRule{ + Kind: schema.RandomnessChain, + DomainSeparationTag: int64(pers), + Epoch: int64(round), + Entropy: entropy, + } + + if ret, ok := r.match(rule); ok { + r.reporter.Logf("returning saved chain randomness: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret) + return ret, nil + } + + r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) + + return r.fallback.ChainGetRandomnessFromTickets(ctx, pers, round, entropy) +} + +func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { + rule := schema.RandomnessRule{ 
+ Kind: schema.RandomnessBeacon, + DomainSeparationTag: int64(pers), + Epoch: int64(round), + Entropy: entropy, + } + + if ret, ok := r.match(rule); ok { + r.reporter.Logf("returning saved beacon randomness: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret) + return ret, nil + } + + r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) + + return r.fallback.ChainGetRandomnessFromBeacon(ctx, pers, round, entropy) +} diff --git a/tools/conformance/reporter.go b/tools/conformance/reporter.go new file mode 100644 index 0000000000..1cd2d389db --- /dev/null +++ b/tools/conformance/reporter.go @@ -0,0 +1,62 @@ +package conformance + +import ( + "log" + "os" + "sync/atomic" + "testing" + + "github.com/fatih/color" +) + +// Reporter is a contains a subset of the testing.T methods, so that the +// Execute* functions in this package can be used inside or outside of +// go test runs. +type Reporter interface { + Helper() + + Log(args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Logf(format string, args ...interface{}) + FailNow() + Failed() bool +} + +var _ Reporter = (*testing.T)(nil) + +// LogReporter wires the Reporter methods to the log package. It is appropriate +// to use when calling the Execute* functions from a standalone CLI program. +type LogReporter struct { + failed int32 +} + +var _ Reporter = (*LogReporter)(nil) + +func (*LogReporter) Helper() {} + +func (*LogReporter) Log(args ...interface{}) { + log.Println(args...) +} + +func (*LogReporter) Logf(format string, args ...interface{}) { + log.Printf(format, args...) 
+} + +func (*LogReporter) FailNow() { + os.Exit(1) +} + +func (l *LogReporter) Failed() bool { + return atomic.LoadInt32(&l.failed) == 1 +} + +func (l *LogReporter) Errorf(format string, args ...interface{}) { + atomic.StoreInt32(&l.failed, 1) + log.Println(color.HiRedString("❌ "+format, args...)) +} + +func (l *LogReporter) Fatalf(format string, args ...interface{}) { + atomic.StoreInt32(&l.failed, 1) + log.Fatal(color.HiRedString("❌ "+format, args...)) +} diff --git a/tools/conformance/runner.go b/tools/conformance/runner.go new file mode 100644 index 0000000000..ca0df8afa9 --- /dev/null +++ b/tools/conformance/runner.go @@ -0,0 +1,270 @@ +package conformance + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/base64" + "fmt" + "os" + "os/exec" + "strconv" + + "github.com/filecoin-project/go-state-types/network" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/fatih/color" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/test-vectors/schema" + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + offline "github.com/ipfs/go-ipfs-exchange-offline" + format "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + "github.com/ipld/go-car" + + "github.com/filecoin-project/venus/pkg/vm" +) + +// ExecuteMessageVector executes a message-class test vector. +func ExecuteMessageVector(r Reporter, v string, vector *schema.TestVector, variant *schema.Variant) { + var ( + ctx = context.Background() + baseEpoch = variant.Epoch + nv = network.Version(variant.NetworkVersion) + root = vector.Pre.StateTree.RootCID + ) + + // Load the CAR into a new temporary blockstore. + bs, err := LoadVectorCAR(vector.CAR) + if err != nil { + r.Fatalf("failed to load the vector CAR: %w", err) + } + // Create a new Driver. 
+ driver := NewDriver(ctx, vector.Selector, DriverOpts{DisableVMFlush: true}) + + // Apply every message. + for i, m := range vector.ApplyMessages { + msg, err := types.DecodeMessage(m.Bytes) + if err != nil { + r.Fatalf("failed to deserialize message: %s", err) + } + + // add the epoch offset if one is set. + if m.EpochOffset != nil { + baseEpoch += *m.EpochOffset + } + + // Execute the message. + var ret *vm.Ret + ret, root, err = driver.ExecuteMessage(bs, ExecuteMessageParams{ + Preroot: root, + Epoch: abi.ChainEpoch(baseEpoch), + Message: msg, + BaseFee: BaseFeeOrDefault(vector.Pre.BaseFee), + CircSupply: CircSupplyOrDefault(vector.Pre.CircSupply), + Rand: NewReplayingRand(r, vector.Randomness), + NetworkVersion: nv, + }) + if err != nil { + r.Fatalf("fatal failure when executing message: %s", err) + } + + // Assert that the receipt matches what the test vector expects. + AssertMsgResult(r, vector.Post.Receipts[i], ret, strconv.Itoa(i)) + } + + // Once all messages are applied, assert that the final state root matches + // the expected postcondition root. + if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual { + r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) + dumpThreeWayStateDiff(r, vector, bs, root) + r.FailNow() + } +} + +// ExecuteTipsetVector executes a tipset-class test vector. +func ExecuteTipsetVector(r Reporter, v string, vector *schema.TestVector, variant *schema.Variant) { + var ( + ctx = context.Background() + baseEpoch = abi.ChainEpoch(variant.Epoch) + root = vector.Pre.StateTree.RootCID + tmpds = ds.NewMapDatastore() + ) + + // Load the vector CAR into a new temporary blockstore. + bs, err := LoadVectorCAR(vector.CAR) + if err != nil { + r.Fatalf("failed to load the vector CAR: %w", err) + } + + // Create a new Driver. + driver := NewDriver(ctx, vector.Selector, DriverOpts{}) + + // Apply every tipset. 
+ var receiptsIdx int + prevEpoch := baseEpoch + for i, ts := range vector.ApplyTipsets { + ts := ts // capture + execEpoch := baseEpoch + abi.ChainEpoch(ts.EpochOffset) + ret, err := driver.ExecuteTipset(bs, tmpds, root, prevEpoch, &ts, execEpoch) + if err != nil { + r.Fatalf("failed to apply tipset %d message: %s", i, err) + } + + for j, v := range ret.AppliedResults { + AssertMsgResult(r, vector.Post.Receipts[receiptsIdx], v, fmt.Sprintf("%d of tipset %d", j, i)) + receiptsIdx++ + } + + // Compare the receipts root. + if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual { + r.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual) + } + + prevEpoch = execEpoch + root = ret.PostStateRoot + } + + // Once all messages are applied, assert that the final state root matches + // the expected postcondition root. + if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual { + r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) + dumpThreeWayStateDiff(r, vector, bs, root) + r.FailNow() + } +} + +// AssertMsgResult compares a message result. It takes the expected receipt +// encoded in the vector, the actual receipt returned by Lotus, and a message +// label to log in the assertion failure message to facilitate debugging. 
+func AssertMsgResult(r Reporter, expected *schema.Receipt, actual *vm.Ret, label string) { + r.Helper() + + if expected, actual := exitcode.ExitCode(expected.ExitCode), actual.Receipt.ExitCode; expected != actual { + r.Errorf("exit code of msg %s did not match; expected: %s, got: %s", label, expected, actual) + } + if expected, actual := expected.GasUsed, actual.Receipt.GasUsed; expected != actual { + r.Errorf("gas used of msg %s did not match; expected: %d, got: %d", label, expected, actual) + } + if expected, actual := []byte(expected.ReturnValue), actual.Receipt.Return; !bytes.Equal(expected, actual) { + r.Errorf("return value of msg %s did not match; expected: %s, got: %s", label, base64.StdEncoding.EncodeToString(expected), base64.StdEncoding.EncodeToString(actual)) + } +} + +func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstoreutil.Blockstore, actual cid.Cid) { + // check if statediff exists; if not, skip. + if err := exec.Command("statediff", "--help").Run(); err != nil { + r.Log("could not dump 3-way state tree diff upon test failure: statediff command not found") + r.Log("install statediff with:") + r.Log("$ git clone https://github.com/filecoin-project/statediff.git") + r.Log("$ cd statediff") + r.Log("$ go generate ./...") + r.Log("$ go install ./cmd/statediff") + return + } + + tmpCar, err := writeStateToTempCAR(bs, + vector.Pre.StateTree.RootCID, + vector.Post.StateTree.RootCID, + actual, + ) + if err != nil { + r.Fatalf("failed to write temporary state CAR: %s", err) + } + defer os.RemoveAll(tmpCar) //nolint:errcheck + + color.NoColor = false // enable colouring. 
+ + var ( + a = color.New(color.FgMagenta, color.Bold).Sprint("(A) expected final state") + b = color.New(color.FgYellow, color.Bold).Sprint("(B) actual final state") + c = color.New(color.FgCyan, color.Bold).Sprint("(C) initial state") + d1 = color.New(color.FgGreen, color.Bold).Sprint("[Δ1]") + d2 = color.New(color.FgGreen, color.Bold).Sprint("[Δ2]") + d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]") + ) + + printDiff := func(left, right cid.Cid) { + cmd := exec.Command("statediff", "car", "--file", tmpCar, left.String(), right.String()) + b, err := cmd.CombinedOutput() + if err != nil { + r.Fatalf("statediff failed: %s", err) + } + r.Log(string(b)) + } + + bold := color.New(color.Bold).SprintfFunc() + + // run state diffs. + r.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c)) + + r.Log(bold("--- %s left: %s; right: %s ---", d1, a, b)) + printDiff(vector.Post.StateTree.RootCID, actual) + + r.Log(bold("--- %s left: %s; right: %s ---", d2, c, b)) + printDiff(vector.Pre.StateTree.RootCID, actual) + + r.Log(bold("--- %s left: %s; right: %s ---", d3, c, a)) + printDiff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID) +} + +// writeStateToTempCAR writes the provided roots to a temporary CAR that'll be +// cleaned up via t.Cleanup(). It returns the full path of the temp file. +func writeStateToTempCAR(bs blockstoreutil.Blockstore, roots ...cid.Cid) (string, error) { + tmp, err := os.CreateTemp("", "lotus-tests-*.car") + if err != nil { + return "", fmt.Errorf("failed to create temp file to dump CAR for diffing: %w", err) + } + + carWalkFn := func(nd format.Node) (out []*format.Link, err error) { + for _, link := range nd.Links() { + if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed { + continue + } + // ignore things we don't have, the state tree is incomplete. 
+ if has, err := bs.Has(context.TODO(), link.Cid); err != nil { + return nil, err + } else if has { + out = append(out, link) + } + } + return out, nil + } + + var ( + offl = offline.Exchange(bs) + blkserv = blockservice.New(bs, offl) + dserv = merkledag.NewDAGService(blkserv) + ) + + err = car.WriteCarWithWalker(context.Background(), dserv, roots, tmp, carWalkFn) + if err != nil { + return "", fmt.Errorf("failed to dump CAR for diffing: %w", err) + } + _ = tmp.Close() + return tmp.Name(), nil +} + +func LoadVectorCAR(vectorCAR schema.Base64EncodedBytes) (blockstoreutil.Blockstore, error) { + ds := ds.NewMapDatastore() + bs := blockstoreutil.NewBlockstore(ds) + + // Read the base64-encoded CAR from the vector, and inflate the gzip. + buf := bytes.NewReader(vectorCAR) + r, err := gzip.NewReader(buf) + if err != nil { + return nil, fmt.Errorf("failed to inflate gzipped CAR: %s", err) + } + defer r.Close() // nolint + + // Load the CAR embedded in the test vector into the blockstore. + _, err = car.LoadCar(context.TODO(), bs, r) + if err != nil { + return nil, fmt.Errorf("failed to load state tree car from test vector: %s", err) + } + return bs, nil +} diff --git a/tools/fast/action_actor.go b/tools/fast/action_actor.go deleted file mode 100644 index a3b1fad73e..0000000000 --- a/tools/fast/action_actor.go +++ /dev/null @@ -1,35 +0,0 @@ -package fast - -import ( - "context" - "io" - - "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" -) - -// ActorLs runs the `actor ls` command against the filecoin process. -func (f *Filecoin) ActorLs(ctx context.Context) ([]commands.ActorView, error) { - args := []string{"go-filecoin", "actor", "ls"} - - dec, err := f.RunCmdLDJSONWithStdin(ctx, nil, args...) 
- if err != nil { - return nil, err - } - - views := []commands.ActorView{} - for dec.More() { - var view commands.ActorView - err := dec.Decode(&view) - if err != nil { - if err == io.EOF { - break - } - - return nil, err - } - - views = append(views, view) - } - - return views, nil -} diff --git a/tools/fast/action_address.go b/tools/fast/action_address.go deleted file mode 100644 index fa45468ebe..0000000000 --- a/tools/fast/action_address.go +++ /dev/null @@ -1,38 +0,0 @@ -package fast - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/libp2p/go-libp2p-core/peer" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" -) - -// AddressNew runs the address new command against the filecoin process. -func (f *Filecoin) AddressNew(ctx context.Context) (address.Address, error) { - var newAddress address.Address - if err := f.RunCmdJSONWithStdin(ctx, nil, &newAddress, "go-filecoin", "address", "new"); err != nil { - return address.Undef, err - } - return newAddress, nil -} - -// AddressLs runs the address ls command against the filecoin process. -func (f *Filecoin) AddressLs(ctx context.Context) ([]address.Address, error) { - // the command returns an AddressListResult - var alr commands.AddressLsResult - if err := f.RunCmdJSONWithStdin(ctx, nil, &alr, "go-filecoin", "address", "ls"); err != nil { - return nil, err - } - return alr.Addresses, nil -} - -// AddressLookup runs the address lookup command against the filecoin process. 
-func (f *Filecoin) AddressLookup(ctx context.Context, addr address.Address) (peer.ID, error) { - var ownerPeer peer.ID - if err := f.RunCmdJSONWithStdin(ctx, nil, &ownerPeer, "go-filecoin", "address", "lookup", addr.String()); err != nil { - return "", err - } - return ownerPeer, nil -} diff --git a/tools/fast/action_bootstrap.go b/tools/fast/action_bootstrap.go deleted file mode 100644 index d431dcbd4f..0000000000 --- a/tools/fast/action_bootstrap.go +++ /dev/null @@ -1,20 +0,0 @@ -package fast - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" -) - -// BootstrapLs runs the `bootstrap ls` command against the filecoin process. -func (f *Filecoin) BootstrapLs(ctx context.Context) (*commands.BootstrapLsResult, error) { - var out commands.BootstrapLsResult - args := []string{"go-filecoin", "bootstrap", "ls"} - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - - return &out, nil - -} diff --git a/tools/fast/action_chain.go b/tools/fast/action_chain.go deleted file mode 100644 index ec31b54fea..0000000000 --- a/tools/fast/action_chain.go +++ /dev/null @@ -1,34 +0,0 @@ -package fast - -import ( - "context" - "encoding/json" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" -) - -// ChainHead runs the chain head command against the filecoin process. -func (f *Filecoin) ChainHead(ctx context.Context) ([]cid.Cid, error) { - var out []cid.Cid - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "chain", "head"); err != nil { - return nil, err - } - return out, nil - -} - -// ChainLs runs the chain ls command against the filecoin process. -func (f *Filecoin) ChainLs(ctx context.Context) (*json.Decoder, error) { - return f.RunCmdLDJSONWithStdin(ctx, nil, "go-filecoin", "chain", "ls") -} - -// ChainStatus runs the chain status command against the filecoin process. 
-func (f *Filecoin) ChainStatus(ctx context.Context) (*chain.Status, error) { - var out *chain.Status - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "chain", "status"); err != nil { - return nil, err - } - return out, nil -} diff --git a/tools/fast/action_client.go b/tools/fast/action_client.go deleted file mode 100644 index 360ff41be0..0000000000 --- a/tools/fast/action_client.go +++ /dev/null @@ -1,85 +0,0 @@ -package fast - -import ( - "context" - "encoding/json" - "fmt" - "io" - - "github.com/filecoin-project/go-fil-markets/storagemarket/network" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - files "github.com/ipfs/go-ipfs-files" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" -) - -// ClientCat runs the client cat command against the filecoin process. -// A ReadCloser is returned representing the data. -// TODO(frrist): address buffering in filecoin plugins to exert appropriate backpressure on the -// reader IPTB returns. -func (f *Filecoin) ClientCat(ctx context.Context, cid cid.Cid) (io.ReadCloser, error) { - out, err := f.RunCmdWithStdin(ctx, nil, "go-filecoin", "client", "cat", cid.String()) - if err != nil { - return nil, err - } - return out.Stdout(), err -} - -// ClientImport runs the client import data command against the filecoin process. -func (f *Filecoin) ClientImport(ctx context.Context, data files.File) (cid.Cid, error) { - var out cid.Cid - if err := f.RunCmdJSONWithStdin(ctx, data, &out, "go-filecoin", "client", "import"); err != nil { - return cid.Undef, err - } - return out, nil -} - -// ClientProposeStorageDeal runs the client propose-storage-deal command against the filecoin process. 
-func (f *Filecoin) ClientProposeStorageDeal(ctx context.Context, data cid.Cid, - miner address.Address, ask uint64, duration uint64, options ...ActionOption) (*network.Response, error) { - - var out network.Response - sData := data.String() - sMiner := miner.String() - sAsk := fmt.Sprintf("%d", ask) - sDuration := fmt.Sprintf("%d", duration) - - args := []string{"go-filecoin", "client", "propose-storage-deal", sMiner, sData, sAsk, sDuration} - for _, opt := range options { - args = append(args, opt()...) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - return &out, nil -} - -// ClientQueryStorageDeal runs the client query-storage-deal command against the filecoin process. -func (f *Filecoin) ClientQueryStorageDeal(ctx context.Context, prop cid.Cid) (*network.Response, error) { - var out network.Response - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "client", "query-storage-deal", prop.String()); err != nil { - return nil, err - } - return &out, nil -} - -// ClientVerifyStorageDeal runs the client verify-storage-deal command against the filecoin process. -func (f *Filecoin) ClientVerifyStorageDeal(ctx context.Context, prop cid.Cid) (*commands.VerifyStorageDealResult, error) { - var out commands.VerifyStorageDealResult - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "client", "verify-storage-deal", prop.String()); err != nil { - return nil, err - } - - return &out, nil -} - -// ClientListAsks runs the client list-asks command against the filecoin process. -// A json decoer is returned that asks may be decoded from. 
-func (f *Filecoin) ClientListAsks(ctx context.Context) (*json.Decoder, error) { - return f.RunCmdLDJSONWithStdin(ctx, nil, "go-filecoin", "client", "list-asks") -} diff --git a/tools/fast/action_config.go b/tools/fast/action_config.go deleted file mode 100644 index eb3df93fab..0000000000 --- a/tools/fast/action_config.go +++ /dev/null @@ -1,42 +0,0 @@ -package fast - -import ( - "context" - "encoding/json" - "fmt" -) - -// ConfigGet runs the `config` command against the filecoin process, and decodes the -// output into `v`. -func (f *Filecoin) ConfigGet(ctx context.Context, key string, v interface{}) error { - args := []string{"go-filecoin", "config", key} - - if err := f.RunCmdJSONWithStdin(ctx, nil, v, args...); err != nil { - return err - } - - return nil -} - -// ConfigSet runs the `config` command against the filecoin process, encoding `v` as -// the value. -func (f *Filecoin) ConfigSet(ctx context.Context, key string, v interface{}) error { - value, err := json.Marshal(v) - if err != nil { - return err - } - - args := []string{"go-filecoin", "config", key, string(value)} - - out, err := f.RunCmdWithStdin(ctx, nil, args...) 
- if err != nil { - return err - } - - // check command exit code - if out.ExitCode() > 0 { - return fmt.Errorf("filecoin command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - - return nil -} diff --git a/tools/fast/action_dag.go b/tools/fast/action_dag.go deleted file mode 100644 index 0942dff559..0000000000 --- a/tools/fast/action_dag.go +++ /dev/null @@ -1,20 +0,0 @@ -package fast - -import ( - "context" - - cid "github.com/ipfs/go-cid" -) - -// DagGet runs the `dag get` command against the filecoin process -func (f *Filecoin) DagGet(ctx context.Context, ref cid.Cid) (map[string]interface{}, error) { - var out map[string]interface{} - - sRef := ref.String() - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "dag", "get", sRef); err != nil { - return nil, err - } - - return out, nil -} diff --git a/tools/fast/action_deals.go b/tools/fast/action_deals.go deleted file mode 100644 index e105a5686a..0000000000 --- a/tools/fast/action_deals.go +++ /dev/null @@ -1,17 +0,0 @@ -package fast - -import ( - "context" - "encoding/json" -) - -// DealsList runs the `deals list` command against the filecoin process -func (f *Filecoin) DealsList(ctx context.Context, options ...ActionOption) (*json.Decoder, error) { - args := []string{"go-filecoin", "deals", "list"} - - for _, option := range options { - args = append(args, option()...) - } - - return f.RunCmdLDJSONWithStdin(ctx, nil, args...) 
-} diff --git a/tools/fast/action_dht.go b/tools/fast/action_dht.go deleted file mode 100644 index 2a0fc89616..0000000000 --- a/tools/fast/action_dht.go +++ /dev/null @@ -1,46 +0,0 @@ -package fast - -import ( - "context" - "encoding/json" - "io" - - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/multiformats/go-multiaddr" -) - -// DHTFindPeer runs the `dht findpeer` command against the filecoin process -func (f *Filecoin) DHTFindPeer(ctx context.Context, pid peer.ID) ([]multiaddr.Multiaddr, error) { - decoder, err := f.RunCmdLDJSONWithStdin(ctx, nil, "go-filecoin", "dht", "findpeer", pid.String()) - if err != nil { - return nil, err - } - - var out []multiaddr.Multiaddr - for { - var addr string - if err := decoder.Decode(&addr); err != nil { - if err == io.EOF { - break - } - - return []multiaddr.Multiaddr{}, err - } - - ma, err := multiaddr.NewMultiaddr(addr) - if err != nil { - return []multiaddr.Multiaddr{}, err - } - - out = append(out, ma) - } - - return out, nil -} - -// DHTFindProvs runs the `dht findprovs` command against the filecoin process -func (f *Filecoin) DHTFindProvs(ctx context.Context, key cid.Cid) (*json.Decoder, error) { - args := []string{"go-filecoin", "dht", "findprovs", key.String()} - return f.RunCmdLDJSONWithStdin(ctx, nil, args...) -} diff --git a/tools/fast/action_id.go b/tools/fast/action_id.go deleted file mode 100644 index 64de7dda9f..0000000000 --- a/tools/fast/action_id.go +++ /dev/null @@ -1,23 +0,0 @@ -package fast - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" -) - -// ID runs the `id` command against the filecoin process -func (f *Filecoin) ID(ctx context.Context, options ...ActionOption) (*commands.IDDetails, error) { - var out commands.IDDetails - args := []string{"go-filecoin", "id"} - - for _, option := range options { - args = append(args, option()...) 
- } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - - return &out, nil -} diff --git a/tools/fast/action_inspector.go b/tools/fast/action_inspector.go deleted file mode 100644 index af31484dbd..0000000000 --- a/tools/fast/action_inspector.go +++ /dev/null @@ -1,93 +0,0 @@ -package fast - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/pkg/config" -) - -// InspectAll runs the `inspect all` command against the filecoin process -func (f *Filecoin) InspectAll(ctx context.Context, options ...ActionOption) (*commands.AllInspectorInfo, error) { - var out commands.AllInspectorInfo - - args := []string{"go-filecoin", "inspect", "all"} - - for _, option := range options { - args = append(args, option()...) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - - return &out, nil -} - -// InspectRuntime runs the `inspect runtime` command against the filecoin process -func (f *Filecoin) InspectRuntime(ctx context.Context, options ...ActionOption) (*commands.RuntimeInfo, error) { - var out commands.RuntimeInfo - - args := []string{"go-filecoin", "inspect", "runtime"} - - for _, option := range options { - args = append(args, option()...) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - - return &out, nil -} - -// InspectDisk runs the `inspect disk` command against the filecoin process -func (f *Filecoin) InspectDisk(ctx context.Context, options ...ActionOption) (*commands.DiskInfo, error) { - var out commands.DiskInfo - - args := []string{"go-filecoin", "inspect", "disk"} - - for _, option := range options { - args = append(args, option()...) 
- } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - - return &out, nil -} - -// InspectMemory runs the `inspect memory` command against the filecoin process -func (f *Filecoin) InspectMemory(ctx context.Context, options ...ActionOption) (*commands.MemoryInfo, error) { - var out commands.MemoryInfo - - args := []string{"go-filecoin", "inspect", "memory"} - - for _, option := range options { - args = append(args, option()...) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - - return &out, nil -} - -// InspectConfig runs the `inspect config` command against the filecoin process -func (f *Filecoin) InspectConfig(ctx context.Context, options ...ActionOption) (*config.Config, error) { - var out config.Config - - args := []string{"go-filecoin", "inspect", "config"} - - for _, option := range options { - args = append(args, option()...) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - - return &out, nil -} diff --git a/tools/fast/action_message.go b/tools/fast/action_message.go deleted file mode 100644 index c448a73f0b..0000000000 --- a/tools/fast/action_message.go +++ /dev/null @@ -1,55 +0,0 @@ -package fast - -import ( - "context" - "strconv" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - cid "github.com/ipfs/go-cid" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" -) - -// MessageSend runs the `message send` command against the filecoin process. -func (f *Filecoin) MessageSend(ctx context.Context, target address.Address, method abi.MethodNum, options ...ActionOption) (cid.Cid, error) { - var out commands.MessageSendResult - - args := []string{"go-filecoin", "message", "send"} - - for _, option := range options { - args = append(args, option()...) 
- } - - args = append(args, target.String()) - - if method != builtin.MethodSend { - args = append(args, strconv.Itoa(int(method))) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return cid.Undef, err - } - - return out.Cid, nil -} - -// MessageWait runs the `message wait` command against the filecoin process. -func (f *Filecoin) MessageWait(ctx context.Context, mcid cid.Cid, options ...ActionOption) (commands.WaitResult, error) { - var out commands.WaitResult - - args := []string{"go-filecoin", "message", "wait"} - - for _, option := range options { - args = append(args, option()...) - } - - args = append(args, mcid.String()) - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return commands.WaitResult{}, err - } - - return out, nil -} diff --git a/tools/fast/action_miner.go b/tools/fast/action_miner.go deleted file mode 100644 index 9e2634f8de..0000000000 --- a/tools/fast/action_miner.go +++ /dev/null @@ -1,102 +0,0 @@ -package fast - -import ( - "context" - "math/big" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" -) - -// MinerCreate runs the `miner create` command against the filecoin process -func (f *Filecoin) MinerCreate(ctx context.Context, collateral *big.Int, options ...ActionOption) (address.Address, error) { - var out commands.MinerCreateResult - - args := []string{"go-filecoin", "miner", "create"} - - for _, option := range options { - args = append(args, option()...) 
- } - - args = append(args, collateral.String()) - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return address.Undef, err - } - - return out.Address, nil -} - -// MinerUpdatePeerid runs the `miner update-peerid` command against the filecoin process -func (f *Filecoin) MinerUpdatePeerid(ctx context.Context, minerAddr address.Address, pid peer.ID, options ...ActionOption) (cid.Cid, error) { - var out commands.MinerUpdatePeerIDResult - - args := []string{"go-filecoin", "miner", "update-peerid"} - - for _, option := range options { - args = append(args, option()...) - } - - args = append(args, minerAddr.String(), pid.Pretty()) - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return cid.Undef, err - } - - return out.Cid, nil -} - -// MinerStatus runs the `miner power` command against the filecoin process -func (f *Filecoin) MinerStatus(ctx context.Context, minerAddr address.Address) (porcelain.MinerStatus, error) { - var out porcelain.MinerStatus - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "miner", "status", minerAddr.String()); err != nil { - return porcelain.MinerStatus{}, err - } - - return out, nil -} - -// MinerSetPrice runs the `miner set-price` command against the filecoin process -func (f *Filecoin) MinerSetPrice(ctx context.Context, fil *big.Float, expiry *big.Int, options ...ActionOption) (*porcelain.MinerSetPriceResponse, error) { - var out commands.MinerSetPriceResult - - sFil := fil.Text('f', -1) - - args := []string{"go-filecoin", "miner", "set-price"} - - for _, option := range options { - args = append(args, option()...) 
- } - - args = append(args, sFil, expiry.String()) - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - - return &porcelain.MinerSetPriceResponse{ - MinerAddr: out.MinerAddress, - Price: out.Price, - }, nil -} - -// MinerSetWorker runs the `miner set-worker` command against the filecoin process -func (f *Filecoin) MinerSetWorker(ctx context.Context, newAddr address.Address, options ...ActionOption) (cid.Cid, error) { - var out cid.Cid - - args := []string{"go-filecoin", "miner", "set-worker", newAddr.String()} - - for _, option := range options { - args = append(args, option()...) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return out, err - } - return out, nil -} diff --git a/tools/fast/action_miner_test.go b/tools/fast/action_miner_test.go deleted file mode 100644 index f55b9a2bd9..0000000000 --- a/tools/fast/action_miner_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package fast_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/fastesting" -) - -func TestFilecoin_MinerPower(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("not working") - - ctx, env := fastesting.NewTestEnvironment(context.Background(), t, fast.FilecoinOpts{}) - defer func() { - require.NoError(t, env.Teardown(ctx)) - }() - - expectedGenesisPower := uint64(131072) - assertPowerOutput(ctx, t, env.GenesisMiner, expectedGenesisPower, expectedGenesisPower) - - // TODO 3642 this test should check that miner's created with miner create have power -} - -func requireGetMinerAddress(ctx context.Context, t *testing.T, daemon *fast.Filecoin) address.Address { - var minerAddress address.Address - err := daemon.ConfigGet(ctx, 
"mining.minerAddress", &minerAddress) - require.NoError(t, err) - return minerAddress -} - -func assertPowerOutput(ctx context.Context, t *testing.T, d *fast.Filecoin, expMinerPwr, expTotalPwr uint64) { - minerAddr := requireGetMinerAddress(ctx, t, d) - status, err := d.MinerStatus(ctx, minerAddr) - require.NoError(t, err) - assert.Equal(t, expMinerPwr, status.QualityAdjustedPower.Uint64(), "for miner power") - assert.Equal(t, expTotalPwr, status.NetworkQualityAdjustedPower.Uint64(), "for total power") -} diff --git a/tools/fast/action_mining.go b/tools/fast/action_mining.go deleted file mode 100644 index 681d11fa14..0000000000 --- a/tools/fast/action_mining.go +++ /dev/null @@ -1,99 +0,0 @@ -package fast - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-address" - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" -) - -// MiningOnce runs the `mining once` command against the filecoin process -func (f *Filecoin) MiningOnce(ctx context.Context) (*block.Block, error) { - var out block.Block - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "mining", "once"); err != nil { - return nil, err - } - - return &out, nil -} - -// MiningSetup prepares the node to receive storage deals -func (f *Filecoin) MiningSetup(ctx context.Context) error { - out, err := f.RunCmdWithStdin(ctx, nil, "go-filecoin", "mining", "setup") - if err != nil { - return err - } - - if out.ExitCode() > 0 { - return fmt.Errorf("filecoin command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - - return nil -} - -// MiningStart runs the `mining Start` command against the filecoin process -func (f *Filecoin) MiningStart(ctx context.Context) error { - out, err := f.RunCmdWithStdin(ctx, nil, "go-filecoin", "mining", "start") - if err != nil { - return err - } - - if out.ExitCode() > 0 { - return fmt.Errorf("filecoin command: %s, exited with non-zero exitcode: %d", 
out.Args(), out.ExitCode()) - } - - return nil -} - -// MiningStop runs the `mining stop` command against the filecoin process -func (f *Filecoin) MiningStop(ctx context.Context) error { - out, err := f.RunCmdWithStdin(ctx, nil, "go-filecoin", "mining", "stop") - if err != nil { - return err - } - - if out.ExitCode() > 0 { - return fmt.Errorf("filecoin command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - - return nil -} - -// MiningAddress runs the `mining address` command against the filecoin process -func (f *Filecoin) MiningAddress(ctx context.Context) (address.Address, error) { - var out address.Address - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "mining", "address"); err != nil { - return address.Undef, err - } - - return out, nil -} - -// MiningStatus runs the `mining status` command against the filecoin process -func (f *Filecoin) MiningStatus(ctx context.Context) (commands.MiningStatusResult, error) { - var out commands.MiningStatusResult - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "mining", "status"); err != nil { - return commands.MiningStatusResult{}, err - } - - return out, nil -} - -// SealNow seals any staged sectors -func (f *Filecoin) SealNow(ctx context.Context) error { - out, err := f.RunCmdWithStdin(ctx, nil, "go-filecoin", "mining", "seal-now") - if err != nil { - return err - } - - if out.ExitCode() > 0 { - return fmt.Errorf("filecoin command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - - return nil -} diff --git a/tools/fast/action_mpool.go b/tools/fast/action_mpool.go deleted file mode 100644 index 0f9924c759..0000000000 --- a/tools/fast/action_mpool.go +++ /dev/null @@ -1,24 +0,0 @@ -package fast - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// MpoolLs runs the `mpool ls` command against the filecoin process. 
-func (f *Filecoin) MpoolLs(ctx context.Context, options ...ActionOption) ([]*types.SignedMessage, error) { - var out []*types.SignedMessage - - args := []string{"go-filecoin", "mpool", "ls"} - - for _, option := range options { - args = append(args, option()...) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return []*types.SignedMessage{}, err - } - - return out, nil -} diff --git a/tools/fast/action_ping.go b/tools/fast/action_ping.go deleted file mode 100644 index 9870ecb311..0000000000 --- a/tools/fast/action_ping.go +++ /dev/null @@ -1,45 +0,0 @@ -package fast - -import ( - "context" - "io" - - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" -) - -// Ping runs the `ping` command against the filecoin process -func (f *Filecoin) Ping(ctx context.Context, pid peer.ID, options ...ActionOption) ([]commands.PingResult, error) { - sPid := pid.Pretty() - - args := []string{"go-filecoin", "ping"} - - for _, option := range options { - args = append(args, option()...) - } - - args = append(args, sPid) - - decoder, err := f.RunCmdLDJSONWithStdin(ctx, nil, args...) 
- if err != nil { - return nil, err - } - - var out []commands.PingResult - - for { - var result commands.PingResult - if err := decoder.Decode(&result); err != nil { - if err == io.EOF { - break - } - - return []commands.PingResult{}, err - } - - out = append(out, result) - } - - return out, nil -} diff --git a/tools/fast/action_protocol.go b/tools/fast/action_protocol.go deleted file mode 100644 index 060e93555d..0000000000 --- a/tools/fast/action_protocol.go +++ /dev/null @@ -1,18 +0,0 @@ -package fast - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" -) - -// Protocol runs the `protocol` command against the filecoin process -func (f *Filecoin) Protocol(ctx context.Context) (*porcelain.ProtocolParams, error) { - var out porcelain.ProtocolParams - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "protocol"); err != nil { - return nil, err - } - - return &out, nil -} diff --git a/tools/fast/action_retrieval_client.go b/tools/fast/action_retrieval_client.go deleted file mode 100644 index 5eb4c29523..0000000000 --- a/tools/fast/action_retrieval_client.go +++ /dev/null @@ -1,22 +0,0 @@ -package fast - -import ( - "context" - "fmt" - "io" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" -) - -// RetrievalClientRetrievePiece runs the retrieval-client retrieve-piece commands against the filecoin process. 
-func (f *Filecoin) RetrievalClientRetrievePiece(ctx context.Context, pieceCID cid.Cid, minerAddr address.Address) (io.ReadCloser, error) { - out, err := f.RunCmdWithStdin(ctx, nil, "go-filecoin", "retrieval-client", "retrieve-piece", minerAddr.String(), pieceCID.String()) - if err != nil { - return nil, err - } - if out.ExitCode() > 0 { - return nil, fmt.Errorf("filecoin command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - return out.Stdout(), nil -} diff --git a/tools/fast/action_show.go b/tools/fast/action_show.go deleted file mode 100644 index 4d722def50..0000000000 --- a/tools/fast/action_show.go +++ /dev/null @@ -1,50 +0,0 @@ -package fast - -import ( - "context" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" -) - -// ShowHeader runs the `show header` command against the filecoin process -func (f *Filecoin) ShowHeader(ctx context.Context, ref cid.Cid) (*block.Block, error) { - var out block.Block - - sRef := ref.String() - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "show", "header", sRef); err != nil { - return nil, err - } - - return &out, nil -} - -// ShowMessages runs the `show messages` command against the filecoin process -func (f *Filecoin) ShowMessages(ctx context.Context, ref cid.Cid) ([]*types.SignedMessage, error) { - var out []*types.SignedMessage - - sRef := ref.String() - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "show", "messages", sRef); err != nil { - return nil, err - } - - return out, nil -} - -// ShowReceipts runs the `show receipts` command against the filecoin process -func (f *Filecoin) ShowReceipts(ctx context.Context, ref cid.Cid) ([]vm.MessageReceipt, error) { - var out []vm.MessageReceipt - - sRef := ref.String() - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, "go-filecoin", "show", 
"receipts", sRef); err != nil { - return nil, err - } - - return out, nil -} diff --git a/tools/fast/action_swarm.go b/tools/fast/action_swarm.go deleted file mode 100644 index 44880df128..0000000000 --- a/tools/fast/action_swarm.go +++ /dev/null @@ -1,44 +0,0 @@ -package fast - -import ( - "context" - - "github.com/libp2p/go-libp2p-core/peer" - "github.com/multiformats/go-multiaddr" - - "github.com/filecoin-project/go-filecoin/internal/pkg/net" -) - -// SwarmConnect runs the `swarm connect` command against the filecoin process -func (f *Filecoin) SwarmConnect(ctx context.Context, addrs ...multiaddr.Multiaddr) (peer.ID, error) { - var out peer.ID - - args := []string{"go-filecoin", "swarm", "connect"} - - for _, addr := range addrs { - args = append(args, addr.String()) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return peer.ID(""), err - } - - return out, nil -} - -// SwarmPeers runs the `swarm peers` command against the filecoin process -func (f *Filecoin) SwarmPeers(ctx context.Context, options ...ActionOption) ([]net.SwarmConnInfo, error) { - var out net.SwarmConnInfos - - args := []string{"go-filecoin", "swarm", "peers"} - - for _, option := range options { - args = append(args, option()...) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &out, args...); err != nil { - return nil, err - } - - return out.Peers, nil -} diff --git a/tools/fast/action_wallet.go b/tools/fast/action_wallet.go deleted file mode 100644 index 8f7b374ec4..0000000000 --- a/tools/fast/action_wallet.go +++ /dev/null @@ -1,51 +0,0 @@ -package fast - -import ( - "context" - "strings" - - "github.com/filecoin-project/go-address" - files "github.com/ipfs/go-ipfs-files" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" -) - -// WalletBalance run the wallet balance command against the filecoin process. 
-func (f *Filecoin) WalletBalance(ctx context.Context, addr address.Address) (types.AttoFIL, error) { - var balance types.AttoFIL - if err := f.RunCmdJSONWithStdin(ctx, nil, &balance, "go-filecoin", "wallet", "balance", addr.String()); err != nil { - return types.ZeroAttoFIL, err - } - return balance, nil -} - -// WalletImport run the wallet import command against the filecoin process. -func (f *Filecoin) WalletImport(ctx context.Context, file files.File) ([]address.Address, error) { - // the command returns an AddressListResult - var alr commands.AddressLsResult - if err := f.RunCmdJSONWithStdin(ctx, file, &alr, "go-filecoin", "wallet", "import"); err != nil { - return nil, err - } - return alr.Addresses, nil -} - -// WalletExport run the wallet export command against the filecoin process. -func (f *Filecoin) WalletExport(ctx context.Context, addrs []address.Address) ([]*crypto.KeyInfo, error) { - // the command returns an KeyInfoListResult - var klr commands.WalletSerializeResult - // we expect to interact with an array of KeyInfo(s) - var out []*crypto.KeyInfo - var sAddrs []string - for _, a := range addrs { - sAddrs = append(sAddrs, a.String()) - } - - if err := f.RunCmdJSONWithStdin(ctx, nil, &klr, "go-filecoin", "wallet", "export", strings.Join(sAddrs, " ")); err != nil { - return nil, err - } - - // transform the KeyInfoListResult to an array of KeyInfo(s) - return append(out, klr.KeyInfo...), nil -} diff --git a/tools/fast/bin/localnet/README.md b/tools/fast/bin/localnet/README.md deleted file mode 100644 index 99a50cc444..0000000000 --- a/tools/fast/bin/localnet/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# localnet - -localnet is a FAST binary tool that quickly and easily, sets up a local network -on the users computer. The network will stay standing till the program is closed. - -### Example - -``` -localnet -shell -``` - -### Building - -The localnet tool expects that you can already build `go-filecoin`. 
Please refer -to the README in the root of this project for details. - -localnet is only compatible with `go-filecoin` binaries built from the same git ref. - -``` -go build -o localnet main.go -``` - -### Usage - -``` -Usage of ./localnet: - -binpath go-filecoin - set the binary used when executing go-filecoin commands - -blocktime duration - duration for blocktime (default 5s) - -miner-collateral string - amount of fil each miner will use for collateral (default "500") - -miner-count int - number of miners (default 5) - -miner-expiry string - expiry value used when creating ask for miners (default "86400") - -miner-price string - price value used when creating ask for miners (default "0.0000000010") - -shell - setup a filecoin client node and enter into a shell ready to use - -small-sectors - enables small sectors (default true) - -workdir string - set the working directory used to store filecoin repos -``` - -### Addional notes from the author - -The default settings are pretty close to what the devnets run. The tool defaults -to small sectors, but that can be changed by passing `-small-sectors=false`. To -make it a bit easier to use, there is also a `-shell` flag that can be passed -which will drop the user into a shell with a go-filecoin daemon already running -and ready to be used with `go-filecoin`. - -_Note: Using regular sized sectors with localnet can be incredibly taxing on a -system and should probably be avoided on laptops due to the number of miners -running. The overall miner count can be reduced from the default `5` by passing -the `-miner-count` flag._ - -``` -localnet $ ./localnet -small-sectors=false -shell -``` - -_I ran `./localnet -small-sectors=false -miner-count=2` on my laptop ( i7-8550U -CPU @ 1.80GHz / 16GB Ram) and it took just under 40 minutes, the equivalent with -small sectors took 2 minutes._ - -A few helpful things to note when working with localnet -1. All nodes and filecoin repositories will be cleaned up if the program exits. 
- The program will not exit till it receives a `SIGTERM` (ctrl-c) -2. Every command that is ran will be printed to the output, along with which node - ran it. The nodes repository is also printed first. - - `08:51:48.014 INFO /tmp/local: RunCmd: /tmp/localnet417209521/0 [go-filecoin ...` -3. The stdout and stderr are written to disk under the repository directory - - **stderr** `/tmp/localnet417209521/0/daemon.stderr` - - **stdout** `/tmp/localnet417209521/0/daemon.stdout` -4. The localnet tool will copy the `go-filecoin` binary specifed by `binpath` and - place it in a `bin` directory under each nodes repository which is used to execute - all commands. To ensure binary compatibility, it's best to execute this same binary -5. You can run commands against any of the nodes by using the `-repodir` flag with - the go-filecoin binary - - `/tmp/localnet417209521/0/bin/go-filecoin -repodir=/tmp/localnet417209521/0 id` - diff --git a/tools/fast/bin/localnet/main.go b/tools/fast/bin/localnet/main.go deleted file mode 100644 index f0e33b07af..0000000000 --- a/tools/fast/bin/localnet/main.go +++ /dev/null @@ -1,444 +0,0 @@ -package main - -// localnet -// -// localnet is a FAST binary tool that quickly and easily, sets up a local network -// on the users computer. The network will stay standing till the program is closed. 
- -import ( - "bytes" - "context" - "crypto/rand" - flg "flag" - "fmt" - "io" - "io/ioutil" - "math/big" - "os" - "os/exec" - "os/signal" - "path/filepath" - "runtime/debug" - "syscall" - "time" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" - files "github.com/ipfs/go-ipfs-files" - logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/environment" - "github.com/filecoin-project/go-filecoin/tools/fast/series" - lpfc "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/local" -) - -var ( - workdir string - binpath string - shell bool - blocktime = 5 * time.Second - err error - fil = 100000 - balance big.Int - smallSectors = true - minerCount = 5 - minerCollateral = big.NewInt(500) - minerPrice = big.NewFloat(0.000000001) - minerExpiry = big.NewInt(24 * 60 * 60) - - exitcode int - - flag = flg.NewFlagSet(os.Args[0], flg.ExitOnError) -) - -func init() { - - logging.SetDebugLogging() - - var ( - err error - minerCollateralArg = minerCollateral.Text(10) - minerPriceArg = minerPrice.Text('f', 10) - minerExpiryArg = minerExpiry.Text(10) - ) - - // We default to the binary built in the project directory, fallback - // to searching path. - binpath, err = getFilecoinBinary() - if err != nil { - // Look for `go-filecoin` in the path to set `binpath` default - // If the binary is not found, an error will be returned. If the - // error is ErrNotFound we ignore it. 
- // Error is handled after flag parsing so help can be shown without - // erroring first - binpath, err = exec.LookPath("go-filecoin") - if err != nil { - xerr, ok := err.(*exec.Error) - if ok && xerr.Err == exec.ErrNotFound { - err = nil - } - } - } - - flag.StringVar(&workdir, "workdir", workdir, "set the working directory used to store filecoin repos") - flag.StringVar(&binpath, "binpath", binpath, "set the binary used when executing `go-filecoin` commands") - flag.BoolVar(&shell, "shell", shell, "setup a filecoin client node and enter into a shell ready to use") - flag.BoolVar(&smallSectors, "small-sectors", smallSectors, "enables small sectors") - flag.DurationVar(&blocktime, "blocktime", blocktime, "duration for blocktime") - flag.IntVar(&minerCount, "miner-count", minerCount, "number of miners") - flag.StringVar(&minerCollateralArg, "miner-collateral", minerCollateralArg, "amount of fil each miner will use for collateral") - flag.StringVar(&minerPriceArg, "miner-price", minerPriceArg, "price value used when creating ask for miners") - flag.StringVar(&minerExpiryArg, "miner-expiry", minerExpiryArg, "expiry value used when creating ask for miners") - - // ExitOnError is set - flag.Parse(os.Args[1:]) // nolint: errcheck - - // If we failed to find `go-filecoin` and it was not set, handle the error - if len(binpath) == 0 { - msg := "failed when checking for `go-filecoin` binary;" - if err == nil { - err = fmt.Errorf("no binary provided or found") - msg = "please install or build `go-filecoin`;" - } - - handleError(err, msg) - os.Exit(1) - } - - _, ok := minerCollateral.SetString(minerCollateralArg, 10) - if !ok { - handleError(fmt.Errorf("could not parse miner-collateral")) - os.Exit(1) - } - - _, ok = minerPrice.SetString(minerPriceArg) - if !ok { - handleError(fmt.Errorf("could not parse miner-price")) - os.Exit(1) - } - - _, ok = minerExpiry.SetString(minerExpiryArg, 10) - if !ok { - handleError(fmt.Errorf("could not parse miner-expiry")) - os.Exit(1) - } - 
- // Set the initial balance - balance.SetInt64(int64(100 * fil)) -} - -func main() { - ctx, cancel := context.WithCancel(context.Background()) - - exit := make(chan struct{}, 1) - - go func() { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt, syscall.SIGTERM) - <-signals - fmt.Println("Ctrl-C received, starting shutdown") - cancel() - exit <- struct{}{} - }() - - defer func() { - if r := recover(); r != nil { - fmt.Println("recovered from panic", r) - fmt.Println("stacktrace from panic: \n" + string(debug.Stack())) - exitcode = 1 - } - os.Exit(exitcode) - }() - - if len(workdir) == 0 { - workdir, err = ioutil.TempDir("", "localnet") - if err != nil { - exitcode = handleError(err) - return - } - } - - if ok, err := isEmpty(workdir); !ok { - if err == nil { - err = fmt.Errorf("workdir is not empty: %s", workdir) - } - - exitcode = handleError(err, "fail when checking workdir;") - return - } - - env, err := environment.NewMemoryGenesis(&balance, workdir) - if err != nil { - exitcode = handleError(err) - return - } - - // Defer the teardown, this will shuteverything down for us - defer env.Teardown(ctx) // nolint: errcheck - - // Setup localfilecoin plugin options - options := make(map[string]string) - options[lpfc.AttrLogJSON] = "0" // Disable JSON logs - options[lpfc.AttrLogLevel] = "4" // Set log level to Info - options[lpfc.AttrFilecoinBinary] = binpath // Use the repo binary - - genesisURI := env.GenesisCar() - genesisMiner, err := env.GenesisMiner() - if err != nil { - exitcode = handleError(err, "failed to retrieve miner information from genesis;") - return - } - - fastenvOpts := fast.FilecoinOpts{ - InitOpts: []fast.ProcessInitOption{fast.POGenesisFile(genesisURI)}, - DaemonOpts: []fast.ProcessDaemonOption{fast.POBlockTime(blocktime)}, - } - - ctx = series.SetCtxSleepDelay(ctx, blocktime) - - // The genesis process is the filecoin node that loads the miner that is - // define with power in the genesis block, and the prefunnded wallet 
- genesis, err := env.NewProcess(ctx, lpfc.PluginName, options, fastenvOpts) - if err != nil { - exitcode = handleError(err, "failed to create genesis process;") - return - } - - err = series.SetupGenesisNode(ctx, genesis, genesisMiner.Address, files.NewReaderFile(genesisMiner.Owner)) - if err != nil { - exitcode = handleError(err, "failed series.SetupGenesisNode;") - return - } - - if err := genesis.MiningStart(ctx); err != nil { - exitcode = handleError(err, "failed to start mining on genesis node;") - return - } - - // Create the processes that we will use to become miners - var miners []*fast.Filecoin - for i := 0; i < minerCount; i++ { - miner, err := env.NewProcess(ctx, lpfc.PluginName, options, fastenvOpts) - if err != nil { - exitcode = handleError(err, "failed to create miner process;") - return - } - - miners = append(miners, miner) - } - - // We will now go through the process of creating miners - // InitAndStart - // 1. Initialize node - // 2. Start daemon - // - // Connect - // 3. Connect to genesis - // - // SendFilecoinDefaults - // 4. Issue FIL to node - // - // CreateStorageMinerWithAsk - // 5. Create a new miner - // 6. Set the miner price, and get ask - // - // ImportAndStore - // 7. Generated some random data and import it to genesis - // 8. Genesis proposes a storage deal with miner - // - // WaitForDealState - // 9. 
Query deal till complete - - var deals []*network.Response - - for _, miner := range miners { - err = series.InitAndStart(ctx, miner) - if err != nil { - exitcode = handleError(err, "failed series.InitAndStart;") - return - } - - err = series.Connect(ctx, genesis, miner) - if err != nil { - exitcode = handleError(err, "failed series.Connect;") - return - } - - err = series.SendFilecoinDefaults(ctx, genesis, miner, fil) - if err != nil { - exitcode = handleError(err, "failed series.SendFilecoinDefaults;") - return - } - - pparams, err := miner.Protocol(ctx) - if err != nil { - exitcode = handleError(err, "failed to get protocol;") - return - } - - sinfo := pparams.SupportedSectors[0] - - ask, err := series.CreateStorageMinerWithAsk(ctx, miner, minerCollateral, minerPrice, minerExpiry, sinfo.Size) - if err != nil { - exitcode = handleError(err, "failed series.CreateStorageMinerWithAsk;") - return - } - - if err := miner.MiningStart(ctx); err != nil { - exitcode = handleError(err, "failed miner.MiningStart;") - return - } - - var data bytes.Buffer - dataReader := io.LimitReader(rand.Reader, int64(sinfo.MaxPieceSize)) - dataReader = io.TeeReader(dataReader, &data) - _, deal, err := series.ImportAndStore(ctx, genesis, ask, files.NewReaderFile(dataReader)) - if err != nil { - exitcode = handleError(err, "failed series.ImportAndStore;") - return - } - - deals = append(deals, deal) - } - - for _, deal := range deals { - _, err = series.WaitForDealState(ctx, genesis, deal, storagemarket.StorageDealActive) - if err != nil { - exitcode = handleError(err, "failed series.WaitForDealState;") - return - } - } - - if shell { - client, err := env.NewProcess(ctx, lpfc.PluginName, options, fastenvOpts) - if err != nil { - exitcode = handleError(err, "failed to create client process;") - return - } - - err = series.InitAndStart(ctx, client) - if err != nil { - exitcode = handleError(err, "failed series.InitAndStart;") - return - } - - err = series.Connect(ctx, genesis, client) - if 
err != nil { - exitcode = handleError(err, "failed series.Connect;") - return - } - - err = series.SendFilecoinDefaults(ctx, genesis, client, fil) - if err != nil { - exitcode = handleError(err, "failed series.SendFilecoinDefaults;") - return - } - - interval, err := client.StartLogCapture() - if err != nil { - exitcode = handleError(err, "failed to start log capture;") - return - } - - if err := client.Shell(); err != nil { - exitcode = handleError(err, "failed to run client shell;") - return - } - - interval.Stop() - fmt.Println("===================================") - fmt.Println("===================================") - io.Copy(os.Stdout, interval) // nolint: errcheck - fmt.Println("===================================") - fmt.Println("===================================") - } - - fmt.Println("Finished!") - var nodeDetails []*commands.IDDetails - nodes := env.Processes() - for _, node := range nodes { - details, err := node.ID(ctx) - if err != nil { - exitcode = handleError(err, "failed to fetch details of node") - return - } - - nodeDetails = append(nodeDetails, details) - } - - fmt.Printf("Genesis %s\n", genesisURI) - for i, details := range nodeDetails { - for _, addr := range details.Addresses { - fmt.Printf("node %d addr: %s\n", i, addr) - } - } - - fmt.Println("Ctrl-C to exit") - - <-exit -} - -func handleError(err error, msg ...string) int { - if err == nil { - return 0 - } - - if len(msg) != 0 { - fmt.Println(msg[0], err) - } else { - fmt.Println(err) - } - - return 1 -} - -// https://stackoverflow.com/a/30708914 -func isEmpty(name string) (bool, error) { - f, err := os.Open(name) - if err != nil { - return false, err - } - defer f.Close() // nolint: errcheck - - _, err = f.Readdirnames(1) // Or f.Readdir(1) - if err == io.EOF { - return true, nil - } - return false, err // Either not empty or error, suits both cases -} - -func getFilecoinBinary() (string, error) { - gopath, err := getGoPath() - if err != nil { - return "", err - } - - bin := 
filepath.Join(gopath, "/src/github.com/filecoin-project/go-filecoin/go-filecoin") - _, err = os.Stat(bin) - if err != nil { - return "", err - } - - if os.IsNotExist(err) { - return "", err - } - - return bin, nil -} - -func getGoPath() (string, error) { - gp := os.Getenv("GOPATH") - if gp != "" { - return gp, nil - } - - home, err := homedir.Dir() - if err != nil { - return "", err - } - - return filepath.Join(home, "go"), nil -} diff --git a/tools/fast/docs/SHELL.md b/tools/fast/docs/SHELL.md deleted file mode 100644 index 1a87ccaba9..0000000000 --- a/tools/fast/docs/SHELL.md +++ /dev/null @@ -1,83 +0,0 @@ -### The FAST Shell - -FAST has a [`Shell`](https://godoc.org/github.com/filecoin-project/go-filecoin/tools/fast#Filecoin.Shell) which can be used to start the users shell with the environment setup to run `go-filecoin` commands against the process it is called on. - -The exact environment the shell will have is largely dependent on the plugin use are using. -Generally, this will be the Filecoin localplugin when writing tests using FAST. - -The Filecoin localplugin shell environment will have the following variables set in the shell for the user. - -| Name | Description | -|:---|:---| -| `FIL_PATH` | The value is set to the repository directory for the Filecoin node. Any `go-filecoin` commands ran in the shell will be executed against the Filecoin process for which the Shell method was invoked on in FAST. | -| `FIL_PID` | The value is set to the PID for the Filecoin daemon. | -| `FIL_BINARY` | The value is set to the binary running the Filecoin daemon. Please refer to the section below on `PATH` for more details. | -| `PATH` | The users `PATH` will be updated to include a location that contains the binary used for executing all `go-filecoin` commands. The `go-filecoin` binary included in this location itself is defined by either the value of the `localplugin.AttrFilecoinBinary`, or the first `go-filecoin` binary found in the users `PATH`.

_Note: The value of `FIL_BINARY` will not be the exact value. During node setup, the binary is copied to ensure it does not change during execution. `FIL_BINARY` will be this new path._ | - -The information around the `PATH` seems a little complex, but it's to ensure that there are no issues as a result of mixing binaries. -This has the advantage that while using FAST, users can re-compile `go-filecoin` without affecting constructed nodes. -It should be noted that the copying of the binary occurs during the call to `NewProcess`. - -It should also be noted that users shell configuration will be ran when the shell opens. -If shell configuration updates the `PATH` by appending to the front, if any of those directories contain `go-filecoin`, then the `go-filecoin` command inside of the FAST Shell will **not** point to the correct binary, because of this, it is best to actually execute commands using the `$FIL_BINARY` variable (eg: `$FIL_BINARY id`). - -### Using a FAST Shell in _go test_ - -The FAST Shell should only be used when running a single test. - -_Note: When using the FAST Shell, it's best to increase deadlines set for tests, as it's very easy to exceed them, and you will be kicked out of your shell._ -```diff -- ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second)) -+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Day)) -``` - -When using FAST inside of go tests, `stdin` is not properly configured to the users shell. -This results in the shell exiting immediately, with no error. -To resolve this issue the Filecoin localplugin will attach the fd defined by the environment variables `TTY` to the shell it opens. -This will allows the user to define which TTY to use for stdin. Generally this should be the same TTY of the shell `go test` is ran in. -This can easily be set by using the `tty` program. 
- -```shell -$ env TTY=$(tty) go test -v -run TestFilecoin -``` - -Test execution can be paused, much like a breakpoint, by dropping a call to `node.Shell()` anywhere _after_ the daemon has been started. -It is used to wrap the call of in a `require.NoError` which will ensure the test fails quickly if the expected shell has an issue starting. -You can also kill the test by exiting the shell with `exit 1` as the shell will return a non-zero exit code as an error. - -```go -require.NoError(node.Shell()) -``` - -While in the shell, no daemons are paused, but further test code execution is paused till the shell exits. - -#### FAST Shell use cases - -##### Attaching a debugger to the node - -The shell environment provides the daemon pid and binary through `FIL_PID` and `FIL_BINARY` respectfully. -A debugger, such as `dlv`, can be attached using these values. - -```shell -$ dlv $FIL_PID $FIL_BINARY -``` - -You won't be able to continue the test if the debugger is attached in the shell, but the values can be easily printed, and the debugger opened in a new shell. -Once you have everything setup and are ready to continue the test, simply exit the shell and the test will continue. - -##### Capturing daemon logs produced during shell use. - -Tests can be used to get nodes into certain state. -You may want to then use a shell to execute additional commands to debug an issue. -You may have added additional logging you want to look at, or look at existing logging that will be produced by commands you run. - -FAST Provides a [`StartLogCapture`](https://godoc.org/github.com/filecoin-project/go-filecoin/tools/fast#Filecoin.StartLogCapture) which will capture all output written to the daemons `stderr` until `Stop` is called. -The captured logs are stored in the return value of `StartLogCapture`, which can be copied to any `io.Writer`. 
- -```go -interval, err := node.StartLogCapture() -require.NoError(err) -require.NoError(node.Shell()) -interval.Stop() -io.Copy(os.Stdout, interval) -``` diff --git a/tools/fast/environment/environment.go b/tools/fast/environment/environment.go deleted file mode 100644 index db7a15c54a..0000000000 --- a/tools/fast/environment/environment.go +++ /dev/null @@ -1,68 +0,0 @@ -package environment - -import ( - "context" - "errors" - "io" - - "github.com/filecoin-project/go-address" - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// ErrNoGenesisMiner is returned by GenesisMiner if the environment does not -// support providing a genesis miner. -var ErrNoGenesisMiner = errors.New("GenesisMiner not supported") - -// GenesisMiner contains the required information to setup a node as a genesis -// node. -type GenesisMiner struct { - // Address is the address of the miner on chain - Address address.Address - - // Owner is the private key of the wallet which is assoiated with the miner - Owner io.Reader -} - -// Environment defines the interface common among all environments that the -// FAST lib can work across. It helps smooth out the differences by providing -// a common ground to work from -type Environment interface { - // GenesisCar returns a location to the genesis.car file. This can be - // either an absolute path to a file on disk, or more commonly an http(s) - // url. - GenesisCar() string - - // GenesisMiner returns a structure which contains all the required - // information to load the existing miner that is defined in the - // genesis block. An ErrNoGenesisMiner may be returned if the environment - // does not support providing genesis miner information. - GenesisMiner() (*GenesisMiner, error) - - // Log returns a logger for the environment - Log() logging.EventLogger - - // NewProcess makes a new process for the environment. 
This doesn't - // always mean a new filecoin node though, NewProcess for some - // environments may create a Filecoin process that interacts with - // an already running filecoin node, and supplied the API multiaddr - // as options. - NewProcess(ctx context.Context, processType string, options map[string]string, eo fast.FilecoinOpts) (*fast.Filecoin, error) - - // Processes returns a slice of all processes the environment knows - // about. - Processes() []*fast.Filecoin - - // Teardown runs anything that the environment may need to do to - // be nice to the the execution area of this code. - Teardown(context.Context) error - - // TeardownProcess runs anything that the environment may need to do - // to remove a process from the environment in a clean way. - TeardownProcess(context.Context, *fast.Filecoin) error - - // GetFunds retrieves a fixed amount of tokens from the environment to the - // Filecoin processes default wallet address. - GetFunds(context.Context, *fast.Filecoin) error -} diff --git a/tools/fast/environment/environment_devnet.go b/tools/fast/environment/environment_devnet.go deleted file mode 100644 index fdca661c3d..0000000000 --- a/tools/fast/environment/environment_devnet.go +++ /dev/null @@ -1,219 +0,0 @@ -package environment - -// The devnet FAST environment provides an environment for using FAST with the deployed kittyhawk -// devnet infrasturture run by the Filecoin development team. It can be used to setup and manage nodes -// connected to either the nightly, test, or user devnets for running automation with the FAST library. 
- -import ( - "context" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "sync" - - "github.com/filecoin-project/go-address" - cid "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - iptb "github.com/ipfs/iptb/testbed" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// Devnet is a FAST lib environment that is meant to be used -// when working with kittyhawk devnets run by the Filecoin development team. -type Devnet struct { - config DevnetConfig - location string - - log logging.EventLogger - - processesMu sync.Mutex - processes []*fast.Filecoin - - processCountMu sync.Mutex - processCount int -} - -// DevnetConfig describes the dynamic resources of a network -type DevnetConfig struct { - // Name is the string value which can be used to configure bootstrap peers during init - Name string - - // GenesisLocation provides where the genesis.car for the network can be fetched from - GenesisLocation string - - // FaucetTap is the URL which can be used to request funds to a wallet - FaucetTap string -} - -// FindDevnetConfigByName returns a devnet configuration by looking it up by name -func FindDevnetConfigByName(name string) (DevnetConfig, error) { - if config, ok := devnetConfigs[name]; ok { - return config, nil - } - - return DevnetConfig{}, fmt.Errorf("failed to look up config for network %s", name) -} - -// NewDevnet builds an environment that uses deployed infrastructure to -// the kittyhawk devnets. 
-func NewDevnet(config DevnetConfig, location string) (Environment, error) { - env := &Devnet{ - config: config, - location: location, - log: logging.Logger("environment"), - } - - if err := os.MkdirAll(env.location, 0775); err != nil { - return nil, err - } - - return env, nil -} - -// GenesisCar provides a url where the genesis file can be fetched from -func (e *Devnet) GenesisCar() string { - return e.config.GenesisLocation -} - -// GenesisMiner returns a ErrNoGenesisMiner for this environment -func (e *Devnet) GenesisMiner() (*GenesisMiner, error) { - return nil, ErrNoGenesisMiner -} - -// Log returns the logger for the environment. -func (e *Devnet) Log() logging.EventLogger { - return e.log -} - -// NewProcess builds a iptb process of the given type and options passed. The -// process is tracked by the environment and returned. -func (e *Devnet) NewProcess(ctx context.Context, processType string, options map[string]string, eo fast.FilecoinOpts) (*fast.Filecoin, error) { - e.processesMu.Lock() - defer e.processesMu.Unlock() - - e.processCountMu.Lock() - defer e.processCountMu.Unlock() - - ns := iptb.NodeSpec{ - Type: processType, - Dir: fmt.Sprintf("%s/%d", e.location, e.processCount), - Attrs: options, - } - e.processCount = e.processCount + 1 - - e.log.Infof("New Process type: %s, dir: %s", processType, ns.Dir) - - if err := os.MkdirAll(ns.Dir, 0775); err != nil { - return nil, err - } - - c, err := ns.Load() - if err != nil { - return nil, err - } - - // We require a slightly more extended core interface - fc, ok := c.(fast.IPTBCoreExt) - if !ok { - return nil, fmt.Errorf("%s does not implement the extended IPTB.Core interface IPTBCoreExt", processType) - } - - p := fast.NewFilecoinProcess(ctx, fc, eo) - e.processes = append(e.processes, p) - return p, nil -} - -// Processes returns all processes the environment knows about. 
-func (e *Devnet) Processes() []*fast.Filecoin { - e.processesMu.Lock() - defer e.processesMu.Unlock() - return e.processes[:] -} - -// Teardown stops all of the nodes and cleans up the environment. -func (e *Devnet) Teardown(ctx context.Context) error { - e.processesMu.Lock() - defer e.processesMu.Unlock() - - e.log.Info("Teardown environment") - for _, p := range e.processes { - if err := p.StopDaemon(ctx); err != nil { - return err - } - } - - return os.RemoveAll(e.location) -} - -// TeardownProcess stops the running process and removes it from the -// environment. -func (e *Devnet) TeardownProcess(ctx context.Context, p *fast.Filecoin) error { - e.processesMu.Lock() - defer e.processesMu.Unlock() - - e.log.Infof("Teardown process: %s", p.String()) - if err := p.StopDaemon(ctx); err != nil { - return err - } - - for i, n := range e.processes { - if n == p { - e.processes = append(e.processes[:i], e.processes[i+1:]...) - break - } - } - - // remove the provess from the process list - return os.RemoveAll(p.Dir()) -} - -// GetFunds retrieves a fixed amount of tokens from the environment to the -// Filecoin processes default wallet address. -// GetFunds will send a request to the Faucet, the amount of tokens returned and -// number of requests permitted is determined by the Faucet configuration. 
-func (e *Devnet) GetFunds(ctx context.Context, p *fast.Filecoin) error { - e.processesMu.Lock() - defer e.processesMu.Unlock() - - e.log.Infof("GetFunds for process: %s", p.String()) - var toAddr address.Address - if err := p.ConfigGet(ctx, "wallet.defaultAddress", &toAddr); err != nil { - return err - } - - data := url.Values{} - data.Set("target", toAddr.String()) - - resp, err := http.PostForm(e.config.FaucetTap, data) - if err != nil { - return err - } - - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - switch resp.StatusCode { - case 200: - msgcid := resp.Header.Get("Message-Cid") - mcid, err := cid.Decode(msgcid) - if err != nil { - return err - } - - if _, err := p.MessageWait(ctx, mcid); err != nil { - return err - } - return nil - case 400: - return fmt.Errorf("Bad Request: %s", string(b)) - case 429: - return fmt.Errorf("Rate Limit: %s", string(b)) - default: - return fmt.Errorf("Unhandled Status: %s", resp.Status) - } -} diff --git a/tools/fast/environment/environment_devnet_config.go b/tools/fast/environment/environment_devnet_config.go deleted file mode 100644 index eb8a6c483a..0000000000 --- a/tools/fast/environment/environment_devnet_config.go +++ /dev/null @@ -1,19 +0,0 @@ -package environment - -var devnetConfigs = map[string]DevnetConfig{ - "user": { - Name: "user", - GenesisLocation: "https://genesis.user.kittyhawk.wtf/genesis.car", - FaucetTap: "https://faucet.user.kittyhawk.wtf/tap", - }, - "nightly": { - Name: "nightly", - GenesisLocation: "https://genesis.nightly.kittyhawk.wtf/genesis.car", - FaucetTap: "https://faucet.nightly.kittyhawk.wtf/tap", - }, - "staging": { - Name: "staging", - GenesisLocation: "https://genesis.staging.kittyhawk.wtf/genesis.car", - FaucetTap: "https://faucet.staging.kittyhawk.wtf/tap", - }, -} diff --git a/tools/fast/environment/environment_memory_genesis.go b/tools/fast/environment/environment_memory_genesis.go deleted file mode 100644 index dea10a1a72..0000000000 --- 
a/tools/fast/environment/environment_memory_genesis.go +++ /dev/null @@ -1,267 +0,0 @@ -package environment - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "math/big" - "net" - "net/http" - "net/url" - "os" - "sync" - - "github.com/filecoin-project/go-address" - logging "github.com/ipfs/go-log/v2" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/series" - gengen "github.com/filecoin-project/go-filecoin/tools/gengen/util" - - iptb "github.com/ipfs/iptb/testbed" -) - -// MemoryGenesis is a FAST lib environment that is meant to be used -// when working locally, on the same network / machine. It's great for writing -// functional tests! -type MemoryGenesis struct { - genesisCar []byte - genesisMinerOwner commands.WalletSerializeResult - genesisMinerAddr address.Address - - location string - - genesisServer *http.Server - genesisServerAddr string - - log logging.EventLogger - - processesMu sync.Mutex - processes []*fast.Filecoin - - processCountMu sync.Mutex - processCount int -} - -// NewMemoryGenesis builds an environment with a local genesis that can be used -// to initialize nodes and create a genesis node. The genesis file is provided by an http -// server. -func NewMemoryGenesis(funds *big.Int, location string) (Environment, error) { - env := &MemoryGenesis{ - location: location, - log: logging.Logger("environment"), - } - - if err := env.buildGenesis(funds); err != nil { - return nil, err - } - - if err := os.MkdirAll(env.location, 0775); err != nil { - return nil, err - } - - if err := env.startGenesisServer(); err != nil { - return nil, err - } - - return env, nil -} - -// GetFunds retrieves a fixed amount of tokens from the environment to the -// Filecoin processes default wallet address. 
-// GetFunds will cause the genesis node to send 1000 filecoin to process `p`. -func (e *MemoryGenesis) GetFunds(ctx context.Context, p *fast.Filecoin) error { - e.log.Infof("GetFunds for process: %s", p.String()) - return series.SendFilecoinDefaults(ctx, e.Processes()[0], p, 1000) -} - -// GenesisCar provides a url where the genesis file can be fetched from -func (e *MemoryGenesis) GenesisCar() string { - uri := url.URL{ - Host: e.genesisServerAddr, - Path: "genesis.car", - Scheme: "http", - } - - return uri.String() -} - -// GenesisMiner provides required information to create a genesis node and -// load the wallet. -func (e *MemoryGenesis) GenesisMiner() (*GenesisMiner, error) { - owner, err := json.Marshal(e.genesisMinerOwner) - if err != nil { - return nil, err - } - - return &GenesisMiner{ - Address: e.genesisMinerAddr, - Owner: bytes.NewBuffer(owner), - }, nil -} - -// Log returns the logger for the environment. -func (e *MemoryGenesis) Log() logging.EventLogger { - return e.log -} - -// NewProcess builds a iptb process of the given type and options passed. The -// process is tracked by the environment and returned. 
-func (e *MemoryGenesis) NewProcess(ctx context.Context, processType string, options map[string]string, eo fast.FilecoinOpts) (*fast.Filecoin, error) { - e.processesMu.Lock() - defer e.processesMu.Unlock() - - e.processCountMu.Lock() - defer e.processCountMu.Unlock() - - ns := iptb.NodeSpec{ - Type: processType, - Dir: fmt.Sprintf("%s/%d", e.location, e.processCount), - Attrs: options, - } - e.processCount = e.processCount + 1 - - e.log.Infof("New Process type: %s, dir: %s", processType, ns.Dir) - - if err := os.MkdirAll(ns.Dir, 0775); err != nil { - return nil, err - } - - c, err := ns.Load() - if err != nil { - return nil, err - } - - // We require a slightly more extended core interface - fc, ok := c.(fast.IPTBCoreExt) - if !ok { - return nil, fmt.Errorf("%s does not implement the extended IPTB.Core interface IPTBCoreExt", processType) - } - - p := fast.NewFilecoinProcess(ctx, fc, eo) - e.processes = append(e.processes, p) - return p, nil -} - -// Processes returns all processes the environment knows about. -func (e *MemoryGenesis) Processes() []*fast.Filecoin { - e.processesMu.Lock() - defer e.processesMu.Unlock() - return e.processes[:] -} - -// Teardown stops all of the nodes and cleans up the environment. -func (e *MemoryGenesis) Teardown(ctx context.Context) error { - e.processesMu.Lock() - defer e.processesMu.Unlock() - - e.log.Info("Teardown environment") - for _, p := range e.processes { - if err := p.StopDaemon(ctx); err != nil { - return err - } - } - - if err := e.genesisServer.Shutdown(ctx); err != nil { - return err - } - - return os.RemoveAll(e.location) -} - -// TeardownProcess stops the running process and removes it from the -// environment. 
-func (e *MemoryGenesis) TeardownProcess(ctx context.Context, p *fast.Filecoin) error { - e.processesMu.Lock() - defer e.processesMu.Unlock() - - e.log.Infof("Teardown process: %s", p.String()) - if err := p.StopDaemon(ctx); err != nil { - return err - } - - for i, n := range e.processes { - if n == p { - e.processes = append(e.processes[:i], e.processes[i+1:]...) - break - } - } - - // remove the provess from the process list - return os.RemoveAll(p.Dir()) -} - -// startGenesisServer builds and starts a server which will serve the genesis -// file, the url for the genesis.car is returned by GenesisCar() -func (e *MemoryGenesis) startGenesisServer() error { - handler := http.NewServeMux() - handler.HandleFunc("/genesis.car", func(w http.ResponseWriter, req *http.Request) { - car := bytes.NewBuffer(e.genesisCar) - if n, err := io.Copy(w, car); err != nil { - e.log.Errorf(`Failed to serve "/genesis.car" after writing %d bytes with error %s`, n, err) - } - }) - - e.genesisServer = &http.Server{Handler: handler} - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return err - } - - e.genesisServerAddr = ln.Addr().String() - - go func() { - if err := e.genesisServer.Serve(ln); err != nil && err != http.ErrServerClosed { - e.log.Errorf("Genesis file server: %s", err) - } - }() - - return nil -} - -// buildGenesis builds a genesis with the specified funds. 
-func (e *MemoryGenesis) buildGenesis(funds *big.Int) error { - commCfgs, err := gengen.MakeCommitCfgs(100) - if err != nil { - return err - } - cfg := &gengen.GenesisCfg{ - KeysToGen: 1, - PreallocatedFunds: []string{ - funds.String(), - }, - Miners: []*gengen.CreateStorageMinerConfig{ - { - Owner: 0, - SealProofType: constants.DevSealProofType, - CommittedSectors: commCfgs, - }, - }, - Network: "gfctest", - } - - var genbuffer bytes.Buffer - info, err := gengen.GenGenesisCar(cfg, &genbuffer) - if err != nil { - return err - } - - if len(info.Keys) == 0 { - return fmt.Errorf("no key was generated") - } - - if len(info.Miners) == 0 { - return fmt.Errorf("no miner was generated") - } - - e.genesisCar = genbuffer.Bytes() - e.genesisMinerOwner = commands.WalletSerializeResult{KeyInfo: info.Keys} - e.genesisMinerAddr = info.Miners[0].Address - - return nil -} diff --git a/tools/fast/environment/environment_memory_genesis_test.go b/tools/fast/environment/environment_memory_genesis_test.go deleted file mode 100644 index 0daa2de104..0000000000 --- a/tools/fast/environment/environment_memory_genesis_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package environment - -import ( - "context" - "io/ioutil" - "math/big" - "os" - "testing" - - iptb "github.com/ipfs/iptb/testbed" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/tools/fast" - mockplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/mock" -) - -// must register all filecoin iptb plugins -func init() { - _, err := iptb.RegisterPlugin(iptb.IptbPlugin{ - From: "", - NewNode: mockplugin.NewNode, - PluginName: mockplugin.PluginName, - BuiltIn: true, - }, false) - - if err != nil { - panic(err) - } -} - -func TestMemoryGenesis(t *testing.T) { - tf.UnitTest(t) - - t.Run("SetupTeardown", func(t *testing.T) { - ctx := context.Background() - - testDir, 
err := ioutil.TempDir(".", "environmentTest") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(testDir)) - }() - - env, err := NewMemoryGenesis(big.NewInt(100000), testDir) - localenv := env.(*MemoryGenesis) - assert.NoError(t, err) - assert.NotNil(t, env) - assert.Equal(t, testDir, localenv.location) - - // did we create the dir correctly? - _, err = os.Stat(localenv.location) - assert.NoError(t, err) - - // did we teardown correctly? - assert.NoError(t, env.Teardown(ctx)) - assert.Equal(t, 0, len(env.Processes())) - _, existsErr := os.Stat(localenv.location) - assert.True(t, os.IsNotExist(existsErr)) - }) - - t.Run("ProcessCreateAndTeardown", func(t *testing.T) { - ctx := context.Background() - - testDir, err := ioutil.TempDir(".", "environmentTest") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(testDir)) - }() - - env, err := NewMemoryGenesis(big.NewInt(100000), testDir) - require.NoError(t, err) - - p, err := env.NewProcess(ctx, mockplugin.PluginName, nil, fast.FilecoinOpts{}) - assert.NoError(t, err) - assert.NotNil(t, p) - assert.Equal(t, 1, len(env.Processes())) - - // did we create the process dir correctly? - _, err = os.Stat(p.Dir()) - assert.NoError(t, err) - - assert.NoError(t, env.TeardownProcess(ctx, p)) - assert.Equal(t, 0, len(env.Processes())) - - // did we teardown the process correctly? 
- _, existsErr := os.Stat(p.Dir()) - assert.True(t, os.IsNotExist(existsErr)) - }) -} diff --git a/tools/fast/fastesting/assertions.go b/tools/fast/fastesting/assertions.go deleted file mode 100644 index 044bbd74fc..0000000000 --- a/tools/fast/fastesting/assertions.go +++ /dev/null @@ -1,23 +0,0 @@ -package fastesting - -import ( - "bytes" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// AssertStdErrContains verifies that the last command stderr of 'fast' contains the -// string 'expected' -func AssertStdErrContains(t *testing.T, fast *fast.Filecoin, expected string) { - var cmdOutBytes []byte - w := bytes.NewBuffer(cmdOutBytes) - written, err := io.Copy(w, fast.LastCmdStdErr()) - require.NoError(t, err) - require.True(t, written > 0) - assert.Contains(t, string(w.Bytes()), expected) -} diff --git a/tools/fast/fastesting/basic.go b/tools/fast/fastesting/basic.go deleted file mode 100644 index 42247ccbc6..0000000000 --- a/tools/fast/fastesting/basic.go +++ /dev/null @@ -1,187 +0,0 @@ -package fastesting - -import ( - "context" - "io/ioutil" - "math/big" - "strings" - "testing" - "time" - - "github.com/ipfs/go-ipfs-files" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/environment" - "github.com/filecoin-project/go-filecoin/tools/fast/series" - localplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/local" -) - -// TestEnvironment provides common setup for writing tests using FAST -type TestEnvironment struct { - environment.Environment - - t *testing.T - ctx context.Context - - pluginName string - pluginOpts map[string]string - - fastenvOpts fast.FilecoinOpts - - GenesisMiner *fast.Filecoin -} - -// NewTestEnvironment creates a TestEnvironment with a basic 
setup for writing tests using the FAST library. -func NewTestEnvironment(ctx context.Context, t *testing.T, fastenvOpts fast.FilecoinOpts) (context.Context, *TestEnvironment) { - - // Create a directory for the test using the test name (mostly for FAST) - // Replace the forward slash as tempdir can't handle them - dir, err := ioutil.TempDir("", strings.Replace(t.Name(), "/", ".", -1)) - require.NoError(t, err) - - // Create an environment that includes a genesis block with 1MM FIL - env, err := environment.NewMemoryGenesis(big.NewInt(1000000), dir) - require.NoError(t, err) - - defer func() { - dumpEnvOutputOnFail(t, env.Processes()) - }() - - // Setup options for nodes. - options := make(map[string]string) - options[localplugin.AttrLogJSON] = "1" // Enable JSON logs - options[localplugin.AttrLogLevel] = "5" // Set log level to Debug - options[localplugin.AttrFilecoinBinary] = testhelpers.MustGetFilecoinBinary() // Get the filecoin binary - - genesisURI := env.GenesisCar() - genesisMiner, err := env.GenesisMiner() - require.NoError(t, err) - - fastenvOpts.InitOpts = append([]fast.ProcessInitOption{fast.POGenesisFile(genesisURI)}, fastenvOpts.InitOpts...) - - if isMissingBlockTimeOpt(fastenvOpts) { - fastenvOpts.DaemonOpts = append([]fast.ProcessDaemonOption{fast.POBlockTime(time.Millisecond)}, fastenvOpts.DaemonOpts...) 
- } - - // Setup the first node which is used to help coordinate the other nodes by providing - // funds, mining for the network, etc - genesis, err := env.NewProcess(ctx, localplugin.PluginName, options, fastenvOpts) - require.NoError(t, err) - - err = series.SetupGenesisNode(ctx, genesis, genesisMiner.Address, files.NewReaderFile(genesisMiner.Owner)) - require.NoError(t, err) - - // Define a MiningOnce function which will bet set on the context to provide - // a way to mine blocks in the series used during testing - var miningOnce series.MiningOnceFunc = func() { - _, err := genesis.MiningOnce(ctx) - require.NoError(t, err) - } - - // Define a MessageWait function which will bet set on the context to provide - // a way to wait for a message to appear on the mining queue - var waitForMpool series.MpoolWaitFunc = func() { - _, err := genesis.MpoolLs(ctx, fast.AOWaitForCount(1)) - require.NoError(t, err) - } - - ctx = series.SetCtxMiningOnce(ctx, miningOnce) - ctx = series.SetCtxWaitForMpool(ctx, waitForMpool) - ctx = series.SetCtxSleepDelay(ctx, time.Second) - - return ctx, &TestEnvironment{ - Environment: env, - t: t, - ctx: ctx, - pluginName: localplugin.PluginName, - pluginOpts: options, - fastenvOpts: fastenvOpts, - GenesisMiner: genesis, - } -} - -// RequireNewNode builds a new node for the environment -func (env *TestEnvironment) RequireNewNode() *fast.Filecoin { - p, err := env.NewProcess(env.ctx, env.pluginName, env.pluginOpts, env.fastenvOpts) - require.NoError(env.t, err) - - return p -} - -// RequireNewNodeStarted builds a new node using RequireNewNode, then initializes -// and starts it -func (env *TestEnvironment) RequireNewNodeStarted() *fast.Filecoin { - p := env.RequireNewNode() - - err := series.InitAndStart(env.ctx, p) - require.NoError(env.t, err) - - return p -} - -// RequireNewNodeConnected builds a new node using RequireNewNodeStarted, then -// connect it to the environment GenesisMiner node -func (env *TestEnvironment) 
RequireNewNodeConnected() *fast.Filecoin { - p := env.RequireNewNodeStarted() - - err := series.Connect(env.ctx, env.GenesisMiner, p) - require.NoError(env.t, err) - - return p -} - -// RequireNewNodeWithFunds builds a new node using RequireNewNodeStarted, then -// sends it funds from the environment GenesisMiner node -func (env *TestEnvironment) RequireNewNodeWithFunds(funds int) *fast.Filecoin { - p := env.RequireNewNodeConnected() - - err := series.SendFilecoinDefaults(env.ctx, env.GenesisMiner, p, funds) - require.NoError(env.t, err) - - return p -} - -// Teardown stops all of the nodes and cleans up the environment. If the test failed, -// it will also print the last output of each process by calling `DumpLastOutput`. -// Output is logged using the Log method on the testing.T -func (env *TestEnvironment) Teardown(ctx context.Context) error { - env.DumpEnvOutputOnFail() - return env.Environment.Teardown(ctx) -} - -// DumpEnvOutputOnFail calls `DumpLastOutput for each process if the test failed. -func (env *TestEnvironment) DumpEnvOutputOnFail() { - dumpEnvOutputOnFail(env.t, env.Processes()) -} - -// RunAsyncMiner unset MiningOnce for conflict -func (env *TestEnvironment) RunAsyncMiner() context.Context { - var miningOnce series.MiningOnceFunc = func() {} - var mpoolWait series.MpoolWaitFunc = func() {} - env.ctx = series.SetCtxMiningOnce(env.ctx, miningOnce) - env.ctx = series.SetCtxWaitForMpool(env.ctx, mpoolWait) - return env.ctx -} - -// helper to dump the output using the t.Log method. 
-func dumpEnvOutputOnFail(t *testing.T, procs []*fast.Filecoin) { - if t.Failed() { - w := newLogWriter(t) - for _, node := range procs { - node.DumpLastOutput(w) - } - require.NoError(t, w.Close()) - } -} - -func isMissingBlockTimeOpt(opts fast.FilecoinOpts) bool { - for _, fn := range opts.DaemonOpts { - s := fn() - if len(s) > 0 && s[0] == "--block-time" { - return false - } - } - return true -} diff --git a/tools/fast/fastesting/basic_test.go b/tools/fast/fastesting/basic_test.go deleted file mode 100644 index f23769be9d..0000000000 --- a/tools/fast/fastesting/basic_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package fastesting_test - -import ( - "context" - "testing" - "time" - - "github.com/ipfs/go-log/v2" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/fastesting" -) - -func TestSetFilecoinOpts(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("not working") - log.SetDebugLogging() - - fastOpts := fast.FilecoinOpts{ - DaemonOpts: []fast.ProcessDaemonOption{fast.POBlockTime(10 * time.Millisecond)}, - } - - ctx, env := fastesting.NewTestEnvironment(context.Background(), t, fastOpts) - clientNode := env.GenesisMiner - require.NoError(t, clientNode.MiningStart(ctx)) - defer func() { - err := env.Teardown(ctx) - require.NoError(t, err) - }() -} - -func TestNoFilecoinOpts(t *testing.T) { - tf.IntegrationTest(t) - t.Skip("not working") - log.SetDebugLogging() - - ctx, env := fastesting.NewTestEnvironment(context.Background(), t, fast.FilecoinOpts{}) - - clientNode := env.GenesisMiner - require.NoError(t, clientNode.MiningStart(ctx)) - defer func() { - err := env.Teardown(ctx) - require.NoError(t, err) - }() -} diff --git a/tools/fast/fastesting/deployment.go b/tools/fast/fastesting/deployment.go deleted file mode 100644 index 808bb26246..0000000000 --- 
a/tools/fast/fastesting/deployment.go +++ /dev/null @@ -1,186 +0,0 @@ -package fastesting - -import ( - "context" - "io/ioutil" - "math/big" - "strings" - "testing" - "time" - - "github.com/ipfs/go-ipfs-files" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/environment" - "github.com/filecoin-project/go-filecoin/tools/fast/series" - localplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/local" -) - -// DeploymentEnvironment provides common setup for writing tests which will run against -// a deployed network using FAST -type DeploymentEnvironment struct { - environment.Environment - - t *testing.T - ctx context.Context - - pluginName string - pluginOpts map[string]string - - postInitFn func(context.Context, *fast.Filecoin) error - - fastenvOpts fast.FilecoinOpts -} - -// NewDeploymentEnvironment creates a DeploymentEnvironment with a basic setup for writing -// tests using the FAST library. DeploymentEnvironment also supports testing locally using -// the `local` network which will handle setting up a mining node and updating bootstrap -// peers. The local network runs at 5 second blocktimes. 
-func NewDeploymentEnvironment(ctx context.Context, t *testing.T, network string, fastenvOpts fast.FilecoinOpts) (context.Context, *DeploymentEnvironment) { - - // Create a directory for the test using the test name (mostly for FAST) - // Replace the forward slash as tempdir can't handle them - dir, err := ioutil.TempDir("", strings.Replace(t.Name(), "/", ".", -1)) - require.NoError(t, err) - - if network == "local" { - return makeLocal(ctx, t, dir, fastenvOpts) - } - - return makeDevnet(ctx, t, network, dir, fastenvOpts) -} - -func makeLocal(ctx context.Context, t *testing.T, dir string, fastenvOpts fast.FilecoinOpts) (context.Context, *DeploymentEnvironment) { - // Create an environment to connect to the devnet - env, err := environment.NewMemoryGenesis(big.NewInt(1000000), dir) - require.NoError(t, err) - - defer func() { - dumpEnvOutputOnFail(t, env.Processes()) - }() - - // Setup options for nodes. - options := make(map[string]string) - options[localplugin.AttrLogJSON] = "0" - options[localplugin.AttrLogLevel] = "5" - options[localplugin.AttrFilecoinBinary] = testhelpers.MustGetFilecoinBinary() - - genesisURI := env.GenesisCar() - genesisMiner, err := env.GenesisMiner() - require.NoError(t, err) - - fastenvOpts.InitOpts = append([]fast.ProcessInitOption{fast.POGenesisFile(genesisURI)}, fastenvOpts.InitOpts...) - fastenvOpts.DaemonOpts = append([]fast.ProcessDaemonOption{fast.POBlockTime(time.Second * 5)}, fastenvOpts.DaemonOpts...) 
- - ctx = series.SetCtxSleepDelay(ctx, time.Second*5) - - // Setup the first node which is used to help coordinate the other nodes by providing - // funds, mining for the network, etc - genesis, err := env.NewProcess(ctx, localplugin.PluginName, options, fastenvOpts) - require.NoError(t, err) - - err = series.SetupGenesisNode(ctx, genesis, genesisMiner.Address, files.NewReaderFile(genesisMiner.Owner)) - require.NoError(t, err) - - err = genesis.MiningStart(ctx) - require.NoError(t, err) - - details, err := genesis.ID(ctx) - require.NoError(t, err) - - return ctx, &DeploymentEnvironment{ - Environment: env, - t: t, - ctx: ctx, - pluginName: localplugin.PluginName, - pluginOpts: options, - fastenvOpts: fastenvOpts, - postInitFn: func(ctx context.Context, node *fast.Filecoin) error { - config, err := node.Config() - if err != nil { - return err - } - - config.Bootstrap.Addresses = []string{details.Addresses[0].String()} - config.Bootstrap.MinPeerThreshold = 1 - config.Bootstrap.Period = "10s" - - return node.WriteConfig(config) - }, - } -} - -func makeDevnet(ctx context.Context, t *testing.T, network string, dir string, fastenvOpts fast.FilecoinOpts) (context.Context, *DeploymentEnvironment) { - // Create an environment that includes a genesis block with 1MM FIL - networkConfig, err := environment.FindDevnetConfigByName(network) - require.NoError(t, err) - - env, err := environment.NewDevnet(networkConfig, dir) - require.NoError(t, err) - - defer func() { - dumpEnvOutputOnFail(t, env.Processes()) - }() - - // Setup options for nodes. 
- options := make(map[string]string) - options[localplugin.AttrLogJSON] = "0" // Enable JSON logs - options[localplugin.AttrLogLevel] = "5" // Set log level to Debug - options[localplugin.AttrFilecoinBinary] = testhelpers.MustGetFilecoinBinary() // Get the filecoin binary - - genesisURI := env.GenesisCar() - - fastenvOpts.InitOpts = append(fastenvOpts.InitOpts, fast.POGenesisFile(genesisURI), fast.PODevnet(networkConfig.Name)) - - ctx = series.SetCtxSleepDelay(ctx, time.Second*30) - - return ctx, &DeploymentEnvironment{ - Environment: env, - t: t, - ctx: ctx, - pluginName: localplugin.PluginName, - pluginOpts: options, - fastenvOpts: fastenvOpts, - postInitFn: func(ctx context.Context, node *fast.Filecoin) error { - return nil - }, - } -} - -// RequireNewNodeStarted builds a new node using RequireNewNode, then initializes -// and starts it -func (env *DeploymentEnvironment) RequireNewNodeStarted() *fast.Filecoin { - p, err := env.NewProcess(env.ctx, env.pluginName, env.pluginOpts, env.fastenvOpts) - require.NoError(env.t, err) - - err = series.InitAndStart(env.ctx, p, env.postInitFn) - require.NoError(env.t, err) - - return p -} - -// RequireNewNodeWithFunds builds a new node using RequireNewNodeStarted, then -// sends it funds from the environment GenesisMiner node -func (env *DeploymentEnvironment) RequireNewNodeWithFunds() *fast.Filecoin { - p := env.RequireNewNodeStarted() - - err := env.GetFunds(env.ctx, p) - require.NoError(env.t, err) - - return p -} - -// Teardown stops all of the nodes and cleans up the environment. If the test failed, -// it will also print the last output of each process by calling `DumpLastOutput`. -// Output is logged using the Log method on the testing.T -func (env *DeploymentEnvironment) Teardown(ctx context.Context) error { - env.DumpEnvOutputOnFail() - return env.Environment.Teardown(ctx) -} - -// DumpEnvOutputOnFail calls `DumpLastOutput for each process if the test failed. 
-func (env *DeploymentEnvironment) DumpEnvOutputOnFail() { - dumpEnvOutputOnFail(env.t, env.Processes()) -} diff --git a/tools/fast/fastesting/log_writer.go b/tools/fast/fastesting/log_writer.go deleted file mode 100644 index 4ea9c53e93..0000000000 --- a/tools/fast/fastesting/log_writer.go +++ /dev/null @@ -1,62 +0,0 @@ -package fastesting - -import ( - "bufio" - "io" - "sync" -) - -type logWriter struct { - bpr *bufio.Reader - pw io.WriteCloser - wg sync.WaitGroup - out logf -} - -type logf interface { - Logf(string, ...interface{}) -} - -// newLogWriter returns a io.WriteCloser which will take all lines written to -// it and call out.Logf with it. This is currently used with the FAST DumpLastOutput -// to print the output of a command to the test logger. -func newLogWriter(out logf) io.WriteCloser { - pr, pw := io.Pipe() - bpr := bufio.NewReader(pr) - - p := &logWriter{ - pw: pw, - bpr: bpr, - out: out, - } - - p.wg.Add(1) - go p.writeOut() - - return p -} - -func (p *logWriter) writeOut() { - defer p.wg.Done() - for { - l, err := p.bpr.ReadBytes('\n') - if len(l) != 0 { - p.out.Logf(string(l)) - } - if err != nil { - break - } - } -} - -// Write the bytes b using t.Logf on each full line -func (p *logWriter) Write(b []byte) (int, error) { - return p.pw.Write(b) -} - -// Close the writer -func (p *logWriter) Close() error { - err := p.pw.Close() - p.wg.Wait() - return err -} diff --git a/tools/fast/fastesting/log_writer_test.go b/tools/fast/fastesting/log_writer_test.go deleted file mode 100644 index ae76615784..0000000000 --- a/tools/fast/fastesting/log_writer_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package fastesting - -import ( - "bytes" - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" -) - -type tlogWriter struct { - buf bytes.Buffer -} - -func (w *tlogWriter) Logf(format string, args ...interface{}) { - fmt.Fprintf(&w.buf, format, args...) 
-} - -func TestLogWriter(t *testing.T) { - tf.UnitTest(t) - - input := []string{ - "line1\n", - "line2\n", - "line3\n", - "line4\n", - "line5\n", - } - - out := &tlogWriter{} - lw := newLogWriter(out) - - for _, line := range input { - _, err := lw.Write([]byte(fmt.Sprintf("%s", line))) - require.NoError(t, err) - } - - require.NoError(t, lw.Close()) - - require.Equal(t, strings.Join(input, ""), out.buf.String()) -} diff --git a/tools/fast/fastutil/debug_utils.go b/tools/fast/fastutil/debug_utils.go deleted file mode 100644 index 5db0c999b4..0000000000 --- a/tools/fast/fastutil/debug_utils.go +++ /dev/null @@ -1,56 +0,0 @@ -package fastutil - -import ( - "encoding/json" - "fmt" - "github.com/ipfs/iptb/testbed/interfaces" - "io" - "io/ioutil" - "strings" -) - -// Output represent the output of a command run -type Output struct { - Args []string - ExitCode int - Error error - Stderr string - Stdout string -} - -// DumpOutput prints the output to the io.Writer in a human readable way -func DumpOutput(w io.Writer, output testbedi.Output) { - fmt.Fprintf(w, ">>>> start-dump\n") // nolint: errcheck - fmt.Fprintf(w, "---- command %s\n", strings.Join(output.Args(), " ")) // nolint: errcheck - fmt.Fprintf(w, "---- exit-code %d\n", output.ExitCode()) // nolint: errcheck - - err := output.Error() - if err != nil { - fmt.Fprintf(w, "---- error %s\n", output.Error()) // nolint: errcheck - } else { - fmt.Fprintf(w, "---- error nil\n") // nolint: errcheck - } - - fmt.Fprintf(w, "---- stdout\n") // nolint: errcheck - io.Copy(w, output.Stdout()) // nolint: errcheck - fmt.Fprintf(w, "---- stderr\n") // nolint: errcheck - io.Copy(w, output.Stderr()) // nolint: errcheck - fmt.Fprintf(w, "<<<< end-dump\n") // nolint: errcheck -} - -// DumpOutputJSON prints the json encoded output to the io.Writer -func DumpOutputJSON(w io.Writer, output testbedi.Output) { - stdout, _ := ioutil.ReadAll(output.Stdout()) // nolint: errcheck - stderr, _ := ioutil.ReadAll(output.Stderr()) // nolint: 
errcheck - - jout := &Output{ - Args: output.Args(), - ExitCode: output.ExitCode(), - Error: output.Error(), - Stderr: string(stderr), - Stdout: string(stdout), - } - - enc := json.NewEncoder(w) - _ = enc.Encode(jout) // nolint: errcheck -} diff --git a/tools/fast/fastutil/helpers_test.go b/tools/fast/fastutil/helpers_test.go deleted file mode 100644 index bb2132b4ac..0000000000 --- a/tools/fast/fastutil/helpers_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package fastutil - -import ( - "bytes" - "fmt" - "io" - "strings" - "testing" - - "github.com/pmezard/go-difflib/difflib" -) - -func writeLine(seed int, ws ...io.Writer) error { - alphabet := "0123456789ABCDEF" - var line strings.Builder - for x := 0; x < 64; x++ { - c := alphabet[(x+seed)%len(alphabet)] - line.WriteByte(c) - } - - line.WriteByte('\n') - - for _, w := range ws { - n, err := w.Write([]byte(line.String())) - - if err != nil { - return err - } - - if n != line.Len() { - return fmt.Errorf("did not write entire line") - } - - } - - return nil -} - -func compare(t *testing.T, expected, actual []byte) { - if !bytes.Equal(expected, actual) { - diff := difflib.UnifiedDiff{ - A: difflib.SplitLines(string(expected)), - B: difflib.SplitLines(string(actual)), - FromFile: "Expected", - ToFile: "Actual", - Context: 3, - } - text, _ := difflib.GetUnifiedDiffString(diff) - - t.Logf("\n%s", text) - t.Fatal("data does not match") - } -} - -func writeLines(seed int, num int, ws ...io.Writer) error { - for i := 0; i < num; i++ { - if err := writeLine(seed+i, ws...); err != nil { - return err - } - } - - return nil -} diff --git a/tools/fast/fastutil/line_puller.go b/tools/fast/fastutil/line_puller.go deleted file mode 100644 index 9c54fb6ff4..0000000000 --- a/tools/fast/fastutil/line_puller.go +++ /dev/null @@ -1,63 +0,0 @@ -package fastutil - -import ( - "bufio" - "context" - "io" - "sync" - "time" -) - -// LinePuller provides an easy way to pull complete lines (ending in \n) from -// a source to a sink. 
-type LinePuller struct { - sourceMu sync.Mutex - source *bufio.Reader - sink io.Writer -} - -// NewLinePuller returns a LinePuller that will read complete lines -// from the source to the sink when started (see Start) on the provided -// frequency. -func NewLinePuller(source io.Reader, sink io.Writer) *LinePuller { - return &LinePuller{ - source: bufio.NewReader(source), - sink: sink, - } -} - -// StartPulling will call Pull on an interval of freq. -func (lp *LinePuller) StartPulling(ctx context.Context, freq time.Duration) error { - ticker := time.NewTicker(freq) - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - if err := lp.Pull(); err != nil { - return err - } - } - } -} - -// Pull reads in all data from the source and writes each line out to the sink. -func (lp *LinePuller) Pull() error { - lp.sourceMu.Lock() - defer lp.sourceMu.Unlock() - for { - line, rerr := lp.source.ReadBytes('\n') - if rerr != nil && rerr != io.EOF { - return rerr - } - - _, err := lp.sink.Write(line) - if err != nil { - return err - } - - if rerr == io.EOF { - return nil - } - } -} diff --git a/tools/fast/fastutil/line_puller_test.go b/tools/fast/fastutil/line_puller_test.go deleted file mode 100644 index f857e29fcd..0000000000 --- a/tools/fast/fastutil/line_puller_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package fastutil - -import ( - "bytes" - "errors" - "io" - "testing" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/stretchr/testify/require" -) - -func TestLinePuller(t *testing.T) { - tf.UnitTest(t) - - t.Run("pull on empty source", func(t *testing.T) { - var source bytes.Buffer - var sink bytes.Buffer - - lp := NewLinePuller(&source, &sink) - err := lp.Pull() - require.NoError(t, err) - }) - - t.Run("pull one line", func(t *testing.T) { - var source bytes.Buffer - var sink bytes.Buffer - - lp := NewLinePuller(&source, &sink) - - source.WriteString("Filecoin\n") - - err := lp.Pull() - require.NoError(t, 
err) - - require.Equal(t, "Filecoin\n", sink.String()) - - }) - - t.Run("pull many lines", func(t *testing.T) { - var source bytes.Buffer - var sink bytes.Buffer - var expected bytes.Buffer - - lp := NewLinePuller(&source, &sink) - - require.NoError(t, writeLines(0, 1000, &source, &expected)) - - err := lp.Pull() - require.NoError(t, err) - - compare(t, expected.Bytes(), sink.Bytes()) - }) - - t.Run("pull after EOF", func(t *testing.T) { - var source manualReader - var sink bytes.Buffer - var expected bytes.Buffer - - lp := NewLinePuller(&source, &sink) - - source.bytes = []byte("Hello World\n") - source.err = io.EOF - - expected.Write(source.bytes) - - err := lp.Pull() - require.NoError(t, err) - - expected.Write(source.bytes) - - err = lp.Pull() - require.NoError(t, err) - - compare(t, expected.Bytes(), sink.Bytes()) - }) - - t.Run("source returns error", func(t *testing.T) { - var source manualReader - var sink bytes.Buffer - var expected bytes.Buffer - - lp := NewLinePuller(&source, &sink) - - source.bytes = []byte{} - source.err = errors.New("An error") - - err := lp.Pull() - require.Equal(t, err, source.err) - - compare(t, expected.Bytes(), sink.Bytes()) - }) -} - -type manualReader struct { - bytes []byte - err error -} - -func (r *manualReader) Read(p []byte) (int, error) { - if len(p) < len(r.bytes) { - panic("manualReader bytes is larger than read buffer") - } - - return copy(p, r.bytes), r.err -} diff --git a/tools/fast/fastutil/stream_recorder.go b/tools/fast/fastutil/stream_recorder.go deleted file mode 100644 index 17717fc87d..0000000000 --- a/tools/fast/fastutil/stream_recorder.go +++ /dev/null @@ -1,107 +0,0 @@ -package fastutil - -import ( - "bytes" - "sync" -) - -// IntervalRecorder is an io.Writer which provides an easy way to record intervals of -// data written to it. -type IntervalRecorder struct { - // buf is the internal buffer that stores all data written to the - // IntervalRecorder until an interval is started, or stopped. 
- bufMu sync.Mutex - buf bytes.Buffer - - intervalsMu sync.Mutex - intervals []*Interval -} - -// Interval is a bytes.Buffer containing all data that has been written -// to an IntervalRecorder from the Intervals creation until Stop is called. -// You should not create this structure directly but instead use the Start -// method on the IntervalRecorder. -type Interval struct { - bytes.Buffer - done func() -} - -// NewIntervalRecorder returns a new IntervalRecorder. -func NewIntervalRecorder() *IntervalRecorder { - return &IntervalRecorder{} -} - -// Write appends the contents of p to the IntervalRecorder. The return value is the -// length of p; err is always nil. If the internal buffer comes to large, Write will -// panic with bytes.ErrToLarge. -// See bytes.Buffer for more info: https://golang.org/pkg/bytes/#Buffer.Write -func (lw *IntervalRecorder) Write(p []byte) (int, error) { - lw.bufMu.Lock() - defer lw.bufMu.Unlock() - return lw.buf.Write(p) -} - -// Transfers all the data in the IntervalRecorder to each interval currently -// being tracked. -func (lw *IntervalRecorder) drain() { - lw.bufMu.Lock() - defer lw.bufMu.Unlock() - buf := lw.buf.Bytes() - - lw.intervalsMu.Lock() - defer lw.intervalsMu.Unlock() - for _, interval := range lw.intervals { - // See https://golang.org/pkg/bytes/#Buffer.Write - // The return value n is the length of p; err is always nil. If the - // buffer becomes too large, Write will panic with ErrTooLarge. - interval.Write(buf) // nolint: err - } - - // Everything in lw.buf, now exists in each record, so we reset the buffer - // so it does not grow too massive - lw.buf.Reset() -} - -// Adds a new interval which will be written to on each drain. -func (lw *IntervalRecorder) addInterval(interval *Interval) { - lw.intervalsMu.Lock() - defer lw.intervalsMu.Unlock() - - lw.intervals = append(lw.intervals, interval) -} - -// Removes an interval so that it no longer receive data. 
-func (lw *IntervalRecorder) removeInterval(interval *Interval) { - lw.intervalsMu.Lock() - defer lw.intervalsMu.Unlock() - - for i, v := range lw.intervals { - if interval == v { - lw.intervals = append(lw.intervals[:i], lw.intervals[i+1:]...) - break - } - } -} - -// Start creates a new Interval, all data written to the IntervalRecorder -// after Start returns will be available in the interval. The interval should -// not be read from until Stop is called. -// See Stop for more detail -func (lw *IntervalRecorder) Start() *Interval { - lw.drain() - - interval := &Interval{} - interval.done = func() { - lw.drain() - lw.removeInterval(interval) - } - - lw.addInterval(interval) - - return interval -} - -// Stop will stop any further data being written to the interval after it returns. -func (r *Interval) Stop() { - r.done() -} diff --git a/tools/fast/fastutil/stream_recorder_test.go b/tools/fast/fastutil/stream_recorder_test.go deleted file mode 100644 index 20e6b756af..0000000000 --- a/tools/fast/fastutil/stream_recorder_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package fastutil - -import ( - "bytes" - "io/ioutil" - "testing" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/stretchr/testify/require" -) - -type window struct { - interval *Interval - expected *bytes.Buffer -} - -func TestOverlappingWindows(t *testing.T) { - tf.UnitTest(t) - - var err error - - sr := NewIntervalRecorder() - - // Fill up the buffer with some extra stuff - err = writeLines(0, 10, sr) - require.NoError(t, err) - - // Open our first window - win1 := window{ - interval: sr.Start(), - expected: bytes.NewBuffer(nil), - } - - // Write some data to the window - err = writeLines(1, 10, sr, win1.expected) - require.NoError(t, err) - - // Open new window - win2 := window{ - interval: sr.Start(), - expected: bytes.NewBuffer(nil), - } - - // Write data, which should show up in both windows - err = writeLines(2, 10, sr, win1.expected, win2.expected) - 
require.NoError(t, err) - - // Close the first window - win1.interval.Stop() - - // Write data, which should show up in only the second window - err = writeLines(3, 10, sr, win2.expected) - require.NoError(t, err) - - // Close the second window - win2.interval.Stop() - - for _, win := range []window{win1, win2} { - data, err := ioutil.ReadAll(win.interval) - require.NoError(t, err) - - compare(t, win.expected.Bytes(), data) - } - -} - -func TestEmbeddedWindows(t *testing.T) { - tf.UnitTest(t) - - var err error - - sr := NewIntervalRecorder() - - // Fill up the buffer with some extra stuff - err = writeLines(0, 10, sr) - require.NoError(t, err) - - // Open our first window - win1 := window{ - interval: sr.Start(), - expected: bytes.NewBuffer(nil), - } - - // Write some data to the window - err = writeLines(1, 10, sr, win1.expected) - require.NoError(t, err) - - // Open new window - win2 := window{ - interval: sr.Start(), - expected: bytes.NewBuffer(nil), - } - - // Write data, which should show up in both windows - err = writeLines(2, 10, sr, win1.expected, win2.expected) - require.NoError(t, err) - - // Close the second window - win2.interval.Stop() - - // Write data, which should show up in only the first window - err = writeLines(3, 10, sr, win1.expected) - require.NoError(t, err) - - // Close the first window - win1.interval.Stop() - - for _, win := range []window{win1, win2} { - data, err := ioutil.ReadAll(win.interval) - require.NoError(t, err) - - compare(t, win.expected.Bytes(), data) - } -} diff --git a/tools/fast/process.go b/tools/fast/process.go deleted file mode 100644 index c75727520a..0000000000 --- a/tools/fast/process.go +++ /dev/null @@ -1,286 +0,0 @@ -package fast - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - - logging "github.com/ipfs/go-log/v2" - iptb "github.com/ipfs/iptb/testbed" - "github.com/ipfs/iptb/testbed/interfaces" - "github.com/libp2p/go-libp2p-core/peer" - - fcconfig 
"github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/tools/fast/fastutil" - dockerplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/docker" - localplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/local" -) - -var ( - // ErrDoubleInitOpts is returned by InitDaemon when both init options are provided by FilecoinOpts - // in NewProcess as well as passed to InitDaemon directly. - ErrDoubleInitOpts = errors.New("cannot provide both init options through environment and arguments") - - // ErrDoubleDaemonOpts is returned by StartDaemon when both init options are provided by FilecoinOpts - // in NewProcess as well as passed to StartDaemon directly. - ErrDoubleDaemonOpts = errors.New("cannot provide both daemon options through environment and arguments") -) - -// FilecoinOpts are used define process init and daemon options for the environment. -type FilecoinOpts struct { - InitOpts []ProcessInitOption - DaemonOpts []ProcessDaemonOption -} - -// must register all filecoin iptb plugins first. -func init() { - _, err := iptb.RegisterPlugin(iptb.IptbPlugin{ - From: "", - NewNode: localplugin.NewNode, - PluginName: localplugin.PluginName, - BuiltIn: true, - }, false) - - if err != nil { - panic(err) - } - - _, err = iptb.RegisterPlugin(iptb.IptbPlugin{ - From: "", - NewNode: dockerplugin.NewNode, - PluginName: dockerplugin.PluginName, - BuiltIn: true, - }, false) - - if err != nil { - panic(err) - } -} - -// IPTBCoreExt is an extended interface of the iptb.Core. It defines additional requirement. -type IPTBCoreExt interface { - testbedi.Core - testbedi.Config - - // StderrReader is require to gather daemon logs during action execution - StderrReader() (io.ReadCloser, error) -} - -// Filecoin represents a wrapper around the iptb Core interface. 
-type Filecoin struct { - PeerID peer.ID - - initOpts []ProcessInitOption - daemonOpts []ProcessDaemonOption - - Log logging.EventLogger - - core IPTBCoreExt - ctx context.Context - - lastCmdOutput testbedi.Output - - stderr io.ReadCloser - - lpCtx context.Context - lpCancel context.CancelFunc - lpErr error - lp *fastutil.LinePuller - ir fastutil.IntervalRecorder -} - -// NewFilecoinProcess returns a pointer to a Filecoin process that wraps the IPTB core interface `c`. -func NewFilecoinProcess(ctx context.Context, c IPTBCoreExt, eo FilecoinOpts) *Filecoin { - return &Filecoin{ - core: c, - Log: logging.Logger(c.String()), - ctx: ctx, - initOpts: eo.InitOpts, - daemonOpts: eo.DaemonOpts, - } -} - -// InitDaemon initializes the filecoin daemon process. -func (f *Filecoin) InitDaemon(ctx context.Context, args ...string) (testbedi.Output, error) { - if len(args) != 0 && len(f.initOpts) != 0 { - return nil, ErrDoubleInitOpts - } - - if len(args) == 0 { - for _, opt := range f.initOpts { - args = append(args, opt()...) - } - } - - f.Log.Infof("InitDaemon: %s %s", f.core.Dir(), args) - - return f.core.Init(ctx, args...) -} - -// StartDaemon starts the filecoin daemon process. -func (f *Filecoin) StartDaemon(ctx context.Context, wait bool, args ...string) (testbedi.Output, error) { - if len(args) != 0 && len(f.daemonOpts) != 0 { - return nil, ErrDoubleDaemonOpts - } - - if len(args) == 0 { - for _, opt := range f.daemonOpts { - args = append(args, opt()...) - } - } - - f.Log.Infof("StartDaemon: %s %s", f.core.Dir(), args) - - out, err := f.core.Start(ctx, wait, args...) - if err != nil { - return nil, err - } - - if err := f.setupStderrCapturing(); err != nil { - return nil, err - } - - idinfo, err := f.ID(ctx) - if err != nil { - return nil, err - } - - f.PeerID = idinfo.ID - - return out, nil -} - -// StopDaemon stops the filecoin daemon process. 
-func (f *Filecoin) StopDaemon(ctx context.Context) error { - if err := f.core.Stop(ctx); err != nil { - // TODO this may break the `IsAlive` parameter - return err - } - - return f.teardownStderrCapturing() -} - -// Shell starts a user shell targeting the filecoin process. Exact behavior is plugin -// dependent. Please refer to the plugin documentation for more information. -func (f *Filecoin) Shell() error { - return f.core.Shell(f.ctx, []testbedi.Core{}) -} - -// Dir returns the dirtectory used by the filecoin process. -func (f *Filecoin) Dir() string { - return f.core.Dir() -} - -// String returns the string representation of the filecoin process. -func (f *Filecoin) String() string { - return f.core.String() -} - -// DumpLastOutput writes all the output (args, exit-code, error, stderr, stdout) of the last ran -// command from RunCmdWithStdin, RunCmdJSONWithStdin, or RunCmdLDJSONWithStdin. -func (f *Filecoin) DumpLastOutput(w io.Writer) { - if f.lastCmdOutput != nil { - fastutil.DumpOutput(w, f.lastCmdOutput) - } else { - fmt.Fprintln(w, "") // nolint: errcheck - } -} - -// DumpLastOutputJSON writes all the output (args, exit-code, error, stderr, stdout) of the last ran -// command from RunCmdWithStdin, RunCmdJSONWithStdin, or RunCmdLDJSONWithStdin as json. 
-func (f *Filecoin) DumpLastOutputJSON(w io.Writer) { - if f.lastCmdOutput != nil { - fastutil.DumpOutputJSON(w, f.lastCmdOutput) - } else { - fmt.Fprintln(w, "{}") // nolint: errcheck - } -} - -// LastCmdStdErr is the standard error output from the last command run -func (f *Filecoin) LastCmdStdErr() io.ReadCloser { - return f.lastCmdOutput.Stderr() -} - -// LastCmdStdErrStr is a shortcut to just get the output as string -func (f *Filecoin) LastCmdStdErrStr() (string, error) { - buf := new(bytes.Buffer) - out := f.LastCmdStdErr() - if _, err := buf.ReadFrom(out); err != nil { - return "", err - } - return buf.String(), nil -} - -// RunCmdWithStdin runs `args` against Filecoin process `f`, a testbedi.Output and an error are returned. -func (f *Filecoin) RunCmdWithStdin(ctx context.Context, stdin io.Reader, args ...string) (testbedi.Output, error) { - if ctx == nil { - ctx = f.ctx - } - f.Log.Infof("RunCmd: %s %s", f.core.Dir(), args) - out, err := f.core.RunCmd(ctx, stdin, args...) - if err != nil { - return nil, err - } - - f.lastCmdOutput = out - return out, nil -} - -// RunCmdJSONWithStdin runs `args` against Filecoin process `f`. The '--enc=json' flag -// is appened to the command specified by `args`, the result of the command is marshaled into `v`. -func (f *Filecoin) RunCmdJSONWithStdin(ctx context.Context, stdin io.Reader, v interface{}, args ...string) error { - args = append(args, "--enc=json") - out, err := f.RunCmdWithStdin(ctx, stdin, args...) - if err != nil { - return err - } - - // check command exit code - if out.ExitCode() > 0 { - return fmt.Errorf("filecoin command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - - dec := json.NewDecoder(out.Stdout()) - return dec.Decode(v) -} - -// RunCmdLDJSONWithStdin runs `args` against Filecoin process `f`. The '--enc=json' flag -// is appened to the command specified by `args`. 
The result of the command is returned -// as a json.Decoder that may be used to read and decode JSON values from the result of -// the command. -func (f *Filecoin) RunCmdLDJSONWithStdin(ctx context.Context, stdin io.Reader, args ...string) (*json.Decoder, error) { - args = append(args, "--enc=json") - out, err := f.RunCmdWithStdin(ctx, stdin, args...) - if err != nil { - return nil, err - } - - // check command exit code - if out.ExitCode() > 0 { - return nil, fmt.Errorf("filecoin command: %s, exited with non-zero exitcode: %d", out.Args(), out.ExitCode()) - } - - return json.NewDecoder(out.Stdout()), nil -} - -// Config return the config file of the FAST process. -func (f *Filecoin) Config() (*fcconfig.Config, error) { - fcc, err := f.core.Config() - if err != nil { - return nil, err - } - cfg, ok := fcc.(*fcconfig.Config) - if !ok { - return nil, fmt.Errorf("failed to cast filecoin config struct") - } - - return cfg, nil -} - -// WriteConfig writes the config `cgf` to the FAST process's repo. -func (f *Filecoin) WriteConfig(cfg *fcconfig.Config) error { - return f.core.WriteConfig(cfg) -} diff --git a/tools/fast/process_action_options.go b/tools/fast/process_action_options.go deleted file mode 100644 index ec6ea95553..0000000000 --- a/tools/fast/process_action_options.go +++ /dev/null @@ -1,139 +0,0 @@ -package fast - -import ( - "fmt" - "math/big" - "strconv" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/libp2p/go-libp2p-core/peer" -) - -// ActionOption is used to pass optional arguments to actions. -// Thought it's not necessary, we use function options to enforce -// coding standards not not passing string options directly into -// the actions. 
-type ActionOption func() []string - -// AOPrice provides the `--gas-price=` option to actions -func AOPrice(price *big.Float) ActionOption { - sPrice := price.Text('f', -1) - return func() []string { - return []string{"--gas-price", sPrice} - } -} - -// AOLimit provides the `--gas-limit=` option to actions -func AOLimit(limit uint64) ActionOption { - sLimit := fmt.Sprintf("%d", limit) - return func() []string { - return []string{"--gas-limit", sLimit} - } -} - -// AOFromAddr provides the `--from=` option to actions -func AOFromAddr(fromAddr address.Address) ActionOption { - sFromAddr := fromAddr.String() - return func() []string { - return []string{"--from", sFromAddr} - } -} - -// AOMinerAddr provides the `--miner=` option to actions -func AOMinerAddr(minerAddr address.Address) ActionOption { - sMinerAddr := minerAddr.String() - return func() []string { - return []string{"--miner", sMinerAddr} - } -} - -// AOPeerid provides the `--peerid=` option to actions -func AOPeerid(pid peer.ID) ActionOption { - sPid := pid.Pretty() - return func() []string { - return []string{"--peerid", sPid} - } -} - -// AOFormat provides the `--format=` option to actions -func AOFormat(format string) ActionOption { - return func() []string { - return []string{"--format", format} - } -} - -// AOCount provides the `--count=` option to actions -func AOCount(count uint) ActionOption { - sCount := fmt.Sprintf("%d", count) - return func() []string { - return []string{"--count", sCount} - } -} - -// AOVerbose provides the `--verbose` option to actions -func AOVerbose() ActionOption { - return func() []string { - return []string{"--verbose"} - } -} - -// AOStreams provides the `--streams` option to actions -func AOStreams() ActionOption { - return func() []string { - return []string{"--streams"} - } -} - -// AOLatency provides the `--latency` option to actions -func AOLatency() ActionOption { - return func() []string { - return []string{"--latency"} - } -} - -// AOValue provides the `--value` 
option to actions -func AOValue(value int) ActionOption { - sValue := fmt.Sprintf("%d", value) - return func() []string { - return []string{"--value", sValue} - } -} - -// AOPayer provides the `--payer=` option to actions -func AOPayer(payer address.Address) ActionOption { - sPayer := payer.String() - return func() []string { - return []string{"--payer", sPayer} - } -} - -// AOValidAt provides the `--validate=` option to actions -func AOValidAt(bh abi.ChainEpoch) ActionOption { - sBH := strconv.FormatInt(int64(bh), 10) - return func() []string { - return []string{"--validat", sBH} - } -} - -// AOAllowDuplicates provides the --allow-duplicates option to client propose-storage-deal -func AOAllowDuplicates(allow bool) ActionOption { - sAllowDupes := fmt.Sprintf("--allow-duplicates=%t", allow) - return func() []string { - return []string{sAllowDupes} - } -} - -// AOSectorSize provides the `--sectorsize` option to actions -func AOSectorSize(ba abi.SectorSize) ActionOption { - return func() []string { - return []string{"--sectorsize", strconv.FormatUint(uint64(ba), 10)} - } -} - -// AOWaitForCount provides the `--wait-for-count` option to actions -func AOWaitForCount(count uint) ActionOption { - return func() []string { - return []string{"--wait-for-count", strconv.Itoa(int(count))} - } -} diff --git a/tools/fast/process_options.go b/tools/fast/process_options.go deleted file mode 100644 index d19407c745..0000000000 --- a/tools/fast/process_options.go +++ /dev/null @@ -1,84 +0,0 @@ -package fast - -import ( - "fmt" - "time" - - "github.com/multiformats/go-multiaddr" -) - -// ProcessInitOption are options passed to process init. 
-type ProcessInitOption func() []string - -// POGenesisFile provides the `--genesisfile=` option to process at init -func POGenesisFile(uri string) ProcessInitOption { - return func() []string { - return []string{"--genesisfile", uri} - } -} - -// POPeerKeyFile provides the `--peerkeyfile=` option to process at init -func POPeerKeyFile(pkf string) ProcessInitOption { - return func() []string { - return []string{"--peerkeyfile", pkf} - } -} - -// POAutoSealIntervalSeconds provides the `--auto-seal-interval-seconds=` option to process at init -func POAutoSealIntervalSeconds(seconds int) ProcessInitOption { - return func() []string { - return []string{"--auto-seal-interval-seconds", fmt.Sprintf("%d", seconds)} - } -} - -// PODevnet provides the `--devnet-` option to process at init -func PODevnet(net string) ProcessInitOption { - return func() []string { - return []string{fmt.Sprintf("--devnet-%s", net)} - } -} - -// PODevnetStaging provides the `--devnet-staging` option to process at init -func PODevnetStaging() ProcessInitOption { - return func() []string { - return []string{"--devnet-staging"} - } -} - -// PODevnetNightly provides the `--devnet-nightly` option to process at init -func PODevnetNightly() ProcessInitOption { - return func() []string { - return []string{"--devnet-nightly"} - } -} - -// PODevnetUser provides the `--devnet-user` option to process at init -func PODevnetUser() ProcessInitOption { - return func() []string { - return []string{"--devnet-user"} - } -} - -// ProcessDaemonOption are options passed to process when starting. -type ProcessDaemonOption func() []string - -// POBlockTime provides the `--block-time=` to process when starting. -func POBlockTime(d time.Duration) ProcessDaemonOption { - return func() []string { - return []string{"--block-time", d.String()} - } -} - -// POIsRelay provides the `--is-relay` to process when starting. 
-func POIsRelay() ProcessDaemonOption { - return func() []string { - return []string{"--is-relay"} - } -} - -// POSwarmRelayPublic provides the `--swarmrelaypublic=` to process when starting. -func POSwarmRelayPublic(a multiaddr.Multiaddr) ProcessDaemonOption { - return func() []string { - return []string{"--swarmrelaypublic", a.String()} - } -} diff --git a/tools/fast/process_stderr_recorder.go b/tools/fast/process_stderr_recorder.go deleted file mode 100644 index 9344c46e9c..0000000000 --- a/tools/fast/process_stderr_recorder.go +++ /dev/null @@ -1,58 +0,0 @@ -package fast - -import ( - "context" - "time" - - "github.com/filecoin-project/go-filecoin/tools/fast/fastutil" -) - -// setupStderrCpaturing opens a reader to the filcoin process to read the stderr -// and then builds a LinePuller to read each line from stderr. This will ensure -// that only complete lines are written to the IntervalRecorder, so that the -// intervals we capture always contain complete log lines -func (f *Filecoin) setupStderrCapturing() error { - stderr, err := f.core.StderrReader() - if err != nil { - return err - } - - f.stderr = stderr - - f.lp = fastutil.NewLinePuller(stderr, &f.ir) - f.lpCtx, f.lpCancel = context.WithCancel(f.ctx) - - go func(ctx context.Context) { - err := f.lp.StartPulling(ctx, time.Millisecond*10) - if err == nil || err == context.Canceled || err == context.DeadlineExceeded { - return - } - - f.Log.Errorf("Stderr log capture failed with error: %s") - f.lpErr = err - }(f.lpCtx) - - return nil -} - -func (f *Filecoin) teardownStderrCapturing() error { - if f.lp != nil { - f.lpCancel() - } - if f.stderr != nil { - return f.stderr.Close() - } - return nil -} - -// StartLogCapture returns a fastutil.Interval, after calling fastutil.Interval#Stop -// all stderr logs generator between the call to StartLogCapture and then will -// be available. 
fastutil.Interval implements io.Reader (its a bytes.Buffer) -// If an error has occurred reading the stderr, the error will be returned here -func (f *Filecoin) StartLogCapture() (*fastutil.Interval, error) { - if f.lpErr != nil { - return nil, f.lpErr - } - - return f.ir.Start(), nil -} diff --git a/tools/fast/process_stderr_recorder_test.go b/tools/fast/process_stderr_recorder_test.go deleted file mode 100644 index 83a9c45767..0000000000 --- a/tools/fast/process_stderr_recorder_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package fast - -import ( - "context" - "io/ioutil" - "testing" - - iptb "github.com/ipfs/iptb/testbed" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - mockplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/mock" -) - -func TestStartLogCapture(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - dir := "mockdir" - - ns := iptb.NodeSpec{ - Type: mockplugin.PluginName, - Dir: dir, - Attrs: nil, - } - - c, err := ns.Load() - assert.NoError(t, err) - - fc, ok := c.(IPTBCoreExt) - require.True(t, ok) - - mfc := NewFilecoinProcess(ctx, fc, FilecoinOpts{}) - err = mfc.setupStderrCapturing() - require.NoError(t, err) - - t.Run("test capture logs", func(t *testing.T) { - capture, err := mfc.StartLogCapture() - require.NoError(t, err) - - _, err = mfc.RunCmdWithStdin(ctx, nil, "add-to-daemonstderr", "hello") - require.NoError(t, err) - - err = mfc.lp.Pull() - require.NoError(t, err) - - capture.Stop() - - bb, err := ioutil.ReadAll(capture) - require.NoError(t, err) - - require.Equal(t, "hello\n", string(bb)) - }) - - err = mfc.teardownStderrCapturing() - require.NoError(t, err) -} diff --git a/tools/fast/process_test.go b/tools/fast/process_test.go deleted file mode 100644 index 288a239692..0000000000 --- a/tools/fast/process_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package fast - -import ( - "context" - 
"io" - "io/ioutil" - "testing" - "time" - - iptb "github.com/ipfs/iptb/testbed" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - mockplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/mock" -) - -// must register all filecoin iptb plugins -func init() { - _, err := iptb.RegisterPlugin(iptb.IptbPlugin{ - From: "", - NewNode: mockplugin.NewNode, - PluginName: mockplugin.PluginName, - BuiltIn: true, - }, false) - - if err != nil { - panic(err) - } -} - -func mustGetStdout(t *testing.T, out io.ReadCloser) string { - o, err := ioutil.ReadAll(out) - require.NoError(t, err) - return string(o) -} - -type testJSONOutParam struct { - Key string -} - -// A sanity test to ensure returning strings, json, and ldjson works as expected. -func TestRunCmds(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - dir := "mockdir" - - ns := iptb.NodeSpec{ - Type: mockplugin.PluginName, - Dir: dir, - Attrs: nil, - } - - c, err := ns.Load() - assert.NoError(t, err) - - fc, ok := c.(IPTBCoreExt) - require.True(t, ok) - - mfc := NewFilecoinProcess(ctx, fc, FilecoinOpts{}) - - t.Run("test RunCmdWithStdin", func(t *testing.T) { - out, err := mfc.RunCmdWithStdin(ctx, nil, "") - require.NoError(t, err) - outStr := mustGetStdout(t, out.Stdout()) - assert.Equal(t, "string", outStr) - }) - - t.Run("test RunCmdJSONWithStdin", func(t *testing.T) { - var outParam testJSONOutParam - err = mfc.RunCmdJSONWithStdin(ctx, nil, &outParam, "json") - require.NoError(t, err) - assert.Equal(t, "value", outParam.Key) - }) - - t.Run("test RunCmdLDJsonWithStdin", func(t *testing.T) { - var outLdParam testJSONOutParam - cmdDecoder, err := mfc.RunCmdLDJSONWithStdin(ctx, nil, "ldjson") - require.NoError(t, err) - assert.NoError(t, cmdDecoder.Decode(&outLdParam)) - assert.Equal(t, "value1", outLdParam.Key) - assert.NoError(t, cmdDecoder.Decode(&outLdParam)) - 
assert.Equal(t, "value2", outLdParam.Key) - }) -} - -func TestInitDaemon(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - dir := "mockdir" - - ns := iptb.NodeSpec{ - Type: mockplugin.PluginName, - Dir: dir, - Attrs: nil, - } - - c, err := ns.Load() - assert.NoError(t, err) - - fc, ok := c.(IPTBCoreExt) - require.True(t, ok) - - t.Run("providing both InitDaemon options and environment options", func(t *testing.T) { - - fastenvOpts := FilecoinOpts{ - InitOpts: []ProcessInitOption{POGenesisFile("http://example.com/genesis.car")}, - } - - mfc := NewFilecoinProcess(ctx, fc, fastenvOpts) - _, err := mfc.InitDaemon(context.Background(), "--foo") - require.Equal(t, ErrDoubleInitOpts, err) - }) -} - -func TestStartDaemon(t *testing.T) { - tf.UnitTest(t) - - ctx := context.Background() - dir := "mockdir" - - ns := iptb.NodeSpec{ - Type: mockplugin.PluginName, - Dir: dir, - Attrs: nil, - } - - c, err := ns.Load() - assert.NoError(t, err) - - fc, ok := c.(IPTBCoreExt) - require.True(t, ok) - - t.Run("providing both InitDaemon options and environment options", func(t *testing.T) { - - fastenvOpts := FilecoinOpts{ - DaemonOpts: []ProcessDaemonOption{POBlockTime(time.Second)}, - } - - mfc := NewFilecoinProcess(ctx, fc, fastenvOpts) - _, err := mfc.StartDaemon(context.Background(), true, "--foo") - require.Equal(t, ErrDoubleDaemonOpts, err) - }) -} diff --git a/tools/fast/series/connect.go b/tools/fast/series/connect.go deleted file mode 100644 index 6252fb38fc..0000000000 --- a/tools/fast/series/connect.go +++ /dev/null @@ -1,21 +0,0 @@ -package series - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// Connect issues a `swarm connect` to the `from` node, using the addresses of the `to` node -func Connect(ctx context.Context, from, to *fast.Filecoin) error { - details, err := to.ID(ctx) - if err != nil { - return err - } - - if _, err := from.SwarmConnect(ctx, details.Addresses...); err != nil { - return err - } - - return 
nil -} diff --git a/tools/fast/series/create_miner_with_ask.go b/tools/fast/series/create_miner_with_ask.go deleted file mode 100644 index c75f36ae55..0000000000 --- a/tools/fast/series/create_miner_with_ask.go +++ /dev/null @@ -1,23 +0,0 @@ -package series - -import ( - "context" - "math/big" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// CreateStorageMinerWithAsk setups a miner and sets an ask price. The created ask is -// returned. -func CreateStorageMinerWithAsk(ctx context.Context, miner *fast.Filecoin, collateral *big.Int, price *big.Float, expiry *big.Int, sectorSize abi.SectorSize) (porcelain.Ask, error) { - // Create miner - _, err := miner.MinerCreate(ctx, collateral, fast.AOSectorSize(sectorSize), fast.AOPrice(big.NewFloat(1.0)), fast.AOLimit(300)) - if err != nil { - return porcelain.Ask{}, err - } - - return SetPriceGetAsk(ctx, miner, price, expiry) -} diff --git a/tools/fast/series/ctx_mining_once.go b/tools/fast/series/ctx_mining_once.go deleted file mode 100644 index c64eca75dc..0000000000 --- a/tools/fast/series/ctx_mining_once.go +++ /dev/null @@ -1,63 +0,0 @@ -package series - -import ( - "context" -) - -type ctxMiningOnceKey struct{} -type ctxMessageWaitKey struct{} - -// Key used to store the MiningOnceFunc in the context -var miningOnceKey = ctxMiningOnceKey{} - -// MiningOnceFunc is the type for the value used when calling SetCtxMiningOnce -type MiningOnceFunc func() - -// Key used to store the MpoolWaitFunc in the context -var mpoolWaitKey = ctxMessageWaitKey{} - -// MpoolWaitFunc is a function that can wait for a message to appear in its queu -type MpoolWaitFunc func() - -// SetCtxMiningOnce returns a context with `fn` set in the context. To run the -// MiningOnceFunc value, call CtxMiningOnce. 
-func SetCtxMiningOnce(ctx context.Context, fn MiningOnceFunc) context.Context { - return context.WithValue(ctx, miningOnceKey, fn) -} - -// SetCtxWaitForMpool returns a context with `fn` set in the context. To run the -// MiningOnceFunc with a MpoolWaitFunc value, call CtxMiningOnceForBlockingCommand. -func SetCtxWaitForMpool(ctx context.Context, fn MpoolWaitFunc) context.Context { - return context.WithValue(ctx, mpoolWaitKey, fn) -} - -// CtxMiningOnce will call the MiningOnceFunc set on the context using -// SetMiningOnceFunc. If no value is set on the context, the call is a noop. -func CtxMiningOnce(ctx context.Context) { - miningOnce, ok := ctx.Value(miningOnceKey).(MiningOnceFunc) - if ok { - miningOnce() - } -} - -// CtxMiningNext will call CtxMiningOnce only after a message is in the message pool ready to mine. -// This lets us run blocking commands that require mining by configuring the mining beforehand. -func CtxMiningNext(ctx context.Context, count int) { - mpoolWait, ok := ctx.Value(mpoolWaitKey).(MpoolWaitFunc) - if !ok { - panic("series not configured with message wait function") - } - - miningOnce, ok := ctx.Value(miningOnceKey).(MiningOnceFunc) - if !ok { - panic("series not configured with mining once function") - } - - go func() { - for i := 0; i < count; i++ { - // wait for one message then mine it - mpoolWait() - miningOnce() - } - }() -} diff --git a/tools/fast/series/ctx_sleep_delay.go b/tools/fast/series/ctx_sleep_delay.go deleted file mode 100644 index c65c970a40..0000000000 --- a/tools/fast/series/ctx_sleep_delay.go +++ /dev/null @@ -1,37 +0,0 @@ -package series - -import ( - "context" - "time" - - "github.com/filecoin-project/go-filecoin/internal/pkg/clock" -) - -type ctxSleepDelayKey struct{} - -var ( - // Key used to set the time.Duration in the context - sleepDelayKey = ctxSleepDelayKey{} - - // Default delay - defaultSleepDelay = clock.DefaultEpochDuration -) - -// SetCtxSleepDelay returns a context with `d` set in the context. 
To sleep with -// the value, call CtxSleepDelay with the context. -func SetCtxSleepDelay(ctx context.Context, d time.Duration) context.Context { - return context.WithValue(ctx, sleepDelayKey, d) -} - -// CtxSleepDelay is a helper method to make sure people don't call `time.Sleep` -// or `time.After` themselves in series. It will use the time.Duration in the -// context, or default to `clock.epochDuration` from the go-filecoin/mining package. -// A channel is return which will receive a time.Time value after the delay. -func CtxSleepDelay(ctx context.Context) <-chan time.Time { - d, ok := ctx.Value(sleepDelayKey).(time.Duration) - if !ok { - d = defaultSleepDelay - } - - return time.After(d) -} diff --git a/tools/fast/series/find_deal_by_id.go b/tools/fast/series/find_deal_by_id.go deleted file mode 100644 index d6c23e83b3..0000000000 --- a/tools/fast/series/find_deal_by_id.go +++ /dev/null @@ -1,33 +0,0 @@ -package series - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/ipfs/go-cid" -) - -// FindDealByID looks for a deal using `DealsList` and returns the result where id matches the ProposalCid of -// the deal. 
-func FindDealByID(ctx context.Context, client *fast.Filecoin, id cid.Cid) (commands.DealsListResult, error) { - dec, err := client.DealsList(ctx) - if err != nil { - return commands.DealsListResult{}, err - } - - var dl commands.DealsListResult - - for dec.More() { - if err := dec.Decode(&dl); err != nil { - return commands.DealsListResult{}, err - } - - if dl.ProposalCid == id { - return dl, nil - } - } - - return commands.DealsListResult{}, fmt.Errorf("No deal found") -} diff --git a/tools/fast/series/get_head_block_height.go b/tools/fast/series/get_head_block_height.go deleted file mode 100644 index e262ef41b0..0000000000 --- a/tools/fast/series/get_head_block_height.go +++ /dev/null @@ -1,24 +0,0 @@ -package series - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// GetHeadBlockHeight will inspect the chain head and return the height -func GetHeadBlockHeight(ctx context.Context, client *fast.Filecoin) (abi.ChainEpoch, error) { - tipset, err := client.ChainHead(ctx) - if err != nil { - return 0, err - } - - block, err := client.ShowHeader(ctx, tipset[0]) - if err != nil { - return 0, err - } - - return block.Height, nil -} diff --git a/tools/fast/series/import_and_store.go b/tools/fast/series/import_and_store.go deleted file mode 100644 index 6e02f4c50e..0000000000 --- a/tools/fast/series/import_and_store.go +++ /dev/null @@ -1,38 +0,0 @@ -package series - -import ( - "context" - - "github.com/ipfs/go-cid" - files "github.com/ipfs/go-ipfs-files" - - "github.com/filecoin-project/go-fil-markets/storagemarket/network" - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// ImportAndStore imports the `data` to the `client`, and proposes a storage -// deal using the provided `ask`, returning the cid of the import and the -// created deal. 
It uses a duration of 10 blocks -func ImportAndStore(ctx context.Context, client *fast.Filecoin, ask porcelain.Ask, data files.File) (cid.Cid, *network.Response, error) { - return ImportAndStoreWithDuration(ctx, client, ask, 10, data) -} - -// ImportAndStoreWithDuration imports the `data` to the `client`, and proposes a storage -// deal using the provided `ask`, returning the cid of the import and the -// created deal, using the provided duration.: -func ImportAndStoreWithDuration(ctx context.Context, client *fast.Filecoin, ask porcelain.Ask, duration uint64, data files.File) (cid.Cid, *network.Response, error) { - // Client neeeds to import the data - dcid, err := client.ClientImport(ctx, data) - if err != nil { - return cid.Undef, nil, err - } - - // Client makes a deal - deal, err := client.ClientProposeStorageDeal(ctx, dcid, ask.Miner, ask.ID, duration) - if err != nil { - return cid.Undef, nil, err - } - - return dcid, deal, nil -} diff --git a/tools/fast/series/init_and_start.go b/tools/fast/series/init_and_start.go deleted file mode 100644 index fed3bb7331..0000000000 --- a/tools/fast/series/init_and_start.go +++ /dev/null @@ -1,27 +0,0 @@ -package series - -import ( - "context" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// InitAndStart is a quick way to run Init and Start for a filecoin process. A variadic set of functions -// can be passed to run between init and the start of the daemon to make configuration changes. 
-func InitAndStart(ctx context.Context, node *fast.Filecoin, fns ...func(context.Context, *fast.Filecoin) error) error { - if _, err := node.InitDaemon(ctx); err != nil { - return err - } - - for _, fn := range fns { - if err := fn(ctx, node); err != nil { - return err - } - } - - if _, err := node.StartDaemon(ctx, true); err != nil { - return err - } - - return nil -} diff --git a/tools/fast/series/send_filecoin_defaults.go b/tools/fast/series/send_filecoin_defaults.go deleted file mode 100644 index 4e7be9d7aa..0000000000 --- a/tools/fast/series/send_filecoin_defaults.go +++ /dev/null @@ -1,30 +0,0 @@ -package series - -import ( - "context" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// SendFilecoinDefaults sends the `value` amount of fil from the default wallet -// address of the `from` node to the `to` node's default wallet, and waits for the -// message to be received by the `to` node. -func SendFilecoinDefaults(ctx context.Context, from, to *fast.Filecoin, value int) error { - var toAddr address.Address - if err := to.ConfigGet(ctx, "wallet.defaultAddress", &toAddr); err != nil { - return err - } - - mcid, err := SendFilecoinFromDefault(ctx, from, toAddr, value) - if err != nil { - return err - } - - if _, err := to.MessageWait(ctx, mcid); err != nil { - return err - } - - return nil -} diff --git a/tools/fast/series/send_filecoin_from_default.go b/tools/fast/series/send_filecoin_from_default.go deleted file mode 100644 index 2d77d57bbf..0000000000 --- a/tools/fast/series/send_filecoin_from_default.go +++ /dev/null @@ -1,37 +0,0 @@ -package series - -import ( - "context" - "math/big" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// SendFilecoinFromDefault will send the `value` of FIL from the default wallet -// address, per the config of the `node`, 
to the provided address `addr` and -// wait for the message to showup on chain. -// The waiting node is the sender, this does not guarantee that the message has -// been received by the targeted node of addr. -func SendFilecoinFromDefault(ctx context.Context, node *fast.Filecoin, addr address.Address, value int) (cid.Cid, error) { - var walletAddr address.Address - if err := node.ConfigGet(ctx, "wallet.defaultAddress", &walletAddr); err != nil { - return cid.Undef, err - } - - mcid, err := node.MessageSend(ctx, addr, builtin.MethodSend, fast.AOValue(value), fast.AOFromAddr(walletAddr), fast.AOPrice(big.NewFloat(1.0)), fast.AOLimit(300)) - if err != nil { - return cid.Undef, err - } - - CtxMiningOnce(ctx) - - if _, err := node.MessageWait(ctx, mcid); err != nil { - return cid.Undef, err - } - - return mcid, nil -} diff --git a/tools/fast/series/set_price_ask.go b/tools/fast/series/set_price_ask.go deleted file mode 100644 index 04cbd191ea..0000000000 --- a/tools/fast/series/set_price_ask.go +++ /dev/null @@ -1,24 +0,0 @@ -package series - -import ( - "context" - "fmt" - "math/big" - - "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// SetPriceGetAsk issues a `set-price` and tries, to the best it can, return the -// created ask. This series will run until it finds an ask, or the context is -// canceled. 
-func SetPriceGetAsk(ctx context.Context, miner *fast.Filecoin, price *big.Float, expiry *big.Int) (porcelain.Ask, error) { - // Set a price - _, err := miner.MinerSetPrice(ctx, price, expiry, fast.AOPrice(big.NewFloat(1.0)), fast.AOLimit(300)) - if err != nil { - return porcelain.Ask{}, err - } - - // Dragons: must be re-integrated with storage market module - return porcelain.Ask{}, fmt.Errorf("could not find ask") -} diff --git a/tools/fast/series/setup_genesis_node.go b/tools/fast/series/setup_genesis_node.go deleted file mode 100644 index 32a6f42b9a..0000000000 --- a/tools/fast/series/setup_genesis_node.go +++ /dev/null @@ -1,40 +0,0 @@ -package series - -import ( - "context" - "math/big" - - "github.com/filecoin-project/go-address" - files "github.com/ipfs/go-ipfs-files" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// SetupGenesisNode will initialize, start, configure, and issue the -// "start mining" command to the filecoin process `node`. Process `node` will -// be configured with miner `minerAddress`, and import the address of the miner -// `minerOwner`. Lastly the process `node` will start mining. 
-func SetupGenesisNode(ctx context.Context, node *fast.Filecoin, minerAddress address.Address, minerOwner files.File) error { - if _, err := node.InitDaemon(ctx); err != nil { - return err - } - - if _, err := node.StartDaemon(ctx, true); err != nil { - return err - } - - if err := node.ConfigSet(ctx, "mining.minerAddress", minerAddress.String()); err != nil { - return err - } - - wallet, err := node.WalletImport(ctx, minerOwner) - if err != nil { - return err - } - if err := node.ConfigSet(ctx, "wallet.defaultAddress", wallet[0].String()); err != nil { - return err - } - - _, err = node.MinerUpdatePeerid(ctx, minerAddress, node.PeerID, fast.AOFromAddr(wallet[0]), fast.AOPrice(big.NewFloat(.0000001)), fast.AOLimit(1)) - return err -} diff --git a/tools/fast/series/wait_for_block_height.go b/tools/fast/series/wait_for_block_height.go deleted file mode 100644 index 5a0b24ea8c..0000000000 --- a/tools/fast/series/wait_for_block_height.go +++ /dev/null @@ -1,29 +0,0 @@ -package series - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// WaitForBlockHeight will inspect the chain head and wait till the height is equal to or -// greater than the provide height `bh` -func WaitForBlockHeight(ctx context.Context, client *fast.Filecoin, bh abi.ChainEpoch) error { - for { - - hh, err := GetHeadBlockHeight(ctx, client) - if err != nil { - return err - } - - if hh >= bh { - break - } - - <-CtxSleepDelay(ctx) - } - - return nil -} diff --git a/tools/fast/series/wait_for_chain_message.go b/tools/fast/series/wait_for_chain_message.go deleted file mode 100644 index 2b86ff1b96..0000000000 --- a/tools/fast/series/wait_for_chain_message.go +++ /dev/null @@ -1,80 +0,0 @@ -package series - -import ( - "context" - "fmt" - "io" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - 
"github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/ipfs/go-cid" -) - -// MsgInfo contains the BlockCid for the message MsgCid -type MsgInfo struct { - BlockCid cid.Cid - MsgCid cid.Cid -} - -// MsgSearchFn is the function signature used to find a message -type MsgSearchFn func(context.Context, *fast.Filecoin, *types.SignedMessage) (bool, error) - -// WaitForChainMessage iterates over the chain until the provided function `fn` returns true. -func WaitForChainMessage(ctx context.Context, node *fast.Filecoin, fn MsgSearchFn) (*MsgInfo, error) { - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-CtxSleepDelay(ctx): - dec, err := node.ChainLs(ctx) - if err != nil { - return nil, err - } - - for dec.More() { - var blks []block.Block - err := dec.Decode(&blks) - if err != nil { - if err == io.EOF { - break - } - - return nil, err - } - - if msgInfo, err := findMessageInBlockSlice(ctx, node, blks, fn); err == nil { - return msgInfo, nil - } - } - } - } -} - -func findMessageInBlockSlice(ctx context.Context, node *fast.Filecoin, blks []block.Block, fn MsgSearchFn) (*MsgInfo, error) { - for _, blk := range blks { - msgs, err := node.ShowMessages(ctx, blk.Messages.Cid) - if err != nil { - return nil, err - } - - for _, msg := range msgs { - found, err := fn(ctx, node, msg) - if err != nil { - return nil, err - } - - if found { - blockCid := blk.Cid() - msgCid, _ := msg.Cid() - - return &MsgInfo{ - BlockCid: blockCid, - MsgCid: msgCid, - }, nil - } - } - } - - return nil, fmt.Errorf("Message not found") -} diff --git a/tools/fast/series/wait_for_deal_state.go b/tools/fast/series/wait_for_deal_state.go deleted file mode 100644 index de5a0310d5..0000000000 --- a/tools/fast/series/wait_for_deal_state.go +++ /dev/null @@ -1,28 +0,0 @@ -package series - -import ( - "context" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" - - 
"github.com/filecoin-project/go-filecoin/tools/fast" -) - -// WaitForDealState will query the storage deal until its state matches the -// passed in `state`, or the context is canceled. -func WaitForDealState(ctx context.Context, client *fast.Filecoin, deal *network.Response, state storagemarket.StorageDealStatus) (*network.Response, error) { - for { - // Client waits around for the deal to be sealed - dr, err := client.ClientQueryStorageDeal(ctx, deal.Proposal) - if err != nil { - return nil, err - } - - if dr.State == state { - return dr, nil - } - - <-CtxSleepDelay(ctx) - } -} diff --git a/tools/fast/series/with_wallet.go b/tools/fast/series/with_wallet.go deleted file mode 100644 index caecd78abd..0000000000 --- a/tools/fast/series/with_wallet.go +++ /dev/null @@ -1,41 +0,0 @@ -package series - -import ( - "context" - "errors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/go-filecoin/tools/fast" -) - -// ErrWithWalletRestoreFailed is returned if the original address could not be restored. -var ErrWithWalletRestoreFailed = errors.New("failed to restore default wallet after WithWallet exited") - -// WithWallet can be used to temporarlly change the default wallet address of -// the node to sessionWallet for all FAST actions executed inside of sessionFn. -// -// WithWallet should be used when you want to temporarally change the default -// wallet address of the node. -// -// Error ErrWithWalletRestoreFailed will be returned if the original address -// could not be restored. 
-func WithWallet(ctx context.Context, fc *fast.Filecoin, sessionWallet address.Address, sessionFn func(*fast.Filecoin) error) (err error) { - var beforeAddress address.Address - if err = fc.ConfigGet(ctx, "wallet.defaultAddress", &beforeAddress); err != nil { - return - } - - if err = fc.ConfigSet(ctx, "wallet.defaultAddress", sessionWallet); err != nil { - return - } - - defer func() { - err = fc.ConfigSet(ctx, "wallet.defaultAddress", beforeAddress) - if err != nil { - err = ErrWithWalletRestoreFailed - } - }() - - return sessionFn(fc) -} diff --git a/tools/fast/tests/retrieval_test.go b/tools/fast/tests/retrieval_test.go deleted file mode 100644 index 6601527b55..0000000000 --- a/tools/fast/tests/retrieval_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package tests - -import ( - "bytes" - "context" - "crypto/rand" - "io" - "io/ioutil" - "math/big" - "testing" - "time" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - files "github.com/ipfs/go-ipfs-files" - logging "github.com/ipfs/go-log/v2" - - "github.com/stretchr/testify/require" - - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/tools/fast" - "github.com/filecoin-project/go-filecoin/tools/fast/environment" - "github.com/filecoin-project/go-filecoin/tools/fast/series" - localplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/local" -) - -func init() { - // Enabling debug logging provides a lot of insight into what commands are - // being executed - logging.SetDebugLogging() -} - -// TestRetrieval exercises storing and retrieving with the filecoin protocols using a locally running -// temporary network. 
-func TestRetrievalLocalNetwork(t *testing.T) { - tf.FunctionalTest(t) - t.Skip("Long term solution: #3642") - - blocktime := time.Second * 5 - - // This test should run in 20 block times, with 120 seconds for sealing, and no longer - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(20*blocktime).Add(120*time.Second)) - defer cancel() - - // Create a directory for the test using the test name (mostly for FAST) - dir, err := ioutil.TempDir("", t.Name()) - require.NoError(t, err) - - // Create an environment that includes a genesis block with 1MM FIL - env, err := environment.NewMemoryGenesis(big.NewInt(1000000), dir) - require.NoError(t, err) - - // Teardown will shutdown all running processes the environment knows about - // and cleanup anything the evironment setup. This includes the directory - // the environment was created to use. - defer func() { - require.NoError(t, env.Teardown(ctx)) - }() - - // Setup options for nodes. - options := make(map[string]string) - options[localplugin.AttrLogJSON] = "0" // Disable JSON logs - options[localplugin.AttrLogLevel] = "4" // Set log level to Info - options[localplugin.AttrFilecoinBinary] = th.MustGetFilecoinBinary() // Set binary - - ctx = series.SetCtxSleepDelay(ctx, blocktime) - - genesisURI := env.GenesisCar() - genesisMiner, err := env.GenesisMiner() - require.NoError(t, err) - - fastenvOpts := fast.FilecoinOpts{ - InitOpts: []fast.ProcessInitOption{fast.POGenesisFile(genesisURI)}, - DaemonOpts: []fast.ProcessDaemonOption{fast.POBlockTime(blocktime)}, - } - - // Setup nodes used for the test - genesis, err := env.NewProcess(ctx, localplugin.PluginName, options, fastenvOpts) - require.NoError(t, err) - - miner, err := env.NewProcess(ctx, localplugin.PluginName, options, fastenvOpts) - require.NoError(t, err) - - client, err := env.NewProcess(ctx, localplugin.PluginName, options, fastenvOpts) - require.NoError(t, err) - - // Start setting up the nodes - // Setup Genesis - err = 
series.SetupGenesisNode(ctx, genesis, genesisMiner.Address, files.NewReaderFile(genesisMiner.Owner)) - require.NoError(t, err) - - err = genesis.MiningStart(ctx) - require.NoError(t, err) - - // Start Miner - err = series.InitAndStart(ctx, miner) - require.NoError(t, err) - - // Start Client - err = series.InitAndStart(ctx, client) - require.NoError(t, err) - - // Connect everything to the genesis node so it can issue filecoin when needed - err = series.Connect(ctx, genesis, miner) - require.NoError(t, err) - - err = series.Connect(ctx, genesis, client) - require.NoError(t, err) - - // Everyone needs FIL to deal with gas costs and make sure their wallets - // exists (sending FIL to a wallet addr creates it) - err = series.SendFilecoinDefaults(ctx, genesis, miner, 1000) - require.NoError(t, err) - - err = series.SendFilecoinDefaults(ctx, genesis, client, 1000) - require.NoError(t, err) - - RunRetrievalTest(ctx, t, miner, client) -} - -// TestRetrieval exercises storing and retreiving with the filecoin protocols on a kittyhawk deployed -// devnet. -func TestRetrievalDevnet(t *testing.T) { - tf.FunctionalTest(t) - - // Skip the test so it doesn't run - t.SkipNow() - - blocktime := time.Second * 30 - networkConfig, err := environment.FindDevnetConfigByName("nightly") - require.NoError(t, err) - - // This test should run in and hour and no longer - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(blocktime*120)) - defer cancel() - - // Create a directory for the test using the test name (mostly for FAST) - dir, err := ioutil.TempDir("", t.Name()) - require.NoError(t, err) - - // Create an environment that includes a genesis block with 1MM FIL - env, err := environment.NewDevnet(networkConfig, dir) - require.NoError(t, err) - - // Teardown will shutdown all running processes the environment knows about - // and cleanup anything the evironment setup. This includes the directory - // the environment was created to use. 
- defer func() { - require.NoError(t, env.Teardown(ctx)) - }() - - // Setup options for nodes. - options := make(map[string]string) - options[localplugin.AttrLogJSON] = "0" // Disable JSON logs - options[localplugin.AttrLogLevel] = "4" // Set log level to Info - options[localplugin.AttrFilecoinBinary] = th.MustGetFilecoinBinary() // Set binary - - ctx = series.SetCtxSleepDelay(ctx, blocktime) - - genesisURI := env.GenesisCar() - - fastenvOpts := fast.FilecoinOpts{ - InitOpts: []fast.ProcessInitOption{fast.POGenesisFile(genesisURI), fast.PODevnet(networkConfig.Name)}, - DaemonOpts: []fast.ProcessDaemonOption{}, - } - - miner, err := env.NewProcess(ctx, localplugin.PluginName, options, fastenvOpts) - require.NoError(t, err) - - client, err := env.NewProcess(ctx, localplugin.PluginName, options, fastenvOpts) - require.NoError(t, err) - - // Start Miner - err = series.InitAndStart(ctx, miner) - require.NoError(t, err) - - // Start Client - err = series.InitAndStart(ctx, client) - require.NoError(t, err) - - // Everyone needs FIL to deal with gas costs and make sure their wallets - // exists (sending FIL to a wallet addr creates it) - err = env.GetFunds(ctx, miner) - require.NoError(t, err) - - err = env.GetFunds(ctx, client) - require.NoError(t, err) - - RunRetrievalTest(ctx, t, miner, client) -} - -func RunRetrievalTest(ctx context.Context, t *testing.T, miner, client *fast.Filecoin) { - collateral := big.NewInt(10) // FIL - price := big.NewFloat(0.000000001) // price per byte/block - expiry := big.NewInt(24 * 60 * 60 / 30) // ~24 hours - - pparams, err := miner.Protocol(ctx) - require.NoError(t, err) - - sinfo := pparams.SupportedSectors[0] - - // Create a miner on the miner node - ask, err := series.CreateStorageMinerWithAsk(ctx, miner, collateral, price, expiry, sinfo.Size) - require.NoError(t, err) - - // Connect the client and the miner - require.NoError(t, series.Connect(ctx, client, miner)) - - // Start the miner - require.NoError(t, miner.MiningStart(ctx)) - - 
// Store some data with the miner with the given ask, returns the cid for - // the imported data, and the deal which was created - var data bytes.Buffer - dataReader := io.LimitReader(rand.Reader, int64(sinfo.MaxPieceSize)) - dataReader = io.TeeReader(dataReader, &data) - dcid, deal, err := series.ImportAndStore(ctx, client, ask, files.NewReaderFile(dataReader)) - require.NoError(t, err) - - // Wait for the deal to be complete - proposalResponse, err := series.WaitForDealState(ctx, client, deal, storagemarket.StorageDealActive) - require.NoError(t, err) - - _, err = client.MessageWait(ctx, *proposalResponse.PublishMessage) - require.NoError(t, err) - - // Verify PIP - _, err = client.ClientVerifyStorageDeal(ctx, deal.Proposal) - require.NoError(t, err) - - // Retrieve the stored piece of data - reader, err := client.RetrievalClientRetrievePiece(ctx, dcid, ask.Miner) - require.NoError(t, err) - - // Verify that it's all the same - retrievedData, err := ioutil.ReadAll(reader) - require.NoError(t, err) - require.Equal(t, data.Bytes(), retrievedData) -} diff --git a/tools/faucet/limiter/limiter.go b/tools/faucet/limiter/limiter.go deleted file mode 100644 index 9a9af63797..0000000000 --- a/tools/faucet/limiter/limiter.go +++ /dev/null @@ -1,79 +0,0 @@ -package limiter - -import ( - "sync" - "time" -) - -// Time interface defines required time methods for the Limiter struct. 
-// Primarily used for testing -type Time interface { - Until(time.Time) time.Duration -} - -// Limiter is used to restrict access to a resources till a future time -type Limiter struct { - addrsMu sync.Mutex - // addrs maps an address to the time when it is allowed to make additional requests - addrs map[string]time.Time - // time - time Time -} - -// NewLimiter returns a new limiter -func NewLimiter(tm Time) *Limiter { - l := &Limiter{} - - l.addrs = make(map[string]time.Time) - l.time = tm - - return l -} - -// Add limits value till a given time -func (l *Limiter) Add(addr string, t time.Time) { - l.addrsMu.Lock() - defer l.addrsMu.Unlock() - l.addrs[addr] = t -} - -// Ready checks to see if the time has expired. Returns a time.Duration -// for the time remaining till a true value will be returned -func (l *Limiter) Ready(addr string) (time.Duration, bool) { - l.addrsMu.Lock() - defer l.addrsMu.Unlock() - - return l.ready(addr) -} - -func (l *Limiter) ready(addr string) (time.Duration, bool) { - if t, ok := l.addrs[addr]; ok && l.time.Until(t) > 0 { - return l.time.Until(t), false - } - - return 0, true -} - -// Clear removes value from the limiter. Any calls to Ready for the value -// will return true after a call to Clear for it. 
-func (l *Limiter) Clear(addr string) { - l.addrsMu.Lock() - defer l.addrsMu.Unlock() - l.clear(addr) -} - -func (l *Limiter) clear(addr string) { - delete(l.addrs, addr) -} - -// Clean removes all values which a call to Ready would return true -func (l *Limiter) Clean() { - l.addrsMu.Lock() - defer l.addrsMu.Unlock() - - for addr := range l.addrs { - if _, ok := l.ready(addr); ok { - l.clear(addr) - } - } -} diff --git a/tools/faucet/limiter/limiter_test.go b/tools/faucet/limiter/limiter_test.go deleted file mode 100644 index 000cc6628e..0000000000 --- a/tools/faucet/limiter/limiter_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package limiter - -import ( - "testing" - "time" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/stretchr/testify/assert" -) - -type MockTime struct { - UntilReturn time.Duration -} - -func (mt *MockTime) Until(t time.Time) time.Duration { - return mt.UntilReturn -} - -func TestReady(t *testing.T) { - tf.UnitTest(t) - - addr := "Qmaddr" - - t.Run("Not ready before time elapses", func(t *testing.T) { - lockedFor := time.Microsecond * 50 - mt := &MockTime{} - mt.UntilReturn = lockedFor - - l := NewLimiter(mt) - - l.Add(addr, time.Now().Add(lockedFor)) - t0, ok := l.Ready(addr) - assert.False(t, ok) - assert.Equal(t, lockedFor, t0) - }) - - t.Run("Ready after time elapses", func(t *testing.T) { - lockedFor := time.Microsecond * 50 - - mt := &MockTime{} - mt.UntilReturn = time.Duration(0) - - l := NewLimiter(mt) - - l.Add(addr, time.Now().Add(lockedFor)) - t0, ok := l.Ready(addr) - assert.True(t, ok) - assert.Equal(t, time.Duration(0), t0) - }) - - t.Run("Ready if not added", func(t *testing.T) { - mt := &MockTime{} - mt.UntilReturn = time.Duration(0) - - l := NewLimiter(mt) - - t0, ok := l.Ready(addr) - assert.True(t, ok) - assert.Equal(t, time.Duration(0), t0) - }) - - t.Run("Ready after waiting returned duration", func(t *testing.T) { - lockedFor := time.Microsecond * 50 - mt := &MockTime{} - 
mt.UntilReturn = lockedFor - - l := NewLimiter(mt) - - l.Add(addr, time.Now().Add(lockedFor)) - - d0, ok := l.Ready(addr) - assert.False(t, ok) - assert.Equal(t, lockedFor, d0) - - mt.UntilReturn = time.Duration(0) - - d0, ok = l.Ready(addr) - assert.True(t, ok) - assert.Equal(t, time.Duration(0), d0) - }) -} - -func TestClear(t *testing.T) { - tf.UnitTest(t) - - addr := "Qmaddr" - - t.Run("Ready after clear", func(t *testing.T) { - lockedFor := time.Microsecond * 50 - mt := &MockTime{} - mt.UntilReturn = lockedFor - - l := NewLimiter(mt) - - l.Add(addr, time.Now().Add(lockedFor)) - _, ok := l.Ready(addr) - assert.False(t, ok) - - l.Clear(addr) - - _, ok = l.Ready(addr) - assert.True(t, ok) - }) -} - -func TestClean(t *testing.T) { - tf.UnitTest(t) - - addr := "Qmaddr" - - t.Run("Removes expired values", func(t *testing.T) { - lockedFor := time.Microsecond * 50 - mt := &MockTime{} - mt.UntilReturn = time.Duration(0) - - l := NewLimiter(mt) - - l.Add(addr, time.Now().Add(lockedFor)) - assert.Len(t, l.addrs, 1) - - l.Clean() - - assert.Len(t, l.addrs, 0) - - _, ok := l.Ready(addr) - assert.True(t, ok) - }) -} diff --git a/tools/faucet/main.go b/tools/faucet/main.go deleted file mode 100644 index 2a5b198010..0000000000 --- a/tools/faucet/main.go +++ /dev/null @@ -1,146 +0,0 @@ -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "net/http" - "time" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-filecoin/tools/faucet/limiter" -) - -var log = logging.Logger("faucet") - -// Tick interval to cleanup wallet addrs that have passed the expiry time -var limiterCleanTick = time.Minute * 15 - -// Default timeout between wallet fund requests -var defaultLimiterExpiry = time.Hour * 1 - -func init() { - // Info level - logging.SetAllLoggers(4) -} - -type timeImpl struct{} - -// Until returns the time.Duration until time.Time t -func (mt *timeImpl) Until(t 
time.Time) time.Duration { - return time.Until(t) -} - -func main() { - filapi := flag.String("fil-api", "localhost:3453", "set the api address of the filecoin node to use") - filwal := flag.String("fil-wallet", "", "(required) set the wallet address for the controlled filecoin node to send funds from") - expiry := flag.Duration("limiter-expiry", defaultLimiterExpiry, "minimum time duration between faucet request to the same wallet addr") - faucetval := flag.Int64("faucet-val", 500, "set the amount of fil to pay to each requester") - flag.Parse() - - if *filwal == "" { - fmt.Println("ERROR: must provide wallet address to send funds from") - flag.Usage() - return - } - - addrLimiter := limiter.NewLimiter(&timeImpl{}) - - // Clean the limiter every limiterCleanTick - go func() { - c := time.Tick(limiterCleanTick) - for range c { - addrLimiter.Clean() - } - }() - - http.HandleFunc("/", displayForm) - http.HandleFunc("/tap", func(w http.ResponseWriter, r *http.Request) { - target := r.FormValue("target") - if target == "" { - http.Error(w, "must specify a target address to send FIL to", 400) - return - } - log.Infof("Request to send funds to: %s", target) - - addr, err := address.NewFromString(target) - if err != nil { - log.Errorf("failed to parse target address: %s %s", target, err) - http.Error(w, fmt.Sprintf("Failed to parse target address %s %s", target, err.Error()), 400) - return - } - - if readyIn, ok := addrLimiter.Ready(target); !ok { - log.Errorf("limit hit for target address %s", target) - w.Header().Add("Retry-After", fmt.Sprintf("%d", int64(readyIn/time.Second))) - http.Error(w, fmt.Sprintf("Too Many Requests, please wait %s", readyIn), http.StatusTooManyRequests) - return - } - - reqStr := fmt.Sprintf("http://%s/api/message/send?arg=%s&value=%d&from=%s&gas-price=0.0001&gas-limit=1000", *filapi, addr, *faucetval, *filwal) - log.Infof("Request URL: %s", reqStr) - - resp, err := http.Post(reqStr, "application/json", nil) - if err != nil { - 
log.Errorf("failed to Post request: %s", err) - http.Error(w, err.Error(), 500) - return - } - - out, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Errorf("failed to read response body: %s", err) - http.Error(w, "failed to read response", 500) - return - } - if resp.StatusCode != 200 { - log.Errorf("status: %s body: %s", resp.Status, string(out)) - http.Error(w, "failed to send funds", 500) - return - } - - msgResp := struct{ Cid cid.Cid }{} - - // result should be a message cid - if err := json.Unmarshal(out, &msgResp); err != nil { - log.Errorf("json unmarshal from response failed: %s", err) - log.Errorf("response data was: %s", out) - http.Error(w, "faucet unmarshal failed", 500) - return - } - msgcid := msgResp.Cid - - addrLimiter.Add(target, time.Now().Add(*expiry)) - - log.Info("Request successful. Message CID: %s", msgcid.String()) - w.Header().Add("Message-Cid", msgcid.String()) - w.WriteHeader(200) - fmt.Fprint(w, "Success! Message CID: ") // nolint: errcheck - fmt.Fprintln(w, msgcid.String()) // nolint: errcheck - }) - - panic(http.ListenAndServe(":9797", nil)) -} - -const form = ` - - -

What is your wallet address

-

You can find this by running:

- go-filecoin address ls -

Address:

-
- - -
- - -` - -func displayForm(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, form) // nolint: errcheck -} diff --git a/tools/gengen/README.md b/tools/gengen/README.md index 307d8320b5..bc31acedc9 100644 --- a/tools/gengen/README.md +++ b/tools/gengen/README.md @@ -11,7 +11,7 @@ go-filecon $ ./tools/gengen/gengen --keypath fixtures --out-car fixtures/genesis ### Building -The gengen tool expects that you can already build `go-filecoin`. Please refer +The gengen tool expects that you can already build `venus`. Please refer to the README in the root of this project for details. ``` diff --git a/tools/gengen/gencfg/main.go b/tools/gengen/gencfg/main.go index e301a91138..e6f43b8f94 100644 --- a/tools/gengen/gencfg/main.go +++ b/tools/gengen/gencfg/main.go @@ -20,6 +20,6 @@ func main() { panic(err) } - cid := commcid.DataCommitmentV1ToCID(bytes) + cid, _ := commcid.DataCommitmentV1ToCID(bytes) fmt.Printf("%s\n", cid) } diff --git a/tools/gengen/main.go b/tools/gengen/main.go index 65411c3ad7..72657ce71f 100644 --- a/tools/gengen/main.go +++ b/tools/gengen/main.go @@ -1,14 +1,16 @@ package main import ( + "encoding/hex" "encoding/json" flg "flag" "fmt" "os" - "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/tools/gengen/util" + "github.com/filecoin-project/venus/pkg/crypto" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" // enable bls signatures + _ "github.com/filecoin-project/venus/pkg/crypto/secp" // enable secp signatures + gengen "github.com/filecoin-project/venus/tools/gengen/util" ) func writeKey(ki *crypto.KeyInfo, name string, jsonout bool) error { @@ -17,8 +19,8 @@ func writeKey(ki *crypto.KeyInfo, name string, jsonout bool) error { return err } if !jsonout { - fmt.Fprintf(os.Stderr, "key: %s - %s\n", name, addr) // nolint: errcheck - fmt.Fprintf(os.Stderr, "run 'go-filecoin wallet import ./%s.key' to add private key for %[1]s to 
your wallet\n", name) // nolint: errcheck + fmt.Fprintf(os.Stderr, "key: %s - %s\n", name, addr) // nolint: errcheck + fmt.Fprintf(os.Stderr, "run 'venus wallet import ./%s.key' to add private key for %[1]s to your wallet\n", name) // nolint: errcheck } fi, err := os.Create(name + ".key") if err != nil { @@ -26,10 +28,10 @@ func writeKey(ki *crypto.KeyInfo, name string, jsonout bool) error { } defer fi.Close() // nolint: errcheck - var wir commands.WalletSerializeResult - wir.KeyInfo = append(wir.KeyInfo, ki) + // var wir cmd.WalletSerializeResult + // wir.KeyInfo = append(wir.KeyInfo, ki) - return json.NewEncoder(fi).Encode(wir) + return json.NewEncoder(hex.NewEncoder(fi)).Encode(ki) } /* gengen takes as input a json encoded 'Genesis Config' @@ -56,14 +58,12 @@ $ cat setup.json } $ cat setup.json | gengen > genesis.car -The outputted file can be used by go-filecoin during init to +The outputted file can be used by venus during init to set the initial genesis block: -$ go-filecoin init --genesisfile=genesis.car +$ venus daemon --genesisfile=genesis.car */ -var ( - flag = flg.NewFlagSet(os.Args[0], flg.ExitOnError) -) +var flag = flg.NewFlagSet(os.Args[0], flg.ExitOnError) func main() { jsonout := flag.Bool("json", false, "sets output to be json") diff --git a/tools/gengen/util/generator.go b/tools/gengen/util/generator.go index e56e4e2981..3f30e20fcd 100644 --- a/tools/gengen/util/generator.go +++ b/tools/gengen/util/generator.go @@ -1,97 +1,122 @@ package gengen import ( + "bytes" "context" "fmt" "io" mrand "math/rand" - address "github.com/filecoin-project/go-address" - amt "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/account" - "github.com/filecoin-project/specs-actors/actors/builtin/cron" - init_ 
"github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/reward" - "github.com/filecoin-project/specs-actors/actors/builtin/system" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/venus/pkg/fork" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/impl" + "github.com/filecoin-project/venus/pkg/vm/vmcontext" + + "github.com/filecoin-project/go-address" + ds "github.com/ipfs/go-datastore" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/account" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/cron" + init_ "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/reward" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/system" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" + "github.com/filecoin-project/specs-actors/v2/actors/util/adt" cid "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p-core/peer" + 
"github.com/libp2p/go-libp2p/core/peer" mh "github.com/multiformats/go-multihash" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/drand" - e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/genesis" - "github.com/filecoin-project/go-filecoin/internal/pkg/proofs" - gfcstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor" - "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" - "github.com/filecoin-project/go-filecoin/internal/pkg/vmsupport" + xerrors "github.com/pkg/errors" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/config" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/genesis" + gfcstate "github.com/filecoin-project/venus/pkg/state" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm" + "github.com/filecoin-project/venus/pkg/vm/gas" + "github.com/filecoin-project/venus/pkg/vmsupport" + "github.com/filecoin-project/venus/venus-shared/actors" + blockstore "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" ) -type cstore struct { - ctx context.Context - cbor.IpldStore -} - -func (s *cstore) Context() context.Context { - return s.ctx -} +const InitialBaseFee = 100e6 -var ( - rewardActorInitialBalance = types.NewAttoFILFromFIL(1.4e9) -) +var rewardActorInitialBalance = types.FromFil(1.4e9) type GenesisGenerator struct { // actor 
state - stateTree state.Tree - store vm.Storage + stateTree tree.Tree + store blockstore.Blockstore cst cbor.IpldStore - vm genesis.VM - + vm vm.Interpreter + vmOption vm.VmOption keys []*crypto.KeyInfo // Keys for pre-alloc accounts vrkey *crypto.KeyInfo // Key for verified registry root pnrg *mrand.Rand - chainRand crypto.ChainRandomnessSource cfg *GenesisCfg } func NewGenesisGenerator(bs blockstore.Blockstore) *GenesisGenerator { - cst := cborutil.NewIpldStore(bs) - g := GenesisGenerator{} - g.stateTree = state.NewState(cst) - g.store = vm.NewStorage(bs) - g.vm = vm.NewVM(g.stateTree, &g.store, vmsupport.NewSyscalls(&vmsupport.NilFaultChecker{}, &proofs.FakeVerifier{})).(genesis.VM) - g.cst = cst - - g.chainRand = crypto.ChainRandomnessSource{Sampler: &crypto.GenesisSampler{VRFProof: genesis.Ticket.VRFProof}} - return &g + csc := func(context.Context, abi.ChainEpoch, tree.Tree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + cst := cbor.NewCborStore(bs) + syscallImpl := vmsupport.NewSyscalls(&vmsupport.NilFaultChecker{}, &impl.FakeVerifier{}) + chainRand := chain.NewGenesisRandomnessSource(genesis.Ticket.VRFProof) + chainDs := ds.NewMapDatastore() // just mock one + // chainstore + chainStore := chain.NewStore(chainDs, bs, cid.Undef, chain.NewMockCirculatingSupplyCalculator()) // load genesis from car + chainFork, err := fork.NewChainFork(context.TODO(), chainStore, cst, bs, config.NewDefaultConfig().NetworkParams) + if err != nil { + panic(xerrors.Errorf("create chain fork error %v", err)) + } + + vmOption := vm.VmOption{ + CircSupplyCalculator: csc, + NetworkVersion: network.Version6, + LookbackStateGetter: vmcontext.LookbackStateGetterForTipset(context.TODO(), chainStore, chainFork, nil), + Rnd: chainRand, + BaseFee: abi.NewTokenAmount(InitialBaseFee), + Epoch: 0, + GasPriceSchedule: gas.NewPricesSchedule(config.DefaultForkUpgradeParam), + Bsstore: bs, + PRoot: cid.Undef, + SysCallsImpl: syscallImpl, + Fork: chainFork, + } + vm, err := 
vm.NewLegacyVM(context.Background(), vmOption) + if err != nil { + panic(xerrors.Errorf("create state error, should never come here")) + } + + return &GenesisGenerator{ + stateTree: vm.StateTree(), + store: bs, + cst: cst, + vm: vm, + vmOption: vmOption, + } } func (g *GenesisGenerator) Init(cfg *GenesisCfg) error { g.pnrg = mrand.New(mrand.NewSource(cfg.Seed)) - keys, err := genKeys(cfg.KeysToGen, g.pnrg) if err != nil { return err } keys = append(keys, cfg.ImportKeys...) g.keys = keys - vrKey, err := crypto.NewSecpKeyFromSeed(g.pnrg) if err != nil { return err @@ -99,43 +124,66 @@ func (g *GenesisGenerator) Init(cfg *GenesisCfg) error { g.vrkey = &vrKey // Monkey patch all proof types into the specs-actors package variable - newSupportedTypes := make(map[abi.RegisteredProof]struct{}) + newSupportedTypes := make(map[abi.RegisteredSealProof]struct{}) for _, mCfg := range cfg.Miners { newSupportedTypes[mCfg.SealProofType] = struct{}{} } // Switch reference rather than mutate in place to avoid concurrent map mutation (in tests). 
- miner.SupportedProofTypes = newSupportedTypes + miner.PreCommitSealProofTypesV0 = newSupportedTypes g.cfg = cfg return nil } -func (g *GenesisGenerator) flush(ctx context.Context) (cid.Cid, error) { - err := g.store.Flush() +func (g *GenesisGenerator) createSingletonActor(ctx context.Context, addr address.Address, codeCid cid.Cid, balance abi.TokenAmount, stateFn func() (interface{}, error)) (*types.Actor, error) { + if addr.Protocol() != address.ID { + return nil, fmt.Errorf("non-singleton actor would be missing from Init actor's address table") + } + state, err := stateFn() if err != nil { - return cid.Undef, err + return nil, fmt.Errorf("failed to create state: %v", err) } - return g.stateTree.Commit(ctx) + headCid, err := g.cst.Put(context.Background(), state) + if err != nil { + return nil, fmt.Errorf("failed to store state") + } + + a := types.Actor{ + Code: codeCid, + Nonce: 0, + Balance: balance, + Head: headCid, + } + if err := g.stateTree.SetActor(ctx, addr, &a); err != nil { + return nil, fmt.Errorf("failed to create actor during genesis block creation") + } + + return &a, nil } -func (g *GenesisGenerator) createSingletonActor(ctx context.Context, addr address.Address, codeCid cid.Cid, balance abi.TokenAmount, stateFn func() (interface{}, error)) (*actor.Actor, error) { +func (g *GenesisGenerator) updateSingletonActor(ctx context.Context, addr address.Address, stateFn func(actor2 *types.Actor) (interface{}, error)) (*types.Actor, error) { if addr.Protocol() != address.ID { return nil, fmt.Errorf("non-singleton actor would be missing from Init actor's address table") } - state, err := stateFn() + oldActor, found, err := g.stateTree.GetActor(ctx, addr) + if !found || err != nil { + return nil, fmt.Errorf("failed to create state: %v", err) + } + + state, err := stateFn(oldActor) if err != nil { - return nil, fmt.Errorf("failed to create state") + return nil, fmt.Errorf("failed to create state: %v", err) } - headCid, _, err := 
g.store.Put(context.Background(), state) + headCid, err := g.cst.Put(context.Background(), state) if err != nil { return nil, fmt.Errorf("failed to store state") } - a := actor.Actor{ - Code: e.NewCid(codeCid), - CallSeqNum: 0, - Balance: balance, - Head: e.NewCid(headCid), + a := types.Actor{ + Code: oldActor.Code, + Nonce: 0, + Balance: oldActor.Balance, + Head: headCid, } if err := g.stateTree.SetActor(ctx, addr, &a); err != nil { return nil, fmt.Errorf("failed to create actor during genesis block creation") @@ -145,11 +193,11 @@ func (g *GenesisGenerator) createSingletonActor(ctx context.Context, addr addres } func (g *GenesisGenerator) setupBuiltInActors(ctx context.Context) error { - emptyMap, err := adt.MakeEmptyMap(g.vm.ContextStore()).Root() + emptyMap, err := adt.MakeEmptyMap(adt.WrapStore(ctx, g.cst)).Root() if err != nil { return err } - emptyArray, err := adt.MakeEmptyArray(g.vm.ContextStore()).Root() + emptyArray, err := adt.MakeEmptyArray(adt.WrapStore(ctx, g.cst)).Root() if err != nil { return err } @@ -169,7 +217,7 @@ func (g *GenesisGenerator) setupBuiltInActors(ctx context.Context) error { } _, err = g.createSingletonActor(ctx, builtin.InitActorAddr, builtin.InitActorCodeID, big.Zero(), func() (interface{}, error) { - emptyMap, err := adt.MakeEmptyMap(g.vm.ContextStore()).Root() + emptyMap, err := adt.MakeEmptyMap(adt.WrapStore(ctx, g.cst)).Root() if err != nil { return nil, err } @@ -180,25 +228,35 @@ func (g *GenesisGenerator) setupBuiltInActors(ctx context.Context) error { } _, err = g.createSingletonActor(ctx, builtin.RewardActorAddr, builtin.RewardActorCodeID, rewardActorInitialBalance, func() (interface{}, error) { - return reward.ConstructState(), nil + return reward.ConstructState(big.Zero()), nil }) if err != nil { return err } _, err = g.createSingletonActor(ctx, builtin.StoragePowerActorAddr, builtin.StoragePowerActorCodeID, big.Zero(), func() (interface{}, error) { - emptyMap, err := adt.MakeEmptyMap(g.vm.ContextStore()).Root() + 
emptyMap, err := adt.MakeEmptyMap(adt.WrapStore(ctx, g.cst)).Root() + if err != nil { + return nil, err + } + + multiMap, err := adt.AsMultimap(adt.WrapStore(ctx, g.cst), emptyMap) + if err != nil { + return nil, err + } + + emptyMultiMap, err := multiMap.Root() if err != nil { return nil, err } - return power.ConstructState(emptyMap), nil + return power.ConstructState(emptyMap, emptyMultiMap), nil }) if err != nil { return err } _, err = g.createSingletonActor(ctx, builtin.StorageMarketActorAddr, builtin.StorageMarketActorCodeID, big.Zero(), func() (interface{}, error) { - emptyMSet, err := market.MakeEmptySetMultimap(g.vm.ContextStore()).Root() + emptyMSet, err := market.MakeEmptySetMultimap(adt.WrapStore(ctx, g.cst)).Root() if err != nil { return nil, err } @@ -232,7 +290,7 @@ func (g *GenesisGenerator) setupBuiltInActors(ctx context.Context) error { return nil } -func (g *GenesisGenerator) setupPrealloc() error { +func (g *GenesisGenerator) setupPrealloc(ctx context.Context) error { if len(g.keys) < len(g.cfg.PreallocatedFunds) { return fmt.Errorf("keys do not match prealloc") } @@ -244,12 +302,19 @@ func (g *GenesisGenerator) setupPrealloc() error { return err } - value, ok := types.NewAttoFILFromFILString(v) - if !ok { + value, err := types.ParseFIL(v) + if err != nil { return fmt.Errorf("failed to parse FIL value '%s'", v) } - _, err = g.vm.ApplyGenesisMessage(builtin.RewardActorAddr, addr, builtin.MethodSend, value, nil, &g.chainRand) + msg := types.Message{ + From: builtin.RewardActorAddr, + To: addr, + Method: builtin.MethodSend, + Value: abi.TokenAmount{Int: value.Int}, + Params: nil, + } + _, err = g.vm.ApplyImplicitMessage(ctx, &msg) if err != nil { return err } @@ -258,35 +323,36 @@ func (g *GenesisGenerator) setupPrealloc() error { } func (g *GenesisGenerator) genBlock(ctx context.Context) (cid.Cid, error) { - stateRoot, err := g.flush(ctx) + stateRoot, err := g.vm.Flush(ctx) if err != nil { return cid.Undef, err } // define empty cid and ensure 
empty components exist in blockstore - emptyAMTCid, err := amt.FromArray(ctx, g.cst, nil) + emptyAMT := adt.MakeEmptyArray(adt.WrapStore(ctx, g.cst)) + emptyAMTCid, err := emptyAMT.Root() if err != nil { return cid.Undef, err } - meta := types.TxMeta{SecpRoot: e.NewCid(emptyAMTCid), BLSRoot: e.NewCid(emptyAMTCid)} + meta := &types.MessageRoot{SecpkRoot: emptyAMTCid, BlsRoot: emptyAMTCid} metaCid, err := g.cst.Put(ctx, meta) if err != nil { return cid.Undef, err } - geneblk := &block.Block{ - Miner: builtin.SystemActorAddr, - Ticket: genesis.Ticket, - BeaconEntries: []*drand.Entry{{Data: []byte{0xca, 0xfe, 0xfa, 0xce}}}, - PoStProofs: []block.PoStProof{}, - Parents: block.NewTipSetKey(), - ParentWeight: big.Zero(), - Height: 0, - StateRoot: e.NewCid(stateRoot), - MessageReceipts: e.NewCid(emptyAMTCid), - Messages: e.NewCid(metaCid), - Timestamp: g.cfg.Time, - ForkSignaling: 0, + geneblk := &types.BlockHeader{ + Miner: builtin.SystemActorAddr, + Ticket: &genesis.Ticket, + BeaconEntries: []types.BeaconEntry{{Data: []byte{0xca, 0xfe, 0xfa, 0xce}}}, + ElectionProof: new(types.ElectionProof), + Parents: types.NewTipSetKey().Cids(), + ParentWeight: big.Zero(), + Height: 0, + ParentStateRoot: stateRoot, + ParentMessageReceipts: emptyAMTCid, + Messages: metaCid, + Timestamp: g.cfg.Time, + ForkSignaling: 0, } return g.cst.Put(ctx, geneblk) @@ -322,18 +388,12 @@ func (g *GenesisGenerator) setupMiners(ctx context.Context) ([]*RenderedMinerInf var sectorsToCommit []*sectorCommitInfo networkQAPower := big.Zero() - // Estimate the first epoch's block reward as a linear release over 6 years. - // The actual release will be initially faster, with exponential decay. - // Replace this code with calls to the reward actor when it's fixed. 
- // See https://github.com/filecoin-project/specs-actors/issues/317 - sixYearEpochs := 6 * 365 * 86400 / builtin.EpochDurationSeconds - initialBlockReward := big.Div(rewardActorInitialBalance, big.NewInt(int64(sixYearEpochs))) - // First iterate all miners and sectors to compute sector info, and accumulate the total network power that // will be present (which determines the necessary pledge amounts). // One reason that this state can't be computed purely by applying messages is that we wish to compute the // initial pledge for the sectors based on the total genesis power, regardless of the order in which // sectors are inserted here. + totalRawPow, totalQaPow := big.NewInt(0), big.NewInt(0) for _, m := range g.cfg.Miners { // Create miner actor ownerAddr, actorAddr, err := g.createMiner(ctx, m) @@ -341,16 +401,11 @@ func (g *GenesisGenerator) setupMiners(ctx context.Context) ([]*RenderedMinerInf return nil, err } - mState, err := g.loadMinerState(ctx, actorAddr) - if err != nil { - return nil, err - } - // Add configured deals to the market actor with miner as provider and worker as client dealIDs := []abi.DealID{} if len(m.CommittedSectors) > 0 { ownerKey := g.keys[m.Owner] - dealIDs, err = g.publishDeals(actorAddr, ownerAddr, ownerKey, m.CommittedSectors) + dealIDs, err = g.publishDeals(ctx, actorAddr, ownerAddr, ownerKey, m.CommittedSectors, m.MarketBalance) if err != nil { return nil, err } @@ -360,13 +415,12 @@ func (g *GenesisGenerator) setupMiners(ctx context.Context) ([]*RenderedMinerInf minerRawPower := big.Zero() for i, comm := range m.CommittedSectors { // Adjust sector expiration up to the epoch before the subsequent proving period starts. 
- periodOffset := mState.ProvingPeriodStart % miner.WPoStProvingPeriod - expiryOffset := abi.ChainEpoch(comm.DealCfg.EndEpoch+1) % miner.WPoStProvingPeriod - sectorExpiration := abi.ChainEpoch(comm.DealCfg.EndEpoch) + miner.WPoStProvingPeriod + (periodOffset - expiryOffset) - + // todo pick a better sector exp + maxPeriods := miner0.MaxSectorExpirationExtension / miner0.WPoStProvingPeriod + sectorExpiration := (maxPeriods-1)*miner0.WPoStProvingPeriod - 1 // Acquire deal weight value // call deal verify market actor to do calculation - dealWeight, verifiedWeight, err := g.getDealWeight(dealIDs[i], sectorExpiration, actorAddr) + dealWeight, verifiedWeight, err := g.getDealWeight(ctx, dealIDs[i], sectorExpiration, actorAddr) if err != nil { return nil, err } @@ -400,45 +454,117 @@ func (g *GenesisGenerator) setupMiners(ctx context.Context) ([]*RenderedMinerInf QAPower: minerQAPower, } minfos = append(minfos, minfo) + totalRawPow = big.Add(totalRawPow, minerRawPower) + totalQaPow = big.Add(totalQaPow, minerQAPower) + } + + _, err := g.updateSingletonActor(ctx, builtin.StoragePowerActorAddr, func(actor *types.Actor) (interface{}, error) { + var mState power.State + err := g.cst.Get(ctx, actor.Head, &mState) + if err != nil { + return nil, err + } + mState.TotalQualityAdjPower = totalQaPow + mState.TotalRawBytePower = totalRawPow + + mState.ThisEpochQualityAdjPower = totalQaPow + mState.ThisEpochRawBytePower = totalRawPow + return &mState, nil + }) + if err != nil { + return nil, err + } + + _, err = g.updateSingletonActor(ctx, builtin.RewardActorAddr, func(actor *types.Actor) (interface{}, error) { + return reward.ConstructState(networkQAPower), nil + }) + if err != nil { + return nil, err } // Now commit the sectors and power updates. 
for _, sector := range sectorsToCommit { - // Update power, setting state directly - sectorPledge, err := g.updatePower(ctx, sector.miner, sector.rawPower, sector.qaPower, networkQAPower, initialBlockReward) + params := &miner.SectorPreCommitInfo{ + SealProof: sector.comm.ProofType, + SectorNumber: sector.comm.SectorNum, + SealedCID: sector.comm.CommR, + SealRandEpoch: -1, + DealIDs: sector.dealIDs, + Expiration: sector.expiration, // TODO: Allow setting externally! + } + + dweight, err := g.dealWeight(ctx, sector.miner, params.DealIDs, 0, sector.expiration) if err != nil { - return nil, err + return nil, xerrors.Errorf("getting deal weight: %v", err) } - // Put sector info in miner sector set. - err = g.putSector(ctx, sector, sectorPledge) + size, err := sector.comm.ProofType.SectorSize() if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to get sector size: %v", err) } + sectorWeight := miner.QAPowerForWeight(size, sector.expiration, dweight.DealWeight, dweight.VerifiedDealWeight) + + // we've added fake power for this sector above, remove it now + _, err = g.updateSingletonActor(ctx, builtin.StoragePowerActorAddr, func(actor *types.Actor) (interface{}, error) { + var mState power.State + err = g.cst.Get(ctx, actor.Head, &mState) + if err != nil { + return nil, err + } + + mState.TotalQualityAdjPower = big.Sub(mState.TotalQualityAdjPower, sectorWeight) //nolint:scopelint + size, _ := sector.comm.ProofType.SectorSize() + if err != nil { + return nil, err + } + mState.TotalRawBytePower = big.Sub(mState.TotalRawBytePower, big.NewIntUnsigned(uint64(size))) + return &mState, nil + }) - // Transfer the pledge amount from the owner to the miner actor - _, err = g.vm.ApplyGenesisMessage(sector.owner, sector.miner, builtin.MethodSend, sectorPledge, nil, &g.chainRand) if err != nil { - return nil, err + return nil, xerrors.Errorf("removing fake power: %v", err) } - } - return minfos, nil -} -func (g *GenesisGenerator) loadMinerState(ctx context.Context, 
actorAddr address.Address) (*miner.State, error) { - mAct, found, err := g.stateTree.GetActor(ctx, actorAddr) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("no such miner actor %s", actorAddr) - } - var mState miner.State - _, err = g.store.Get(ctx, mAct.Head.Cid, &mState) - if err != nil { - return nil, err + epochReward, err := g.currentEpochBlockReward(ctx, sector.miner) + if err != nil { + return nil, xerrors.Errorf("getting current epoch reward: %v", err) + } + + tpow, err := g.currentTotalPower(ctx, sector.miner) + if err != nil { + return nil, xerrors.Errorf("getting current total power: %v", err) + } + + pcd := miner.PreCommitDepositForPower(epochReward.ThisEpochRewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight) + pledge := miner.InitialPledgeForPower( + sectorWeight, + epochReward.ThisEpochBaselinePower, + epochReward.ThisEpochRewardSmoothed, + tpow.QualityAdjPowerSmoothed, + g.circSupply(ctx, sector.miner), + ) + + pledge = big.Add(pcd, pledge) + + buf := new(bytes.Buffer) + _ = params.MarshalCBOR(buf) + _, err = g.doExecValue(ctx, sector.miner, sector.owner, pledge, builtin.MethodsMiner.PreCommitSector, buf.Bytes()) + if err != nil { + return nil, xerrors.Errorf("failed to confirm presealed sectors: %v", err) + } + + // Commit one-by-one, otherwise pledge math tends to explode + confirmParams := &builtin.ConfirmSectorProofsParams{ + Sectors: []abi.SectorNumber{sector.comm.SectorNum}, + } + buf = new(bytes.Buffer) + _ = confirmParams.MarshalCBOR(buf) + _, err = g.doExecValue(ctx, sector.miner, builtin.StoragePowerActorAddr, big.Zero(), builtin.MethodsMiner.ConfirmSectorProofsValid, buf.Bytes()) + if err != nil { + return nil, xerrors.Errorf("failed to confirm presealed sectors: %v", err) + } } - return &mState, nil + return minfos, nil } func (g *GenesisGenerator) createMiner(ctx context.Context, m *CreateStorageMinerConfig) (address.Address, address.Address, error) { @@ -448,7 +574,7 @@ func (g *GenesisGenerator) 
createMiner(ctx context.Context, m *CreateStorageMine } // Resolve worker account's ID address. - stateRoot, err := g.flush(ctx) + stateRoot, err := g.vm.Flush(ctx) if err != nil { return address.Undef, address.Undef, err } @@ -474,30 +600,67 @@ func (g *GenesisGenerator) createMiner(ctx context.Context, m *CreateStorageMine pid = peer.ID(h) } - out, err := g.vm.ApplyGenesisMessage(ownerAddr, builtin.StoragePowerActorAddr, builtin.MethodsPower.CreateMiner, big.Zero(), &power.CreateMinerParams{ + params := mustEnc(&power.CreateMinerParams{ Owner: ownerAddr, Worker: ownerAddr, - Peer: pid, + Peer: abi.PeerID(pid), SealProofType: m.SealProofType, - }, &g.chainRand) + }) + msg := &types.Message{ + From: ownerAddr, + To: builtin.StoragePowerActorAddr, + Method: builtin.MethodsPower.CreateMiner, + Value: big.Zero(), + Params: params, + } + + out, err := g.vm.ApplyImplicitMessage(ctx, msg) if err != nil { return address.Undef, address.Undef, err } - + if out.Receipt.ExitCode != 0 { + return address.Undef, address.Undef, fmt.Errorf("execute genesis msg error") + } + if _, err := g.vm.Flush(ctx); err != nil { + return address.Undef, address.Undef, err + } // get miner ID address - ret := out.(*power.CreateMinerReturn) - return ownerAddr, ret.IDAddress, nil + createMinerReturn := power.CreateMinerReturn{} + err = createMinerReturn.UnmarshalCBOR(bytes.NewReader(out.Receipt.Return)) + if err != nil { + return address.Undef, address.Undef, err + } + return ownerAddr, createMinerReturn.IDAddress, nil } -func (g *GenesisGenerator) publishDeals(actorAddr, clientAddr address.Address, clientkey *crypto.KeyInfo, comms []*CommitConfig) ([]abi.DealID, error) { +func (g *GenesisGenerator) publishDeals(ctx context.Context, actorAddr, clientAddr address.Address, clientkey *crypto.KeyInfo, comms []*CommitConfig, marketBalance abi.TokenAmount) ([]abi.DealID, error) { // Add 0 balance to escrow and locked table - _, err := g.vm.ApplyGenesisMessage(clientAddr, builtin.StorageMarketActorAddr, 
builtin.MethodsMarket.AddBalance, big.Zero(), &clientAddr, &g.chainRand) - if err != nil { - return nil, err - } - _, err = g.vm.ApplyGenesisMessage(clientAddr, builtin.StorageMarketActorAddr, builtin.MethodsMarket.AddBalance, big.Zero(), &actorAddr, &g.chainRand) - if err != nil { - return nil, err + if marketBalance.GreaterThan(big.Zero()) { + params := mustEnc(&clientAddr) + msg := &types.Message{ + From: clientAddr, + To: builtin.StorageMarketActorAddr, + Method: builtin.MethodsMarket.AddBalance, + Value: marketBalance, + Params: params, + } + _, err := g.vm.ApplyImplicitMessage(ctx, msg) + if err != nil { + return nil, err + } + + params = mustEnc(&actorAddr) + msg = &types.Message{ + From: clientAddr, + To: builtin.StorageMarketActorAddr, + Method: builtin.MethodsMarket.AddBalance, + Value: marketBalance, + Params: params, + } + _, err = g.vm.ApplyImplicitMessage(ctx, msg) + if err != nil { + return nil, err + } } // Add all deals to chain in one message @@ -515,142 +678,180 @@ func (g *GenesisGenerator) publishDeals(actorAddr, clientAddr address.Address, c ProviderCollateral: big.Zero(), // collateral should actually be good ClientCollateral: big.Zero(), } - proposalBytes, err := encoding.Encode(&proposal) + buf := new(bytes.Buffer) + err := proposal.MarshalCBOR(buf) if err != nil { return nil, err } - sig, err := crypto.Sign(proposalBytes, clientkey.PrivateKey, crypto.SigTypeBLS) + var sig *crypto.Signature + err = clientkey.UsePrivateKey(func(privateKey []byte) error { + sig, err = crypto.Sign(buf.Bytes(), privateKey, crypto.SigTypeBLS) + return err + }) if err != nil { return nil, err } params.Deals = append(params.Deals, market.ClientDealProposal{ Proposal: proposal, - ClientSignature: sig, + ClientSignature: *sig, }) } + paramsBytes := mustEnc(params) + msg := &types.Message{ + From: clientAddr, + To: builtin.StorageMarketActorAddr, + Method: builtin.MethodsMarket.PublishStorageDeals, + Value: big.Zero(), + Params: paramsBytes, + } + // apply deal 
builtin.MethodsMarket.PublishStorageDeals - out, err := g.vm.ApplyGenesisMessage(clientAddr, builtin.StorageMarketActorAddr, builtin.MethodsMarket.PublishStorageDeals, big.Zero(), params, &g.chainRand) + out, err := g.vm.ApplyImplicitMessage(ctx, msg) if err != nil { return nil, err } - - ret := out.(*market.PublishStorageDealsReturn) - return ret.IDs, nil + if out.Receipt.ExitCode != 0 { + return nil, xerrors.Errorf("execute genesis msg error") + } + publishStoreageDealsReturn := market.PublishStorageDealsReturn{} + err = publishStoreageDealsReturn.UnmarshalCBOR(bytes.NewReader(out.Receipt.Return)) + if err != nil { + return nil, err + } + return publishStoreageDealsReturn.IDs, nil } -func (g *GenesisGenerator) getDealWeight(dealID abi.DealID, sectorExpiry abi.ChainEpoch, minerIDAddr address.Address) (dealWeight, verifiedWeight abi.DealWeight, err error) { - weightParams := &market.VerifyDealsOnSectorProveCommitParams{ +func (g *GenesisGenerator) getDealWeight(ctx context.Context, dealID abi.DealID, sectorExpiry abi.ChainEpoch, minerIDAddr address.Address) (dealWeight, verifiedWeight abi.DealWeight, err error) { + weightParams := &market.VerifyDealsForActivationParams{ DealIDs: []abi.DealID{dealID}, SectorExpiry: sectorExpiry, } - weightOut, err := g.vm.ApplyGenesisMessage(minerIDAddr, builtin.StorageMarketActorAddr, builtin.MethodsMarket.VerifyDealsOnSectorProveCommit, big.Zero(), weightParams, &g.chainRand) - if err != nil { - return big.Zero(), big.Zero(), err + params := mustEnc(weightParams) + msg := &types.Message{ + From: minerIDAddr, + To: builtin.StorageMarketActorAddr, + Method: builtin.MethodsMarket.VerifyDealsForActivation, + Value: big.Zero(), + Params: params, } - ret := weightOut.(*market.VerifyDealsOnSectorProveCommitReturn) - return ret.DealWeight, ret.VerifiedDealWeight, nil -} -func (g *GenesisGenerator) updatePower(ctx context.Context, miner address.Address, rawPower, qaPower, networkPower abi.StoragePower, epochBlockReward big.Int) 
(abi.TokenAmount, error) { - // NOTE: it would be much better to use OnSectorProveCommit, which would then calculate the initial pledge amount. - powAct, found, err := g.stateTree.GetActor(ctx, builtin.StoragePowerActorAddr) + weightOut, err := g.vm.ApplyImplicitMessage(ctx, msg) if err != nil { - return big.Zero(), err + return big.Zero(), big.Zero(), err } - if !found { - return big.Zero(), fmt.Errorf("state tree could not find power actor") + if weightOut.Receipt.ExitCode != 0 { + return big.Zero(), big.Zero(), xerrors.Errorf("execute genesis msg error") } - var powerState power.State - _, err = g.store.Get(ctx, powAct.Head.Cid, &powerState) + verifyDealsReturn := market.VerifyDealsForActivationReturn{} + err = verifyDealsReturn.UnmarshalCBOR(bytes.NewReader(weightOut.Receipt.Return)) if err != nil { - return big.Zero(), err + return big.Zero(), big.Zero(), err + } + return verifyDealsReturn.DealWeight, verifyDealsReturn.VerifiedDealWeight, nil +} + +func (g *GenesisGenerator) doExecValue(ctx context.Context, to, from address.Address, value big.Int, method abi.MethodNum, params []byte) ([]byte, error) { + _, found, err := g.stateTree.GetActor(ctx, from) + if !found || err != nil { + return nil, xerrors.Errorf("doExec failed to get from actor (%s): %v", from, err) + } + + msg := &types.Message{ + From: from, + To: to, + Method: method, + Value: value, + Params: params, } - err = powerState.AddToClaim(&cstore{ctx, g.cst}, miner, rawPower, qaPower) + ret, err := g.vm.ApplyImplicitMessage(ctx, msg) if err != nil { - return big.Zero(), err + return nil, xerrors.Errorf("doExec apply message failed: %v", err) } - // Adjusting the total power here is technically wrong and unnecessary (it happens in AddToClaim), - // but needed due to gain non-zero power in small networks when no miner meets the consensus minimum. - // At present, both impls ignore the consensus minimum and rely on this incorrect value. 
- // See https://github.com/filecoin-project/specs-actors/issues/266 - // https://github.com/filecoin-project/go-filecoin/issues/3958 - powerState.TotalRawBytePower = big.Add(powerState.TotalRawBytePower, rawPower) - powerState.TotalQualityAdjPower = big.Add(powerState.TotalQualityAdjPower, qaPower) + if ret.Receipt.ExitCode != 0 { + return nil, xerrors.Errorf("execute genesis msg error") + } + if _, err := g.vm.Flush(ctx); err != nil { + return nil, err + } + return ret.Receipt.Return, nil +} - // Persist new state. - newPowCid, _, err := g.store.Put(ctx, &powerState) +func (g *GenesisGenerator) currentTotalPower(ctx context.Context, maddr address.Address) (*power.CurrentTotalPowerReturn, error) { + pwret, err := g.doExecValue(ctx, builtin.StoragePowerActorAddr, maddr, big.Zero(), builtin.MethodsPower.CurrentTotalPower, nil) if err != nil { - return big.Zero(), err + return nil, err } - powAct.Head = e.NewCid(newPowCid) - err = g.stateTree.SetActor(ctx, builtin.StoragePowerActorAddr, powAct) + currentTotalReturn := &power.CurrentTotalPowerReturn{} + err = currentTotalReturn.UnmarshalCBOR(bytes.NewReader(pwret)) if err != nil { - return big.Zero(), err + return nil, err } - - initialPledge := big.Div(big.Mul(qaPower, epochBlockReward), networkPower) - return initialPledge, nil + return currentTotalReturn, nil } -func (g *GenesisGenerator) putSector(ctx context.Context, sector *sectorCommitInfo, pledge abi.TokenAmount) error { - mAct, found, err := g.stateTree.GetActor(ctx, sector.miner) - if err != nil { - return err +func (g *GenesisGenerator) dealWeight(ctx context.Context, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch) (market.VerifyDealsForActivationReturn, error) { + params := &market.VerifyDealsForActivationParams{ + DealIDs: dealIDs, + SectorStart: sectorStart, + SectorExpiry: sectorExpiry, } - if !found { - return fmt.Errorf("mState tree could not find miner actor %s", sector.miner) + buf := new(bytes.Buffer) + err := 
params.MarshalCBOR(buf) + if err != nil { + return market.VerifyDealsForActivationReturn{}, err } - var mState miner.State - _, err = g.store.Get(ctx, mAct.Head.Cid, &mState) + ret, err := g.doExecValue(ctx, + builtin.StorageMarketActorAddr, + maddr, + abi.NewTokenAmount(0), + builtin.MethodsMarket.VerifyDealsForActivation, + buf.Bytes(), + ) if err != nil { - return err + return market.VerifyDealsForActivationReturn{}, err } - newSectorInfo := &miner.SectorOnChainInfo{ - Info: miner.SectorPreCommitInfo{ - RegisteredProof: sector.comm.ProofType, - SectorNumber: sector.comm.SectorNum, - SealedCID: sector.comm.CommR, - SealRandEpoch: 0, - DealIDs: sector.dealIDs, - Expiration: sector.expiration, - }, - ActivationEpoch: 0, - DealWeight: sector.dealWeight, - VerifiedDealWeight: sector.verifiedWeight, - } - err = mState.PutSector(&cstore{ctx, g.cst}, newSectorInfo) + vdaReturn := market.VerifyDealsForActivationReturn{} + err = vdaReturn.UnmarshalCBOR(bytes.NewReader(ret)) if err != nil { - return err + return market.VerifyDealsForActivationReturn{}, err } + return vdaReturn, nil +} - err = mState.AddNewSectors(sector.comm.SectorNum) +func (g *GenesisGenerator) currentEpochBlockReward(ctx context.Context, maddr address.Address) (*reward.ThisEpochRewardReturn, error) { + rwret, err := g.doExecValue(ctx, builtin.RewardActorAddr, maddr, big.Zero(), builtin.MethodsReward.ThisEpochReward, nil) if err != nil { - return err + return nil, err } - // Persist new state. 
- newMinerCid, _, err := g.store.Put(ctx, &mState) + epochRewardReturn := &reward.ThisEpochRewardReturn{} + err = epochRewardReturn.UnmarshalCBOR(bytes.NewReader(rwret)) if err != nil { - return err + return nil, err } - mAct.Head = e.NewCid(newMinerCid) - err = g.stateTree.SetActor(ctx, sector.miner, mAct) - return err + return epochRewardReturn, nil +} + +func (g *GenesisGenerator) circSupply(ctx context.Context, maddr address.Address) abi.TokenAmount { + supply, _ := g.vmOption.CircSupplyCalculator(ctx, 0, g.stateTree) + return supply } func computeSectorPower(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedDealWeight abi.DealWeight) (abi.StoragePower, abi.StoragePower) { - weight := &power.SectorStorageWeightDesc{ - SectorSize: size, - Duration: duration, - DealWeight: dealWeight, - VerifiedDealWeight: verifiedDealWeight, - } spower := big.NewIntUnsigned(uint64(size)) - qapower := power.QAPowerForWeight(weight) + qapower := miner.QAPowerForWeight(size, duration, dealWeight, verifiedDealWeight) return spower, qapower } + +func mustEnc(i cbg.CBORMarshaler) []byte { + enc, err := actors.SerializeParams(i) + if err != nil { + panic(err) // ok + } + return enc +} diff --git a/tools/gengen/util/gengen.go b/tools/gengen/util/gengen.go index 15834e3b5e..442de845aa 100644 --- a/tools/gengen/util/gengen.go +++ b/tools/gengen/util/gengen.go @@ -6,7 +6,7 @@ import ( "io" address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/go-state-types/abi" bserv "github.com/ipfs/go-blockservice" cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -17,10 +17,10 @@ import ( dag "github.com/ipfs/go-merkledag" car "github.com/ipld/go-car" - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" - "github.com/filecoin-project/go-filecoin/internal/pkg/genesis" - 
"github.com/filecoin-project/go-filecoin/internal/pkg/types" + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/genesis" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/types" ) // CreateStorageMinerConfig holds configuration options used to create a storage @@ -38,13 +38,15 @@ type CreateStorageMinerConfig struct { // CommittedSectors is the list of sector commitments in this miner's proving set CommittedSectors []*CommitConfig - // SealProofType is the proof configuration used by this miner + // RegisteredSealProof is the proof configuration used by this miner // (which implies sector size and window post partition size) - SealProofType abi.RegisteredProof + SealProofType abi.RegisteredSealProof // ProvingPeriodStart is next chain epoch at which a miner will need to submit a windowed post // If unset, it will be set to the proving period. ProvingPeriodStart *abi.ChainEpoch + + MarketBalance abi.TokenAmount } // CommitConfig carries all information needed to get a sector commitment in the @@ -54,7 +56,7 @@ type CommitConfig struct { CommD cid.Cid SectorNum abi.SectorNumber DealCfg *DealConfig - ProofType abi.RegisteredProof + ProofType abi.RegisteredSealProof } // DealConfig carries the information needed to specify a self-deal committing @@ -190,7 +192,7 @@ var defaultGenTimeOpt = GenTime(123456789) func MakeGenesisFunc(opts ...GenOption) genesis.InitFunc { // Dragons: GenesisInitFunc should take in only a blockstore to remove the hidden // assumption that cst and bs are backed by the same storage. 
- return func(cst cbor.IpldStore, bs blockstore.Blockstore) (*block.Block, error) { + return func(cst cbor.IpldStore, bs blockstoreutil.Blockstore) (*types.BlockHeader, error) { ctx := context.Background() genCfg := &GenesisCfg{} err := defaultGenTimeOpt(genCfg) @@ -207,7 +209,7 @@ func MakeGenesisFunc(opts ...GenOption) genesis.InitFunc { return nil, err } - var b block.Block + var b types.BlockHeader err = cst.Get(ctx, ri.GenesisCid, &b) if err != nil { return nil, err @@ -221,7 +223,7 @@ func MakeGenesisFunc(opts ...GenOption) genesis.InitFunc { // the final genesis block. // // WARNING: Do not use maps in this code, they will make this code non deterministic. -func GenGen(ctx context.Context, cfg *GenesisCfg, bs blockstore.Blockstore) (*RenderedGenInfo, error) { +func GenGen(ctx context.Context, cfg *GenesisCfg, bs blockstoreutil.Blockstore) (*RenderedGenInfo, error) { generator := NewGenesisGenerator(bs) err := generator.Init(cfg) if err != nil { @@ -232,7 +234,7 @@ func GenGen(ctx context.Context, cfg *GenesisCfg, bs blockstore.Blockstore) (*Re if err != nil { return nil, err } - err = generator.setupPrealloc() + err = generator.setupPrealloc(ctx) if err != nil { return nil, err } @@ -256,10 +258,8 @@ func GenGen(ctx context.Context, cfg *GenesisCfg, bs blockstore.Blockstore) (*Re func GenGenesisCar(cfg *GenesisCfg, out io.Writer) (*RenderedGenInfo, error) { ctx := context.Background() - bstore := blockstore.NewBlockstore(ds.NewMapDatastore()) - bstore = blockstore.NewIdStore(bstore) + bstore := blockstoreutil.WrapIDStore(blockstore.NewBlockstore(ds.NewMapDatastore())) dserv := dag.NewDAGService(bserv.New(bstore, offline.Exchange(bstore))) - info, err := GenGen(ctx, cfg, bstore) if err != nil { return nil, err @@ -296,8 +296,8 @@ type signer struct{} var _ types.Signer = (*signer)(nil) -func (ggs *signer) SignBytes(_ context.Context, data []byte, addr address.Address) (crypto.Signature, error) { - return crypto.Signature{}, nil +func (ggs *signer) SignBytes(_ 
context.Context, data []byte, addr address.Address) (*crypto.Signature, error) { + return nil, nil } func (ggs *signer) HasAddress(_ context.Context, addr address.Address) (bool, error) { diff --git a/tools/gengen/util/gengen_test.go b/tools/gengen/util/gengen_test.go index ac1fa6ca57..66448a75f1 100644 --- a/tools/gengen/util/gengen_test.go +++ b/tools/gengen/util/gengen_test.go @@ -2,41 +2,48 @@ package gengen_test import ( "context" - "io/ioutil" + "os" "testing" - "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/go-state-types/abi" ds "github.com/ipfs/go-datastore" blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - th "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - . "github.com/filecoin-project/go-filecoin/tools/gengen/util" + "github.com/filecoin-project/venus/pkg/constants" + th "github.com/filecoin-project/venus/pkg/testhelpers" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + genutil "github.com/filecoin-project/venus/tools/gengen/util" + _ "github.com/filecoin-project/venus/pkg/crypto/bls" + _ "github.com/filecoin-project/venus/pkg/crypto/secp" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func testConfig(t *testing.T) *GenesisCfg { - fiftyCommCfgs, err := MakeCommitCfgs(50) +func testConfig(t *testing.T) *genutil.GenesisCfg { + fiftyCommCfgs, err := genutil.MakeCommitCfgs(50) require.NoError(t, err) - tenCommCfgs, err := MakeCommitCfgs(10) + tenCommCfgs, err := genutil.MakeCommitCfgs(10) require.NoError(t, err) - return &GenesisCfg{ + return &genutil.GenesisCfg{ KeysToGen: 4, PreallocatedFunds: []string{"1000000", "500000"}, - Miners: []*CreateStorageMinerConfig{ + 
Miners: []*genutil.CreateStorageMinerConfig{ { Owner: 0, CommittedSectors: fiftyCommCfgs, SealProofType: constants.DevSealProofType, + MarketBalance: abi.NewTokenAmount(0), }, { Owner: 1, CommittedSectors: tenCommCfgs, SealProofType: constants.DevSealProofType, + MarketBalance: abi.NewTokenAmount(0), }, }, Network: "gfctest", @@ -45,43 +52,51 @@ func testConfig(t *testing.T) *GenesisCfg { } } -const defaultSeed = 4 -const defaultTime = 123456789 +const ( + defaultSeed = 4 + defaultTime = 123456789 +) func TestGenGenLoading(t *testing.T) { tf.IntegrationTest(t) - fi, err := ioutil.TempFile("", "gengentest") + fi, err := os.CreateTemp("", "gengentest") assert.NoError(t, err) - _, err = GenGenesisCar(testConfig(t), fi) + _, err = genutil.GenGenesisCar(testConfig(t), fi) assert.NoError(t, err) assert.NoError(t, fi.Close()) td := th.NewDaemon(t, th.GenesisFile(fi.Name())).Start() defer td.ShutdownSuccess() - o := td.Run("actor", "ls").AssertSuccess() + o := td.Run("state", "list-actor").AssertSuccess() stdout := o.ReadStdout() - assert.Contains(t, stdout, builtin.StoragePowerActorCodeID.String()) - assert.Contains(t, stdout, builtin.StorageMarketActorCodeID.String()) - assert.Contains(t, stdout, builtin.InitActorCodeID.String()) + //address won't change + assert.Contains(t, stdout, builtin.StoragePowerActorAddr.String()) + assert.Contains(t, stdout, builtin.StorageMarketActorAddr.String()) + assert.Contains(t, stdout, builtin.InitActorAddr.String()) } func TestGenGenDeterministic(t *testing.T) { tf.IntegrationTest(t) ctx := context.Background() - var info *RenderedGenInfo + var info *genutil.RenderedGenInfo for i := 0; i < 5; i++ { bstore := blockstore.NewBlockstore(ds.NewMapDatastore()) - inf, err := GenGen(ctx, testConfig(t), bstore) + inf, err := genutil.GenGen(ctx, testConfig(t), blockstoreutil.Adapt(bstore)) assert.NoError(t, err) if info == nil { info = inf } else { - assert.Equal(t, info, inf) + assert.Equal(t, info.GenesisCid, inf.GenesisCid) + assert.Equal(t, 
info.Miners, inf.Miners) + assert.Equal(t, len(info.Keys), len(inf.Keys)) + for i, key := range inf.Keys { + assert.Equal(t, info.Keys[i].Key(), key.Key()) + } } } } diff --git a/tools/gengen/util/testing.go b/tools/gengen/util/testing.go index 21ba37484d..7dda5ff221 100644 --- a/tools/gengen/util/testing.go +++ b/tools/gengen/util/testing.go @@ -3,37 +3,26 @@ package gengen import ( "fmt" - "github.com/filecoin-project/specs-actors/actors/abi" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbornode "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/constants" - "github.com/filecoin-project/go-filecoin/internal/pkg/version" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/v6/actors/builtin/market" + "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" + tutil "github.com/filecoin-project/specs-actors/v6/support/testing" + "github.com/filecoin-project/venus/pkg/constants" ) // MakeCommitCfgs creates n gengen commit configs, casting strings to cids. 
func MakeCommitCfgs(n int) ([]*CommitConfig, error) { cfgs := make([]*CommitConfig, n) for i := 0; i < n; i++ { - commP, err := constants.DefaultCidBuilder.Sum([]byte(fmt.Sprintf("commP: %d", i))) - if err != nil { - return nil, err - } - commR, err := constants.DefaultCidBuilder.Sum([]byte(fmt.Sprintf("commR: %d", i))) - if err != nil { - return nil, err - } - commD, err := constants.DefaultCidBuilder.Sum([]byte(fmt.Sprintf("commD: %d", i))) - if err != nil { - return nil, err - } + commP := tutil.MakeCID(fmt.Sprintf("commP: %d", i), &market.PieceCIDPrefix) + commR := tutil.MakeCID(fmt.Sprintf("commR: %d", i), &miner.SealedCIDPrefix) + commD := tutil.MakeCID(fmt.Sprintf("commD: %d", i), &market.PieceCIDPrefix) dealCfg := &DealConfig{ CommP: commP, - PieceSize: uint64(1), + PieceSize: uint64(2048), Verified: false, - EndEpoch: int64(1024), + EndEpoch: int64(538000), } cfgs[i] = &CommitConfig{ @@ -46,8 +35,3 @@ func MakeCommitCfgs(n int) ([]*CommitConfig, error) { } return cfgs, nil } - -// DefaultGenesis creates a test network genesis block with default accounts and actors installed. 
-func DefaultGenesis(cst cbornode.IpldStore, bs blockstore.Blockstore) (*block.Block, error) { - return MakeGenesisFunc(NetworkName(version.TEST))(cst, bs) -} diff --git a/tools/iptb-plugins/Makefile b/tools/iptb-plugins/Makefile deleted file mode 100644 index 00d3ea6dd5..0000000000 --- a/tools/iptb-plugins/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -IPTB_ROOT ?= $(HOME)/testbed - -all: filecoin install - -install: - mkdir -p $(IPTB_ROOT)/plugins - cp filecoin/*.so $(IPTB_ROOT)/plugins - -filecoin: - make -C filecoin all - -clean: - rm *.so - -.PHONY: all filecoin clean diff --git a/tools/iptb-plugins/README.md b/tools/iptb-plugins/README.md deleted file mode 100644 index 45f7114f31..0000000000 --- a/tools/iptb-plugins/README.md +++ /dev/null @@ -1,211 +0,0 @@ -# How to use IPTB with go-filecoin - -These scripts allow one to: - -- Create the IPTB testbed -- Initialize the testbed nodes with a genesis file -- Start the testbed nodes -- Configure the testbed nodes wallet addresses and miner address -- Connect the testbed nodes together - -## Setup -First, ensure you have the latest version of IPTB installed: -```shell -$> go get -u github.com/ipfs/iptb -``` - -Next, ensure you have go-filecoin **installed**, IPTB requires that the go-filecoin bin be in your path: -```shell -$> cd $GOPATH/src/github.com/filecoin-project/go-filecoin -$> go run build/main.go deps -$> go run build/main.go install -``` - -Now, build the `localfilecoin` iptb plugin: -```shell -$> make iptb -``` -And verify the plugin was created: -```shell -$> ls $HOME/testbed/plugins/ -localfilecoin.so -``` - -*NOTE:* If you want to create Docker nodes, be sure to build the docker image first: -```shell -$> docker build . 
-``` - -## Initialization - -### Simple -Create 10 local Filecoin nodes: -```shell -sh tools/iptb-plugins/filecoin/local/scripts/prepMining.sh 10 -``` - -Create 10 Docker Filecoin nodes: -```shell -sh tools/iptb-plugins/filecoin/docker/scripts/prepMining.sh 10 -``` - -### Advanced - -Create a 10 node `testbed`: -```shell -iptb testbed create --count 10 --type localfilecoin -``` -Verify the testbed was created: -```shell -$> ls $HOME/testbed/testbeds/ -default/ -$> ls $HOME/testbed/testbeds/default/ -0/ 1/ 2/ 3/ 4/ 5/ 6/ 7/ 8/ 9/ nodespec.json -$> cat $HOME/testbed/testbeds/default/nodespec.json -[ - { - "Type": "localfilecoin", - "Dir": "/home/frrist/testbed/testbeds/default/0", - "Attrs": {} - }, -... - { - "Type": "localfilecoin", - "Dir": "/home/frrist/testbed/testbeds/default/9", - "Attrs": {} - } -] -``` -NOTE: multiple testbeds can exist under the `$HOME/testbed/testbeds` directory, they may be created & interacted with by using the `--testbed` flag. - -Initialize the nodes in testbed `default`: -```shell -$> iptb init -node[0] exit 0 - -initializing filecoin node at /home/frrist/testbed/testbeds/default/0 - -... - -node[9] exit 0 - -initializing filecoin node at /home/frrist/testbed/testbeds/default/9 -``` -Verify the nodes initialized their repositories correctly: -```shell -$> ls $HOME/testbed/testbeds/default/0/ -badger/ config.toml keystore/ snapshots/ version wallet/ -``` -NOTE: arguments can be passed to nodes with any command by adding them after the `--` argument, e.g.: -```shell -$> iptb init -- --genesisfile=/some/path/to/it --devnet-nightly -``` - -Start the testbed nodes: -```shell -$> iptb start -INFO-[/home/frrist/testbed/testbeds/default/9] Started daemon: /home/frrist/testbed/testbeds/default/9, pid: 6843 -open /home/frrist/testbed/testbeds/default/9/api: no such file or directory -... 
-``` - -Connect all nodes together (ignore the errors about self dials, that is expected): -```shell -$> iptb connect -``` - -Verify the connections were made: -```shell -$> iptb run -- go-filecoin swarm peers -node[0] exit 0 - -/ip4/127.0.0.1/tcp/33427/ipfs/QmVihFTmJDpWc8iAQXcbp4mavc6dWDuHqktm9EfFyTvBiC -/ip4/127.0.0.1/tcp/36005/ipfs/QmSLPRjGzqoYVJcmKSgsAJWgtSGCemYKKndrpTjRtpXr4d -/ip4/127.0.0.1/tcp/36893/ipfs/QmNrCUhm9Hgp9sQz6MsZcAWX6j83eJzD4SHvmEzA8Xfh76 -/ip4/127.0.0.1/tcp/37705/ipfs/QmPcMfAGupZa5kfzB8FF4YeetrY5r7vDLeak9hC2FBB8aW -/ip4/127.0.0.1/tcp/39583/ipfs/QmaTJHZeSTorvtCSstk1LJs5HvHUt7vmbpaJuaYJFWtWiu -/ip4/127.0.0.1/tcp/40291/ipfs/QmYvTR6L8MpU6NNJgaBHvfG6DnVwL2kmox5VWDmYp2ipX2 -/ip4/127.0.0.1/tcp/40663/ipfs/QmSsTBuiG7N3z2utNNTc4p6N6mTbKgWbqiSW2h9HiRKq7M -/ip4/127.0.0.1/tcp/40671/ipfs/QmTG8s5TMUfGA1hwZ9umMBjG2bRD9DJTQg14U3XzLpqR23 -/ip4/127.0.0.1/tcp/45755/ipfs/QmTqjCLwJhxG4LyKRKhd536bDJ9UEBDix4HNUvRJMK8qL2 -``` - -## Running Commands - -Run a command on all the nodes: -``` -$> iptb run -- go-filecoin wallet addrs ls -node[0] exit 0 - -fcqd8399qra4a94tspmplcrh68x7vkhqzxaxtk6nw - -... - -node[9] exit 0 - -fcqn9054lff4s9v6rlt76h08k4ra0gt9xmpymcl9w -``` - -Or just the even number nodes: -```shell -$> iptb run [0,2,4,6,8] -- go-filecoin id -node[0] exit 0 - -{ - "Addresses": [ - "/ip4/127.0.0.1/tcp/44311/ipfs/Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8", - "/ip4/192.168.0.116/tcp/44311/ipfs/Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8", - "/ip4/172.17.0.1/tcp/44311/ipfs/Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8", - "/ip4/172.18.0.1/tcp/44311/ipfs/Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8" - ], - "ID": "Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8", - "AgentVersion": "", - "ProtocolVersion": "", - "PublicKey": "" -} -... 
-``` -Or nodes 3-5: -```shell -$> iptb run [3-5] -- go-filecoin swarm peers -node[3] exit 0 - -node[4] exit 0 - -node[5] exit 0 -``` - -Jump into a shell for a node: -```shell -$> iptb shell 0 -$> go-filecoin id -{ - "Addresses": [ - "/ip4/127.0.0.1/tcp/44311/ipfs/Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8", - "/ip4/192.168.0.116/tcp/44311/ipfs/Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8", - "/ip4/172.17.0.1/tcp/44311/ipfs/Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8", - "/ip4/172.18.0.1/tcp/44311/ipfs/Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8" - ], - "ID": "Qmbb5hawLiz1md6hcAiW98p1SLSE4u1cV5BNZB5VnhKQQ8", - "AgentVersion": "", - "ProtocolVersion": "", - "PublicKey": "" -} -$> exit -$> iptb shell 1 -$> go-filecoin id -{ - "Addresses": [ - "/ip4/127.0.0.1/tcp/39583/ipfs/QmaTJHZeSTorvtCSstk1LJs5HvHUt7vmbpaJuaYJFWtWiu", - "/ip4/192.168.0.116/tcp/39583/ipfs/QmaTJHZeSTorvtCSstk1LJs5HvHUt7vmbpaJuaYJFWtWiu", - "/ip4/172.17.0.1/tcp/39583/ipfs/QmaTJHZeSTorvtCSstk1LJs5HvHUt7vmbpaJuaYJFWtWiu", - "/ip4/172.18.0.1/tcp/39583/ipfs/QmaTJHZeSTorvtCSstk1LJs5HvHUt7vmbpaJuaYJFWtWiu" - ], - "ID": "QmaTJHZeSTorvtCSstk1LJs5HvHUt7vmbpaJuaYJFWtWiu", - "AgentVersion": "", - "ProtocolVersion": "", - "PublicKey": "" -} -``` - -Happy Coding diff --git a/tools/iptb-plugins/filecoin/Makefile b/tools/iptb-plugins/filecoin/Makefile deleted file mode 100644 index c8ed4841a1..0000000000 --- a/tools/iptb-plugins/filecoin/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -export GO111MODULE=on - -all: filecoinlocal filecoindocker - -filecoinlocal: - (cd local/localfilecoin; go build -buildmode=plugin -o ../../localfilecoin.so) -CLEAN += localfilecoin.so - -filecoindocker: - (cd docker/dockerfilecoin; go build -buildmode=plugin -o ../../dockerfilecoin.so) -CLEAN += dockerfilecoin.so - -.PHONY: all filecoinlocal filecoindocker diff --git a/tools/iptb-plugins/filecoin/docker/dockerfilecoin.go b/tools/iptb-plugins/filecoin/docker/dockerfilecoin.go deleted file mode 100644 index 93dc97f4b9..0000000000 
--- a/tools/iptb-plugins/filecoin/docker/dockerfilecoin.go +++ /dev/null @@ -1,472 +0,0 @@ -package pluginlocalfilecoin - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/stdcopy" - - "github.com/ipfs/iptb/testbed/interfaces" - "github.com/ipfs/iptb/util" -) - -// PluginName is the name of the plugin. -var PluginName = "dockerfilecoin" -var log = logging.Logger(PluginName) - -// DefaultDockerHost is the hostname used when connecting to a docker daemon. -var DefaultDockerHost = client.DefaultDockerHost - -// DefaultDockerImage is the image name the plugin will use when deploying a container> -var DefaultDockerImage = "go-filecoin" - -// DefaultDockerUser is the user that will run the command(s) inside the container. -var DefaultDockerUser = "filecoin" - -// DefaultDockerEntryPoint is the entrypoint to run when starting the container. -var DefaultDockerEntryPoint = []string{"/usr/local/bin/go-filecoin"} - -// DefaultDockerVolumePrefix is a prefix added when using docker volumes -// e.g. 
when running against a remote docker daemon a prefix like `/var/iptb/` -// is usful wrt permissions -var DefaultDockerVolumePrefix = "" - -// DefaultLogLevel is the value that will be used for GO_FILECOIN_LOG_LEVEL -var DefaultLogLevel = "3" - -// DefaultLogJSON is the value that will be used for GO_FILECOIN_LOG_JSON -var DefaultLogJSON = "false" - -var ( - // AttrLogLevel is the key used to set the log level through NewNode attrs - AttrLogLevel = "logLevel" - - // AttrLogJSON is the key used to set the node to output json logs - AttrLogJSON = "logJSON" -) - -// Dockerfilecoin represents attributes of a dockerized filecoin node. -type Dockerfilecoin struct { - Image string - ID string - Host string - User string - EntryPoint []string - VolumePrefix string - dir string - peerid cid.Cid - apiaddr multiaddr.Multiaddr - swarmaddr multiaddr.Multiaddr - - logLevel string - logJSON string -} - -var NewNode testbedi.NewNodeFunc // nolint: golint - -func init() { - NewNode = func(dir string, attrs map[string]string) (testbedi.Core, error) { - dockerHost := DefaultDockerHost - dockerImage := DefaultDockerImage - dockerUser := DefaultDockerUser - dockerEntry := DefaultDockerEntryPoint - dockerVolumePrefix := DefaultDockerVolumePrefix - logLevel := DefaultLogLevel - logJSON := DefaultLogJSON - - // the dockerid file is present once the container has started the daemon process, - // iptb uses the dockerid file to keep track of the containers its running - idb, err := ioutil.ReadFile(filepath.Join(dir, "dockerid")) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - dockerID := string(idb) - - apiaddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/3453") - if err != nil { - return nil, err - } - - swarmaddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/6000") - if err != nil { - return nil, err - } - - if v, ok := attrs["dockerHost"]; ok { - dockerHost = v - } - - if v, ok := attrs["dockerImage"]; ok { - dockerImage = v - } - - if v, ok := attrs["dockerUser"]; 
ok { - dockerUser = v - } - - if v, ok := attrs["dockerEntry"]; ok { - dockerEntry[0] = v - } - - if v, ok := attrs["dockerVolumePrefix"]; ok { - dockerVolumePrefix = v - } - - if v, ok := attrs[AttrLogLevel]; ok { - logLevel = v - } - - if v, ok := attrs[AttrLogJSON]; ok { - logJSON = v - } - - return &Dockerfilecoin{ - EntryPoint: dockerEntry, - Host: dockerHost, - ID: dockerID, - Image: dockerImage, - User: dockerUser, - VolumePrefix: dockerVolumePrefix, - - logLevel: logLevel, - logJSON: logJSON, - - dir: dir, - apiaddr: apiaddr, - swarmaddr: swarmaddr, - }, nil - } -} - -/** Core Interface **/ - -// Init runs the node init process. -func (l *Dockerfilecoin) Init(ctx context.Context, args ...string) (testbedi.Output, error) { - // Get the docker client - cli, err := l.GetClient() - if err != nil { - return nil, err - } - - // define entrypoint command - // TODO use an env var - cmds := []string{"init"} - cmds = append(cmds, args...) - - envs, err := l.env() - if err != nil { - return nil, err - } - - resp, err := cli.ContainerCreate(ctx, - &container.Config{ - Entrypoint: l.EntryPoint, - User: l.User, - Image: l.Image, - Env: envs, - Cmd: cmds, - Tty: false, - }, - &container.HostConfig{ - Binds: []string{ - fmt.Sprintf("%s%s:%s", l.VolumePrefix, l.Dir(), "/data/filecoin"), - }, - }, - &network.NetworkingConfig{}, - "") - if err != nil { - return nil, err - } - - // This runs the init command, when the command completes the container will stop running, since we - // have the added the bindings above the repo will be persisted, meaning we can create a second container - // during the `iptb start` command that will use the repo created here. - // TODO find a better way to do that above.. 
- if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil { - return nil, err - } - - var exitCode int64 - // wait here until the init command completes and get an exit code - statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning) - select { - case err := <-errCh: - if err != nil { - return nil, err - } - case exitStatus := <-statusCh: - exitCode = exitStatus.StatusCode - } - - // collect logs generated during container execution - out, err := cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}) - if err != nil { - panic(err) - } - defer out.Close() // nolint: errcheck - - var outBuf, errBuf bytes.Buffer - outputDone := make(chan error) - go func() { - // StdCopy demultiplexes the stream into two buffers - _, err = stdcopy.StdCopy(&outBuf, &errBuf, out) - outputDone <- err - }() - - select { - case err := <-outputDone: - if err != nil { - return nil, err - } - break - case <-ctx.Done(): - return nil, ctx.Err() - } - - return iptbutil.NewOutput(cmds, outBuf.Bytes(), errBuf.Bytes(), int(exitCode), err), err -} - -// Start starts the node process. -func (l *Dockerfilecoin) Start(ctx context.Context, wait bool, args ...string) (testbedi.Output, error) { - // check if we already have a container running in the testbed dir - if _, err := os.Stat(filepath.Join(l.Dir(), "dockerid")); err == nil { - return nil, errors.Errorf("container already running in testbed dir: %s", l.Dir()) - } - - cli, err := l.GetClient() - if err != nil { - return nil, err - } - - // TODO use an env var - cmds := []string{"daemon"} - cmds = append(cmds, args...) 
- - envs, err := l.env() - if err != nil { - return nil, err - } - - // Create the container, first command needs to be daemon, now we have an ID for it - resp, err := cli.ContainerCreate(ctx, - &container.Config{ - Entrypoint: l.EntryPoint, - User: l.User, - Image: l.Image, - Env: envs, - Cmd: cmds, - Tty: false, - AttachStdout: true, - AttachStderr: true, - }, - &container.HostConfig{ - Binds: []string{fmt.Sprintf("%s%s:%s", l.VolumePrefix, l.Dir(), "/data/filecoin")}, - }, - &network.NetworkingConfig{}, - "") - if err != nil { - return nil, err - } - - // this runs the daemon command, the container will now remain running until stop is called - if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil { - return nil, err - } - - // TODO this when it would be nice to have filecoin log to a file, then we could just mount that.. - // Sleep for a bit, else we don't see any logs - time.Sleep(2 * time.Second) - - out, err := cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}) - if err != nil { - return nil, err - } - defer out.Close() // nolint: errcheck - - var outBuf, errBuf bytes.Buffer - outputDone := make(chan error) - go func() { - // StdCopy demultiplexes the stream into two buffers - _, err = stdcopy.StdCopy(&outBuf, &errBuf, out) - outputDone <- err - }() - - select { - case err := <-outputDone: - if err != nil { - return nil, err - } - break - case <-ctx.Done(): - return nil, ctx.Err() - } - - // save the dockerid to a file - if err = ioutil.WriteFile(filepath.Join(l.Dir(), "dockerid"), []byte(resp.ID), 0664); err != nil { - return nil, err - } - - return iptbutil.NewOutput(cmds, outBuf.Bytes(), errBuf.Bytes(), int(0), err), err -} - -// Stop stops the node process. 
-func (l *Dockerfilecoin) Stop(ctx context.Context) error { - cli, err := l.GetClient() - if err != nil { - return err - } - - // "2" is the same as Ctrl+C - if err := cli.ContainerKill(ctx, l.ID, "2"); err != nil { - return err - } - - // remove the dockerid file since we use this in `Start` as a liveness check - if err := os.Remove(filepath.Join(l.Dir(), "dockerid")); err != nil { - return err - } - - return nil -} - -// RunCmd runs a command in the context of the node. -func (l *Dockerfilecoin) RunCmd(ctx context.Context, stdin io.Reader, args ...string) (testbedi.Output, error) { - // TODO pass opts here to control a docker daemon on a remote host - cli, err := l.GetClient() - if err != nil { - return nil, err - } - - // TODO use an env var - return Exec(ctx, cli, l.ID, false, "/data/filecoin", args...) -} - -// Connect connects the node to another testbed node. -func (l *Dockerfilecoin) Connect(ctx context.Context, n testbedi.Core) error { - swarmaddrs, err := n.SwarmAddrs() - if err != nil { - return err - } - - for _, a := range swarmaddrs { - // we should try all addresses - // TODO(frrist) libp2p has a better way to do this built in iirc - output, err := l.RunCmd(ctx, nil, "go-filecoin", "swarm", "connect", a) - if err != nil { - return err - } - - if output.ExitCode() != 0 { - out, err := ioutil.ReadAll(output.Stderr()) - if err != nil { - return err - } - l.Errorf("%s", string(out)) - } - } - return nil -} - -func (l *Dockerfilecoin) env() ([]string, error) { - envs := os.Environ() - - envs = filecoin.UpdateOrAppendEnv(envs, "FIL_PATH", "/data/filecoin") - envs = filecoin.UpdateOrAppendEnv(envs, "GO_FILECOIN_LOG_LEVEL", l.logLevel) - envs = filecoin.UpdateOrAppendEnv(envs, "GO_FILECOIN_LOG_JSON", l.logJSON) - - return envs, nil -} - -// Shell starts a shell in the context of a node. -func (l *Dockerfilecoin) Shell(ctx context.Context, ns []testbedi.Core) error { - panic("NYI") -} - -// Infof writes an info log. 
-func (l *Dockerfilecoin) Infof(format string, args ...interface{}) { - log.Infof("Node: %s %s", l, fmt.Sprintf(format, args...)) -} - -// Errorf writes an error log. -func (l *Dockerfilecoin) Errorf(format string, args ...interface{}) { - log.Errorf("Node: %s %s", l, fmt.Sprintf(format, args...)) -} - -// Dir returns the directory the node is using. -func (l *Dockerfilecoin) Dir() string { - return l.dir -} - -// Type returns the type of the node. -func (l *Dockerfilecoin) Type() string { - return PluginName -} - -// String implements the stringer interface. -func (l *Dockerfilecoin) String() string { - return l.dir -} - -/** Libp2p Interface **/ - -// PeerID returns the nodes peerID. -func (l *Dockerfilecoin) PeerID() (string, error) { - var err error - l.peerid, err = l.GetPeerID() - if err != nil { - return "", err - } - - return l.peerid.String(), err -} - -// APIAddr returns the api address of the node. -func (l *Dockerfilecoin) APIAddr() (string, error) { - return l.apiaddr.String(), nil -} - -// SwarmAddrs returns the addresses a node is listening on for swarm connections. -func (l *Dockerfilecoin) SwarmAddrs() ([]string, error) { - out, err := l.RunCmd(context.Background(), nil, "go-filecoin", "id", "--format=''") - if err != nil { - return nil, err - } - - outStr, err := ioutil.ReadAll(out.Stdout()) - if err != nil { - return nil, err - } - - addrs := strings.Split(string(outStr), "\n") - return addrs, nil -} - -/** Config Interface **/ - -// GetConfig returns the nodes config. -func (l *Dockerfilecoin) GetConfig() (interface{}, error) { - panic("NYI") -} - -// WriteConfig writes a nodes config file. 
-func (l *Dockerfilecoin) WriteConfig(cfg interface{}) error { - panic("NYI") -} diff --git a/tools/iptb-plugins/filecoin/docker/dockerfilecoin/plugin.go b/tools/iptb-plugins/filecoin/docker/dockerfilecoin/plugin.go deleted file mode 100644 index ceb19b1ddd..0000000000 --- a/tools/iptb-plugins/filecoin/docker/dockerfilecoin/plugin.go +++ /dev/null @@ -1,8 +0,0 @@ -package main - -import ( - plugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/docker" -) - -var PluginName = plugin.PluginName // nolint: golint, staticcheck, deadcode, unused -var NewNode = plugin.NewNode // nolint: golint, staticcheck, deadcode, unused diff --git a/tools/iptb-plugins/filecoin/docker/iptb_metrics.go b/tools/iptb-plugins/filecoin/docker/iptb_metrics.go deleted file mode 100644 index c6c380b321..0000000000 --- a/tools/iptb-plugins/filecoin/docker/iptb_metrics.go +++ /dev/null @@ -1,65 +0,0 @@ -package pluginlocalfilecoin - -import ( - "context" - "io" - - "github.com/docker/docker/api/types" -) - -// Events not implemented -func (l *Dockerfilecoin) Events() (io.ReadCloser, error) { - panic("Not Implemented") -} - -// StderrReader provides an io.ReadCloser to the running daemons stderr -func (l *Dockerfilecoin) StderrReader() (io.ReadCloser, error) { - cli, err := l.GetClient() - if err != nil { - return nil, err - } - - options := types.ContainerLogsOptions{ - ShowStdout: false, - ShowStderr: true, - Follow: true, - } - - return cli.ContainerLogs(context.Background(), l.ID, options) -} - -// StdoutReader provides an io.ReadCloser to the running daemons stdout -func (l *Dockerfilecoin) StdoutReader() (io.ReadCloser, error) { - cli, err := l.GetClient() - if err != nil { - return nil, err - } - - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: false, - Follow: true, - } - - return cli.ContainerLogs(context.Background(), l.ID, options) -} - -// Heartbeat not implemented -func (l *Dockerfilecoin) Heartbeat() (map[string]string, error) { - 
panic("Not Implemented") -} - -// Metric not implemented -func (l *Dockerfilecoin) Metric(key string) (string, error) { - panic("Not Implemented") -} - -// GetMetricList not implemented -func (l *Dockerfilecoin) GetMetricList() []string { - panic("Not Implemented") -} - -// GetMetricDesc not implemented -func (l *Dockerfilecoin) GetMetricDesc(key string) (string, error) { - panic("Not Implemented") -} diff --git a/tools/iptb-plugins/filecoin/docker/scripts/prepMining.sh b/tools/iptb-plugins/filecoin/docker/scripts/prepMining.sh deleted file mode 100755 index 22b5a54769..0000000000 --- a/tools/iptb-plugins/filecoin/docker/scripts/prepMining.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -# This script is useful when you want to setup dockerized filecoin instances that can mine. -# This script can be ran like any other bash script. - -# This script is used to create an IPTB testbed, initialize the testbed nodes with a genesis file, -# start the testbed nodes, configure the testbed nodes wallet addresses and miner address -# from the addresses in the aforementioned genesis file s.t. the nodes can mine, and lastly connect the -# testbed nodes together. -# -# This script has a hard limit on the number of node called MAX_NODES, this is -# due to our docker configuration, for more information on this limit ask @frrist. - -# TODO add tests to verify this always works. - -MAX_NODES=25 - -if test -z "$1" -then - echo "ERROR: you must pass value for number of nodes you wish to init, e.g.: 10" - exit 1 -fi - - -if [ "$1" -gt "$MAX_NODES" ]; -then - printf "If you wish to run with a value larger that 25, you must edit the Dockerfile in the go-filecoin repo\n - Where to edit:\n - ENV GENSETUP_COUNT 25 #<--SET THIS VALUE\n - After edit you must rebuild the docker file:\n - $ docker build -t go-filecoin . 
- " - exit 1 -fi - -# create a testbed for the iptb nodes -iptb testbed create --count "$1" --type dockerfilecoin --force - -printf "Initializing %d nodes\n" "$1" -iptb init -- --genesisfile=/data/genesis.car - -printf "Starting %d nodes\n" "$1" -iptb start -- --block-time=5s - -printf "Configuring %d nodes\n" "$1" -for i in $(eval echo {0..$1}) -do - minerAddr=$(iptb run "$i" cat /data/minerAddr$i | tail -n 2 | head -n 1) - iptb run "$i" -- go-filecoin config mining.minerAddress \"\\\"$minerAddr\\\"\" - iptb run "$i" -- go-filecoin wallet import /data/walletKey$i -done - -printf "Connecting %d nodes\n" "$1" -iptb connect - -printf "Complete! %d nodes connected and ready to mine >.>" "$1" diff --git a/tools/iptb-plugins/filecoin/docker/util.go b/tools/iptb-plugins/filecoin/docker/util.go deleted file mode 100644 index f57a3075ae..0000000000 --- a/tools/iptb-plugins/filecoin/docker/util.go +++ /dev/null @@ -1,136 +0,0 @@ -package pluginlocalfilecoin - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "strings" - - "github.com/ipfs/go-cid" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/stdcopy" - "github.com/ipfs/iptb/util" - "github.com/pkg/errors" - - "github.com/ipfs/iptb/testbed/interfaces" -) - -// ExecResult represents a result returned from Exec() -type ExecResult struct { - ExitCode int - outBuffer *bytes.Buffer - errBuffer *bytes.Buffer -} - -// Stdout returns stdout output of a command run by Exec() -func (res *ExecResult) Stdout() string { - return res.outBuffer.String() -} - -// Stderr returns stderr output of a command run by Exec() -func (res *ExecResult) Stderr() string { - return res.errBuffer.String() -} - -// Combined returns combined stdout and stderr output of a command run by Exec() -func (res *ExecResult) Combined() string { - return res.outBuffer.String() + res.errBuffer.String() -} - -// Exec executes a command inside a container, returning the result -// containing stdout, 
stderr, and exit code. Note: -// - this is a synchronous operation; -// - cmd stdin is closed. -func Exec(ctx context.Context, cli client.APIClient, containerID string, detach bool, repoDir string, args ...string) (testbedi.Output, error) { - // prepare exec - cmd := []string{"sh", "-c"} - cmd = append(cmd, strings.Join(args, " ")) - execConfig := types.ExecConfig{ - User: "filecoin", - AttachStdout: true, - AttachStderr: true, - Env: []string{fmt.Sprintf("FIL_PATH=%s", repoDir)}, - Cmd: cmd, - Detach: detach, - } - cresp, err := cli.ContainerExecCreate(ctx, containerID, execConfig) - if err != nil { - return nil, err - } - execID := cresp.ID - - // run it, with stdout/stderr attached - aresp, err := cli.ContainerExecAttach(ctx, execID, types.ExecStartCheck{}) - if err != nil { - return nil, err - } - defer aresp.Close() - - // read the output - var outBuf, errBuf bytes.Buffer - outputDone := make(chan error) - - go func() { - // StdCopy demultiplexes the stream into two buffers - _, err = stdcopy.StdCopy(&outBuf, &errBuf, aresp.Reader) - outputDone <- err - }() - - select { - case err := <-outputDone: - if err != nil { - return nil, err - } - break - - case <-ctx.Done(): - return nil, ctx.Err() - } - - // get the exit code - iresp, err := cli.ContainerExecInspect(ctx, execID) - if err != nil { - return nil, err - } - - return iptbutil.NewOutput(cmd, outBuf.Bytes(), errBuf.Bytes(), iresp.ExitCode, err), err -} - -// GetPeerID returns the nodes peerID by running its `id` command. 
-// TODO this a temp fix, should read the nodes keystore instead -func (l *Dockerfilecoin) GetPeerID() (cid.Cid, error) { - // run the id command - out, err := l.RunCmd(context.TODO(), nil, "id", "--format=") - if err != nil { - return cid.Undef, err - } - - if out.ExitCode() != 0 { - return cid.Undef, errors.New("Could not get PeerID, non-zero exit code") - } - - _, err = io.Copy(os.Stdout, out.Stderr()) - if err != nil { - return cid.Undef, err - } - - // convert the reader to a string TODO this is annoying - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(out.Stdout()) - if err != nil { - return cid.Undef, err - } - cidStr := strings.TrimSpace(buf.String()) - - // decode the parsed string to a cid...maybe - return cid.Decode(cidStr) -} - -// GetClient creates a new docker sdk client -func (l *Dockerfilecoin) GetClient() (*client.Client, error) { - return client.NewClientWithOpts(client.WithVersion("1.37"), client.WithHost(l.Host)) -} diff --git a/tools/iptb-plugins/filecoin/local/copy_file.go b/tools/iptb-plugins/filecoin/local/copy_file.go deleted file mode 100644 index 2bfa5c2c60..0000000000 --- a/tools/iptb-plugins/filecoin/local/copy_file.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !linux - -package pluginlocalfilecoin - -import ( - "io" - "os" -) - -// https://stackoverflow.com/a/21061062 -func copyFileContents(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return - } - - defer in.Close() // nolint: errcheck - out, err := os.Create(dst) - if err != nil { - return - } - - defer func() { - cerr := out.Close() - if err == nil { - err = cerr - } - }() - - if _, err = io.Copy(out, in); err != nil { - return - } - - err = out.Sync() - return -} diff --git a/tools/iptb-plugins/filecoin/local/copy_file_linux.go b/tools/iptb-plugins/filecoin/local/copy_file_linux.go deleted file mode 100644 index fe019dc8b7..0000000000 --- a/tools/iptb-plugins/filecoin/local/copy_file_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux - -package 
pluginlocalfilecoin - -import ( - "os/exec" -) - -// We can't copy the bits in go due to the following bug -// https://github.com/golang/go/issues/22315 -func copyFileContents(src, dst string) (err error) { - cmd := exec.Command("cp", src, dst) - if err := cmd.Start(); err != nil { - return err - } - - return cmd.Wait() -} diff --git a/tools/iptb-plugins/filecoin/local/filecoinutil.go b/tools/iptb-plugins/filecoin/local/filecoinutil.go deleted file mode 100644 index edf0e71182..0000000000 --- a/tools/iptb-plugins/filecoin/local/filecoinutil.go +++ /dev/null @@ -1,119 +0,0 @@ -package pluginlocalfilecoin - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/ipfs/go-cid" - "github.com/pkg/errors" - - commands "github.com/filecoin-project/go-filecoin/cmd/go-filecoin" - "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin" -) - -func (l *Localfilecoin) isAlive() (bool, error) { - pid, err := l.getPID() - if os.IsNotExist(err) { - return false, nil - } else if err != nil { - return false, err - } - - proc, err := os.FindProcess(pid) - if err != nil { - return false, nil - } - - err = proc.Signal(syscall.Signal(0)) - if err == nil { - return true, nil - } - - return false, nil -} - -func (l *Localfilecoin) getPID() (int, error) { - b, err := ioutil.ReadFile(filepath.Join(l.iptbPath, "daemon.pid")) - if err != nil { - return -1, err - } - - return strconv.Atoi(string(b)) -} - -func (l *Localfilecoin) env() ([]string, error) { - envs := os.Environ() - - currPath := os.Getenv("PATH") - pathList := filepath.SplitList(currPath) - pathList = append([]string{filepath.Dir(l.binPath)}, pathList...) 
- newPath := strings.Join(pathList, string(filepath.ListSeparator)) - envs = filecoin.UpdateOrAppendEnv(envs, "FIL_PATH", l.repoPath) - envs = filecoin.UpdateOrAppendEnv(envs, "GO_FILECOIN_LOG_LEVEL", l.logLevel) - envs = filecoin.UpdateOrAppendEnv(envs, "GO_FILECOIN_LOG_JSON", l.logJSON) - envs = filecoin.UpdateOrAppendEnv(envs, "RUST_LOG", "info") - envs = filecoin.UpdateOrAppendEnv(envs, "PATH", newPath) - - return envs, nil -} - -func (l *Localfilecoin) signalAndWait(p *os.Process, waitch <-chan struct{}, signal os.Signal, t time.Duration) error { - err := p.Signal(signal) - if err != nil { - return fmt.Errorf("error killing daemon %s: %s", l.iptbPath, err) - } - - select { - case <-waitch: - return nil - case <-time.After(t): - return errTimeout - } -} - -func (l *Localfilecoin) readerFor(file string) (io.ReadCloser, error) { - return os.OpenFile(filepath.Join(l.iptbPath, file), os.O_RDONLY, 0) -} - -// GetPeerID returns the nodes peerID by running its `id` command. -// TODO this a temp fix, should read the nodes keystore instead -func (l *Localfilecoin) GetPeerID() (cid.Cid, error) { - // run the id command - out, err := l.RunCmd(context.TODO(), nil, l.binPath, "id") - if err != nil { - return cid.Undef, err - } - - if out.ExitCode() != 0 { - return cid.Undef, errors.New("Could not get PeerID, non-zero exit code") - } - - _, err = io.Copy(os.Stdout, out.Stderr()) - if err != nil { - return cid.Undef, err - } - - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(out.Stdout()) - if err != nil { - return cid.Undef, err - } - - var details commands.IDDetails - err = details.UnmarshalJSON(buf.Bytes()) - if err != nil { - return cid.Undef, err - } - - // decode the parsed string to a cid...maybe - return cid.Decode(details.ID.String()) -} diff --git a/tools/iptb-plugins/filecoin/local/iptb_metrics.go b/tools/iptb-plugins/filecoin/local/iptb_metrics.go deleted file mode 100644 index 3fc18de7b4..0000000000 --- a/tools/iptb-plugins/filecoin/local/iptb_metrics.go +++ 
/dev/null @@ -1,40 +0,0 @@ -package pluginlocalfilecoin - -import ( - "io" -) - -// Events not implemented -func (l *Localfilecoin) Events() (io.ReadCloser, error) { - panic("Not Implemented") -} - -// StderrReader provides an io.ReadCloser to the running daemons stderr -func (l *Localfilecoin) StderrReader() (io.ReadCloser, error) { - return l.readerFor("daemon.stderr") -} - -// StdoutReader provides an io.ReadCloser to the running daemons stdout -func (l *Localfilecoin) StdoutReader() (io.ReadCloser, error) { - return l.readerFor("daemon.stdout") -} - -// Heartbeat not implemented -func (l *Localfilecoin) Heartbeat() (map[string]string, error) { - panic("Not Implemented") -} - -// Metric not implemented -func (l *Localfilecoin) Metric(key string) (string, error) { - panic("Not Implemented") -} - -// GetMetricList not implemented -func (l *Localfilecoin) GetMetricList() []string { - panic("Not Implemented") -} - -// GetMetricDesc not implemented -func (l *Localfilecoin) GetMetricDesc(key string) (string, error) { - panic("Not Implemented") -} diff --git a/tools/iptb-plugins/filecoin/local/localfilecoin.go b/tools/iptb-plugins/filecoin/local/localfilecoin.go deleted file mode 100644 index 8a5fb1baf2..0000000000 --- a/tools/iptb-plugins/filecoin/local/localfilecoin.go +++ /dev/null @@ -1,586 +0,0 @@ -package pluginlocalfilecoin - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - - "github.com/ipfs/iptb/testbed/interfaces" - "github.com/ipfs/iptb/util" - "golang.org/x/sync/errgroup" - - "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin" -) - -// PluginName is the name of the plugin -var PluginName = "localfilecoin" - -var log = logging.Logger(PluginName) - -// errIsAlive will be 
returned by Start if the node is already running -var errIsAlive = errors.New("node is already running") -var errTimeout = errors.New("timeout") - -// defaultFilecoinBinary is the name or full path of the binary that will be used -const defaultFilecoinBinary = "go-filecoin" - -// defaultRepoPath is the name of the repo path relative to the plugin root directory -const defaultRepoPath = "repo" - -// defaultSectorsPath is the name of the sector path relative to the plugin root directory -const defaultSectorsPath = "sectors" - -// defaultLogLevel is the value that will be used for GO_FILECOIN_LOG_LEVEL -const defaultLogLevel = "3" - -// defaultRustLogLevel is the value that will be used for RUST_LOG -const defaultRustLogLevel = "" - -// defaultLogJSON is the value that will be used for GO_FILECOIN_LOG_JSON -const defaultLogJSON = "false" - -var ( - // AttrFilecoinBinary is the key used to set which binary to use in the plugin through NewNode attrs - AttrFilecoinBinary = "filecoinBinary" - - // AttrLogLevel is the key used to set the log level through NewNode attrs - AttrLogLevel = "logLevel" - - // AttrLogJSON is the key used to set the node to output json logs - AttrLogJSON = "logJSON" - - // AttrRustLogLevel is the key used to set the node to output rust logs - AttrRustLogLevel = "rustLogLevel" - - // AttrSectorsPath is the key used to set the sectors path - AttrSectorsPath = "sectorsPath" -) - -// Localfilecoin represents a filecoin node -type Localfilecoin struct { - iptbPath string // Absolute path for all process data - peerid cid.Cid - apiaddr multiaddr.Multiaddr - - binPath string // Absolute path to binary - repoPath string // Absolute path to repo - sectorsPath string // Absolute path to sectors - logLevel string - logJSON string - rustLogLevel string -} - -var NewNode testbedi.NewNodeFunc // nolint: golint - -func init() { - NewNode = func(dir string, attrs map[string]string) (testbedi.Core, error) { - dir, err := filepath.Abs(dir) - if err != nil { - 
return nil, err - } - var ( - binPath = "" - repoPath = filepath.Join(dir, defaultRepoPath) - sectorsPath = filepath.Join(dir, defaultSectorsPath) - logLevel = defaultLogLevel - logJSON = defaultLogJSON - rustLogLevel = defaultRustLogLevel - ) - - if v, ok := attrs[AttrFilecoinBinary]; ok { - binPath = v - } - - if v, ok := attrs[AttrLogLevel]; ok { - logLevel = v - } - - if v, ok := attrs[AttrLogJSON]; ok { - logJSON = v - } - - if v, ok := attrs[AttrRustLogLevel]; ok { - rustLogLevel = v - } - - if v, ok := attrs[AttrSectorsPath]; ok { - sectorsPath = v - } - - if len(binPath) == 0 { - if binPath, err = exec.LookPath(defaultFilecoinBinary); err != nil { - return nil, err - } - } - - if err := os.Mkdir(filepath.Join(dir, "bin"), 0755); err != nil { - return nil, fmt.Errorf("could not make dir: %s", err) - } - - dst := filepath.Join(dir, "bin", filepath.Base(binPath)) - if err := copyFileContents(binPath, dst); err != nil { - return nil, err - } - - if err := os.Chmod(dst, 0755); err != nil { - return nil, err - } - - return &Localfilecoin{ - iptbPath: dir, - binPath: dst, - repoPath: repoPath, - sectorsPath: sectorsPath, - logLevel: logLevel, - logJSON: logJSON, - rustLogLevel: rustLogLevel, - }, nil - } -} - -/** Core Interface **/ - -// Init runs the node init process. -func (l *Localfilecoin) Init(ctx context.Context, args ...string) (testbedi.Output, error) { - // The repo path is provided by the environment - args = append([]string{l.binPath, "init"}, args...) - output, oerr := l.RunCmd(ctx, nil, args...) 
- if oerr != nil { - return nil, oerr - } - if output.ExitCode() != 0 { - return output, errors.Errorf("%s exited with non-zero code %d", output.Args(), output.ExitCode()) - } - - icfg, err := l.Config() - if err != nil { - return nil, err - } - - lcfg := icfg.(*config.Config) - - if err := lcfg.Set("api.address", `"/ip4/127.0.0.1/tcp/0"`); err != nil { - return nil, err - } - - if err := lcfg.Set("swarm.address", `"/ip4/127.0.0.1/tcp/0"`); err != nil { - return nil, err - } - - // only set sectors path to l.sectorsPath if init command does not set - isectorsPath, err := lcfg.Get("sectorbase.rootdir") - if err != nil { - return nil, err - } - lsectorsPath := isectorsPath.(string) - if lsectorsPath == "" { - if err := lcfg.Set("sectorbase.rootdir", l.sectorsPath); err != nil { - return nil, err - } - } - - if err := l.WriteConfig(lcfg); err != nil { - return nil, err - } - - return output, oerr -} - -// Start starts the node process. -func (l *Localfilecoin) Start(ctx context.Context, wait bool, args ...string) (testbedi.Output, error) { - alive, err := l.isAlive() - if err != nil { - return nil, err - } - - if alive { - return nil, errIsAlive - } - - repoFlag := fmt.Sprintf("--repodir=%s", l.repoPath) // Not provided by environment here - dargs := append([]string{"daemon", repoFlag}, args...) - cmd := exec.CommandContext(ctx, l.binPath, dargs...) 
- cmd.Dir = l.iptbPath - - cmd.Env, err = l.env() - if err != nil { - return nil, err - } - - iptbutil.SetupOpt(cmd) - - stdout, err := os.Create(filepath.Join(l.iptbPath, "daemon.stdout")) - if err != nil { - return nil, err - } - - stderr, err := os.Create(filepath.Join(l.iptbPath, "daemon.stderr")) - if err != nil { - return nil, err - } - - cmd.Stdout = stdout - cmd.Stderr = stderr - - err = cmd.Start() - if err != nil { - return nil, err - } - - pid := cmd.Process.Pid - if pid == 0 { - panic("here") - } - - l.Infof("Started daemon: %s, pid: %d", l, pid) - - if err := ioutil.WriteFile(filepath.Join(l.iptbPath, "daemon.pid"), []byte(fmt.Sprint(pid)), 0666); err != nil { - return nil, err - } - if wait { - if err := filecoin.WaitOnAPI(l); err != nil { - return nil, err - } - } - return iptbutil.NewOutput(dargs, []byte{}, []byte{}, 0, err), nil -} - -// Stop stops the node process. -func (l *Localfilecoin) Stop(ctx context.Context) error { - pid, err := l.getPID() - if err != nil { - return fmt.Errorf("error killing daemon %s: %s", l.iptbPath, err) - } - - p, err := os.FindProcess(pid) - if err != nil { - return fmt.Errorf("error killing daemon %s: %s", l.iptbPath, err) - } - - waitch := make(chan struct{}, 1) - go func() { - // TODO pass return state - p.Wait() // nolint: errcheck - waitch <- struct{}{} - }() - - defer func() { - err := os.Remove(filepath.Join(l.iptbPath, "daemon.pid")) - if err != nil && !os.IsNotExist(err) { - panic(fmt.Errorf("error removing pid file for daemon at %s: %s", l.iptbPath, err)) - } - err = os.Remove(filepath.Join(l.repoPath, "api")) - if err != nil && !os.IsNotExist(err) { - panic(fmt.Errorf("error removing API file for daemon at %s: %s", l.repoPath, err)) - } - }() - - if err := l.signalAndWait(p, waitch, syscall.SIGTERM, 1*time.Second); err != errTimeout { - return err - } - - if err := l.signalAndWait(p, waitch, syscall.SIGTERM, 2*time.Second); err != errTimeout { - return err - } - - if err := l.signalAndWait(p, waitch, 
syscall.SIGQUIT, 5*time.Second); err != errTimeout { - return err - } - - if err := l.signalAndWait(p, waitch, syscall.SIGKILL, 5*time.Second); err != errTimeout { - return err - } - - for { - err := p.Signal(syscall.Signal(0)) - if err != nil { - break - } - time.Sleep(time.Millisecond * 10) - } - - return nil -} - -// RunCmd runs a command in the context of the node. -func (l *Localfilecoin) RunCmd(ctx context.Context, stdin io.Reader, args ...string) (testbedi.Output, error) { - env, err := l.env() - if err != nil { - return nil, fmt.Errorf("error getting env: %s", err) - } - - firstArg := args[0] - if firstArg == "go-filecoin" { - firstArg = l.binPath - } - - cmd := exec.CommandContext(ctx, firstArg, args[1:]...) - cmd.Env = env - cmd.Stdin = stdin - - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - - err = cmd.Start() - if err != nil { - return nil, err - } - - g, ctx := errgroup.WithContext(ctx) - - var stderrbytes []byte - var stdoutbytes []byte - - g.Go(func() error { - var err error - stderrbytes, err = ioutil.ReadAll(stderr) - return err - }) - - g.Go(func() error { - var err error - stdoutbytes, err = ioutil.ReadAll(stdout) - return err - }) - - if err := g.Wait(); err != nil { - return nil, err - } - - exiterr := cmd.Wait() - - var exitcode = 0 - switch oerr := exiterr.(type) { - case *exec.ExitError: - if ctx.Err() == context.DeadlineExceeded { - err = errors.Wrapf(oerr, "context deadline exceeded for command: %q", strings.Join(cmd.Args, " ")) - } - - exitcode = 1 - case nil: - err = oerr - } - - return iptbutil.NewOutput(args, stdoutbytes, stderrbytes, exitcode, err), nil -} - -// Connect connects the node to another testbed node. 
-func (l *Localfilecoin) Connect(ctx context.Context, n testbedi.Core) error { - swarmaddrs, err := n.SwarmAddrs() - if err != nil { - return err - } - - output, err := l.RunCmd(ctx, nil, l.binPath, "swarm", "connect", swarmaddrs[0]) - - if err != nil { - return err - } - - if output.ExitCode() != 0 { - out, err := ioutil.ReadAll(output.Stderr()) - if err != nil { - return err - } - - return fmt.Errorf("%s", string(out)) - } - - return err -} - -// Shell starts a user shell in the context of a node setting FIL_PATH to ensure calls to -// go-filecoin will be ran agasint the target node. Stderr, stdout will be set to os.Stderr -// and os.Stdout. If env TTY is set, it will be used for stdin, otherwise os.Stdin will be used. -// -// If FIL_PATH is already set, an error will be returned. -// -// The shell environment will have the follow variables set in the shell for the user. -// -// NODE0-NODE# - set to the PeerID for each value in ns passed. -// FIL_PATH - The value is set to the directory for the Filecoin node. -// FIL_PID - The value is set to the pid for the Filecoin daemon -// FIL_BINARY - The value is set to the path of the binary used for running the Filecoin daemon. -// PATH - The users PATH will be updated to include a location that contains the FIL_BINARY. -// -// Note: user shell configuration may lead to the `go-filecoin` command not pointing to FIL_BINARY, -// due to PATH ordering. -func (l *Localfilecoin) Shell(ctx context.Context, ns []testbedi.Core) error { - shell := os.Getenv("SHELL") - if shell == "" { - return fmt.Errorf("no shell found") - } - - if len(os.Getenv("FIL_PATH")) != 0 { - // If the users shell sets FIL_PATH, it will just be overridden by the shell again - return fmt.Errorf("shell has FIL_PATH set, please unset before trying to use iptb shell") - } - - nenvs, err := l.env() - if err != nil { - return err - } - - // TODO(tperson): It would be great if we could guarantee that the shell - // is using the same binary. 
However, the users shell may prepend anything - // we change in the PATH - - for i, n := range ns { - peerid, err := n.PeerID() - - if err != nil { - return err - } - - nenvs = append(nenvs, fmt.Sprintf("NODE%d=%s", i, peerid)) - } - - pid, err := l.getPID() - if err != nil { - return err - } - - nenvs = filecoin.UpdateOrAppendEnv(nenvs, "FIL_PID", fmt.Sprintf("%d", pid)) - nenvs = filecoin.UpdateOrAppendEnv(nenvs, "FIL_BINARY", l.binPath) - - cmd := exec.CommandContext(ctx, shell) - cmd.Env = nenvs - - stdin := os.Stdin - - // When running code with `go test`, the os.Stdin is not connected to the shell - // where `go test` was ran. This makes the shell exit immediately and it's not - // possible to run it. To get around this issue we can let the user tell us the - // TTY their shell is using by setting the TTY env. This will allow the shell - // to use the same TTY the user started running `go test` in. - tty := os.Getenv("TTY") - if len(tty) != 0 { - f, err := os.Open(tty) - if err != nil { - return err - } - - stdin = f - } - - cmd.Stdin = stdin - cmd.Stderr = os.Stderr - cmd.Stdout = os.Stdout - - if err := cmd.Start(); err != nil { - return err - } - - return cmd.Wait() -} - -// Infof writes an info log. -func (l *Localfilecoin) Infof(format string, args ...interface{}) { - log.Infof("Node: %s %s", l, fmt.Sprintf(format, args...)) -} - -// Errorf writes an error log. -func (l *Localfilecoin) Errorf(format string, args ...interface{}) { - log.Errorf("Node: %s %s", l, fmt.Sprintf(format, args...)) -} - -// Dir returns the IPTB directory the node is using. -func (l *Localfilecoin) Dir() string { - return l.iptbPath -} - -// Type returns the type of the node. -func (l *Localfilecoin) Type() string { - return PluginName -} - -// String implements the stringr interface. -func (l *Localfilecoin) String() string { - return l.iptbPath -} - -/** Libp2p Interface **/ - -// PeerID returns the nodes peerID. 
-func (l *Localfilecoin) PeerID() (string, error) { - /* - if l.peerid != nil { - return l.peerid, nil - } - */ - - var err error - l.peerid, err = l.GetPeerID() - if err != nil { - return "", err - } - - return l.peerid.String(), err -} - -// APIAddr returns the api address of the node. -func (l *Localfilecoin) APIAddr() (string, error) { - /* - if l.apiaddr != nil { - return l.apiaddr, nil - } - */ - - var err error - l.apiaddr, err = filecoin.GetAPIAddrFromRepo(l.repoPath) - if err != nil { - return "", err - } - - return l.apiaddr.String(), err -} - -// SwarmAddrs returns the addresses a node is listening on for swarm connections. -func (l *Localfilecoin) SwarmAddrs() ([]string, error) { - out, err := l.RunCmd(context.Background(), nil, l.binPath, "id", "--format=") - if err != nil { - return nil, err - } - - outStr, err := ioutil.ReadAll(out.Stdout()) - if err != nil { - return nil, err - } - - addrs := strings.Split(string(outStr), "\n") - return addrs, nil -} - -/** Config Interface **/ - -// Config returns the nodes config. -func (l *Localfilecoin) Config() (interface{}, error) { - return config.ReadFile(filepath.Join(l.repoPath, "config.json")) -} - -// WriteConfig writes a nodes config file. 
-func (l *Localfilecoin) WriteConfig(cfg interface{}) error { - lcfg := cfg.(*config.Config) - return lcfg.WriteFile(filepath.Join(l.repoPath, "config.json")) -} diff --git a/tools/iptb-plugins/filecoin/local/localfilecoin/plugin.go b/tools/iptb-plugins/filecoin/local/localfilecoin/plugin.go deleted file mode 100644 index 124b66ddf9..0000000000 --- a/tools/iptb-plugins/filecoin/local/localfilecoin/plugin.go +++ /dev/null @@ -1,8 +0,0 @@ -package main - -import ( - plugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/local" -) - -var PluginName = plugin.PluginName // nolint: golint, staticcheck, deadcode, unused -var NewNode = plugin.NewNode // nolint: golint, staticcheck, deadcode, unused diff --git a/tools/iptb-plugins/filecoin/local/scripts/prepMining.sh b/tools/iptb-plugins/filecoin/local/scripts/prepMining.sh deleted file mode 100755 index 82f254a0dc..0000000000 --- a/tools/iptb-plugins/filecoin/local/scripts/prepMining.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash - -# This script is useful when you want to setup local filecoin processes that can mine. -# This script can be ran like any other bash script. - -# This script is used to create an IPTB testbed, initialize the testbed nodes with a genesis file, -# start the testbed nodes, configure the testbed nodes wallet addresses and miner address -# from the addresses in the aforementioned genesis file s.t. the nodes can mine, and lastly connect the -# testbed nodes together. -# -# TODO add tests to verify this always works. 
- -# Linux and OSX have different dd flags -DD_FILE_SIZE=1m -if [[ "$OSTYPE" == "linux-gnu" ]]; then - # <3 - DD_FILE_SIZE=1M -fi - -if test -z "$1" -then - echo "ERROR: you must pass value for number of nodes you wish to init, e.g.: 10" - exit 1 -fi - -if test -z "$GOPATH"; then - GOPATH=$(go env GOPATH) -fi - -# create a testbed for the iptb nodes -iptb testbed create --count "$1" --type localfilecoin --force - -# set common paths to find bins and config files -GENDIR=$GOPATH/src/github.com/filecoin-project/go-filecoin/tools/gengen -FIXDIR=$GOPATH/src/github.com/filecoin-project/go-filecoin/fixtures - -printf "Setting up initial boostrap node (0)\n" -# configure mining on node 0 -minerAddr=$(cat $FIXDIR/gen.json | jq ".Miners[0].Address" -r) - -iptb init 0 -- --genesisfile=$FIXDIR/genesis.car -iptb start 0 -- --block-time=5s -iptb run 0 -- go-filecoin config mining.minerAddress "\"$minerAddr\"" - -# import miner owner -ownerRaw=$(iptb run 0 -- go-filecoin wallet import "$FIXDIR/0.key") -# sad face, iptb makes me do all the jumps -minerOwner=$(echo $ownerRaw | sed -e 's/^node\[0\] exit 0 //' | jq -r ".") -# update the peerID to the correct value -peerID=$(iptb run 0 -- go-filecoin id | tail -n +3 | jq ".ID" -r) -iptb run 0 -- go-filecoin miner update-peerid --from="$minerOwner" --gas-price=0 --gas-limit=300 "$minerAddr" "$peerID" -# start mining -iptb run 0 -- go-filecoin mining start - -# ranges are inclusive in bash, so subtract one -J=$(($1 - 1)) - -# init all other nodes -for i in `seq 1 $J` -do - iptb init "$i" -- --genesisfile=$FIXDIR/genesis.car --auto-seal-interval-seconds 1 # autosealing every second - iptb start "$i" -done - -# connect nodes -printf "Connecting %d nodes\n" "$1" -iptb connect - -printf "Creating miners\n" - -# configure mining addresses on all the nodes -for i in `seq 1 $J` -do - # send some tokens - nodeAddr=$(iptb run "$i" -- go-filecoin wallet addrs ls | tail -n +3) - msgCidRaw=$(iptb run 0 -- go-filecoin message send --from 
"$minerOwner" --value 100 "$nodeAddr") - msgCid=$(echo $msgCidRaw | sed -e 's/^node\[0\] exit 0 //') - echo "Waiting for $msgCid" - iptb run "$i" -- go-filecoin message wait "$msgCid" - - # create the actual miner - newMinerAddr=$(iptb run "$i" -- go-filecoin miner create 10 | tail -n +3) - - # start mining - iptb run "$i" -- go-filecoin mining start # I don't think these guys need to mine yet, wait until the deal is processed - - # add an ask - printf "adding ask" - iptb run "$i" -- go-filecoin miner set-price --miner="$newMinerAddr" 1 100000 --gas-price=0 --gas-limit=300 # price of one FIL/whatever, ask is valid for 100000 blocks - - # make a deal - dd if=/dev/random of="$FIXDIR/fake.dat" bs="$DD_FILE_SIZE" count=1 # small data file will be autosealed - dataCidRaw=$(iptb run 0 -- go-filecoin client import "$FIXDIR/fake.dat") - rm "$FIXDIR/fake.dat" - dataCid=$(echo $dataCidRaw | sed -e 's/^node\[0\] exit 0 //') - printf "making deal" - - echo $newMinerAddr - echo $dataCid - iptb run 0 -- go-filecoin client propose-storage-deal "$newMinerAddr" "$dataCid" 1 10000 # I think this is where stuff fails right now?? -done - -printf "Complete! 
%d nodes connected and ready to mine >.>" "$1" diff --git a/tools/iptb-plugins/filecoin/mock/mockfilecoin.go b/tools/iptb-plugins/filecoin/mock/mockfilecoin.go deleted file mode 100644 index dce57b46f8..0000000000 --- a/tools/iptb-plugins/filecoin/mock/mockfilecoin.go +++ /dev/null @@ -1,118 +0,0 @@ -package pluginmockfilecoin - -import ( - "bytes" - "context" - "errors" - "io" - - "github.com/ipfs/iptb/testbed/interfaces" - "github.com/ipfs/iptb/util" -) - -// PluginName is the name of the plugin -var PluginName = "mockfilecoin" - -// Mockfilecoin is a mock structure used for testing things that use go-filecoin iptb plugins -type Mockfilecoin struct { - dir string - - stderr bytes.Buffer - stdout bytes.Buffer -} - -var NewNode testbedi.NewNodeFunc // nolint: golint - -func init() { - NewNode = func(dir string, attrs map[string]string) (testbedi.Core, error) { - return &Mockfilecoin{ - dir: dir, - }, nil - } -} - -// Init is not implemented -func (m *Mockfilecoin) Init(ctx context.Context, args ...string) (testbedi.Output, error) { - return nil, nil -} - -// Start is not implemented -func (m *Mockfilecoin) Start(ctx context.Context, wait bool, args ...string) (testbedi.Output, error) { - return nil, nil -} - -// Stop is not implemented -func (m *Mockfilecoin) Stop(ctx context.Context) error { - return nil -} - -// RunCmd will return "string" for args "", json for args "json", and ldjson for args "ldjson" -func (m *Mockfilecoin) RunCmd(ctx context.Context, stdin io.Reader, args ...string) (testbedi.Output, error) { - if args[0] == "" { - return iptbutil.NewOutput(args, []byte("string"), []byte{}, 0, nil), nil - } else if args[0] == "json" { - //return json object - return iptbutil.NewOutput(args, []byte(`{"key":"value"}`), []byte{}, 0, nil), nil - } else if args[0] == "ldjson" { - // return ldjson objects - return iptbutil.NewOutput(args, []byte("{\"key\":\"value1\"}\n{\"key\":\"value2\"}\n"), []byte{}, 0, nil), nil - } else if args[0] == "add-to-daemonstderr" { - 
for _, arg := range args[1:] { - m.stderr.WriteString(arg) - m.stderr.WriteByte('\n') - } - - return iptbutil.NewOutput(args, []byte{}, []byte{}, 0, nil), nil - } - return nil, errors.New(`invalid mock args, can only be one of: "", "json", or "ldjson"`) -} - -// Connect is not implemented -func (m *Mockfilecoin) Connect(ctx context.Context, n testbedi.Core) error { - return nil -} - -// Shell is not implemented -func (m *Mockfilecoin) Shell(ctx context.Context, ns []testbedi.Core) error { - panic("not implemented") -} - -// Dir is not implemented -func (m *Mockfilecoin) Dir() string { - return m.dir -} - -// Type is not implemented -func (m *Mockfilecoin) Type() string { - panic("not implemented") -} - -// String is not implemented -func (m *Mockfilecoin) String() string { - return "mockNode" -} - -// PeerID is not implemented -func (m *Mockfilecoin) PeerID() (string, error) { - panic("not implemented") -} - -// APIAddr is not implemented -func (m *Mockfilecoin) APIAddr() (string, error) { - panic("not implemented") -} - -// SwarmAddrs is not implemented -func (m *Mockfilecoin) SwarmAddrs() ([]string, error) { - panic("not implemented") -} - -// Config is not implemented -func (m *Mockfilecoin) Config() (interface{}, error) { - panic("not implemented") -} - -// WriteConfig is not implemented -func (m *Mockfilecoin) WriteConfig(interface{}) error { - panic("not implemented") -} diff --git a/tools/iptb-plugins/filecoin/mock/mockfilecoin_metrics.go b/tools/iptb-plugins/filecoin/mock/mockfilecoin_metrics.go deleted file mode 100644 index 4d02739a62..0000000000 --- a/tools/iptb-plugins/filecoin/mock/mockfilecoin_metrics.go +++ /dev/null @@ -1,16 +0,0 @@ -package pluginmockfilecoin - -import ( - "io" - "io/ioutil" -) - -// StderrReader returns an io.ReadCloser that represents daemon stderr -func (m *Mockfilecoin) StderrReader() (io.ReadCloser, error) { - return ioutil.NopCloser(&m.stderr), nil -} - -// StdoutReader returns an io.ReadCloser that represents daemon stdout 
-func (m *Mockfilecoin) StdoutReader() (io.ReadCloser, error) { - return ioutil.NopCloser(&m.stdout), nil -} diff --git a/tools/iptb-plugins/filecoin/util.go b/tools/iptb-plugins/filecoin/util.go deleted file mode 100644 index 439993cdfb..0000000000 --- a/tools/iptb-plugins/filecoin/util.go +++ /dev/null @@ -1,121 +0,0 @@ -package filecoin - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "path/filepath" - "strings" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/multiformats/go-multiaddr" - - testbedi "github.com/ipfs/iptb/testbed/interfaces" -) - -var log = logging.Logger("util") - -// WaitOnAPI waits for a nodes api to come up. -func WaitOnAPI(l testbedi.Libp2p) error { - for i := 0; i < 50; i++ { - err := tryAPICheck(l) - if err == nil { - return nil - } - log.Warn(err.Error()) - time.Sleep(time.Millisecond * 400) - } - - pcid, err := l.PeerID() - if err != nil { - return err - } - - return fmt.Errorf("node %s failed to come online in given time period", pcid) -} - -func tryAPICheck(l testbedi.Libp2p) error { - addrStr, err := l.APIAddr() - if err != nil { - return err - } - - addr, err := multiaddr.NewMultiaddr(addrStr) - if err != nil { - return err - } - - //TODO(tperson) ipv6 - ip, err := addr.ValueForProtocol(multiaddr.P_IP4) - if err != nil { - return err - } - pt, err := addr.ValueForProtocol(multiaddr.P_TCP) - if err != nil { - return err - } - - resp, err := http.Get(fmt.Sprintf("http://%s:%s/api/id", ip, pt)) - if err != nil { - return err - } - - out := make(map[string]interface{}) - err = json.NewDecoder(resp.Body).Decode(&out) - if err != nil { - return fmt.Errorf("liveness check failed: %s", err) - } - - id, ok := out["ID"] - if !ok { - return fmt.Errorf("liveness check failed: ID field not present in output") - } - - pcid, err := l.PeerID() - if err != nil { - return err - } - - idstr, ok := id.(string) - if !ok { - return fmt.Errorf("liveness check failed: ID field is unexpected type") - } - - if idstr != pcid { - 
return fmt.Errorf("liveness check failed: unexpected peer at endpoint") - } - - return nil -} - -// GetAPIAddrFromRepo reads the api address from the `api` file in a nodes repo. -func GetAPIAddrFromRepo(dir string) (multiaddr.Multiaddr, error) { - addrStr, err := ioutil.ReadFile(filepath.Join(dir, "api")) - if err != nil { - return nil, err - } - - maddr, err := multiaddr.NewMultiaddr(string(addrStr)) - if err != nil { - return nil, err - } - - return maddr, nil -} - -// UpdateOrAppendEnv will look through an array of strings for the environment key -// updating if it is found, or appending to the end if not. -func UpdateOrAppendEnv(envs []string, key, value string) []string { - entry := fmt.Sprintf("%s=%s", key, value) - - for i, e := range envs { - if strings.HasPrefix(e, key+"=") { - envs[i] = entry - return envs - } - } - - return append(envs, entry) -} diff --git a/tools/migration/README.md b/tools/migration/README.md deleted file mode 100644 index 1c8b2c4bc6..0000000000 --- a/tools/migration/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# IMPORTANT - -**DO NOT REGENERATE** test fixtures associated with a migration, using go-filecoin code that is later than the intended "oldVersion" associated with the migration. - -This will not only invalidate the tests, but users will be unable to run migrations on -their repo, because the migration code becomes polluted with behaviors past its original -intended versions. The migration will likely become completely unable to read their repo. - -If your changes have broken migration tests, then one of the following scenarios may apply: - -1. A migration has already been created and merged for the upcoming release, and you are are making more breaking changes to the repo. In this case, you will need to: - - * write your own migration for your changes and backport the old code to the other one, -
OR - * include your changes in the other migration. -2. The migration is recent but applies to a previous release. In this case you need to write a new migration and backport the old code. -3. The previous migration version is enough versions behind that no nodes should even be running it by now. Consider invalidating the broken migration. \ No newline at end of file diff --git a/tools/migration/internal/default_migrations_provider.go b/tools/migration/internal/default_migrations_provider.go deleted file mode 100644 index e1c9f233ae..0000000000 --- a/tools/migration/internal/default_migrations_provider.go +++ /dev/null @@ -1,23 +0,0 @@ -package internal - -// DefaultMigrationsProvider provides a list of migrations available for migrating -// in production. -// To add a migration: -// 1. add a migration folder in tools/migration/migrations like repo-- -// 2. put the migration code in the folder in its own package -// 2. add the new migration package to the imports above -// 3. instantiate and append it to the list of migrations returned by DefaultMigrationsProvider -// -// See runner_test for examples. - -import ( - migration12 "github.com/filecoin-project/go-filecoin/tools/migration/migrations/repo-1-2" -) - -// DefaultMigrationsProvider is the migrations provider dependency used in production. -// You may provide a test version when needed. Please see runner_test.go for more information. -func DefaultMigrationsProvider() []Migration { - return []Migration{ - &migration12.MetadataFormatJSONtoCBOR{}, - } -} diff --git a/tools/migration/internal/logger.go b/tools/migration/internal/logger.go deleted file mode 100644 index ed57c1db87..0000000000 --- a/tools/migration/internal/logger.go +++ /dev/null @@ -1,52 +0,0 @@ -package internal - -import ( - "io" - "log" - "os" -) - -// Logger logs migration events to disk and, if initialized with verbose, -// stdout too. -type Logger struct { - closer io.WriteCloser - logger *log.Logger -} - -// NewLogger creates a new Logger. 
All log writes go to f, the logging file. -// If the verbose flag is true all log writes also go to stdout. -func NewLogger(wc io.WriteCloser, verbose bool) *Logger { - // by default just write to file - var w io.Writer - w = wc - if verbose { - w = io.MultiWriter(wc, os.Stdout) - } - return &Logger{ - closer: wc, - logger: log.New(w, "[Filecoin Migration] ", log.LstdFlags), - } -} - -// Error logs an error to the logging output. -func (l *Logger) Error(err error) { - if err == nil { - return - } - l.logger.Printf("ERROR: %s", err.Error()) -} - -// Print logs a string to the logging output. -func (l *Logger) Print(msg string) { - l.logger.Print(msg) -} - -// Printf logs and formats a string to the logging output. -func (l *Logger) Printf(format string, v ...interface{}) { - l.logger.Printf(format, v...) -} - -// Close closes the logfile backing the Logger. -func (l *Logger) Close() error { - return l.closer.Close() -} diff --git a/tools/migration/internal/logger_test.go b/tools/migration/internal/logger_test.go deleted file mode 100644 index a008858a91..0000000000 --- a/tools/migration/internal/logger_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package internal_test - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - . 
"github.com/filecoin-project/go-filecoin/tools/migration/internal" -) - -func TestLoggerWritesToFile(t *testing.T) { - f, err := ioutil.TempFile("", "logfile") - require.NoError(t, err) - defer func() { - require.NoError(t, os.Remove(f.Name())) - }() - - logger := NewLogger(f, false) - logger.Print("testing print 1") - errTest := errors.New("testing error 2") - logger.Error(errTest) - - // Reopen file so we can read new writes - out, err := ioutil.ReadFile(f.Name()) - require.NoError(t, err) - outStr := string(out) - assert.Contains(t, outStr, "testing print 1") - expectedErrStr := fmt.Sprintf("ERROR: %s", errTest.Error()) - assert.Contains(t, outStr, expectedErrStr) -} - -func TestLoggerWritesToBothVerbose(t *testing.T) { - // Point os.Stdout to a temp file - fStdout, err := ioutil.TempFile("", "stdout") - require.NoError(t, err) - defer func() { - require.NoError(t, os.Remove(fStdout.Name())) - }() - old := os.Stdout - os.Stdout = fStdout - defer func() { os.Stdout = old }() - - // Create log file - fLogFile, err := ioutil.TempFile("", "logfile") - require.NoError(t, err) - defer func() { - require.NoError(t, os.Remove(fLogFile.Name())) - }() - - // Log verbosely - logger := NewLogger(fLogFile, true) - logger.Print("test line") - errTest := errors.New("test err") - logger.Error(errTest) - expectedErrStr := fmt.Sprintf("ERROR: %s", errTest.Error()) - - // Check logfile - outLogFile, err := ioutil.ReadFile(fLogFile.Name()) - require.NoError(t, err) - outLogFileStr := string(outLogFile) - assert.Contains(t, outLogFileStr, "test line") - assert.Contains(t, outLogFileStr, expectedErrStr) - - // Check stdout alias file - outStdout, err := ioutil.ReadFile(fStdout.Name()) - require.NoError(t, err) - outStdoutStr := string(outStdout) - assert.Contains(t, outStdoutStr, "test line") - assert.Contains(t, outStdoutStr, expectedErrStr) -} diff --git a/tools/migration/internal/repo_fs_helpers.go b/tools/migration/internal/repo_fs_helpers.go deleted file mode 100644 index 
80de442bab..0000000000 --- a/tools/migration/internal/repo_fs_helpers.go +++ /dev/null @@ -1,88 +0,0 @@ -package internal - -import ( - "errors" - "fmt" - "os" - "time" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - rcopy "github.com/otiai10/copy" -) - -// This is a set of file system helpers for repo migration. -// -// CloneRepo and InstallRepo expect a symlink that points to the filecoin repo -// directory, typically ~/.filecoin or whatever FIL_PATH is set to. -// -// This does not touch sector data. - -// CloneRepo copies a linked repo directory to a new, writable repo directory. -// The directory created will be named with a timestamp, version number, and -// uniqueifyig tag if necessary. -// -// Params: -// linkPath: path to a symlink, which links to an actual repo directory to be cloned. -func CloneRepo(linkPath string, newVersion uint) (string, error) { - repoDirPath, err := os.Readlink(linkPath) - if err != nil { - return "", fmt.Errorf("repo path must be a symbolic link: %s", err) - } - - newDirPath, err := makeNewRepoPath(linkPath, newVersion) - if err != nil { - return "", err - } - - if err := rcopy.Copy(repoDirPath, newDirPath); err != nil { - return "", err - } - if err := os.Chmod(newDirPath, os.ModeDir|0744); err != nil { - return "", err - } - return newDirPath, nil -} - -// InstallNewRepo updates a symlink to point to a new repo directory. -func InstallNewRepo(linkPath, newRepoPath string) error { - // Check that linkPath is a symlink. - if _, err := os.Readlink(linkPath); err != nil { - return err - } - - // Check the repo exists. - if _, err := os.Stat(newRepoPath); err != nil { - return err - } - - // Replace the symlink. - if err := os.Remove(linkPath); err != nil { - return err - } - if err := os.Symlink(newRepoPath, linkPath); err != nil { - return err - } - return nil -} - -// makeNewRepoPath generates a new repo path for a migration. 
-// Params: -// linkPath: the actual old repo path -// version: the prospective version for the new repo -// Returns: -// a vacant path for a new repo directory -// Example output: -// /Users/davonte/.filecoin-20190806-150455-v002 -func makeNewRepoPath(linkPath string, version uint) (string, error) { - // Search for a free name - now := time.Now() - var newpath string - for i := uint(0); i < 1000; i++ { - newpath = repo.MakeRepoDirName(linkPath, now, version, i) - if _, err := os.Stat(newpath); os.IsNotExist(err) { - return newpath, nil - } - } - // this should never happen, but just in case. - return "", errors.New("couldn't find a free dirname for cloning") -} diff --git a/tools/migration/internal/repo_fs_helpers_test.go b/tools/migration/internal/repo_fs_helpers_test.go deleted file mode 100644 index 15eede533e..0000000000 --- a/tools/migration/internal/repo_fs_helpers_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package internal_test - -import ( - "os" - "regexp" - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - . 
"github.com/filecoin-project/go-filecoin/tools/migration/internal" -) - -func TestRepoMigrationHelper_CloneRepo(t *testing.T) { - tf.UnitTest(t) - - t.Run("Creates the dir with the right permissions", func(t *testing.T) { - oldRepo := repo.RequireMakeTempDir(t, "") - defer repo.RequireRemoveAll(t, oldRepo) - - linkedRepoPath := oldRepo + "something" - require.NoError(t, os.Symlink(oldRepo, oldRepo+"something")) - defer repo.RequireRemoveAll(t, linkedRepoPath) - - newRepoPath, err := CloneRepo(linkedRepoPath, 42) - require.NoError(t, err) - defer repo.RequireRemoveAll(t, newRepoPath) - - stat, err := os.Stat(newRepoPath) - require.NoError(t, err) - expectedPerms := "drwxr--r--" - assert.Equal(t, expectedPerms, stat.Mode().String()) - }) - - t.Run("fails if the old repo does not point to a symbolic link", func(t *testing.T) { - oldRepo := repo.RequireMakeTempDir(t, "") - defer repo.RequireRemoveAll(t, oldRepo) - - result, err := CloneRepo(oldRepo, 42) - assert.Error(t, err, "old-repo must be a symbolic link.") - assert.Equal(t, "", result) - - linkedRepoPath := oldRepo + "something" - require.NoError(t, os.Symlink(oldRepo, oldRepo+"something")) - defer repo.RequireRemoveAll(t, linkedRepoPath) - - result, err = CloneRepo(linkedRepoPath, 42) - assert.NoError(t, err) - assert.NotEqual(t, "", result) - }) - - t.Run("Increments the int on the end until a free filename is found", func(t *testing.T) { - oldRepo := repo.RequireMakeTempDir(t, "") - defer repo.RequireRemoveAll(t, oldRepo) - - linkedRepoPath := oldRepo + "something" - require.NoError(t, os.Symlink(oldRepo, oldRepo+"something")) - defer repo.RequireRemoveAll(t, linkedRepoPath) - - // Call CloneRepo several times and ensure that the filename end - // is incremented, since these calls will happen in <1s. 
- var paths []string - var endRegex string - for i := 0; i < 10; i++ { - result, err := CloneRepo(linkedRepoPath, 42) - require.NoError(t, err) - - // Presence of the trailing uniqueifier depends on whether the timestamp ticks over - // a 1-second boundary. - endRegex = "[0-9]{8}-[0-9]{6}-v042(-\\d+)?$" - regx, err := regexp.Compile(endRegex) - assert.NoError(t, err) - assert.Regexp(t, regx, result) - - if len(paths) > 0 { - assert.NotEqual(t, paths[len(paths)-1], result) - } - paths = append(paths, result) - } - for _, dir := range paths { - repo.RequireRemoveAll(t, dir) - } - }) -} - -func TestRepoFSHelpers_InstallNewRepo(t *testing.T) { - tf.UnitTest(t) - - t.Run("swaps out the symlink", func(t *testing.T) { - container, repoLink := RequireInitRepo(t, 0) - defer repo.RequireRemoveAll(t, container) - - oldRepoPath := repo.RequireReadLink(t, repoLink) - newRepoPath, err := CloneRepo(repoLink, 42) - require.NoError(t, err) - - require.NoError(t, InstallNewRepo(repoLink, newRepoPath)) - AssertNewRepoInstalled(t, newRepoPath, oldRepoPath, repoLink) - }) - - t.Run("returns error and leaves symlink if new repo does not exist", func(t *testing.T) { - container, repoLink := RequireInitRepo(t, 0) - defer repo.RequireRemoveAll(t, container) - - oldRepoPath := repo.RequireReadLink(t, repoLink) - err := InstallNewRepo(repoLink, "/tmp/nonexistentfile") - assert.EqualError(t, err, "stat /tmp/nonexistentfile: no such file or directory") - - assert.NotEqual(t, "/tmp/nonexistentfile", repo.RequireReadLink(t, repoLink)) - assert.Equal(t, oldRepoPath, repo.RequireReadLink(t, repoLink)) - }) -} diff --git a/tools/migration/internal/runner.go b/tools/migration/internal/runner.go deleted file mode 100644 index a8620dafb3..0000000000 --- a/tools/migration/internal/runner.go +++ /dev/null @@ -1,262 +0,0 @@ -package internal - -import ( - "fmt" - "strconv" - - "github.com/mitchellh/go-homedir" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" -) - 
-// Migration is the interface to all repo migration versions. -type Migration interface { - // Describe returns a list of steps, as a formatted string, that a given Migration will take. - // These should correspond to named functions in the given Migration. - Describe() string - - // Migrate performs all migration steps for the Migration that implements the interface. - // Migrate expects newRepo to be: - // a directory - // read/writeable by this process, - // contain a copy of the old repo. - Migrate(newRepoPath string) error - - // Versions returns valid from and to migration versions for this migration. - Versions() (from, to uint) - - // Validate performs validation operations, using oldRepo for comparison. - // Validation requirements will be different for every migration. - // Validate expects newRepo to be - // a directory - // readable by this process - // already migrated - // It expects oldRepo to be - // a directory - // read-only by this process - // A successful validation returns nil. - Validate(oldRepoPath, newRepoPath string) error -} - -// MigrationRunner represents a migration command -type MigrationRunner struct { - // logger logs to stdout/err and a logfile. - logger *Logger - - // command is the migration command to run, passed from the CLI - command string - - // oldRepoOpt is the value of --old-repo passed from the CLI, - // expanded homedir where needed - oldRepoOpt string - - // newRepoPath is where the to-be-migrated/migrated repo is located, - // expanded homedir where needed - newRepoPath string - - // MigrationsProvider is a dependency for fetching available migrations - // to allow unit tests to supply test migrations without creating test fixtures. 
- MigrationsProvider func() []Migration -} - -// NewMigrationRunner builds a MigrationRunner for the given command and repo options -// Returns an error if homepath expansion fails for oldRepoOpt or newRepoOpt -func NewMigrationRunner(logger *Logger, command, oldRepoOpt, newRepoOpt string) (*MigrationRunner, error) { - oldPath, err := homedir.Expand(oldRepoOpt) - if err != nil { - return nil, err - } - newPath, err := homedir.Expand(newRepoOpt) - if err != nil { - return nil, err - } - return &MigrationRunner{ - logger: logger, - command: command, - oldRepoOpt: oldPath, - newRepoPath: newPath, - MigrationsProvider: DefaultMigrationsProvider, - }, nil -} - -// RunResult stores the needed results of calling Run() -type RunResult struct { - // full path to new repo (migrated or not). - // This is blank unless repo was cloned -- if it errors out too early or for "describe" - NewRepoPath string - - // Old version and new version. If no applicable migration is found, these will be equal, - // and if errors early they will = 0. - OldVersion, NewVersion uint - - // Any errors encountered by Run - Err error -} - -// Run executes the MigrationRunner -func (m *MigrationRunner) Run() RunResult { - repoVersion, err := m.repoVersion(m.oldRepoOpt) - if err != nil { - return RunResult{Err: err} - } - - targetVersion := m.getTargetMigrationVersion() - if repoVersion == targetVersion { - m.logger.Printf("Repo up-to-date: binary version %d = repo version %d", repoVersion, m.getTargetMigrationVersion()) - return RunResult{OldVersion: repoVersion, NewVersion: repoVersion} - } - - var mig Migration - if mig, err = m.findMigration(repoVersion); err != nil { - return RunResult{ - Err: errors.Wrapf(err, "migration check failed"), - OldVersion: repoVersion, - NewVersion: repoVersion, - } - } - // We just didn't find a migration that applies. This is fine. 
- if mig == nil { - return RunResult{ - OldVersion: repoVersion, - NewVersion: repoVersion, - } - } - err = m.runCommand(mig) - return RunResult{ - Err: err, - OldVersion: repoVersion, - NewVersion: targetVersion, - NewRepoPath: m.newRepoPath, - } -} - -// runCommand runs the migration command set in the Migration runner. -func (m *MigrationRunner) runCommand(mig Migration) error { - var err error - - from, to := mig.Versions() - - switch m.command { - case "describe": - // Describe is not expected to be run by a script, but by a human, so - // ignore the logger & print to stdout. - fmt.Printf("\n Migration from %d to %d:", from, to) - fmt.Println(mig.Describe()) - case "migrate": - if m.newRepoPath, err = CloneRepo(m.oldRepoOpt, to); err != nil { - return errors.Wrap(err, "clone repo failed") - } - m.logger.Printf("new repo will be at %s", m.newRepoPath) - - if err = mig.Migrate(m.newRepoPath); err != nil { - return errors.Wrap(err, "migration failed") - } - if err = m.validateAndUpdateVersion(to, m.newRepoPath, mig); err != nil { - return errors.Wrap(err, "validation failed") - } - if err = InstallNewRepo(m.oldRepoOpt, m.newRepoPath); err != nil { - return errors.Wrap(err, "installation failed") - } - case "buildonly": - if m.newRepoPath, err = CloneRepo(m.oldRepoOpt, to); err != nil { - return err - } - m.logger.Printf("new repo will be at %s", m.newRepoPath) - - if err = mig.Migrate(m.newRepoPath); err != nil { - return errors.Wrap(err, "migration failed") - } - if err = m.validateAndUpdateVersion(to, m.newRepoPath, mig); err != nil { - return errors.Wrap(err, "validation failed") - } - case "install": - if m.newRepoPath == "" { - return errors.New("installation failed: new repo path not specified") - } - - repoVer, err := repo.ReadVersion(m.newRepoPath) - if err != nil { - return errors.Wrap(err, "installation failed") - } - // quick check of version to help prevent install after a failed migration - newVer := strconv.FormatUint(uint64(to), 10) // to forestall 
parsing errors of version - if repoVer != newVer { - return fmt.Errorf("installation failed: repo has version %s, expected version %s", repoVer, newVer) - } - - if err = InstallNewRepo(m.oldRepoOpt, m.newRepoPath); err != nil { - return errors.Wrap(err, "installation failed") - } - } - return nil -} - -// repoVersion opens the version file for the given version, -// gets the version and validates it -func (m *MigrationRunner) repoVersion(repoPath string) (uint, error) { - strVersion, err := repo.ReadVersion(repoPath) - if err != nil { - return 0, err - } - - version, err := strconv.Atoi(strVersion) - if err != nil { - return 0, errors.Wrap(err, "repo version is corrupt") - } - - if version < 0 || version > 10000 { - return 0, errors.Errorf("repo version out of range: %s", strVersion) - } - - return uint(version), nil -} - -// validateAndUpdateVersion calls the migration's validate function and then bumps -// the version number in the new repo. -func (m *MigrationRunner) validateAndUpdateVersion(toVersion uint, newRepoPath string, mig Migration) error { - if err := mig.Validate(m.oldRepoOpt, newRepoPath); err != nil { - return err - } - return repo.WriteVersion(newRepoPath, toVersion) -} - -// getTargetMigrationVersion returns the maximum resulting version of any migration available from the provider. 
-func (m *MigrationRunner) getTargetMigrationVersion() uint { - targetVersion := uint(0) - migrations := m.MigrationsProvider() - for _, mig := range migrations { - _, to := mig.Versions() - if to > targetVersion { - targetVersion = to - } - } - return targetVersion -} - -// findMigration finds the list of migrations in the MigrationsProvder that is valid for -// upgrading current repoVersion -// returns: -// nil + error if >1 valid migration is found, or -// the migration to run + nil error -func (m *MigrationRunner) findMigration(repoVersion uint) (mig Migration, err error) { - var applicableMigs []Migration - for _, mig := range m.MigrationsProvider() { - from, to := mig.Versions() - if to != from+1 { - m.logger.Printf("refusing multi-version migration from %d to %d", from, to) - } else if from == repoVersion { - applicableMigs = append(applicableMigs, mig) - } else { - m.logger.Printf("skipping migration from %d to %d", from, to) - } - } - if len(applicableMigs) == 0 { - m.logger.Printf("did not find valid repo migration for version %d", repoVersion) - return nil, nil - } - if len(applicableMigs) > 1 { - return nil, fmt.Errorf("found >1 migration for version %d; cannot proceed", repoVersion) - } - return applicableMigs[0], nil -} diff --git a/tools/migration/internal/runner_buildonly_test.go b/tools/migration/internal/runner_buildonly_test.go deleted file mode 100644 index 157178d003..0000000000 --- a/tools/migration/internal/runner_buildonly_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package internal_test - -import ( - "os" - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - . 
"github.com/filecoin-project/go-filecoin/tools/migration/internal" -) - -func TestMigrationRunner_RunBuildonly(t *testing.T) { - tf.UnitTest(t) - - container, repoSymLink := RequireInitRepo(t, 0) - oldRepoPath := repo.RequireReadLink(t, repoSymLink) - defer repo.RequireRemoveAll(t, container) - - t.Run("clones repo, updates version, does not install", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "buildonly", repoSymLink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - runResult := runner.Run() - assert.NoError(t, runResult.Err) - - stat, err := os.Stat(runResult.NewRepoPath) - require.NoError(t, err) - assert.True(t, stat.IsDir()) - - AssertBumpedVersion(t, runResult.NewRepoPath, repoSymLink, 0) - assert.Equal(t, uint(1), runResult.NewVersion) - AssertLinked(t, oldRepoPath, repoSymLink) - }) -} diff --git a/tools/migration/internal/runner_install_test.go b/tools/migration/internal/runner_install_test.go deleted file mode 100644 index c84bcb918e..0000000000 --- a/tools/migration/internal/runner_install_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package internal_test - -import ( - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - . 
"github.com/filecoin-project/go-filecoin/tools/migration/internal" -) - -func TestMigrationRunner_RunInstall(t *testing.T) { - tf.IntegrationTest(t) - - t.Run("swaps out symlink", func(t *testing.T) { - container, repoSymlink := RequireInitRepo(t, 0) - defer repo.RequireRemoveAll(t, container) - - oldPath := repo.RequireReadLink(t, repoSymlink) - newPath, err := CloneRepo(repoSymlink, 1) - require.NoError(t, err) - require.NoError(t, repo.WriteVersion(newPath, 1)) // Fake migration - - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - - runner, err := NewMigrationRunner(logger, "install", repoSymlink, newPath) - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - runResult := runner.Run() - assert.NoError(t, runResult.Err) - AssertNewRepoInstalled(t, runResult.NewRepoPath, oldPath, repoSymlink) - }) - - t.Run("returns error if new-repo option is not given", func(t *testing.T) { - container, repoSymlink := RequireInitRepo(t, 0) - defer repo.RequireRemoveAll(t, container) - - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - - runner, err := NewMigrationRunner(logger, "install", repoSymlink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - runResult := runner.Run() - assert.EqualError(t, runResult.Err, "installation failed: new repo path not specified") - }) - - t.Run("returns error if new-repo is not found, and does not remove symlink", func(t *testing.T) { - container, repoSymlink := RequireInitRepo(t, 0) - defer repo.RequireRemoveAll(t, container) - oldPath := repo.RequireReadLink(t, repoSymlink) - - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := 
NewMigrationRunner(logger, "install", repoSymlink, "/tmp/nonexistent") - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - runResult := runner.Run() - - assert.EqualError(t, runResult.Err, "installation failed: open /tmp/nonexistent/version: no such file or directory") - AssertLinked(t, oldPath, repoSymlink) - }) - - t.Run("returns error if new repo does not have expected version", func(t *testing.T) { - container, repoSymlink := RequireInitRepo(t, 0) - defer repo.RequireRemoveAll(t, container) - oldPath := repo.RequireReadLink(t, repoSymlink) - - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - - newPath, err := CloneRepo(repoSymlink, 1) - require.NoError(t, err) - // Fail to actually update the version file - - runner, err := NewMigrationRunner(logger, "install", repoSymlink, newPath) - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - runResult := runner.Run() - assert.EqualError(t, runResult.Err, "installation failed: repo has version 0, expected version 1") - AssertLinked(t, oldPath, repoSymlink) - }) -} diff --git a/tools/migration/internal/runner_migrate_test.go b/tools/migration/internal/runner_migrate_test.go deleted file mode 100644 index d95ceba68a..0000000000 --- a/tools/migration/internal/runner_migrate_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package internal_test - -import ( - "testing" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - . 
"github.com/filecoin-project/go-filecoin/tools/migration/internal" -) - -func TestMigrationRunner_RunMigrate(t *testing.T) { - tf.UnitTest(t) - - container, repoSymLink := RequireInitRepo(t, 0) - oldRepoPath := repo.RequireReadLink(t, repoSymLink) - defer repo.RequireRemoveAll(t, container) - - t.Run("returns error when migration step fails", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "migrate", repoSymLink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderMigrationFails - runResult := runner.Run() - - assert.EqualError(t, runResult.Err, "migration failed: migration has failed") - }) - - t.Run("returns error when validation step fails", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "migrate", repoSymLink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderValidationFails - runResult := runner.Run() - assert.EqualError(t, runResult.Err, "validation failed: validation has failed") - }) - - t.Run("on success bumps version and installs new repo at symlink", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "migrate", repoSymLink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - runResult := runner.Run() - assert.NoError(t, runResult.Err) - AssertBumpedVersion(t, runResult.NewRepoPath, oldRepoPath, 0) - AssertNewRepoInstalled(t, runResult.NewRepoPath, oldRepoPath, repoSymLink) - }) -} diff --git a/tools/migration/internal/runner_test.go 
b/tools/migration/internal/runner_test.go deleted file mode 100644 index 9ae5fab8ef..0000000000 --- a/tools/migration/internal/runner_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package internal_test - -import ( - "errors" - "io/ioutil" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - . "github.com/filecoin-project/go-filecoin/tools/migration/internal" -) - -func TestMigrationRunner_Run(t *testing.T) { - tf.UnitTest(t) - - container, repoLink := RequireInitRepo(t, 0) - oldRepoPath := repo.RequireReadLink(t, repoLink) - defer repo.RequireRemoveAll(t, container) - - t.Run("valid command returns error if repo not found", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "describe", "/home/filecoin-symlink", "doesnt/matter") - require.NoError(t, err) - assert.Error(t, runner.Run().Err, "no filecoin repo found in /home/filecoin-symlink.") - }) - - t.Run("can set MigrationsProvider", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "describe", repoLink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - migrations := runner.MigrationsProvider() - assert.NotEmpty(t, migrations) - assert.NoError(t, runner.Run().Err) - }) - - t.Run("Does not run the migration if the repo is already up to date", func(t *testing.T) { - require.NoError(t, repo.WriteVersion(oldRepoPath, 1)) - - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - 
logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "describe", repoLink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - assert.NoError(t, runner.Run().Err) - AssertLogged(t, dummyLogFile, "Repo up-to-date: binary version 1 = repo version 1") - }) - - t.Run("Returns error when a valid migration is not found", func(t *testing.T) { - require.NoError(t, repo.WriteVersion(oldRepoPath, 199)) - - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "describe", repoLink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - assert.NoError(t, runner.Run().Err) - - out, err := ioutil.ReadFile(dummyLogPath) - require.NoError(t, err) - assert.Contains(t, string(out), "skipping migration from 0 to 1") - assert.Contains(t, string(out), "did not find valid repo migration for version 199") - }) - - t.Run("Returns error when repo version is invalid", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "describe", repoLink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - require.NoError(t, ioutil.WriteFile(filepath.Join(oldRepoPath, "version"), []byte("-1"), 0644)) - assert.EqualError(t, runner.Run().Err, "repo version out of range: -1") - - require.NoError(t, repo.WriteVersion(oldRepoPath, 32767)) - assert.EqualError(t, runner.Run().Err, "repo version out of range: 32767") - }) - - t.Run("Returns error if version file does not contain an integer string", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - 
runner, err := NewMigrationRunner(logger, "describe", repoLink, "") - require.NoError(t, err) - runner.MigrationsProvider = testProviderPasses - - require.NoError(t, ioutil.WriteFile(filepath.Join(oldRepoPath, "version"), []byte("foo"), 0644)) - assert.EqualError(t, runner.Run().Err, "repo version is corrupt: strconv.Atoi: parsing \"foo\": invalid syntax") - }) - - t.Run("describe does not clone repo", func(t *testing.T) { - require.NoError(t, repo.WriteVersion(repoLink, 0)) - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "describe", repoLink, "") - require.NoError(t, err) - - runner.MigrationsProvider = testProviderPasses - - runResult := runner.Run() - require.NoError(t, runResult.Err) - assert.Equal(t, "", runResult.NewRepoPath) - }) - - t.Run("run fails if there is more than 1 applicable migration", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "describe", repoLink, "") - require.NoError(t, err) - - runner.MigrationsProvider = func() []Migration { - return []Migration{ - &TestMigDoesNothing, - &TestMigDoesNothing, - } - } - assert.EqualError(t, runner.Run().Err, "migration check failed: found >1 migration for version 0; cannot proceed") - }) - - t.Run("run skips multiversion", func(t *testing.T) { - dummyLogFile, dummyLogPath := repo.RequireOpenTempFile(t, "logfile") - defer repo.RequireRemoveAll(t, dummyLogPath) - logger := NewLogger(dummyLogFile, false) - runner, err := NewMigrationRunner(logger, "describe", repoLink, "") - require.NoError(t, err) - - runner.MigrationsProvider = func() []Migration { - return []Migration{ - &TestMigDoesNothing, - &TestMigMultiversion, - } - } - assert.NoError(t, runner.Run().Err) - - out, err := 
ioutil.ReadFile(dummyLogPath) - require.NoError(t, err) - assert.Contains(t, string(out), "refusing multi-version migration from 1 to 3") - }) - - t.Run("newRepoOpt is ignored for commands other than install", func(t *testing.T) { - }) -} - -func testProviderPasses() []Migration { - return []Migration{&TestMigDoesNothing} -} - -func testProviderValidationFails() []Migration { - return []Migration{&TestMigFailsValidation} -} - -func testProviderMigrationFails() []Migration { - return []Migration{&TestMigFailsMigration} -} - -type TestMigration struct { - describeFunc func() string - migrateFunc func(string) error - versionsFunc func() (uint, uint) - validateFunc func(string, string) error -} - -func (m *TestMigration) Describe() string { - return m.describeFunc() -} - -func (m *TestMigration) Migrate(newRepoPath string) error { - return m.migrateFunc(newRepoPath) -} -func (m *TestMigration) Versions() (from, to uint) { - return m.versionsFunc() -} - -func (m *TestMigration) Validate(oldRepoPath, newRepoPath string) error { - return m.validateFunc(oldRepoPath, newRepoPath) -} - -var TestMigFailsValidation = TestMigration{ - describeFunc: func() string { return "migration fails validation step" }, - versionsFunc: func() (uint, uint) { return 0, 1 }, - migrateFunc: func(string) error { return nil }, - validateFunc: func(string, string) error { return errors.New("validation has failed") }, -} - -var TestMigDoesNothing = TestMigration{ - describeFunc: func() string { return "the migration that doesn't do anything" }, - versionsFunc: func() (uint, uint) { return 0, 1 }, - migrateFunc: func(string) error { return nil }, - validateFunc: func(string, string) error { return nil }, -} - -var TestMigFailsMigration = TestMigration{ - describeFunc: func() string { return "migration fails migration step" }, - versionsFunc: func() (uint, uint) { return 0, 1 }, - migrateFunc: func(string) error { return errors.New("migration has failed") }, - validateFunc: func(string, string) error 
{ return nil }, -} - -var TestMigMultiversion = TestMigration{ - describeFunc: func() string { return "the migration that skips a version" }, - versionsFunc: func() (uint, uint) { return 1, 3 }, - migrateFunc: func(string) error { return nil }, - validateFunc: func(string, string) error { return nil }, -} diff --git a/tools/migration/internal/test_helpers.go b/tools/migration/internal/test_helpers.go deleted file mode 100644 index 095ac6653b..0000000000 --- a/tools/migration/internal/test_helpers.go +++ /dev/null @@ -1,69 +0,0 @@ -package internal - -import ( - "io/ioutil" - "os" - "path" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-filecoin/internal/pkg/config" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" -) - -// AssertLogged asserts that a given string is contained in the given log file. -func AssertLogged(t *testing.T, logFile *os.File, subStr string) { - out, err := ioutil.ReadFile(logFile.Name()) - require.NoError(t, err) - outStr := string(out) - assert.Contains(t, outStr, subStr) -} - -// RequireInitRepo establishes a new repo symlink and directory inside a temporary container -// directory. Migrations of the repo are expected to be placed within the same container, such -// that a test can clean up arbitrary migrations by removing the container. -// Returns the path to the container directory, and the repo symlink inside it. 
-func RequireInitRepo(t *testing.T, version uint) (container, repoLink string) { - container = repo.RequireMakeTempDir(t, "migration-test") - repoLink = path.Join(container, "repo") - err := repo.InitFSRepo(repoLink, version, config.NewDefaultConfig()) - require.NoError(t, err) - return -} - -// AssertLinked verifies that repoLink points to oldRepoDir -func AssertLinked(t *testing.T, repoDir, repoLink string) { - newRepoTarget, err := os.Readlink(repoLink) - require.NoError(t, err) - assert.Equal(t, newRepoTarget, repoDir) -} - -// AssertNewRepoInstalled verifies that the repoLink points to newRepoDir, and that -// oldRepoDir is still there -func AssertNewRepoInstalled(t *testing.T, newRepoDir, oldRepoDir, repoLink string) { - linkTarget, err := os.Readlink(repoLink) - require.NoError(t, err) - assert.Equal(t, newRepoDir, linkTarget) - oldRepoStat, err := os.Stat(oldRepoDir) - require.NoError(t, err) - assert.True(t, oldRepoStat.IsDir()) -} - -// AssertRepoVersion verifies that the version in repoPath is equal to versionStr -func AssertRepoVersion(t *testing.T, versionStr, repoPath string) { - repoVersion, err := repo.ReadVersion(repoPath) - require.NoError(t, err) - assert.Equal(t, repoVersion, versionStr) -} - -// AssertBumpedVersion checks that the version oldRepoDir is as expected, -// that the version in newRepoDir is updated by 1 -func AssertBumpedVersion(t *testing.T, newRepoDir, oldRepoDir string, oldVersion uint64) { - oldVersionStr := strconv.FormatUint(oldVersion, 10) - AssertRepoVersion(t, oldVersionStr, oldRepoDir) - newVersionStr := strconv.FormatUint(oldVersion+1, 10) - AssertRepoVersion(t, newVersionStr, newRepoDir) -} diff --git a/tools/migration/main.go b/tools/migration/main.go deleted file mode 100644 index 8bd2d134a4..0000000000 --- a/tools/migration/main.go +++ /dev/null @@ -1,209 +0,0 @@ -package main - -import ( - "fmt" - "log" - "os" - "strings" - - "github.com/mitchellh/go-homedir" - - 
"github.com/filecoin-project/go-filecoin/tools/migration/internal" -) - -const defaultLogFilePath = "~/.filecoin-migration-logs" - -// USAGE is the usage documentation for the migration tool -const USAGE = ` -USAGE - go-filecoin-migrate -h|--help - go-filecoin-migrate (describe|buildonly|migrate) --old-repo= [-h|--help] [-v|--verbose] - go-filecoin-migrate install --old-repo= --new-repo= [-v|--verbose] - -COMMANDS - describe prints a description of what the current migration will do - - buildonly runs the migration and validations, but does not install the newly migrated - repo at the --old-repo symlink - - migrate runs migration, validations, and installs newly migrated repo at - --old-repo symlink - - install installs a newly migrated repo - -REQUIRED ARGUMENTS - --old-repo the symlink location of this node's filecoin home directory. This is required - even for the 'describe' command, as its repo version helps determine which migration - to run. This must be a symbolic link or migration will not proceed. - - --new-repo the location of a newly migrated repo. This is required only for the - install command and otherwise ignored. - -OPTIONS - -h, --help This message - -v --verbose Print diagnostic messages to stdout - --log-file The path of the file for writing detailed log output - -EXAMPLES - for a migration from version 1 to 2: - go-filecoin-migrate migrate --old-repo=~/.filecoin - Migrates then installs the repo. Migrated repo will be in ~/.filecoin_1_2_ - and symlinked to ~/.filecoin - - go-filecoin-migrate migrate --old-repo=/opt/filecoin - Migrates then installs the repo. Migrated repo will be in /opt/filecoin_1_2_ - and symlinked to /opt/filecoin - - go-filecoin-migrate build-only --old-repo=/opt/filecoin - Runs migration steps only. 
Migrated repo will be in /opt/filecoin_1_2_ - and symlinked to /opt/filecoin - - go-filecoin-migrate install --old-repo=/opt/filecoin --new-repo=/opt/filecoin-123445566860 --verbose - swaps out the link at /opt/filecoin to point to /opt/filecoin-123445566860, as long as - /opt/filecoin is a symlink and /opt/filecoin-123445566860 has an up-to-date version. -` - -func main() { // nolint: deadcode - if len(os.Args) < 2 { - showUsageAndExit(1) - } - - command := os.Args[1] - - switch command { - case "-h", "--help": - showUsageAndExit(0) - case "describe": - logger, err := newLoggerWithVerbose(true) - if err != nil { - exitErr(err.Error()) - } - - oldRepoOpt := findOldRepoOrExit(logger) - - // Errors are handled inside runRunner - _ = runRunner(logger, command, oldRepoOpt, "") - - case "buildonly", "migrate", "install": - logger, err := newLoggerWithVerbose(getVerbose()) - if err != nil { - exitErr(err.Error()) - } - - oldRepoOpt := findOldRepoOrExit(logger) - - var newRepoOpt string - var found bool - - if command == "install" { - newRepoOpt, found = findOpt("new-repo", os.Args) - if !found { - exitErrCloseLogger(fmt.Sprintf("--new-repo is required for 'install'\n%s\n", USAGE), logger) - } - } - - runResult := runRunner(logger, command, oldRepoOpt, newRepoOpt) - if runResult.NewRepoPath != "" { - logger.Printf("New repo location: %s", runResult.NewRepoPath) - } - if runResult.NewVersion != runResult.OldVersion { - logger.Printf("Repo has been migrated to version %d", runResult.NewVersion) - } - err = logger.Close() - if err != nil { - exitErr(err.Error()) - } - default: - exitErr(fmt.Sprintf("invalid command: %s\n%s\n", command, USAGE)) - } -} - -func runRunner(logger *internal.Logger, command string, oldRepoOpt string, newRepoOpt string) internal.RunResult { - runner, err := internal.NewMigrationRunner(logger, command, oldRepoOpt, newRepoOpt) - if err != nil { - exitErrCloseLogger(err.Error(), logger) - } - runResult := runner.Run() - if runResult.Err != nil { - 
exitErrCloseLogger(runResult.Err.Error(), logger) - } - return runResult -} - -func findOldRepoOrExit(logger *internal.Logger) string { - oldRepoOpt, found := findOpt("old-repo", os.Args) - if !found { - exitErrCloseLogger(fmt.Sprintf("--old-repo is required\n%s\n", USAGE), logger) - } - return oldRepoOpt -} - -// exitError exit(1)s the executable with the given error String -func exitErr(errstr string) { - log.New(os.Stderr, "", 0).Println("Error: " + errstr) - os.Exit(1) -} - -// exitErrorCloseLogger closes the logger and calls exitError -func exitErrCloseLogger(errstr string, logger *internal.Logger) { - err := logger.Close() - if err != nil { - errstr = fmt.Sprintf("%s. Error closing logfile when reporting this error %s", errstr, err.Error()) - } - exitErr(errstr) -} - -// showUsageAndExit prints out USAGE and exits with the given code. -func showUsageAndExit(code int) { - fmt.Print(USAGE) - os.Exit(code) -} - -// getVerbose parses os.Args looking for -v or --verbose. -// returns whether it was found. -func getVerbose() bool { - if _, found := findOpt("-v", os.Args); found { - return true - } - _, res := findOpt("--verbose", os.Args) - return res -} - -// newLoggerWithVerbose opens a new logger & logfile with verboseness set to `verb` -func newLoggerWithVerbose(verb bool) (*internal.Logger, error) { - path, err := getLogFilePath() - if err != nil { - return nil, err - } - - logFile, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - return internal.NewLogger(logFile, verb), nil -} - -// getLogFilePath returns the path of the logfile. -func getLogFilePath() (string, error) { - if logPath, found := findOpt("--log-file", os.Args); found { - return logPath, nil - } - - return homedir.Expand(defaultLogFilePath) -} - -// findOpt fetches option values. -// returns: string: value of option set with "=". 
If not set, returns "" -// bool: true if option was found, false if not -func findOpt(str string, args []string) (string, bool) { - for _, elem := range args { - if strings.Contains(elem, str) { - opt := strings.Split(elem, "=") - if len(opt) > 1 { - return opt[1], true - } - return "", true - } - } - return "", false -} diff --git a/tools/migration/main_test.go b/tools/migration/main_test.go deleted file mode 100644 index 1db7040de9..0000000000 --- a/tools/migration/main_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package main_test - -import ( - "os" - "os/exec" - "path/filepath" - "testing" - - "github.com/filecoin-project/go-filecoin/build/project" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - "github.com/filecoin-project/go-filecoin/tools/migration/internal" -) - -func TestUsage(t *testing.T) { - tf.IntegrationTest(t) // because we're using exec.Command - command := requireGetMigrationBinary(t) - usage := `go-filecoin-migrate (describe|buildonly|migrate) --old-repo= [-h|--help] [-v|--verbose]` - - t.Run("bare invocation prints usage but exits with 1", func(t *testing.T) { - out, err := exec.Command(command).CombinedOutput() - assert.Contains(t, string(out), usage) - assert.Error(t, err) - }) - - t.Run("-h prints usage", func(t *testing.T) { - out, err := exec.Command(command, "-h").CombinedOutput() - assert.Contains(t, string(out), usage) - assert.NoError(t, err) - }) - - t.Run("--help prints usage", func(t *testing.T) { - out, err := exec.Command(command, "--help").CombinedOutput() - assert.Contains(t, string(out), usage) - assert.NoError(t, err) - }) -} - -func TestOptions(t *testing.T) { - tf.IntegrationTest(t) // because we're using exec.Command - command := requireGetMigrationBinary(t) - usage := `go-filecoin-migrate (describe|buildonly|migrate) --old-repo= [-h|--help] 
[-v|--verbose]` - - t.Run("error when calling with invalid command", func(t *testing.T) { - out, err := exec.Command(command, "foo", "--old-repo=something").CombinedOutput() - assert.Contains(t, string(out), "Error: invalid command: foo") - assert.Contains(t, string(out), usage) - assert.Error(t, err) - }) - - t.Run("accepts --verbose or -v with valid command", func(t *testing.T) { - repoDir, symlink := internal.RequireInitRepo(t, 1) - defer repo.RequireRemoveAll(t, repoDir) - defer repo.RequireRemoveAll(t, symlink) - - expected := "MetadataFormatJSONtoCBOR migrates the storage repo from version 1 to 2." - - out, err := exec.Command(command, "describe", "--old-repo="+symlink, "--verbose").CombinedOutput() - assert.NoError(t, err) - assert.Contains(t, string(out), expected) - - _, err = exec.Command(command, "describe", "--old-repo="+symlink, "-v").CombinedOutput() - assert.NoError(t, err) - assert.Contains(t, string(out), expected) - }) - - t.Run("requires --old-repo argument", func(t *testing.T) { - out, err := exec.Command(command, "describe").CombinedOutput() - expected := "Error: --old-repo is required" - assert.Error(t, err) - assert.Contains(t, string(out), expected) // should include describe output when implemented - assert.Contains(t, string(out), usage) - }) -} - -func requireGetMigrationBinary(t *testing.T) string { - root := project.Root() - - bin := filepath.Join(root, "tools/migration/go-filecoin-migrate") - _, err := os.Stat(bin) - require.NoError(t, err) - - return bin -} diff --git a/tools/migration/main_whitebox_test.go b/tools/migration/main_whitebox_test.go deleted file mode 100644 index 3ac23e431f..0000000000 --- a/tools/migration/main_whitebox_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "testing" - - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" - ast "github.com/stretchr/testify/assert" -) - -func TestMigrationRunner_findOpt(t *testing.T) { - tf.UnitTest(t) - assert := ast.New(t) - args 
:= []string{"newRepo=/tmp/somedir", "verbose"} - - t.Run("returns the value+true for an option, if given", func(t *testing.T) { - val, found := findOpt("newRepo", args) - assert.True(found) - assert.Equal("/tmp/somedir", val) - }) - - t.Run("returns empty string + true for option with no value set", func(t *testing.T) { - val, found := findOpt("verbose", args) - assert.True(found) - assert.Equal("", val) - }) - - t.Run("returns empty string + false if option is not found", func(t *testing.T) { - val, found := findOpt("nuffin", args) - assert.False(found) - assert.Equal("", val) - }) -} diff --git a/tools/migration/migrations/repo-1-2/fixtures/repo-1.tgz b/tools/migration/migrations/repo-1-2/fixtures/repo-1.tgz deleted file mode 100644 index 336dad6a10..0000000000 Binary files a/tools/migration/migrations/repo-1-2/fixtures/repo-1.tgz and /dev/null differ diff --git a/tools/migration/migrations/repo-1-2/migration.go b/tools/migration/migrations/repo-1-2/migration.go deleted file mode 100644 index 20d8824cb3..0000000000 --- a/tools/migration/migrations/repo-1-2/migration.go +++ /dev/null @@ -1,385 +0,0 @@ -package migration12 - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/pkg/errors" - - "github.com/filecoin-project/go-filecoin/internal/pkg/block" - "github.com/filecoin-project/go-filecoin/internal/pkg/chain" - "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" - "github.com/filecoin-project/go-filecoin/internal/pkg/repo" -) - -// This migration was written for a prior development state in order to demonstrate migration -// functionality and workflow. It's not expected to run against a current repo and the tests have -// since been removed. This code remains for the next migration author to use as a reference, -// after which we can probably remove it. 
- -// duplicate head key here to protect against future changes -var headKey = datastore.NewKey("/chain/heaviestTipSet") - -// migrationChainStore is a stripped down implementation of the Store interface -// based on Store, containing only the fields and functions needed for the migration. -// -// the extraction line is drawn at package level where the migration occurs, i.e. chain, -// and no further. -type migrationChainStore struct { - // BsPriv is the on disk storage for blocks. - BsPriv bstore.Blockstore - - // Ds is the datastore backing bsPriv. It is also accessed directly - // to set and get chain meta-data, specifically the tipset cidset to - // state root mapping, and the heaviest tipset cids. - Ds repo.Datastore - - // head is the tipset at the head of the best known chain. - Head block.TipSet -} - -// GetBlock retrieves a block by cid. -func (store *migrationChainStore) GetBlock(ctx context.Context, c cid.Cid) (*block.Block, error) { - data, err := store.BsPriv.Get(c) - if err != nil { - return nil, errors.Wrapf(err, "failed to get block %s", c.String()) - } - return block.DecodeBlock(data.RawData()) -} - -// MetadataFormatJSONtoCBOR is the migration from version 1 to 2. -type MetadataFormatJSONtoCBOR struct { - store *migrationChainStore -} - -// Describe describes the steps this migration will take. -func (m *MetadataFormatJSONtoCBOR) Describe() string { - return `MetadataFormatJSONtoCBOR migrates the storage repo from version 1 to 2. - - This migration changes chain store metadata serialization from JSON to CBOR. - The chain store metadata will be read in as JSON and rewritten as CBOR. - Chain store metadata consists of associations between tipset keys and state - root cids and the tipset key of the head of the chain. No other repo data is changed. 
-` -} - -// Migrate performs the migration steps -func (m *MetadataFormatJSONtoCBOR) Migrate(newRepoPath string) error { - // open the repo path - oldVer, _ := m.Versions() - - // This call performs some checks on the repo before we start. - fsrepo, err := repo.OpenFSRepo(newRepoPath, oldVer) - if err != nil { - return err - } - defer mustCloseRepo(fsrepo) - - // construct the chainstore to be migrated from FSRepo - m.store = &migrationChainStore{ - BsPriv: bstore.NewBlockstore(fsrepo.ChainDatastore()), - Ds: fsrepo.ChainDatastore(), - } - - if err = m.convertJSONtoCBOR(context.Background()); err != nil { - return err - } - return nil -} - -// Versions returns the old and new versions that are valid for this migration -func (m *MetadataFormatJSONtoCBOR) Versions() (from, to uint) { - return 1, 2 -} - -// Validate performs validation tests for the migration steps: -// Reads in the old chainstore and the new chainstore, -// Compares the two and returns error if they are not completely equal once loaded. -func (m *MetadataFormatJSONtoCBOR) Validate(oldRepoPath, newRepoPath string) error { - // open the repo path - oldVer, _ := m.Versions() - - // This call performs some checks on the repo before we start. - oldFsRepo, err := repo.OpenFSRepo(oldRepoPath, oldVer) - if err != nil { - return err - } - defer mustCloseRepo(oldFsRepo) - - // construct the chainstore from FSRepo - oldStore := &migrationChainStore{ - BsPriv: bstore.NewBlockstore(oldFsRepo.ChainDatastore()), - Ds: oldFsRepo.ChainDatastore(), - } - - // Version hasn't been updated yet. 
- newFsRepo, err := repo.OpenFSRepo(newRepoPath, oldVer) - if err != nil { - return err - } - defer mustCloseRepo(newFsRepo) - - newStore := &migrationChainStore{ - BsPriv: bstore.NewBlockstore(newFsRepo.ChainDatastore()), - Ds: newFsRepo.ChainDatastore(), - } - - ctx := context.Background() - - // compare entire chainstores - if err = compareChainStores(ctx, oldStore, newStore); err != nil { - return errors.Wrap(err, "old and new chainStores are not equal") - } - - return nil -} - -// convertJSONtoCBOR is adapted from chain Store.Load: -// 1. stripped out logging -// 2. instead of calling Store.PutTipSetMetadata it just calls -// writeTipSetAndStateAsCBOR, because block format is unchanged. -// 3. then calls writeHeadAsCBOR, instead of store.SetHead which also publishes an event -// and does some logging -// -// This migration will leave some fork metadata in JSON format in the repo, but it won't matter: -// for consensus purposes we don't care about uncle blocks, and if we see the block again, -// it will be over the network, then DataStore.Put will look for it as CBOR, won't find it and write it out as CBOR anyway. If it's never seen again we don't care about it. -func (m *MetadataFormatJSONtoCBOR) convertJSONtoCBOR(ctx context.Context) error { - tipCids, err := loadChainHeadAsJSON(m.store) - if err != nil { - return err - } - var blocks []*block.Block - // traverse starting from head to begin loading the chain - for it := tipCids.Iter(); !it.Complete(); it.Next() { - blk, err := m.store.GetBlock(ctx, it.Value()) - if err != nil { - return errors.Wrap(err, "failed to load block in head TipSet") - } - blocks = append(blocks, blk) - } - - headTs, err := block.NewTipSet(blocks...) 
- if err != nil { - return errors.Wrap(err, "failed to add validated block to TipSet") - } - - tipsetProvider := chain.TipSetProviderFromBlocks(ctx, m.store) - for iter := chain.IterAncestors(ctx, tipsetProvider, headTs); !iter.Complete(); err = iter.Next() { - if err != nil { - return err - } - - stateRoot, err := loadStateRootAsJSON(iter.Value(), m.store) - if err != nil { - return err - } - tipSetAndState := &chain.TipSetMetadata{ - TipSet: iter.Value(), - TipSetStateRoot: stateRoot, - } - // only write TipSet and State; Block and tipIndex formats are not changed. - if err = m.writeTipSetAndStateAsCBOR(tipSetAndState); err != nil { - return err - } - } - - // write head back out as CBOR - err = m.writeHeadAsCBOR(ctx, headTs.Key()) - if err != nil { - return err - } - return nil -} - -// writeHeadAsCBOR writes the head. Taken from Store.writeHead, which was called by -// setHeadPersistent. We don't need mutexes for this -func (m *MetadataFormatJSONtoCBOR) writeHeadAsCBOR(ctx context.Context, cids block.TipSetKey) error { - val, err := encoding.Encode(cids) - if err != nil { - return err - } - - // this writes the value to the FSRepo - return m.store.Ds.Put(headKey, val) -} - -// writeTipSetAndStateAsCBOR writes the tipset key and the state root id to the -// datastore. (taken from Store.writeTipSetAndState) -func (m *MetadataFormatJSONtoCBOR) writeTipSetAndStateAsCBOR(tsas *chain.TipSetMetadata) error { - if tsas.TipSetStateRoot == cid.Undef { - return errors.New("attempting to write state root cid.Undef") - } - val, err := encoding.Encode(tsas.TipSetStateRoot) - if err != nil { - return err - } - - // datastore keeps tsKey:stateRoot (k,v) pairs. 
- h, err := tsas.TipSet.Height() - if err != nil { - return err - } - keyStr := makeKey(tsas.TipSet.String(), h) - key := datastore.NewKey(keyStr) - - // this writes the value to the FSRepo - return m.store.Ds.Put(key, val) -} - -// loadChainHeadAsJSON loads the latest known head from disk assuming JSON format -func loadChainHeadAsJSON(chainStore *migrationChainStore) (block.TipSetKey, error) { - return loadChainHead(false, chainStore) -} - -// loadChainHeadAsCBOR loads the latest known head from disk assuming CBOR format -func loadChainHeadAsCBOR(store *migrationChainStore) (block.TipSetKey, error) { - return loadChainHead(true, store) -} - -// loadChainHead loads the chain head CIDs as either CBOR or JSON -func loadChainHead(asCBOR bool, store *migrationChainStore) (block.TipSetKey, error) { - var emptyCidSet block.TipSetKey - - bb, err := store.Ds.Get(headKey) - if err != nil { - return emptyCidSet, errors.Wrap(err, "failed to read headKey") - } - - var cids block.TipSetKey - if asCBOR { - err = encoding.Decode(bb, &cids) - - } else { - err = json.Unmarshal(bb, &cids) - } - if err != nil { - return emptyCidSet, errors.Wrap(err, "failed to cast headCids") - } - - return cids, nil -} - -// function wrapper for loading state root as JSON -func loadStateRootAsJSON(ts block.TipSet, store *migrationChainStore) (cid.Cid, error) { - return loadStateRoot(ts, false, store) -} - -// loadStateRoot loads the chain store metadata into store, updating its -// state root and then returning the state root + any error -// pass true to load as CBOR (new format) or false to load as JSON (old format) -func loadStateRoot(ts block.TipSet, asCBOR bool, store *migrationChainStore) (cid.Cid, error) { - h, err := ts.Height() - if err != nil { - return cid.Undef, err - } - key := datastore.NewKey(makeKey(ts.String(), h)) - bb, err := store.Ds.Get(key) - if err != nil { - return cid.Undef, errors.Wrapf(err, "failed to read tipset key %s", ts.String()) - } - - var stateRoot cid.Cid - if 
asCBOR { - err = encoding.Decode(bb, &stateRoot) - } else { - err = json.Unmarshal(bb, &stateRoot) - } - if err != nil { - return cid.Undef, errors.Wrapf(err, "failed to cast state root of tipset %s", ts.String()) - } - return stateRoot, nil -} - -// compareChainStores loads each chain store and iterates through each, comparing heads -// then state root at each iteration, returning error if: -// * at any point state roots are not equal -// * the old and new chain store iterators are not complete at the same time -// * at the end the two heads are not equal -func compareChainStores(ctx context.Context, oldStore *migrationChainStore, newStore *migrationChainStore) error { - oldTipCids, err := loadChainHeadAsJSON(oldStore) - if err != nil { - return err - } - - newTipCids, err := loadChainHeadAsCBOR(newStore) - if err != nil { - return err - } - - oldHeadTs, err := loadTipSet(ctx, oldTipCids, oldStore) - if err != nil { - return err - } - - newHeadTs, err := loadTipSet(ctx, newTipCids, newStore) - if err != nil { - return err - } - - if !newHeadTs.Equals(oldHeadTs) { - return errors.New("new and old head tipsets not equal") - } - - oldIt := chain.IterAncestors(ctx, chain.TipSetProviderFromBlocks(ctx, oldStore), oldHeadTs) - for newIt := chain.IterAncestors(ctx, chain.TipSetProviderFromBlocks(ctx, newStore), newHeadTs); !newIt.Complete(); err = newIt.Next() { - if err != nil { - return err - } - if oldIt.Complete() { - return errors.New("old chain store is shorter than new chain store") - } - - newSr, err := loadStateRoot(newIt.Value(), true, newStore) - if err != nil { - return err - } - - oldSr, err := loadStateRootAsJSON(oldIt.Value(), oldStore) - if err != nil { - return err - } - if !newSr.Equals(oldSr) { - return errors.New("current state root not equal for block") - } - if err = oldIt.Next(); err != nil { - return errors.Wrap(err, "old chain store Next failed") - } - } - if !oldIt.Complete() { - return errors.New("old chain store is longer than new chain store") 
- } - return nil -} - -func loadTipSet(ctx context.Context, cidSet block.TipSetKey, chainStore *migrationChainStore) (headTs block.TipSet, err error) { - var blocks []*block.Block - for iter := cidSet.Iter(); !iter.Complete(); iter.Next() { - blk, err := chainStore.GetBlock(ctx, iter.Value()) - if err != nil { - return headTs, errors.Wrap(err, "failed to load block in head TipSet") - } - blocks = append(blocks, blk) - } - headTs, err = block.NewTipSet(blocks...) - if err != nil { - return headTs, err - } - return headTs, nil -} - -func makeKey(pKey string, h abi.ChainEpoch) string { - return fmt.Sprintf("p-%s h-%d", pKey, h) -} - -func mustCloseRepo(fsRepo *repo.FSRepo) { - err := fsRepo.Close() - if err != nil { - panic(err) - } -} diff --git a/tools/prerelease-tool/main.go b/tools/prerelease-tool/main.go index 45d4e0b21a..cc0d2922af 100644 --- a/tools/prerelease-tool/main.go +++ b/tools/prerelease-tool/main.go @@ -29,7 +29,7 @@ func main() { dryRun := flag.Bool("dry-run", false, "perform a dry run instead of executing create/delete/update actions") limit := flag.Int("limit", 7, "limit of prereleases to keep") owner := flag.String("owner", "filecoin-project", "github owner or organization") - repo := flag.String("repo", "go-filecoin", "github project repository") + repo := flag.String("repo", "venus", "github project repository") token, ok := os.LookupEnv("GITHUB_TOKEN") if !ok { log.Fatal("Github token must be provided through GITHUB_TOKEN environment variable") @@ -40,10 +40,8 @@ func main() { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() go func() { - select { - case <-ctx.Done(): - log.Print(ctx.Err()) - } + <-ctx.Done() + log.Print(ctx.Err()) }() tc := oauth2.NewClient(ctx, ts) client := github.NewClient(tc) @@ -127,7 +125,7 @@ func (r *prereleaseTool) deleteReleases(ctx context.Context, m releaseToDelete) return handleDeleteReleasesReturn(ok, err, deleteCount) } if resp.StatusCode != 204 { - return 
handleDeleteReleasesReturn(ok, fmt.Errorf(`Unexpected HTTP status code from release delete request. + return handleDeleteReleasesReturn(ok, fmt.Errorf(`unexpected HTTP status code from release delete request. Expected: 204 Got: %d`, resp.StatusCode), deleteCount) } resp, err = r.client.Git.DeleteRef(ctx, r.Owner, r.Repo, "tags/"+tag) @@ -135,7 +133,7 @@ func (r *prereleaseTool) deleteReleases(ctx context.Context, m releaseToDelete) return handleDeleteReleasesReturn(ok, err, deleteCount) } if resp.StatusCode != 204 { - return handleDeleteReleasesReturn(ok, fmt.Errorf(`Unexpected HTTP status code from release delete request. + return handleDeleteReleasesReturn(ok, fmt.Errorf(`unexpected HTTP status code from release delete request. Expected: 204 Got: %d`, resp.StatusCode), deleteCount) } deleteCount++ diff --git a/tools/prerelease-tool/main_test.go b/tools/prerelease-tool/main_test.go index 12c81bf419..4d712c7b30 100644 --- a/tools/prerelease-tool/main_test.go +++ b/tools/prerelease-tool/main_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" "github.com/google/go-github/github" "github.com/stretchr/testify/assert" ) diff --git a/tools/seed/index.go b/tools/seed/index.go new file mode 100644 index 0000000000..e52e6f4777 --- /dev/null +++ b/tools/seed/index.go @@ -0,0 +1,6 @@ +package seed + +// ID identifies sector storage by UUID. 
One sector storage should map to one +// +// filesystem, local or networked / shared by multiple machines +type ID string diff --git a/tools/seed/local.go b/tools/seed/local.go new file mode 100644 index 0000000000..aeda6d3396 --- /dev/null +++ b/tools/seed/local.go @@ -0,0 +1,15 @@ +package seed + +// LocalStorageMeta [path]/sectorstore.json +type LocalStorageMeta struct { + ID ID + + // A high weight means data is more likely to be stored in this path + Weight uint64 // 0 = readonly + + // Intermediate data for the sealing process will be stored here + CanSeal bool + + // Finalized sectors that will be proved over time will be stored here + CanStore bool +} diff --git a/tools/seed/seed.go b/tools/seed/seed.go new file mode 100644 index 0000000000..4788767747 --- /dev/null +++ b/tools/seed/seed.go @@ -0,0 +1,366 @@ +package seed + +import ( + "context" + "crypto/rand" + "encoding/csv" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/impl" + "github.com/filecoin-project/venus/venus-shared/types" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-commp-utils/zerocomm" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" + ic "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/minio/blake2b-simd" + + "github.com/filecoin-project/venus/pkg/crypto" + "github.com/filecoin-project/venus/pkg/gen/genesis" + "github.com/filecoin-project/venus/pkg/util/ffiwrapper/basicfs" + "github.com/filecoin-project/venus/pkg/util/storiface" +) + +var log = logging.Logger("preseal") + +func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.SectorNumber, sectors int, sbroot string, 
preimage []byte, ki *crypto.KeyInfo, fakeSectors bool) (*genesis.Miner, *crypto.KeyInfo, error) { + mid, err := address.IDFromAddress(maddr) + if err != nil { + return nil, nil, err + } + + if err := os.MkdirAll(sbroot, 0o775); err != nil { //nolint:gosec + return nil, nil, err + } + + next := offset + + sbfs := &basicfs.Provider{ + Root: sbroot, + } + + sb, err := impl.New(sbfs) + if err != nil { + return nil, nil, err + } + + ssize, err := spt.SectorSize() + if err != nil { + return nil, nil, err + } + + var sealedSectors []*genesis.PreSeal + for i := 0; i < sectors; i++ { + sid := abi.SectorID{Miner: abi.ActorID(mid), Number: next} + ref := storage.SectorRef{ID: sid, ProofType: spt} + next++ + + var preseal *genesis.PreSeal + if !fakeSectors { + preseal, err = presealSector(sb, sbfs, ref, ssize, preimage) + if err != nil { + return nil, nil, err + } + } else { + preseal, err = presealSectorFake(sbfs, ref, ssize) + if err != nil { + return nil, nil, err + } + } + + sealedSectors = append(sealedSectors, preseal) + } + + var minerAddr address.Address + if ki != nil { + minerAddr, err = ki.Address() + if err != nil { + return nil, nil, err + } + } else { + newKeyInfo, err := crypto.NewBLSKeyFromSeed(rand.Reader) + if err != nil { + return nil, nil, err + } + ki = &newKeyInfo + minerAddr, err = ki.Address() + if err != nil { + return nil, nil, err + } + } + + var pid peer.ID + { + log.Warn("PeerID not specified, generating dummy") + p, _, err := ic.GenerateEd25519Key(rand.Reader) + if err != nil { + return nil, nil, err + } + + pid, err = peer.IDFromPrivateKey(p) + if err != nil { + return nil, nil, err + } + } + + miner := &genesis.Miner{ + ID: maddr, + Owner: minerAddr, + Worker: minerAddr, + MarketBalance: big.Zero(), + PowerBalance: big.Zero(), + SectorSize: ssize, + Sectors: sealedSectors, + PeerID: pid, + } + + if err := createDeals(miner, ki, maddr, ssize); err != nil { + return nil, nil, fmt.Errorf("creating deals: %w", err) + } + + { + b, err := 
json.MarshalIndent(&LocalStorageMeta{ + ID: ID(uuid.New().String()), + Weight: 0, // read-only + CanSeal: false, + CanStore: false, + }, "", " ") + if err != nil { + return nil, nil, fmt.Errorf("marshaling storage config: %w", err) + } + + if err := os.WriteFile(filepath.Join(sbroot, "sectorstore.json"), b, 0o644); err != nil { + return nil, nil, fmt.Errorf("persisting storage metadata (%s): %w", filepath.Join(sbroot, "storage.json"), err) + } + } + + return miner, ki, nil +} + +func presealSector(sb *impl.Sealer, sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) { + pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(ssize).Unpadded(), rand.Reader) + if err != nil { + return nil, err + } + + trand := blake2b.Sum256(preimage) + ticket := abi.SealRandomness(trand[:]) + + fmt.Printf("sector-id: %d, piece info: %v\n", sid, pi) + + in2, err := sb.SealPreCommit1(context.TODO(), sid, ticket, []abi.PieceInfo{pi}) + if err != nil { + return nil, fmt.Errorf("commit: %w", err) + } + + cids, err := sb.SealPreCommit2(context.TODO(), sid, in2) + if err != nil { + return nil, fmt.Errorf("commit: %w", err) + } + + if err := sb.FinalizeSector(context.TODO(), sid, nil); err != nil { + return nil, fmt.Errorf("trim cache: %w", err) + } + + if err := cleanupUnsealed(sbfs, sid); err != nil { + return nil, fmt.Errorf("remove unsealed file: %w", err) + } + + log.Warn("PreCommitOutput: ", sid, cids.Sealed, cids.Unsealed) + + return &genesis.PreSeal{ + CommR: cids.Sealed, + CommD: cids.Unsealed, + SectorID: sid.ID.Number, + ProofType: sid.ProofType, + }, nil +} + +func presealSectorFake(sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize) (*genesis.PreSeal, error) { + paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, storiface.FTSealed|storiface.FTCache, storiface.PathSealing) + if err != nil { + return nil, fmt.Errorf("acquire unsealed sector: %w", err) + } + defer done() + + if err 
:= os.Mkdir(paths.Cache, 0o755); err != nil { + return nil, fmt.Errorf("mkdir cache: %w", err) + } + + commr, err := ffi.FauxRep(sid.ProofType, paths.Cache, paths.Sealed) + if err != nil { + return nil, fmt.Errorf("fauxrep: %w", err) + } + + return &genesis.PreSeal{ + CommR: commr, + CommD: zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()), + SectorID: sid.ID.Number, + ProofType: sid.ProofType, + }, nil +} + +func cleanupUnsealed(sbfs *basicfs.Provider, ref storage.SectorRef) error { + paths, done, err := sbfs.AcquireSector(context.TODO(), ref, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing) + if err != nil { + return err + } + defer done() + + return os.Remove(paths.Unsealed) +} + +func WriteGenesisMiner(maddr address.Address, sbroot string, gm *genesis.Miner, key *crypto.KeyInfo) error { + output := map[string]genesis.Miner{ + maddr.String(): *gm, + } + + out, err := json.MarshalIndent(output, "", " ") + if err != nil { + return err + } + + log.Infof("Writing preseal manifest to %s", filepath.Join(sbroot, "pre-seal-"+maddr.String()+".json")) + + if err := os.WriteFile(filepath.Join(sbroot, "pre-seal-"+maddr.String()+".json"), out, 0o664); err != nil { + return err + } + + if key != nil { + b, err := json.Marshal(key) + if err != nil { + return err + } + + // TODO: allow providing key + if err := os.WriteFile(filepath.Join(sbroot, "pre-seal-"+maddr.String()+".key"), []byte(hex.EncodeToString(b)), 0o664); err != nil { + return err + } + } + + return nil +} + +func createDeals(m *genesis.Miner, ki *crypto.KeyInfo, maddr address.Address, ssize abi.SectorSize) error { + addr, err := ki.Address() + if err != nil { + return err + } + for i, sector := range m.Sectors { + label, err := types.NewLabelFromString(fmt.Sprintf("%d", i)) + if err != nil { + return err + } + + proposal := &types.DealProposal{ + PieceCID: sector.CommD, + PieceSize: abi.PaddedPieceSize(ssize), + Client: addr, + Provider: maddr, + Label: label, + StartEpoch: 0, + 
EndEpoch: 9001, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: big.Zero(), + ClientCollateral: big.Zero(), + } + + sector.DealClientKey = ki + sector.Deal = *proposal + } + + return nil +} + +type GenAccountEntry struct { + Version int + ID string + Amount types.FIL + VestingMonths int + CustodianID int + M int + N int + Addresses []address.Address + Type string + Sig1 string + Sig2 string +} + +func ParseMultisigCsv(csvf string) ([]GenAccountEntry, error) { + fileReader, err := os.Open(csvf) + if err != nil { + return nil, fmt.Errorf("read multisig csv: %w", err) + } + defer fileReader.Close() //nolint:errcheck + r := csv.NewReader(fileReader) + records, err := r.ReadAll() + if err != nil { + return nil, fmt.Errorf("read multisig csv: %w", err) + } + var entries []GenAccountEntry + for i, e := range records[1:] { + var addrs []address.Address + addrStrs := strings.Split(strings.TrimSpace(e[7]), ":") + for j, a := range addrStrs { + addr, err := address.NewFromString(a) + if err != nil { + return nil, fmt.Errorf("failed to parse address %d in row %d (%q): %w", j, i, a, err) + } + addrs = append(addrs, addr) + } + + balance, err := types.ParseFIL(strings.TrimSpace(e[2])) + if err != nil { + return nil, fmt.Errorf("failed to parse account balance: %w", err) + } + + vesting, err := strconv.Atoi(strings.TrimSpace(e[3])) + if err != nil { + return nil, fmt.Errorf("failed to parse vesting duration for record %d: %w", i, err) + } + + custodianID, err := strconv.Atoi(strings.TrimSpace(e[4])) + if err != nil { + return nil, fmt.Errorf("failed to parse custodianID in record %d: %w", i, err) + } + threshold, err := strconv.Atoi(strings.TrimSpace(e[5])) + if err != nil { + return nil, fmt.Errorf("failed to parse multisigM in record %d: %w", i, err) + } + num, err := strconv.Atoi(strings.TrimSpace(e[6])) + if err != nil { + return nil, fmt.Errorf("number of addresses be integer: %w", err) + } + if e[0] != "1" { + return nil, fmt.Errorf("record version must be 1") + } 
+ entries = append(entries, GenAccountEntry{ + Version: 1, + ID: e[1], + Amount: balance, + CustodianID: custodianID, + VestingMonths: vesting, + M: threshold, + N: num, + Type: e[8], + Sig1: e[9], + Sig2: e[10], + Addresses: addrs, + }) + } + + return entries, nil +} diff --git a/vendors/filecoin-ffi b/vendors/filecoin-ffi deleted file mode 160000 index e297d672ba..0000000000 --- a/vendors/filecoin-ffi +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e297d672bab07d5a2c87d2e0cc8d5d98177ec8b9 diff --git a/venus-component/go.mod b/venus-component/go.mod new file mode 100644 index 0000000000..b5e8c84de8 --- /dev/null +++ b/venus-component/go.mod @@ -0,0 +1,71 @@ +module github.com/filecoin-project/venus/venus-component + +go 1.18 + +require ( + github.com/filecoin-project/go-cbor-util v0.0.1 + github.com/filecoin-project/venus/venus-shared v0.0.0 + github.com/ipfs/go-cid v0.3.2 + github.com/libp2p/go-libp2p v0.23.2 + go.opencensus.io v0.23.0 + go.uber.org/fx v1.15.0 +) + +require ( + github.com/btcsuite/btcd v0.20.1-beta // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/filecoin-project/go-address v0.0.6 // indirect + github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect + github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-bitfield v0.2.4 // indirect + github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect + github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect + github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-state-types v0.1.1 // indirect + github.com/filecoin-project/specs-actors v0.9.13 // indirect + github.com/filecoin-project/specs-actors/v2 v2.3.5 // indirect + github.com/filecoin-project/specs-actors/v3 v3.1.1 // indirect + github.com/filecoin-project/specs-actors/v4 v4.0.1 // indirect + github.com/filecoin-project/specs-actors/v5 v5.0.4 // indirect + github.com/filecoin-project/specs-actors/v6 v6.0.1 // indirect 
+ github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/ipfs/go-block-format v0.0.3 // indirect + github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipld-cbor v0.0.5 // indirect + github.com/ipfs/go-ipld-format v0.0.2 // indirect + github.com/ipfs/go-log v1.0.5 // indirect + github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/klauspost/cpuid/v2 v2.1.1 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-libp2p-core v0.11.0 // indirect + github.com/libp2p/go-openssl v0.1.0 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-pointer v0.0.1 // indirect + github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.1.0 // indirect + github.com/multiformats/go-multiaddr v0.7.0 // indirect + github.com/multiformats/go-multibase v0.1.1 // indirect + github.com/multiformats/go-multicodec v0.6.0 // indirect + github.com/multiformats/go-multihash v0.2.1 // indirect + github.com/multiformats/go-varint v0.0.6 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a // indirect + github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/whyrusleeping/cbor-gen v0.0.0-20211110122933-f57984553008 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/dig v1.12.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.23.0 // indirect + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect + golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect + 
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + lukechampine.com/blake3 v1.1.7 // indirect +) + +replace github.com/filecoin-project/venus/venus-shared => github.com/dtynn/venus/venus-shared v0.0.0-20211123072147-edbf49c4507e diff --git a/venus-component/go.sum b/venus-component/go.sum new file mode 100644 index 0000000000..958aa700e8 --- /dev/null +++ b/venus-component/go.sum @@ -0,0 +1,571 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil 
v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgryski/go-farm 
v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/dtynn/venus/venus-shared v0.0.0-20211123072147-edbf49c4507e h1:gv9YiO71L/FIQSjW0OhHLr1g8ipO/R0/9hgqzqPBEf8= +github.com/dtynn/venus/venus-shared v0.0.0-20211123072147-edbf49c4507e/go.mod h1:goyPoyE7NO1pTxRgBKb9dzUyx/vQngs3pYWNsPrPf3Q= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.6 h1:DWQtj38ax+ogHwyH3VULRIoT8E6loyXqsk/p81xoY7M= +github.com/filecoin-project/go-address v0.0.6/go.mod h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= +github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= 
+github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= +github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= +github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= +github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= +github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types 
v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1 h1:LR260vya4p++atgf256W6yV3Lxl5mKrBFcEZePWQrdg= +github.com/filecoin-project/go-state-types v0.1.1/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/specs-actors v0.9.13 h1:rUEOQouefi9fuVY/2HOroROJlZbOzWYXXeIh41KF2M4= +github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= +github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= +github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= +github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= +github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= +github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= +github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= +github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s= +github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4= +github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew= +github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
+github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy 
v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod 
h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= +github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= +github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= +github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod 
h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5 
h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= +github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= +github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= 
+github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.1.1 h1:t0wUqjowdm8ezddV5k0tLWVklVuvLJpoHeb4WBdydm0= +github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p 
v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p v0.23.2 h1:yqyTeKQJyofWXxEv/eEVUvOrGdt/9x+0PIQ4N1kaxmE= +github.com/libp2p/go-libp2p v0.23.2/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-core v0.11.0 h1:75jAgdA+IChNa+/mZXogfmrGkgwxkVvxmIC7pV+F6sI= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= 
+github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= +github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= 
+github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= +github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0/go.mod 
h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.7.0 h1:gskHcdaCyPtp9XskVwtvEeQOG465sCohbQIirSyqxrc= +github.com/multiformats/go-multiaddr v0.7.0/go.mod 
h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= +github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= +github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= +github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-varint v0.0.5/go.mod 
h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= +github.com/polydawn/refmt 
v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod 
h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20211110122933-f57984553008 h1:7WtW9D9VGpmRLuQmrPy2JobUNdka95z3MKEVpELtOjo= +github.com/whyrusleeping/cbor-gen v0.0.0-20211110122933-f57984553008/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/xorcare/golden v0.6.0 h1:E8emU8bhyMIEpYmgekkTUaw4vtcrRE+Wa0c5wYIcgXc= +github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.6.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.12.0 h1:l1GQeZpEbss0/M4l/ZotuBndCrkMdjnygzgcuOjAdaY= +go.uber.org/dig v1.12.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= +go.uber.org/fx v1.15.0 h1:kcfBpAm98n0ksanyyZLFE/Q3T7yPi13Ge2liu3TxR+A= +go.uber.org/fx v1.15.0/go.mod h1:jI3RazQUhGv5KkpZIRv+kuP4CcgX3fnc0qX8bLnzbx8= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf 
v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 
v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= diff --git a/venus-component/libp2p/exchange/client/client.go b/venus-component/libp2p/exchange/client/client.go new file mode 100644 index 0000000000..d2e3616ad7 --- /dev/null +++ b/venus-component/libp2p/exchange/client/client.go @@ -0,0 +1,493 @@ +package client + +import ( + "bufio" + "context" + "errors" + "fmt" + "math/rand" + "time" + + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "go.opencensus.io/trace" + "go.uber.org/fx" + + "github.com/filecoin-project/venus/venus-shared/chain" + "github.com/filecoin-project/venus/venus-shared/libp2p" + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" + "github.com/filecoin-project/venus/venus-shared/logging" +) + +var log = logging.New("exchange.client") + +// client implements exchange.Client, using the libp2p ChainExchange protocol +// as the fetching mechanism. +type client struct { + // Connection manager used to contact the server. + // FIXME: We should have a reduced interface here, initialized + // just with our protocol ID, we shouldn't be able to open *any* + // connection. + host host.Host + + peerTracker *bsPeerTracker +} + +var _ exchange.Client = (*client)(nil) + +// NewClient creates a new libp2p-based exchange.Client that uses the libp2p +// ChainExhange protocol as the fetching mechanism. +func NewClient(lc fx.Lifecycle, host host.Host, pmgr libp2p.PeerManager) exchange.Client { + return &client{ + host: host, + peerTracker: newPeerTracker(lc, host, pmgr), + } +} + +// Main logic of the client request service. The provided `Request` +// is sent to the `singlePeer` if one is indicated or to all available +// ones otherwise. The response is processed and validated according +// to the `Request` options. 
Either a `validatedResponse` is returned +// (which can be safely accessed), or an `error` that may represent +// either a response error status, a failed validation or an internal +// error. +// +// This is the internal single point of entry for all external-facing +// APIs, currently we have 3 very heterogeneous services exposed: +// * GetBlocks: Headers +// * GetFullTipSet: Headers | Messages +// * GetChainMessages: Messages +// This function handles all the different combinations of the available +// request options without disrupting external calls. In the future the +// consumers should be forced to use a more standardized service and +// adhere to a single API derived from this function. +func (c *client) doRequest( + ctx context.Context, + req *exchange.Request, + singlePeer *peer.ID, + // In the `GetChainMessages` case, we won't request the headers but we still + // need them to check the integrity of the `CompactedMessages` in the response + // so the tipset blocks need to be provided by the caller. + tipsets []*chain.TipSet, +) (*validatedResponse, error) { + // Validate request. + if req.Length == 0 { + return nil, fmt.Errorf("invalid request of length 0") + } + if req.Length > exchange.MaxRequestLength { + return nil, fmt.Errorf("request length (%d) above maximum (%d)", + req.Length, exchange.MaxRequestLength) + } + if req.Options == 0 { + return nil, fmt.Errorf("request with no options set") + } + + // Generate the list of peers to be queried, either the + // `singlePeer` indicated or all peers available (sorted + // by an internal peer tracker with some randomness injected). + var peers []peer.ID + if singlePeer != nil { + peers = []peer.ID{*singlePeer} + } else { + peers = c.getShuffledPeers() + if len(peers) == 0 { + return nil, fmt.Errorf("no peers available") + } + } + + // Try the request for each peer in the list, + // return on the first successful response. 
+ // FIXME: Doing this serially isn't great, but fetching in parallel + // may not be a good idea either. Think about this more. + globalTime := time.Now() + // Global time used to track what is the expected time we will need to get + // a response if a client fails us. + for _, peer := range peers { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("context cancelled: %w", ctx.Err()) + default: + } + + plog := log.With("peer", peer.String()) + + // Send request, read response. + res, err := c.sendRequestToPeer(ctx, peer, req) + if err != nil { + if !errors.Is(err, network.ErrNoConn) { + plog.Warnf("could not send request to peer: %s", err) + } + continue + } + + // Process and validate response. + validRes, err := c.processResponse(req, res, tipsets) + if err != nil { + plog.Warnf("processing peer response failed: %s", err) + continue + } + + c.peerTracker.logGlobalSuccess(time.Since(globalTime)) + c.host.ConnManager().TagPeer(peer, "bsync", SuccessPeerTagValue) + return validRes, nil + } + + errString := "doRequest failed for all peers" + if singlePeer != nil { + errString = fmt.Sprintf("doRequest failed for single peer %s", *singlePeer) + } + return nil, fmt.Errorf(errString) +} + +// Process and validate response. Check the status, the integrity of the +// information returned, and that it matches the request. Extract the information +// into a `validatedResponse` for the external-facing APIs to select what they +// need. +// +// We are conflating in the single error returned both status and validation +// errors. Peer penalization should happen here then, before returning, so +// we can apply the correct penalties depending on the cause of the error. +// FIXME: Add the `peer` as argument once we implement penalties. 
+func (c *client) processResponse(req *exchange.Request, res *exchange.Response, tipsets []*chain.TipSet) (r *validatedResponse, err error) { + err = res.StatusToError() + if err != nil { + return nil, fmt.Errorf("status error: %w", err) + } + + defer func() { + if rerr := recover(); rerr != nil { + log.Errorf("process response error: %s", rerr) + err = fmt.Errorf("process response error: %s", rerr) + return + } + }() + + options := exchange.ParseOptions(req.Options) + if options.IsEmpty() { + // Safety check: this shouldn't have been sent, and even if it did + // it should have been caught by the peer in its error status. + return nil, fmt.Errorf("nothing was requested") + } + + // Verify that the chain segment returned is in the valid range. + // Note that the returned length might be less than requested. + resLength := len(res.Chain) + if resLength == 0 { + return nil, fmt.Errorf("got no chain in successful response") + } + if resLength > int(req.Length) { + return nil, fmt.Errorf("got longer response (%d) than requested (%d)", + resLength, req.Length) + } + if resLength < int(req.Length) && res.Status != exchange.Partial { + return nil, fmt.Errorf("got less than requested without a proper status: %d", res.Status) + } + + validRes := &validatedResponse{} + if options.IncludeHeaders { + // Check for valid block sets and extract them into `TipSet`s. + validRes.tipsets = make([]*chain.TipSet, resLength) + for i := 0; i < resLength; i++ { + if res.Chain[i] == nil { + return nil, fmt.Errorf("response with nil tipset in pos %d", i) + } + for blockIdx, block := range res.Chain[i].Blocks { + if block == nil { + return nil, fmt.Errorf("tipset with nil block in pos %d", blockIdx) + // FIXME: Maybe we should move this check to `NewTipSet`. 
+ } + } + + validRes.tipsets[i], err = chain.NewTipSet(res.Chain[i].Blocks) + if err != nil { + return nil, fmt.Errorf("invalid tipset blocks at height (head - %d): %w", i, err) + } + } + + // Check that the returned head matches the one requested. + if !chain.CidArrsEqual(validRes.tipsets[0].Cids(), req.Head) { + return nil, fmt.Errorf("returned chain head does not match request") + } + + // Check `TipSet`s are connected (valid chain). + for i := 0; i < len(validRes.tipsets)-1; i++ { + if !validRes.tipsets[i].IsChildOf(validRes.tipsets[i+1]) { + return nil, fmt.Errorf("tipsets are not connected at height (head - %d)/(head - %d)", + i, i+1) + // FIXME: Maybe give more information here, like CIDs. + } + } + } + + if options.IncludeMessages { + validRes.messages = make([]*exchange.CompactedMessages, resLength) + for i := 0; i < resLength; i++ { + if res.Chain[i].Messages == nil { + return nil, fmt.Errorf("no messages included for tipset at height (head - %d)", i) + } + validRes.messages[i] = res.Chain[i].Messages + } + + if options.IncludeHeaders { + // If the headers were also returned check that the compression + // indexes are valid before `toFullTipSets()` is called by the + // consumer. + err := c.validateCompressedIndices(res.Chain) + if err != nil { + return nil, err + } + } else { + // If we didn't request the headers they should have been provided + // by the caller. 
+ if len(tipsets) < len(res.Chain) { + return nil, fmt.Errorf("not enought tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets)) + } + chain := make([]*exchange.BSTipSet, 0, resLength) + for i, resChain := range res.Chain { + next := &exchange.BSTipSet{ + Blocks: tipsets[i].Blocks(), + Messages: resChain.Messages, + } + chain = append(chain, next) + } + + err := c.validateCompressedIndices(chain) + if err != nil { + return nil, err + } + } + } + + return validRes, nil +} + +func (c *client) validateCompressedIndices(chain []*exchange.BSTipSet) error { + resLength := len(chain) + for tipsetIdx := 0; tipsetIdx < resLength; tipsetIdx++ { + msgs := chain[tipsetIdx].Messages + blocksNum := len(chain[tipsetIdx].Blocks) + + if len(msgs.BlsIncludes) != blocksNum { + return fmt.Errorf("BlsIncludes (%d) does not match number of blocks (%d)", + len(msgs.BlsIncludes), blocksNum) + } + + if len(msgs.SecpkIncludes) != blocksNum { + return fmt.Errorf("SecpkIncludes (%d) does not match number of blocks (%d)", + len(msgs.SecpkIncludes), blocksNum) + } + + for blockIdx := 0; blockIdx < blocksNum; blockIdx++ { + for _, mi := range msgs.BlsIncludes[blockIdx] { + if int(mi) >= len(msgs.Bls) { + return fmt.Errorf("index in BlsIncludes (%d) exceeds number of messages (%d)", + mi, len(msgs.Bls)) + } + } + + for _, mi := range msgs.SecpkIncludes[blockIdx] { + if int(mi) >= len(msgs.Secpk) { + return fmt.Errorf("index in SecpkIncludes (%d) exceeds number of messages (%d)", + mi, len(msgs.Secpk)) + } + } + } + } + + return nil +} + +// GetBlocks implements Client.GetBlocks(). Refer to the godocs there. 
+func (c *client) GetBlocks(ctx context.Context, tsk chain.TipSetKey, count int) ([]*chain.TipSet, error) { + ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks") + defer span.End() + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("tipset", fmt.Sprint(tsk.Cids())), + trace.Int64Attribute("count", int64(count)), + ) + } + + req := &exchange.Request{ + Head: tsk.Cids(), + Length: uint64(count), + Options: exchange.Headers, + } + + validRes, err := c.doRequest(ctx, req, nil, nil) + if err != nil { + return nil, err + } + + return validRes.tipsets, nil +} + +// GetFullTipSet implements Client.GetFullTipSet(). Refer to the godocs there. +func (c *client) GetFullTipSet(ctx context.Context, peer peer.ID, tsk chain.TipSetKey) (*chain.FullTipSet, error) { + // TODO: round robin through these peers on error + + req := &exchange.Request{ + Head: tsk.Cids(), + Length: 1, + Options: exchange.Headers | exchange.Messages, + } + + validRes, err := c.doRequest(ctx, req, &peer, nil) + if err != nil { + return nil, err + } + + return validRes.toFullTipSets()[0], nil + // If `doRequest` didn't fail we are guaranteed to have at least + // *one* tipset here, so it's safe to index directly. +} + +// GetChainMessages implements Client.GetChainMessages(). Refer to the godocs there. +func (c *client) GetChainMessages(ctx context.Context, tipsets []*chain.TipSet) ([]*exchange.CompactedMessages, error) { + head := tipsets[0] + length := uint64(len(tipsets)) + + ctx, span := trace.StartSpan(ctx, "GetChainMessages") + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("tipset", fmt.Sprint(head.Cids())), + trace.Int64Attribute("count", int64(length)), + ) + } + defer span.End() + + req := &exchange.Request{ + Head: head.Cids(), + Length: length, + Options: exchange.Messages, + } + + validRes, err := c.doRequest(ctx, req, nil, tipsets) + if err != nil { + return nil, err + } + + return validRes.messages, nil +} + +// Send a request to a peer. 
Write request in the stream and read the +// response back. We do not do any processing of the request/response +// here. +func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *exchange.Request) (_ *exchange.Response, err error) { + // Trace code. + ctx, span := trace.StartSpan(ctx, "sendRequestToPeer") + defer span.End() + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("peer", peer.Pretty()), + ) + } + defer func() { + if err != nil { + if span.IsRecordingEvents() { + span.SetStatus(trace.Status{ + Code: 5, + Message: err.Error(), + }) + } + } + }() + // -- TRACE -- + + supported, err := c.host.Peerstore().SupportsProtocols(peer, exchange.BlockSyncProtocolID, exchange.ChainExchangeProtocolID) + if err != nil { + c.RemovePeer(ctx, peer) + return nil, fmt.Errorf("failed to get protocols for peer: %w", err) + } + if len(supported) == 0 || (supported[0] != exchange.BlockSyncProtocolID && supported[0] != exchange.ChainExchangeProtocolID) { + return nil, fmt.Errorf("peer %s does not support protocols %s", + peer, []string{exchange.BlockSyncProtocolID, exchange.ChainExchangeProtocolID}) + } + + connectionStart := time.Now() + + // Open stream to peer. + stream, err := c.host.NewStream( + network.WithNoDial(ctx, "should already have connection"), + peer, + exchange.ChainExchangeProtocolID, exchange.BlockSyncProtocolID) + if err != nil { + c.RemovePeer(ctx, peer) + return nil, fmt.Errorf("failed to open stream to peer: %w", err) + } + + defer stream.Close() //nolint:errcheck + + // Write request. + _ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline)) + if err := cborutil.WriteCborRPC(stream, req); err != nil { + _ = stream.SetWriteDeadline(time.Time{}) + c.peerTracker.logFailure(peer, time.Since(connectionStart), req.Length) + // FIXME: Should we also remove peer here? 
+ return nil, err + } + _ = stream.SetWriteDeadline(time.Time{}) // clear deadline // FIXME: Needs + // its own API (https://github.com/libp2p/go-libp2p/core/issues/162). + + // Read response. + var res exchange.Response + err = cborutil.ReadCborRPC( + bufio.NewReader(NewInct(stream, ReadResMinSpeed, ReadResDeadline)), + &res) + if err != nil { + c.peerTracker.logFailure(peer, time.Since(connectionStart), req.Length) + return nil, fmt.Errorf("failed to read chainxchg response: %w", err) + } + + // FIXME: Move all this together at the top using a defer as done elsewhere. + // Maybe we need to declare `res` in the signature. + if span.IsRecordingEvents() { + span.AddAttributes( + trace.Int64Attribute("resp_status", int64(res.Status)), + trace.StringAttribute("msg", res.ErrorMessage), + trace.Int64Attribute("chain_len", int64(len(res.Chain))), + ) + } + + c.peerTracker.logSuccess(peer, time.Since(connectionStart), uint64(len(res.Chain))) + // FIXME: We should really log a success only after we validate the response. + // It might be a bit hard to do. + return &res, nil +} + +// AddPeer implements Client.AddPeer(). Refer to the godocs there. +func (c *client) AddPeer(ctx context.Context, p peer.ID) { + c.peerTracker.addPeer(p) +} + +// RemovePeer implements Client.RemovePeer(). Refer to the godocs there. +func (c *client) RemovePeer(ctx context.Context, p peer.ID) { + c.peerTracker.removePeer(p) +} + +// getShuffledPeers returns a preference-sorted set of peers (by latency +// and failure counting), shuffling the first few peers so we don't always +// pick the same peer. +// FIXME: Consider merging with `shufflePrefix()s`. 
+func (c *client) getShuffledPeers() []peer.ID { + peers := c.peerTracker.prefSortedPeers() + shufflePrefix(peers) + return peers +} + +func shufflePrefix(peers []peer.ID) { + prefix := ShufflePeersPrefix + if len(peers) < prefix { + prefix = len(peers) + } + + buf := make([]peer.ID, prefix) + perm := rand.Perm(prefix) + for i, v := range perm { + buf[i] = peers[v] + } + + copy(peers, buf) +} diff --git a/venus-component/libp2p/exchange/client/inct.go b/venus-component/libp2p/exchange/client/inct.go new file mode 100644 index 0000000000..5c08d7de9b --- /dev/null +++ b/venus-component/libp2p/exchange/client/inct.go @@ -0,0 +1,68 @@ +package client + +import ( + "io" + "time" +) + +type ReaderDeadline interface { + Read([]byte) (int, error) + SetReadDeadline(time.Time) error +} + +type incrt struct { + rd ReaderDeadline + + waitPerByte time.Duration + wait time.Duration + maxWait time.Duration +} + +// New creates an Incremental Reader Timeout, with minimum sustained speed of +// minSpeed bytes per second and with maximum wait of maxWait +func NewInct(rd ReaderDeadline, minSpeed int64, maxWait time.Duration) io.Reader { + return &incrt{ + rd: rd, + waitPerByte: time.Second / time.Duration(minSpeed), + wait: maxWait, + maxWait: maxWait, + } +} + +type errNoWait struct{} + +func (err errNoWait) Error() string { + return "wait time exceeded" +} + +func (err errNoWait) Timeout() bool { + return true +} + +func (crt *incrt) Read(buf []byte) (int, error) { + start := time.Now() + if crt.wait == 0 { + return 0, errNoWait{} + } + + err := crt.rd.SetReadDeadline(start.Add(crt.wait)) + if err != nil { + log.Debugf("unable to set deadline: %+v", err) + } + + n, err := crt.rd.Read(buf) + + _ = crt.rd.SetReadDeadline(time.Time{}) + if err == nil { + dur := time.Since(start) + crt.wait -= dur + crt.wait += time.Duration(n) * crt.waitPerByte + if crt.wait < 0 { + crt.wait = 0 + } + if crt.wait > crt.maxWait { + crt.wait = crt.maxWait + } + } + return n, err +} diff --git 
a/venus-component/libp2p/exchange/client/peer_tracker.go b/venus-component/libp2p/exchange/client/peer_tracker.go new file mode 100644 index 0000000000..f92f3d3a13 --- /dev/null +++ b/venus-component/libp2p/exchange/client/peer_tracker.go @@ -0,0 +1,192 @@ +package client + +// FIXME: This needs to be reviewed. + +import ( + "context" + "sort" + "sync" + "time" + + host "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "go.uber.org/fx" + + "github.com/filecoin-project/venus/venus-shared/libp2p" +) + +type peerStats struct { + successes int + failures int + firstSeen time.Time + averageTime time.Duration +} + +type bsPeerTracker struct { + lk sync.Mutex + + peers map[peer.ID]*peerStats + avgGlobalTime time.Duration + + pmgr libp2p.PeerManager +} + +func newPeerTracker(lc fx.Lifecycle, h host.Host, pmgr libp2p.PeerManager) *bsPeerTracker { + bsPt := &bsPeerTracker{ + peers: make(map[peer.ID]*peerStats), + pmgr: pmgr, + } + + sub, err := h.EventBus().Subscribe(new(libp2p.FilPeerEvent)) + if err != nil { + panic(err) + } + + go func() { + for evt := range sub.Out() { + pEvt := evt.(libp2p.FilPeerEvent) + switch pEvt.Type { + case libp2p.AddFilPeerEvt: + bsPt.addPeer(pEvt.ID) + case libp2p.RemoveFilPeerEvt: + bsPt.removePeer(pEvt.ID) + } + } + }() + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return sub.Close() + }, + }) + + return bsPt +} + +func (bpt *bsPeerTracker) addPeer(p peer.ID) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + if _, ok := bpt.peers[p]; ok { + return + } + bpt.peers[p] = &peerStats{ + firstSeen: time.Now(), + } +} + +const ( + // newPeerMul is how much better than average is the new peer assumed to be + // less than one to encourouge trying new peers + newPeerMul = 0.9 +) + +func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID { + // TODO: this could probably be cached, but as long as its not too many peers, fine for now + bpt.lk.Lock() + defer bpt.lk.Unlock() + out := make([]peer.ID, 0, 
len(bpt.peers)) + for p := range bpt.peers { + out = append(out, p) + } + + // sort by 'expected cost' of requesting data from that peer + // additionally handle edge cases where not enough data is available + sort.Slice(out, func(i, j int) bool { + pi := bpt.peers[out[i]] + pj := bpt.peers[out[j]] + + var costI, costJ float64 + + getPeerInitLat := func(p peer.ID) float64 { + return float64(bpt.avgGlobalTime) * newPeerMul + } + + if pi.successes+pi.failures > 0 { + failRateI := float64(pi.failures) / float64(pi.failures+pi.successes) + costI = float64(pi.averageTime) + failRateI*float64(bpt.avgGlobalTime) + } else { + costI = getPeerInitLat(out[i]) + } + + if pj.successes+pj.failures > 0 { + failRateJ := float64(pj.failures) / float64(pj.failures+pj.successes) + costJ = float64(pj.averageTime) + failRateJ*float64(bpt.avgGlobalTime) + } else { + costJ = getPeerInitLat(out[j]) + } + + return costI < costJ + }) + + return out +} + +const ( + // xInvAlpha = (N+1)/2 + + localInvAlpha = 10 // 86% of the value is the last 19 + globalInvAlpha = 25 // 86% of the value is the last 49 +) + +func (bpt *bsPeerTracker) logGlobalSuccess(dur time.Duration) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + + if bpt.avgGlobalTime == 0 { + bpt.avgGlobalTime = dur + return + } + delta := (dur - bpt.avgGlobalTime) / globalInvAlpha + bpt.avgGlobalTime += delta +} + +func logTime(pi *peerStats, dur time.Duration) { + if pi.averageTime == 0 { + pi.averageTime = dur + return + } + delta := (dur - pi.averageTime) / localInvAlpha + pi.averageTime += delta +} + +func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration, reqSize uint64) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + + var pi *peerStats + var ok bool + if pi, ok = bpt.peers[p]; !ok { + log.Warnw("log success called on peer not in tracker", "peerid", p.String()) + return + } + + pi.successes++ + if reqSize == 0 { + reqSize = 1 + } + logTime(pi, dur/time.Duration(reqSize)) +} + +func (bpt *bsPeerTracker) logFailure(p peer.ID, dur 
time.Duration, reqSize uint64) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + + var pi *peerStats + var ok bool + if pi, ok = bpt.peers[p]; !ok { + log.Warnw("log failure called on peer not in tracker", "peerid", p.String()) + return + } + + pi.failures++ + if reqSize == 0 { + reqSize = 1 + } + logTime(pi, dur/time.Duration(reqSize)) +} + +func (bpt *bsPeerTracker) removePeer(p peer.ID) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + delete(bpt.peers, p) +} diff --git a/venus-component/libp2p/exchange/client/response.go b/venus-component/libp2p/exchange/client/response.go new file mode 100644 index 0000000000..73d01d0ce4 --- /dev/null +++ b/venus-component/libp2p/exchange/client/response.go @@ -0,0 +1,67 @@ +package client + +import ( + "time" + + "github.com/filecoin-project/venus/venus-shared/chain" + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" +) + +const ( + // Extracted constants from the code. + // FIXME: Should be reviewed and confirmed. + SuccessPeerTagValue = 25 + WriteReqDeadline = 5 * time.Second + ReadResDeadline = WriteReqDeadline + ReadResMinSpeed = 50 << 10 + ShufflePeersPrefix = 16 +) + +// Response that has been validated according to the protocol +// and can be safely accessed. +type validatedResponse struct { + tipsets []*chain.TipSet + // List of all messages per tipset (grouped by tipset, + // not by block, hence a single index like `tipsets`). + messages []*exchange.CompactedMessages +} + +// Decompress messages and form full tipsets with them. The headers +// need to have been requested as well. +func (res *validatedResponse) toFullTipSets() []*chain.FullTipSet { + if len(res.tipsets) == 0 || len(res.tipsets) != len(res.messages) { + // This decompression can only be done if both headers and + // messages are returned in the response. (The second check + // is already implied by the guarantees of `validatedResponse`, + // added here just for completeness.) 
+ return nil + } + + ftsList := make([]*chain.FullTipSet, len(res.tipsets)) + for tipsetIdx := range res.tipsets { + blksInTipset := res.tipsets[tipsetIdx].Blocks() + msgs := res.messages[tipsetIdx] + + fblks := make([]*chain.FullBlock, 0, len(blksInTipset)) + for blockIdx, b := range res.tipsets[tipsetIdx].Blocks() { + fb := &chain.FullBlock{ + Header: b, + BLSMessages: make([]*chain.Message, 0, len(msgs.Bls)), + SECPMessages: make([]*chain.SignedMessage, 0, len(msgs.Secpk)), + } + + for _, mi := range msgs.BlsIncludes[blockIdx] { + fb.BLSMessages = append(fb.BLSMessages, msgs.Bls[mi]) + } + for _, mi := range msgs.SecpkIncludes[blockIdx] { + fb.SECPMessages = append(fb.SECPMessages, msgs.Secpk[mi]) + } + + fblks = append(fblks, fb) + } + + ftsList[tipsetIdx] = chain.NewFullTipSet(fblks) + } + + return ftsList +} diff --git a/venus-component/libp2p/exchange/doc.go b/venus-component/libp2p/exchange/doc.go new file mode 100644 index 0000000000..21abc38c4b --- /dev/null +++ b/venus-component/libp2p/exchange/doc.go @@ -0,0 +1,19 @@ +// Package exchange contains the ChainExchange server and client components. +// +// ChainExchange is the basic chain synchronization protocol of Filecoin. +// ChainExchange is an RPC-oriented protocol, with a single operation to +// request blocks for now. +// +// A request contains a start anchor block (referred to with a CID), and a +// amount of blocks requested beyond the anchor (including the anchor itself). +// +// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports +// two options at the moment: +// +// - include block contents +// - include block messages +// +// The response will include a status code, an optional message, and the +// response payload in case of success. The payload is a slice of serialized +// tipsets. 
+package exchange diff --git a/venus-component/libp2p/exchange/server.go b/venus-component/libp2p/exchange/server.go new file mode 100644 index 0000000000..8142e80058 --- /dev/null +++ b/venus-component/libp2p/exchange/server.go @@ -0,0 +1,261 @@ +package exchange + +import ( + "bufio" + "context" + "fmt" + "time" + + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/host" + inet "github.com/libp2p/go-libp2p/core/network" + "go.opencensus.io/trace" + + "github.com/filecoin-project/venus/venus-shared/chain" + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" + "github.com/filecoin-project/venus/venus-shared/localstore" + "github.com/filecoin-project/venus/venus-shared/logging" +) + +var log = logging.New("exchange") + +const ( + WriteResDeadline = 60 * time.Second +) + +// `Request` processed and validated to query the tipsets needed. +type validatedRequest struct { + head chain.TipSetKey + length uint64 + options *exchange.Options +} + +func validateRequest(ctx context.Context, req *exchange.Request) (*validatedRequest, *exchange.Response) { + _, span := trace.StartSpan(ctx, "chainxchg.ValidateRequest") + defer span.End() + + if len(req.Head) == 0 { + return nil, &exchange.Response{ + Status: exchange.BadRequest, + ErrorMessage: "no cids in request", + } + } + + if req.Length == 0 { + return nil, &exchange.Response{ + Status: exchange.BadRequest, + ErrorMessage: "invalid request length of zero", + } + } + + if req.Length > exchange.MaxRequestLength { + return nil, &exchange.Response{ + Status: exchange.BadRequest, + ErrorMessage: fmt.Sprintf("request length over maximum allowed (%d)", + exchange.MaxRequestLength), + } + } + + opts := exchange.ParseOptions(req.Options) + if opts.IsEmpty() { + return nil, &exchange.Response{ + Status: exchange.BadRequest, + ErrorMessage: "no options set", + } + } + + // FIXME: Add as a defer at the start. 
+ span.AddAttributes( + trace.BoolAttribute("blocks", opts.IncludeHeaders), + trace.BoolAttribute("messages", opts.IncludeMessages), + trace.Int64Attribute("reqlen", int64(req.Length)), + ) + + return &validatedRequest{ + head: chain.NewTipSetKey(req.Head...), + length: req.Length, + options: opts, + }, nil +} + +// Server implements exchange.Server. It services requests for the +// libp2p ChainExchange protocol. +type Server struct { + loader localstore.ChainLoader + h host.Host +} + +// NewServer creates a new libp2p-based exchange.Server. It services requests +// for the libp2p ChainExchange protocol. +func NewServer(loader localstore.ChainLoader, h host.Host) *Server { + return &Server{ + loader: loader, + h: h, + } +} + +// HandleStream implements Server.HandleStream. Refer to the godocs there. +func (s *Server) HandleStream(stream inet.Stream) { + ctx, span := trace.StartSpan(context.Background(), "chainxchg.HandleStream") + defer span.End() + + // Note: this will become just stream.Close once we've completed the go-libp2p migration to + // go-libp2p-core 0.7.0 + defer stream.Close() //nolint:errcheck + + slog := log.With("peer", stream.Conn().RemotePeer()) + ctx = logging.ContextWithLogger(ctx, slog) + + var req exchange.Request + if err := cborutil.ReadCborRPC(bufio.NewReader(stream), &req); err != nil { + slog.Warnf("failed to read block sync request: %s", err) + return + } + + slog.Infow("block sync request", "start", req.Head, "len", req.Length) + + resp, err := s.processRequest(ctx, &req) + if err != nil { + slog.Warnf("failed to process request: %s", err) + return + } + + _ = stream.SetDeadline(time.Now().Add(WriteResDeadline)) + if err := cborutil.WriteCborRPC(stream, resp); err != nil { + _ = stream.SetDeadline(time.Time{}) + slog.Warnw("failed to write back response for handle stream", + "err", err) + return + } + _ = stream.SetDeadline(time.Time{}) +} + +// Validate and service the request. 
We return either a protocol +// response or an internal error. +func (s *Server) processRequest(ctx context.Context, req *exchange.Request) (*exchange.Response, error) { + validReq, errResponse := validateRequest(ctx, req) + if errResponse != nil { + // The request did not pass validation, return the response + // indicating it. + return errResponse, nil + } + + return s.serviceRequest(ctx, validReq) +} + +func (s *Server) serviceRequest(ctx context.Context, req *validatedRequest) (*exchange.Response, error) { + _, span := trace.StartSpan(ctx, "chainxchg.ServiceRequest") + defer span.End() + + chain, err := collectChainSegment(ctx, s.loader, req) + if err != nil { + logging.LoggerFromContext(ctx, log).Warnf("block sync request: collectChainSegment failed: %s", err) + return &exchange.Response{ + Status: exchange.InternalError, + ErrorMessage: err.Error(), + }, nil + } + + status := exchange.Ok + if len(chain) < int(req.length) { + status = exchange.Partial + } + + return &exchange.Response{ + Chain: chain, + Status: status, + }, nil +} + +func collectChainSegment(ctx context.Context, loader localstore.ChainLoader, req *validatedRequest) ([]*exchange.BSTipSet, error) { + var bstips []*exchange.BSTipSet + + cur := req.head + for { + var bst exchange.BSTipSet + ts, err := loader.GetTipSet(ctx, cur) + if err != nil { + return nil, fmt.Errorf("failed loading tipset %s: %w", cur, err) + } + + if req.options.IncludeHeaders { + bst.Blocks = ts.Blocks() + } + + if req.options.IncludeMessages { + bst.Messages, err = gatherMessages(ctx, loader, ts) + if err != nil { + return nil, fmt.Errorf("gather messages failed: %w", err) + } + } + + bstips = append(bstips, &bst) + + // If we collected the length requested or if we reached the + // start (genesis), then stop. 
+ if uint64(len(bstips)) >= req.length || ts.Height() == 0 { + return bstips, nil + } + + cur = ts.Parents() + } +} + +func gatherMessages(ctx context.Context, loader localstore.ChainLoader, ts *chain.TipSet) (*exchange.CompactedMessages, error) { + blsmsgmap := make(map[cid.Cid]uint64) + secpkmsgmap := make(map[cid.Cid]uint64) + var secpkincl, blsincl [][]uint64 + + var blscids, secpkcids []cid.Cid + for _, block := range ts.Blocks() { + bc, sc, err := loader.ReadMsgMetaCids(ctx, block.Messages) + if err != nil { + return nil, err + } + + // FIXME: DRY. Use `chain.Message` interface. + bmi := make([]uint64, 0, len(bc)) + for _, m := range bc { + i, ok := blsmsgmap[m] + if !ok { + i = uint64(len(blscids)) + blscids = append(blscids, m) + blsmsgmap[m] = i + } + + bmi = append(bmi, i) + } + blsincl = append(blsincl, bmi) + + smi := make([]uint64, 0, len(sc)) + for _, m := range sc { + i, ok := secpkmsgmap[m] + if !ok { + i = uint64(len(secpkcids)) + secpkcids = append(secpkcids, m) + secpkmsgmap[m] = i + } + + smi = append(smi, i) + } + secpkincl = append(secpkincl, smi) + } + + blsmsgs, err := loader.LoadMessagesFromCids(ctx, blscids) + if err != nil { + return nil, err + } + + secpkmsgs, err := loader.LoadSignedMessagesFromCids(ctx, secpkcids) + if err != nil { + return nil, err + } + + return &exchange.CompactedMessages{ + Bls: blsmsgs, + BlsIncludes: blsincl, + Secpk: secpkmsgs, + SecpkIncludes: secpkincl, + }, nil +} diff --git a/venus-devtool/api-gen/client.go b/venus-devtool/api-gen/client.go new file mode 100644 index 0000000000..f1d9b7d86a --- /dev/null +++ b/venus-devtool/api-gen/client.go @@ -0,0 +1,133 @@ +package main + +import ( + "bytes" + "fmt" + "log" + "text/template" + + "github.com/filecoin-project/venus/venus-devtool/util" + "github.com/urfave/cli/v2" +) + +var clientCmd = &cli.Command{ + Name: "client", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + for _, target := range apiTargets { + err := genClientForAPI(target) + if err 
!= nil { + log.Fatalf("got error while generating client codes for %s: %s", target.Type, err) + } + } + return nil + }, +} + +const clientGenTemplate = ` +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package {{ .PkgName }} + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = {{ .MajorVersion }} +const APINamespace = "{{ .APINs }}" +const MethodNamespace = "{{ .MethNs }}" + +// New{{ .APIName }}RPC creates a new httpparse jsonrpc remotecli. +func New{{ .APIName }}RPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) ({{ .APIName }}, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res {{ .APIStruct }} + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} + +// Dial{{ .APIName }}RPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. 
+func Dial{{ .APIName }}RPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) ({{ .APIName }}, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res {{ .APIStruct }} + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} +` + +func genClientForAPI(t util.APIMeta) error { + ifaceMetas, astMeta, err := util.ParseInterfaceMetas(t.ParseOpt) + if err != nil { + return err + } + + apiName := t.Type.Name() + + var apiIface *util.InterfaceMeta + for i := range ifaceMetas { + if ifaceMetas[i].Name == apiName { + apiIface = ifaceMetas[i] + break + } + } + + if apiIface == nil { + return fmt.Errorf("api %s not found", apiName) + } + + tmpl, err := template.New("client").Parse(clientGenTemplate) + if err != nil { + return fmt.Errorf("parse template: %w", err) + } + + ns := t.RPCMeta.Namespace + if ns == "" { + ns = fmt.Sprintf("%s.%s", apiIface.Pkg.Name, apiIface.Name) + } + + methNs := t.RPCMeta.MethodNamespace + if methNs == "" { + methNs = "Filecoin" + } + + var buf bytes.Buffer + err = tmpl.Execute(&buf, map[string]interface{}{ + "PkgName": apiIface.Pkg.Name, + "APIName": apiName, + "APIStruct": structName(apiName), + "APINs": ns, + "MethNs": methNs, + "MajorVersion": t.RPCMeta.Version, + }) + if err != nil { + return fmt.Errorf("exec template: %w", err) + } + + return outputSourceFile(astMeta.Location, "client_gen.go", &buf) +} diff --git a/venus-devtool/api-gen/common.go b/venus-devtool/api-gen/common.go new file mode 100644 index 0000000000..f6dac72c65 --- /dev/null +++ 
b/venus-devtool/api-gen/common.go @@ -0,0 +1,124 @@ +package main + +import ( + "bytes" + "fmt" + "go/format" + "os" + "path/filepath" + "reflect" + + "github.com/filecoin-project/venus/venus-devtool/util" + gatewayv0 "github.com/filecoin-project/venus/venus-shared/api/gateway/v0" + gatewayv1 "github.com/filecoin-project/venus/venus-shared/api/gateway/v1" + "github.com/filecoin-project/venus/venus-shared/api/market" + market_client "github.com/filecoin-project/venus/venus-shared/api/market/client" + "github.com/filecoin-project/venus/venus-shared/api/messager" + "github.com/filecoin-project/venus/venus-shared/api/wallet" +) + +func init() { + for _, capi := range util.ChainAPIPairs { + apiTargets = append(apiTargets, capi.Venus) + } + + apiTargets = append(apiTargets, + util.APIMeta{ + Type: reflect.TypeOf((*messager.IMessager)(nil)).Elem(), + ParseOpt: util.InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api/messager", + IncludeAll: true, + }, + RPCMeta: util.RPCMeta{ + Version: 0, + MethodNamespace: "Message", + }, + }, + util.APIMeta{ + Type: reflect.TypeOf((*wallet.IFullAPI)(nil)).Elem(), + ParseOpt: util.InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api/wallet", + IncludeAll: true, + }, + RPCMeta: util.RPCMeta{ + Version: 0, + }, + }, + util.APIMeta{ + Type: reflect.TypeOf((*gatewayv1.IGateway)(nil)).Elem(), + ParseOpt: util.InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api/gateway/v2", + IncludeAll: true, + }, + RPCMeta: util.RPCMeta{ + Version: 2, + MethodNamespace: "Gateway", + }, + }, + util.APIMeta{ + Type: reflect.TypeOf((*gatewayv1.IGateway)(nil)).Elem(), + ParseOpt: util.InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api/gateway/v1", + IncludeAll: true, + }, + RPCMeta: util.RPCMeta{ + Version: 1, + MethodNamespace: "Gateway", + }, + }, + util.APIMeta{ + Type: 
reflect.TypeOf((*gatewayv0.IGateway)(nil)).Elem(), + ParseOpt: util.InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api/gateway/v0", + IncludeAll: true, + }, + RPCMeta: util.RPCMeta{ + Version: 0, + MethodNamespace: "Gateway", + }, + }, + util.APIMeta{ + Type: reflect.TypeOf((*market.IMarket)(nil)).Elem(), + ParseOpt: util.InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api/market", + IncludeAll: true, + }, + RPCMeta: util.RPCMeta{ + Version: 0, + MethodNamespace: "VENUS_MARKET", + }, + }, + util.APIMeta{ + Type: reflect.TypeOf((*market_client.IMarketClient)(nil)).Elem(), + ParseOpt: util.InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api/market/client", + IncludeAll: true, + }, + RPCMeta: util.RPCMeta{ + Version: 0, + MethodNamespace: "VENUS_MARKET_CLIENT", + }, + }, + ) +} + +var apiTargets []util.APIMeta + +func structName(ifaceName string) string { + return ifaceName + "Struct" +} + +func outputSourceFile(location, fname string, buf *bytes.Buffer) error { + formatted, err := format.Source(buf.Bytes()) + if err != nil { + return fmt.Errorf("format source content: %w", err) + } + + outputFile := filepath.Join(location, fname) + err = os.WriteFile(outputFile, formatted, 0o644) + if err != nil { + return fmt.Errorf("write to output %s: %w", outputFile, err) + } + + return nil +} diff --git a/venus-devtool/api-gen/doc_gen.go b/venus-devtool/api-gen/doc_gen.go new file mode 100644 index 0000000000..d7acde2055 --- /dev/null +++ b/venus-devtool/api-gen/doc_gen.go @@ -0,0 +1,193 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/ast" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/venus-devtool/util" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/urfave/cli/v2" +) + +var ctxElem = 
reflect.TypeOf((*context.Context)(nil)).Elem() + +var docGenCmd = &cli.Command{ + Name: "doc", + Action: func(cctx *cli.Context) error { + if err := util.LoadExtraInterfaceMeta(); err != nil { + return err + } + for _, t := range apiTargets { + if err := genDocForAPI(t); err != nil { + return err + } + } + + return nil + }, +} + +type MethodGroup struct { + GroupName string + Methods []*Method +} + +type Method struct { + Name string + Comment string + Perm string + InputExample string + ResponseExample string +} + +func genDocForAPI(t util.APIMeta) error { + opt := t.ParseOpt + opt.ResolveImports = true + ifaceMetas, astMeta, err := util.ParseInterfaceMetas(opt) + if err != nil { + return err + } + + groups := make([]MethodGroup, 0, len(ifaceMetas)) + for _, im := range ifaceMetas { + mg := MethodGroup{GroupName: simpleGroupName(im.Name), Methods: make([]*Method, 0, len(im.Defined))} + for _, mm := range im.Defined { + method, ok := t.Type.MethodByName(mm.Name) + if !ok { + fmt.Println("not found method: ", mm.Name) + continue + } + in, out, err := fillExampleValue(method) + if err != nil { + return err + } + + m := &Method{ + Comment: getComment(mm.Comments), + Name: mm.Name, + InputExample: string(in), + ResponseExample: string(out), + Perm: util.GetAPIMethodPerm(mm), + } + mg.Methods = append(mg.Methods, m) + } + if len(mg.Methods) == 0 { + continue + } + groups = append(groups, mg) + } + + return writeAPIInfo(astMeta, groups) +} + +func simpleGroupName(groupName string) string { + // `IBlockStore` ==> `BlockStore` + // `IJwtAuthAPI` ==> `JwtAuth` + if len(groupName) > 0 && groupName[0] == 'I' { + groupName = groupName[1:] + } + groupName = strings.Replace(groupName, "API", "", 1) + + return groupName +} + +func fillExampleValue(m reflect.Method) ([]byte, []byte, error) { + ft := m.Type + + in := make([]interface{}, 0, ft.NumIn()) + for i := 0; i < ft.NumIn(); i++ { + if ft.In(i).Implements(ctxElem) { + continue + } + inp := ft.In(i) + in = append(in, 
ExampleValue(m.Name, inp, nil)) + } + + inVal, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, nil, err + } + + out := ExampleValue(m.Name, ft.Out(0), nil) + if out == nil { + return nil, nil, fmt.Errorf("ExampleValue for %s get nil", ft.Out(0).String()) + } + // json: unsupported type: map[address.Address]*types.Actor, so return {} + if _, ok := out.(map[address.Address]*types.Actor); ok { + return inVal, []byte{'{', '}'}, nil + } + + outVal, err := json.MarshalIndent(out, "", " ") + if err != nil { + return nil, nil, err + } + + return inVal, outVal, nil +} + +func getComment(comments []*ast.CommentGroup) string { + // skip permission comment + if len(comments) == 1 { + return "" + } + cmt := "" + for _, c := range comments[0].List { + cmt += strings.TrimSpace(strings.Replace(c.Text, "//", "", 1)) + "\n" + } + cmt = strings.Replace(cmt, "<", "\\<", -1) + return cmt +} + +func writeAPIInfo(astMeta *util.ASTMeta, groups []MethodGroup) error { + buf := &bytes.Buffer{} + fmt.Fprint(buf, "# Groups\n\n") + + sort.Slice(groups, func(i, j int) bool { + return groups[i].GroupName < groups[j].GroupName + }) + for _, g := range groups { + sort.Slice(g.Methods, func(i, j int) bool { + return g.Methods[i].Name < g.Methods[j].Name + }) + + fmt.Fprintf(buf, "* [%s](#%s)\n", g.GroupName, strings.ToLower(g.GroupName)) + for _, method := range g.Methods { + fmt.Fprintf(buf, " * [%s](#%s)\n", method.Name, strings.ToLower(method.Name)) + } + } + + fmt.Fprintf(buf, "\n") + for _, g := range groups { + fmt.Fprintf(buf, "## %s\n\n", g.GroupName) + + for _, m := range g.Methods { + fmt.Fprintf(buf, "### %s\n", m.Name) + fmt.Fprintf(buf, "%s\n\n", m.Comment) + + fmt.Fprintf(buf, "Perms: %s\n\n", m.Perm) + + if strings.Count(m.InputExample, "\n") > 0 { + fmt.Fprintf(buf, "Inputs:\n```json\n%s\n```\n\n", m.InputExample) + } else { + fmt.Fprintf(buf, "Inputs: `%s`\n\n", m.InputExample) + } + + if strings.Count(m.ResponseExample, "\n") > 0 { + fmt.Fprintf(buf, 
"Response:\n```json\n%s\n```\n\n", m.ResponseExample) + } else { + fmt.Fprintf(buf, "Response: `%s`\n\n", m.ResponseExample) + } + } + } + + return os.WriteFile(filepath.Join(astMeta.Location, "method.md"), buf.Bytes(), 0o644) +} diff --git a/venus-devtool/api-gen/example.go b/venus-devtool/api-gen/example.go new file mode 100644 index 0000000000..e5491d3fa3 --- /dev/null +++ b/venus-devtool/api-gen/example.go @@ -0,0 +1,352 @@ +package main + +import ( + "encoding/json" + "fmt" + "go/token" + "os" + "reflect" + "strings" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/venus/venus-shared/types/market" + auuid "github.com/google/uuid" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-graphsync" + textselector "github.com/ipld/go-ipld-selector-text-lite" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/metrics" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/venus/pkg/constants" + "github.com/filecoin-project/venus/venus-shared/api/chain" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/market/client" + "github.com/filecoin-project/venus/venus-shared/types/messager" + "github.com/filecoin-project/venus/venus-shared/types/wallet" +) + +var 
ExampleValues = map[reflect.Type]interface{}{ + reflect.TypeOf(auth.Permission("")): auth.Permission("write"), + reflect.TypeOf(""): "string value", + reflect.TypeOf(market.PieceStatus("")): market.Undefine, + reflect.TypeOf(uint64(42)): uint64(42), + reflect.TypeOf(uint(42)): uint(42), + reflect.TypeOf(byte(7)): byte(7), + reflect.TypeOf([]byte{}): []byte("byte array"), +} + +func addExample(v interface{}) { + ExampleValues[reflect.TypeOf(v)] = v +} + +func init() { + c, err := cid.Decode("bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4") + if err != nil { + panic(err) + } + + ExampleValues[reflect.TypeOf(c)] = c + + c2, err := cid.Decode("bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve") + if err != nil { + panic(err) + } + + tsk := types.NewTipSetKey(c, c2) + + ExampleValues[reflect.TypeOf(tsk)] = tsk + + addr, err := address.NewIDAddress(1234) + if err != nil { + panic(err) + } + + ExampleValues[reflect.TypeOf(addr)] = addr + + pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf") + if err != nil { + panic(err) + } + addExample(pid) + addExample(&pid) + uuid, err := types.ParseUUID("e26f1e5c-47f7-4561-a11d-18fab6e748af") + if err != nil { + panic(err) + } + addExample(constants.TestNetworkVersion) + allocationID := verifreg.AllocationId(0) + addExample(allocationID) + addExample(&allocationID) + addExample(map[verifreg.AllocationId]verifreg.Allocation{}) + claimID := verifreg.ClaimId(0) + addExample(claimID) + addExample(&claimID) + addExample(map[verifreg.ClaimId]verifreg.Claim{}) + textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash") + clientEvent := retrievalmarket.ClientEventDealAccepted + addExample(bitfield.NewFromSet([]uint64{5})) + addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1) + addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1) + addExample(abi.ChainEpoch(10101)) + addExample(crypto.SigTypeBLS) + addExample(types.KTBLS) + addExample(types.MTChainMsg) + 
addExample(int64(9)) + addExample(12.3) + addExample(123) + addExample(uintptr(0)) + addExample(abi.MethodNum(1)) + addExample(exitcode.ExitCode(0)) + addExample(crypto.DomainSeparationTag_ElectionProofProduction) + addExample(true) + addExample(abi.UnpaddedPieceSize(1024)) + addExample(abi.UnpaddedPieceSize(1024).Padded()) + addExample(abi.DealID(5432)) + addExample(abi.SectorNumber(9)) + addExample(abi.SectorSize(32 * 1024 * 1024 * 1024)) + addExample(types.MpoolChange(0)) + addExample(network.Connected) + addExample(types.NetworkName("mainnet")) + addExample(types.SyncStateStage(1)) + addExample(chain.FullAPIVersion1) + addExample(types.PCHInbound) + addExample(time.Minute) + reqIDBytes, err := uuid.MarshalBinary() + if err != nil { + panic(err) + } + reqID, err := graphsync.ParseRequestID(reqIDBytes) + if err != nil { + panic(err) + } + block := blocks.Block(&blocks.BasicBlock{}) + ExampleValues[reflect.TypeOf(&block).Elem()] = block + addExample(reqID) + addExample(datatransfer.TransferID(3)) + addExample(datatransfer.Ongoing) + addExample(clientEvent) + addExample(&clientEvent) + addExample(retrievalmarket.ClientEventDealAccepted) + addExample(retrievalmarket.DealStatusNew) + addExample(&textSelExample) + addExample(network.ReachabilityPublic) + addExample(map[string]int{"name": 42}) + addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()}) + addExample(map[string]cid.Cid{}) + addExample(&types.ExecutionTrace{ + Msg: ExampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message), + MsgRct: ExampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), + }) + addExample(map[string]types.Actor{ + "t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor), + }) + addExample(json.RawMessage(`"json raw message"`)) + addExample(map[address.Address]*types.Actor{ + addr: { + Code: c, + Head: c2, + Nonce: 10, + Balance: abi.NewTokenAmount(100), + }, + }) + 
addExample(map[string]*types.MarketDeal{ + "t026363": ExampleValue("init", reflect.TypeOf(&types.MarketDeal{}), nil).(*types.MarketDeal), + }) + addExample(map[string]types.MarketBalance{ + "t026363": ExampleValue("init", reflect.TypeOf(types.MarketBalance{}), nil).(types.MarketBalance), + }) + addExample([]*types.EstimateMessage{ + { + Msg: ExampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message), + Spec: ExampleValue("init", reflect.TypeOf(&types.MessageSendSpec{}), nil).(*types.MessageSendSpec), + }, + }) + addExample(map[string]*pubsub.TopicScoreSnapshot{ + "/blocks": { + TimeInMesh: time.Minute, + FirstMessageDeliveries: 122, + MeshMessageDeliveries: 1234, + InvalidMessageDeliveries: 3, + }, + }) + addExample(map[string]metrics.Stats{ + "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { + RateIn: 100, + RateOut: 50, + TotalIn: 174000, + TotalOut: 12500, + }, + }) + addExample(map[protocol.ID]metrics.Stats{ + "/fil/hello/1.0.0": { + RateIn: 100, + RateOut: 50, + TotalIn: 174000, + TotalOut: 12500, + }, + }) + maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior") + if err != nil { + panic(err) + } + // because reflect.TypeOf(maddr) returns the concrete type... 
+ ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr + si := uint64(12) + addExample(&si) + addExample(retrievalmarket.DealID(5)) + addExample(abi.ActorID(1000)) + addExample(map[abi.SectorNumber]string{ + 123: "can't acquire read lock", + }) + addExample([]abi.SectorNumber{123, 124}) + addExample(types.CheckStatusCode(0)) + addExample(map[string]interface{}{"abc": 123}) + addExample(types.HCApply) + + // messager + i64 := int64(10000) + addExample(uuid) + addExample(messager.OnChainMsg) + addExample(messager.AddressStateAlive) + addExample(&i64) + addExample(ExampleValue("init", reflect.TypeOf(&messager.Address{}), nil).(*messager.Address)) + addExample(&messager.Node{ + ID: uuid, + Name: "venus", + URL: "/ip4/127.0.0.1/tcp/3453", + Token: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0._eHBJJAiBzQmfcbD_vVmtTrkgyJQ-LOgGOiHfb8rU1I", + Type: messager.LightNode, + }) + addExample(&messager.Message{ + ID: uuid.String(), + UnsignedCid: &c, + SignedCid: &c, + Message: ExampleValue("init", reflect.TypeOf(types.Message{}), nil).(types.Message), + Signature: ExampleValue("init", reflect.TypeOf(&crypto.Signature{}), nil).(*crypto.Signature), + Height: 100, + Confidence: 10, + Receipt: ExampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), + TipSetKey: tsk, + Meta: ExampleValue("init", reflect.TypeOf(&messager.SendSpec{}), nil).(*messager.SendSpec), + WalletName: "test", + State: messager.UnFillMsg, + }) + addExample(ExampleValue("init", reflect.TypeOf(&messager.SendSpec{}), nil).(*messager.SendSpec)) + addExample(messager.QuickSendParamsCodecJSON) + + // wallet + addExample(wallet.MEChainMsg) + + // used in gateway + addExample(types.PaddedByteIndex(10)) + + // used in market + addExample(filestore.Path("/some/path")) + + clientDataSelector := client.DataSelector("/ipld/a/b/c") + addExample(clientDataSelector) + addExample(&clientDataSelector) + + 
addExample(client.ImportID(1234)) + + uuidTmp := auuid.MustParse("102334ec-35a3-4b36-be9f-02883844503a") + addExample(&uuidTmp) +} + +func ExampleValue(method string, t, parent reflect.Type) interface{} { + v, ok := ExampleValues[t] + if ok { + return v + } + + switch t.Kind() { + case reflect.Slice: + out := reflect.New(t).Elem() + out = reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t))) + return out.Interface() + case reflect.Chan: + return ExampleValue(method, t.Elem(), nil) + case reflect.Struct: + es := exampleStruct(method, t, parent) + v := reflect.ValueOf(es).Elem().Interface() + ExampleValues[t] = v + return v + case reflect.Array: + out := reflect.New(t).Elem() + for i := 0; i < t.Len(); i++ { + out.Index(i).Set(reflect.ValueOf(ExampleValue(method, t.Elem(), t))) + } + return out.Interface() + case reflect.Map: + out := reflect.MakeMap(t) + out.SetMapIndex(reflect.ValueOf(ExampleValue(method, t.Key(), parent)), reflect.ValueOf(ExampleValue(method, t.Elem(), parent))) + return out.Interface() + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct { + es := exampleStruct(method, t.Elem(), t) + // ExampleValues[t] = es + return es + } + + case reflect.Interface: + if t.Implements(reflect.TypeOf((*error)(nil)).Elem()) { + return fmt.Errorf("empty error") + } + return struct{}{} + } + + _, _ = fmt.Fprintf(os.Stderr, "Warnning: No example value for type: %s (method '%s')\n", t, method) + return nil +} + +func exampleStruct(method string, t, parent reflect.Type) interface{} { + ns := reflect.New(t) + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if shouldIgnoreField(f, parent) { + continue + } + + if strings.Title(f.Name) == f.Name { + ns.Elem().Field(i).Set(reflect.ValueOf(ExampleValue(method, f.Type, t))) + } + } + + return ns.Interface() +} + +func shouldIgnoreField(f reflect.StructField, parentType reflect.Type) bool { + if f.Type == parentType { + return true + } + + if len(f.Name) == 0 { + return true + } + + if 
!token.IsExported(f.Name) { + return true + } + + jtag := f.Tag.Get("json") + if len(jtag) == 0 { + return false + } + + return strings.Split(jtag, ",")[0] == "-" +} diff --git a/venus-devtool/api-gen/main.go b/venus-devtool/api-gen/main.go new file mode 100644 index 0000000000..642ef7d16e --- /dev/null +++ b/venus-devtool/api-gen/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli/v2" +) + +func main() { + app := &cli.App{ + Name: "api-gen", + Usage: "generate api related codes for venus-shared", + EnableBashCompletion: true, + Flags: []cli.Flag{}, + Commands: []*cli.Command{ + proxyCmd, + clientCmd, + docGenCmd, + mockCmd, + }, + } + + app.Setup() + + if err := app.Run(os.Args); err != nil { + fmt.Fprintf(os.Stderr, "ERR: %v\n", err) // nolint: errcheck + } +} diff --git a/venus-devtool/api-gen/mock.go b/venus-devtool/api-gen/mock.go new file mode 100644 index 0000000000..8592631da4 --- /dev/null +++ b/venus-devtool/api-gen/mock.go @@ -0,0 +1,38 @@ +package main + +import ( + "os/exec" + "path/filepath" + "strings" + + "github.com/filecoin-project/venus/venus-devtool/util" + "github.com/urfave/cli/v2" +) + +var mockCmd = &cli.Command{ + Name: "mock", + Action: func(cctx *cli.Context) error { + for _, t := range apiTargets { + if err := mockAPI(t); err != nil { + return err + } + } + + return nil + }, +} + +func mockAPI(t util.APIMeta) error { + opt := t.ParseOpt + opt.ResolveImports = true + _, astMeta, err := util.ParseInterfaceMetas(opt) + if err != nil { + return err + } + + dest := filepath.Join(astMeta.Location, "mock/mock_"+strings.ToLower(t.Type.Name())+".go") + cmd := exec.Command("go", "run", "github.com/golang/mock/mockgen", "-destination", dest, + "-package", "mock", t.ParseOpt.ImportPath, t.Type.Name()) + + return cmd.Run() +} diff --git a/venus-devtool/api-gen/proxy.go b/venus-devtool/api-gen/proxy.go new file mode 100644 index 0000000000..d404dec9ee --- /dev/null +++ b/venus-devtool/api-gen/proxy.go @@ -0,0 
+1,349 @@ +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "io" + "log" + "path/filepath" + "strings" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/venus/venus-devtool/util" +) + +var proxyCmd = &cli.Command{ + Name: "proxy", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + if err := util.LoadExtraInterfaceMeta(); err != nil { + return err + } + for _, target := range apiTargets { + err := genProxyForAPI(target) + if err != nil { + log.Fatalf("got error while generating proxy codes for %s: %s", target.Type, err) + } + } + return nil + }, +} + +func genProxyForAPI(t util.APIMeta) error { + opt := t.ParseOpt + opt.ResolveImports = true + ifaceMetas, astMeta, err := util.ParseInterfaceMetas(opt) + if err != nil { + return err + } + + ifaceMap := map[string]*util.InterfaceMeta{} + done := map[string]struct{}{} + deps := map[string]util.ImportMeta{} + + for i := range ifaceMetas { + ifaceMeta := ifaceMetas[i] + ifaceMap[ifaceMeta.Name] = ifaceMeta + } + + apiName := t.Type.Name() + api, has := ifaceMap[apiName] + if !has { + return fmt.Errorf("api %s not found", apiName) + } + + var contentBuffer bytes.Buffer + + err = writeForInterface(apiName, astMeta, ifaceMap, deps, done, &contentBuffer) + if err != nil { + return fmt.Errorf("gen for api interface %s: %w", apiName, err) + } + + var fileBuffer bytes.Buffer + + fmt.Fprintf(&fileBuffer, "// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. 
DO NOT EDIT.\npackage %s\n\n", api.Pkg.Name) + + err = writeImports(deps, &fileBuffer) + if err != nil { + return fmt.Errorf("gen imports: %w", err) + } + + _, err = io.Copy(&fileBuffer, &contentBuffer) + if err != nil { + return fmt.Errorf("copy contents into output: %w", err) + } + + return outputSourceFile(astMeta.Location, "proxy_gen.go", &fileBuffer) +} + +func writeImports(deps map[string]util.ImportMeta, dst *bytes.Buffer) error { + imports := [3][][2]string{} + + for iname, imeta := range deps { + alias := "" + base := filepath.Base(imeta.Path) + if iname != base { + alias = iname + } + + imp := [2]string{alias, imeta.Path} + idx := 0 + if !imeta.IsStd { + idx++ + if strings.HasPrefix(imeta.Path, "github.com/filecoin-project/venus/") { + idx++ + } + } + + imports[idx] = append(imports[idx], imp) + + } + + fmt.Fprintln(dst, "import (") + for _, impGroup := range imports { + for _, imp := range impGroup { + _, err := fmt.Fprintf(dst, "\t%s \"%s\"\n", imp[0], imp[1]) + if err != nil { + return fmt.Errorf("write import for %#v: %w", imp, err) + } + } + + fmt.Fprintln(dst, "") + } + + fmt.Fprintln(dst, ")") + return nil +} + +func writeForInterface(name string, astMeta *util.ASTMeta, ifaces map[string]*util.InterfaceMeta, deps map[string]util.ImportMeta, done map[string]struct{}, dst *bytes.Buffer) error { + if _, has := done[name]; has { + return nil + } + + iface, has := ifaces[name] + if !has { + return fmt.Errorf("%s not found", name) + } + + for _, nested := range iface.Nested { + if err := writeForInterface(nested, astMeta, ifaces, deps, done, dst); err != nil { + return fmt.Errorf("gen nested interface for %s: %w", name, err) + } + } + + if err := writeStruct(dst, iface, astMeta); err != nil { + return fmt.Errorf("gen struct for %s: %w", name, err) + } + + if err := writeMethods(dst, iface, astMeta, deps); err != nil { + return fmt.Errorf("gen methods for %s: %w", name, err) + } + + done[name] = struct{}{} + return nil +} + +const ( + structHeadFormat = ` 
+type %s struct { +` + + structTail = ` +} + +` + + structInternalHead = ` + Internal struct { +` + + structInternalTail = ` + } +` +) + +func writeStruct(dst *bytes.Buffer, ifaceMeta *util.InterfaceMeta, astMeta *util.ASTMeta) error { + fmt.Fprintf(dst, structHeadFormat, structName(ifaceMeta.Name)) + + for _, nested := range ifaceMeta.Nested { + fmt.Fprintf(dst, "\t%s\n", structName(nested)) + } + + tmpBuf := &bytes.Buffer{} + if len(ifaceMeta.Defined) > 0 { + fmt.Fprint(dst, structInternalHead) + + for _, meth := range ifaceMeta.Defined { + fmt.Fprintf(dst, "\t\t%s ", meth.Name) + + err := printer.Fprint(tmpBuf, astMeta.FileSet, meth.FuncType) + if err != nil { + return fmt.Errorf("write func %s: %w", meth.Name, err) + } + + dst.WriteString(strings.ReplaceAll(tmpBuf.String(), "\n\t", "")) + tmpBuf.Reset() + + fmt.Fprintf(dst, " `perm:\"%s\"`\n", util.GetAPIMethodPerm(meth)) + } + + fmt.Fprint(dst, structInternalTail) + } + + fmt.Fprint(dst, structTail) + return nil +} + +func writeMethods(dst *bytes.Buffer, ifaceMeta *util.InterfaceMeta, astMeta *util.ASTMeta, deps map[string]util.ImportMeta) error { + var typBuf bytes.Buffer + for _, meth := range ifaceMeta.Defined { + err := writeMethodBody(dst, &typBuf, ifaceMeta, meth, astMeta, deps) + if err != nil { + return fmt.Errorf("write method for %s.%s: %w", ifaceMeta.Name, meth.Name, err) + } + } + fmt.Fprintln(dst, "") + return nil +} + +func resolveDep(typ ast.Expr, ifaceMeta *util.InterfaceMeta, deps map[string]util.ImportMeta) error { + var selector *ast.SelectorExpr + + switch t := typ.(type) { + case *ast.Ident: + return nil + + case *ast.SelectorExpr: + selector = t + + case *ast.MapType: + if err := resolveDep(t.Key, ifaceMeta, deps); err != nil { + return fmt.Errorf("resolve key dep for type %T: %w", typ, err) + } + + if err := resolveDep(t.Value, ifaceMeta, deps); err != nil { + return fmt.Errorf("resolve value dep for type %T: %w", typ, err) + } + + return nil + + case *ast.ArrayType: + if err := 
resolveDep(t.Elt, ifaceMeta, deps); err != nil { + return fmt.Errorf("resolve element dep for type %T: %w", typ, err) + } + + return nil + + case *ast.ChanType: + if err := resolveDep(t.Value, ifaceMeta, deps); err != nil { + return fmt.Errorf("resolve chan value dep for type %T: %w", typ, err) + } + + return nil + + case *ast.StarExpr: + if err := resolveDep(t.X, ifaceMeta, deps); err != nil { + return fmt.Errorf("resolve ptr dep for type %T: %w", typ, err) + } + + return nil + + case *ast.InterfaceType: + + return nil + + default: + return fmt.Errorf("found unexpected type: %T", typ) + } + + if selector == nil { + return fmt.Errorf("should be a *ast.SelectorExpr, found %T", typ) + } + + selector, ok := typ.(*ast.SelectorExpr) + if !ok { + return nil + } + + xident, ok := selector.X.(*ast.Ident) + if !ok || xident.Name == "" { + return nil + } + + importMeta, has := ifaceMeta.File.Imports[xident.Name] + if !has { + return fmt.Errorf("package for selector %s not found in file %s", xident.Name, ifaceMeta.File.Name) + } + + prev, has := deps[xident.Name] + if !has { + deps[xident.Name] = importMeta + return nil + } + + if prev.Path != importMeta.Path { + return fmt.Errorf("found duplicate package name %s for %s and %s", xident.Name, prev.Path, importMeta.Path) + } + + return nil +} + +func writeMethodBody(dst *bytes.Buffer, typBuf *bytes.Buffer, ifaceMeta *util.InterfaceMeta, methMeta util.InterfaceMethodMeta, astMeta *util.ASTMeta, deps map[string]util.ImportMeta) error { + paramNum := 0 + callNames := []string{} + params := []string{} + for pi, paramList := range methMeta.FuncType.Params.List { + if err := resolveDep(paramList.Type, ifaceMeta, deps); err != nil { + return fmt.Errorf("resolve dep for #%d param: %w", pi, err) + } + + typBuf.Reset() + err := printer.Fprint(typBuf, astMeta.FileSet, paramList.Type) + if err != nil { + return fmt.Errorf("write #%d param type: %w", pi, err) + } + + nameCount := len(paramList.Names) + if nameCount == 0 { + nameCount = 1 + 
} + + names := make([]string, nameCount) + for i := range names { + names[i] = fmt.Sprintf("p%d", paramNum) + paramNum++ + } + + callNames = append(callNames, names...) + params = append(params, strings.Join(names, ", ")+" "+strings.ReplaceAll(typBuf.String(), "\n\t", "")) + } + + results := []string{} + for ri, resultList := range methMeta.FuncType.Results.List { + if err := resolveDep(resultList.Type, ifaceMeta, deps); err != nil { + return fmt.Errorf("resolve dep for #%d result: %w", ri, err) + } + + typBuf.Reset() + err := printer.Fprint(typBuf, astMeta.FileSet, resultList.Type) + if err != nil { + return fmt.Errorf("write #%d result type: %w", ri, err) + } + + count := len(resultList.Names) + if count == 0 { + count = 1 + } + + for i := 0; i < count; i++ { + results = append(results, strings.ReplaceAll(typBuf.String(), "\n\t", "")) + } + } + + sname := structName(ifaceMeta.Name) + fmt.Fprintf(dst, "func(s *%s) %s(%s) (%s) { return s.Internal.%s(%s) }\n", sname, methMeta.Name, strings.Join(params, ", "), strings.Join(results, ", "), methMeta.Name, strings.Join(callNames, ", ")) + return nil +} diff --git a/venus-devtool/bundle-gen/main.go b/venus-devtool/bundle-gen/main.go new file mode 100644 index 0000000000..8742f8cdb6 --- /dev/null +++ b/venus-devtool/bundle-gen/main.go @@ -0,0 +1,77 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "text/template" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/venus/venus-devtool/util" + "github.com/filecoin-project/venus/venus-shared/actors" +) + +func main() { + app := &cli.App{ + Name: "bundle-gen", + Usage: "generate builtin actors for venus-shared", + Flags: []cli.Flag{ + &cli.StringFlag{Name: "dst"}, + }, + Action: func(ctx *cli.Context) error { + metadata, err := actors.ReadEmbeddedBuiltinActorsMetadata() + if err != nil { + return err + } + + buf := &bytes.Buffer{} + if err := tmpl.Execute(buf, metadata); err != nil { + return err + } + + formatted, err := util.FmtFile("", buf.Bytes()) + if 
err != nil { + return err + } + + return os.WriteFile(ctx.String("dst"), formatted, 0o744) + }, + } + + app.Setup() + + if err := app.Run(os.Args); err != nil { + fmt.Fprintf(os.Stderr, "ERR: %v\n", err) // nolint: errcheck + } +} + +var tmpl *template.Template = template.Must(template.New("actor-metadata").Parse(` +// WARNING: This file has automatically been generated +package actors +import ( + "github.com/ipfs/go-cid" +) +var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMetadata{ +{{- range . }} { + Network: {{printf "%q" .Network}}, + Version: {{.Version}}, + ManifestCid: mustParseCid({{printf "%q" .ManifestCid}}), + Actors: map[string]cid.Cid { + {{- range $name, $cid := .Actors }} + {{printf "%q" $name}}: mustParseCid({{printf "%q" $cid}}), + {{- end }} + }, +}, +{{- end -}} +} + +func mustParseCid(c string) cid.Cid { + ret, err := cid.Decode(c) + if err != nil { + panic(err) + } + + return ret +} +`)) diff --git a/venus-devtool/cborgen/main.go b/venus-devtool/cborgen/main.go new file mode 100644 index 0000000000..eb07e69851 --- /dev/null +++ b/venus-devtool/cborgen/main.go @@ -0,0 +1,194 @@ +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/filecoin-project/venus/pkg/chain" + "github.com/filecoin-project/venus/pkg/fvm" + market1 "github.com/filecoin-project/venus/pkg/market" + "github.com/filecoin-project/venus/pkg/net/helloprotocol" + "github.com/filecoin-project/venus/pkg/state/tree" + "github.com/filecoin-project/venus/pkg/vm/dispatch" + "github.com/filecoin-project/venus/venus-devtool/util" + "github.com/filecoin-project/venus/venus-shared/blockstore" + "github.com/filecoin-project/venus/venus-shared/libp2p/exchange" + "github.com/filecoin-project/venus/venus-shared/libp2p/hello" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/market" + + gen "github.com/whyrusleeping/cbor-gen" +) + +type genTarget struct { + 
dir string + pkg string + types []interface{} +} + +func main() { + targets := []genTarget{ + { + dir: "../venus-shared/libp2p/hello/", + types: []interface{}{ + hello.GreetingMessage{}, + hello.LatencyMessage{}, + }, + }, + { + dir: "../venus-shared/libp2p/exchange/", + types: []interface{}{ + exchange.Request{}, + exchange.Response{}, + exchange.CompactedMessages{}, + exchange.BSTipSet{}, + }, + }, + { + dir: "../venus-shared/types/", + types: []interface{}{ + types.BlockHeader{}, + types.Ticket{}, + types.ElectionProof{}, + types.BeaconEntry{}, + types.SignedMessage{}, + types.MessageRoot{}, + types.MessageReceipt{}, + types.BlockMsg{}, + types.ExpTipSet{}, + types.PaymentInfo{}, + }, + }, + { + dir: "../venus-shared/internal/", + types: []interface{}{ + types.Actor{}, + types.Message{}, + }, + }, + { + dir: "../venus-shared/types/market", + types: []interface{}{ + market.FundedAddressState{}, + market.MsgInfo{}, + market.ChannelInfo{}, + market.VoucherInfo{}, + market.MinerDeal{}, + market.RetrievalAsk{}, + market.ProviderDealState{}, + market.TimeStamp{}, + market.SignedStorageAsk{}, + }, + }, + { + dir: "../pkg/market", + types: []interface{}{ + market1.FundedAddressState{}, + }, + }, + { + dir: "../pkg/net/helloprotocol", + types: []interface{}{ + helloprotocol.HelloMessage{}, + helloprotocol.LatencyMessage{}, + }, + }, + { + dir: "../pkg/vm/dispatch", + types: []interface{}{ + dispatch.SimpleParams{}, + }, + }, + { + dir: "../pkg/state/tree", + types: []interface{}{ + tree.StateRoot{}, + }, + }, + { + dir: "../pkg/chain", + types: []interface{}{ + chain.TSState{}, + }, + }, + { + dir: "../pkg/fvm", + types: []interface{}{ + fvm.FvmExecutionTrace{}, + fvm.FvmGasCharge{}, + }, + }, + { + dir: "../venus-shared/blockstore", + types: []interface{}{ + blockstore.NetRPCReq{}, + blockstore.NetRPCResp{}, + blockstore.NetRPCErr{}, + }, + }, + } + + for _, target := range targets { + pkg := target.pkg + if pkg == "" { + pkg = filepath.Base(target.dir) + } + + if err 
:= WriteTupleEncodersToFile(filepath.Join(target.dir, "cbor_gen.go"), pkg, target.types...); err != nil { + log.Fatalf("gen for %s: %s", target.dir, err) + } + } +} + +// WriteTupleEncodersToFile copy from https://github.com/whyrusleeping/cbor-gen/blob/master/writefile.go#L16 +func WriteTupleEncodersToFile(fname, pkg string, types ...interface{}) error { + buf := new(bytes.Buffer) + + typeInfos := make([]*gen.GenTypeInfo, len(types)) + for i, t := range types { + gti, err := gen.ParseTypeInfo(t) + if err != nil { + return fmt.Errorf("failed to parse type info: %w", err) + } + typeInfos[i] = gti + } + + if err := gen.PrintHeaderAndUtilityMethods(buf, pkg, typeInfos); err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + for _, t := range typeInfos { + if err := gen.GenTupleEncodersForType(t, buf); err != nil { + return fmt.Errorf("failed to generate encoders: %w", err) + } + } + + srcData := buf.Bytes() + if strings.Contains(fname, "pkg/fvm") { + srcData = bytes.ReplaceAll(srcData, []byte(`internal "github.com/filecoin-project/venus/venus-shared/internal"`), []byte{}) + srcData = bytes.ReplaceAll(srcData, []byte("internal."), []byte("types.")) + } + + data, err := util.FmtFile("", srcData) + if err != nil { + return err + } + + fi, err := os.Create(fname) + if err != nil { + return fmt.Errorf("failed to open file: %w", err) + } + + _, err = fi.Write(data) + if err != nil { + _ = fi.Close() + return err + } + _ = fi.Close() + + return nil +} diff --git a/venus-devtool/compatible/TODO.md b/venus-devtool/compatible/TODO.md new file mode 100644 index 0000000000..2ddf930307 --- /dev/null +++ b/venus-devtool/compatible/TODO.md @@ -0,0 +1,3 @@ +# TODO Items for Compatible Checks Between Lotus & Venus +- [ ] venus-shared/actors/version.go +- [ ] upgrade schedules diff --git a/venus-devtool/compatible/actors/list.go b/venus-devtool/compatible/actors/list.go new file mode 100644 index 0000000000..4fa9ac73e1 --- /dev/null +++ 
b/venus-devtool/compatible/actors/list.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + "io/fs" + "os" + "sort" + "strings" +) + +var filterWithSuffix = func(suffix string) func(path string, d fs.DirEntry) bool { + return func(path string, d fs.DirEntry) bool { + if d.IsDir() { + return true + } + + if !strings.HasSuffix(path, suffix) { + return true + } + + return false + } +} + +func listFilesInDir(dir string, filter func(string, fs.DirEntry) bool) ([]string, error) { + var paths []string + + err := fs.WalkDir(os.DirFS(dir), ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return fmt.Errorf("walking %s: %w", path, err) + } + + if filter(path, d) { + return nil + } + + paths = append(paths, path) + return nil + }) + if err != nil { + return nil, fmt.Errorf("walk through the chain/actors subdir: %w", err) + } + + sort.Strings(paths) + return paths, nil +} diff --git a/venus-devtool/compatible/actors/main.go b/venus-devtool/compatible/actors/main.go new file mode 100644 index 0000000000..24a2954dbd --- /dev/null +++ b/venus-devtool/compatible/actors/main.go @@ -0,0 +1,227 @@ +package main + +import ( + "fmt" + "io/fs" + "log" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/urfave/cli/v2" +) + +func main() { + app := &cli.App{ + Name: "actors", + Usage: "devtool for template compatible checks between lotus & venus", + EnableBashCompletion: true, + Flags: []cli.Flag{}, + Commands: []*cli.Command{ + sourcesCmd, + templatesCmd, + renderCmd, + replicaCmd, + }, + } + + app.Setup() + + if err := app.Run(os.Args); err != nil { + fmt.Fprintf(os.Stderr, "ERR: %v\n", err) // nolint: errcheck + } +} + +var templatesCmd = &cli.Command{ + Name: "templates", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "dst", + Value: "", + }, + }, + Action: func(c *cli.Context) error { + srcDir, err := findActorsPkgDir() + if err != nil { + return fmt.Errorf("find chain/actors: %w", err) + } + 
+ log.Println("listing") + paths, err := listFilesInDir(srcDir, filterWithSuffix(goTemplateExt)) + if err != nil { + return fmt.Errorf("list template files: %w", err) + } + + fmt.Println("TEMPLATES IN chain/actors:") + + for _, p := range paths { + fmt.Printf("\t%s\n", p) + } + + dstDir := c.String("dst") + if dstDir == "" { + return nil + } + + log.Println("fetching") + + dstAbs, err := filepath.Abs(dstDir) + if err != nil { + return fmt.Errorf("get absolute dst path for %s: %w", dstDir, err) + } + + return fetch(srcDir, dstAbs, paths) + }, +} + +var renderCmd = &cli.Command{ + Name: "render", + ArgsUsage: "[dir]", + Action: func(cctx *cli.Context) error { + dir := cctx.Args().First() + if dir == "" { + return fmt.Errorf("dir is required") + } + + abs, err := filepath.Abs(dir) + if err != nil { + return fmt.Errorf("get abs path for %s: %w", dir, err) + } + + templates, err := listFilesInDir(dir, filterWithSuffix(goTemplateExt)) + if err != nil { + return fmt.Errorf("list templates in %s: %w", abs, err) + } + + log.Print("rendering") + for _, tpath := range templates { + versions := actors.Versions + // datacap actor available since version v9 + if strings.Contains(tpath, "builtin/datacap") { + versions = actors.Versions[8:] + } + err = render(filepath.Join(abs, tpath), versions) + if err != nil { + return fmt.Errorf("for %s: %w", tpath, err) + } + + log.Printf("%s done", tpath) + } + + return nil + }, +} + +var sourcesCmd = &cli.Command{ + Name: "sources", + Action: func(cctx *cli.Context) error { + srcDir, err := findActorsPkgDir() + if err != nil { + return fmt.Errorf("find chain/actors: %w", err) + } + + files, err := listFilesInDir(srcDir, filterWithSuffix(goSourceCodeExt)) + if err != nil { + return fmt.Errorf("list source code files: %w", err) + } + + fmt.Println("SOURCES IN chain/actors:") + + for _, p := range files { + fmt.Printf("\t%s\n", p) + } + return nil + }, +} + +// todo: move to the appropriate +var replicaCmd = &cli.Command{ + Name: "replica", + 
Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "dst", + Value: "", + Required: true, + }, + }, + Action: func(cctx *cli.Context) error { + srcDir, err := findActorsPkgDir() + if err != nil { + return fmt.Errorf("find chain/actors: %w", err) + } + + reg := regexp.MustCompile(`v[0-9]+.go`) + + files, err := listFilesInDir(srcDir, func(path string, d fs.DirEntry) bool { + if d.IsDir() { + return true + } + + // exclude like builtin/[dir]/dir.go(replaced by actor.go) + dir, file := filepath.Split(path) + if strings.Contains(dir, "builtin") && strings.HasSuffix(file, ".go") { + pf := file[:strings.LastIndex(file, ".go")] + if strings.Contains(dir, pf) { + fmt.Println("path:", path) + return true + } + } + + // need adt.go diff_adt.go + if strings.Contains(path, "adt.go") { + return false + } + + // skip test file + if strings.HasSuffix(path, "test.go") { + return true + } + + if strings.HasSuffix(path, "main.go") || strings.Contains(path, "template") || + strings.Contains(path, "message") { + return true + } + + dir = filepath.Dir(path) + arr := strings.Split(dir, "/") + if strings.HasSuffix(path, fmt.Sprintf("%s.go", arr[len(arr)-1])) { + return true + } + + if reg.MatchString(d.Name()) { + return true + } + + return false + }) + if err != nil { + return fmt.Errorf("list replica files failed: %w", err) + } + + fmt.Println("replica files IN chain/actors:") + + for _, p := range files { + fmt.Printf("\t%s\n", p) + } + + replacers := [][2]string{ + {"github.com/filecoin-project/lotus/chain/actors", "github.com/filecoin-project/venus/venus-shared/actors"}, + {"github.com/filecoin-project/lotus/chain/actors/adt", "github.com/filecoin-project/venus/venus-shared/actors/adt"}, + {"github.com/filecoin-project/lotus/chain/actors/aerrors", "github.com/filecoin-project/venus/venus-shared/actors/aerrors"}, + {"dtypes.NetworkName", "string"}, + {"\"github.com/filecoin-project/lotus/node/modules/dtypes\"", ""}, + {"\"github.com/filecoin-project/lotus/chain/types\"", "types 
\"github.com/filecoin-project/venus/venus-shared/internal\""}, + {"\"github.com/filecoin-project/lotus/blockstore\"", "blockstore \"github.com/filecoin-project/venus/pkg/util/blockstoreutil\""}, + {"golang.org/x/xerrors", "fmt"}, + } + + for _, file := range files { + if err := fetchOne(srcDir, cctx.String("dst"), file, replacers); err != nil { + return fmt.Errorf("fetch for %s: %w", file, err) + } + } + return nil + }, +} diff --git a/venus-devtool/compatible/actors/render.go b/venus-devtool/compatible/actors/render.go new file mode 100644 index 0000000000..9bc5b0c031 --- /dev/null +++ b/venus-devtool/compatible/actors/render.go @@ -0,0 +1,113 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strings" + "text/template" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/venus/venus-devtool/util" +) + +func importPath(v int) string { + if v == 0 { + return "/" + } + + return fmt.Sprintf("/v%d/", v) +} + +func render(tpath string, versions []int) error { + dir := filepath.Dir(tpath) + fname := filepath.Base(tpath) + + data, err := os.ReadFile(tpath) + if err != nil { + return fmt.Errorf("read file content: %w", err) + } + + var tname string + separated := false + if strings.HasSuffix(fname, separatedGoTemplateExt) { + tname = fname[:len(fname)-separatedGoTemplateExtLen] + separated = true + } else { + tname = fname[:len(fname)-goTemplateExtLen] + } + + funcMap := template.FuncMap{} + if !separated { + funcMap["import"] = importPath + } + + t, err := template.New(tname).Funcs(funcMap).Parse(string(data)) + if err != nil { + return fmt.Errorf("parse template: %w", err) + } + + if separated { + err = renderSeparated(t, dir, versions) + } else { + err = renderSingle(t, dir, versions) + } + + if err != nil { + return err + } + + return nil +} + +func renderSingle(t *template.Template, dir string, versions []int) error { + var buf bytes.Buffer + err := t.Execute(&buf, map[string]interface{}{ + "versions": versions, 
+ "latestVersion": actors.LatestVersion, + }) + if err != nil { + return fmt.Errorf("render single template: %w", err) + } + + formatted, err := util.FmtFile("", buf.Bytes()) + if err != nil { + return fmt.Errorf("format go source file : %w", err) + } + + err = os.WriteFile(filepath.Join(dir, t.Name()+".go"), formatted, 0o644) + if err != nil { + return fmt.Errorf("write to file: %w", err) + } + + return nil +} + +func renderSeparated(t *template.Template, dir string, versions []int) error { + var buf bytes.Buffer + for _, v := range versions { + buf.Reset() + + err := t.Execute(&buf, map[string]interface{}{ + "v": v, + "import": importPath(v), + "latestVersion": actors.LatestVersion, + }) + if err != nil { + return fmt.Errorf("render separated template for ver %d: %w", v, err) + } + + formatted, err := util.FmtFile("", buf.Bytes()) + if err != nil { + return fmt.Errorf("format go source file for ver %d: %w", v, err) + } + + err = os.WriteFile(filepath.Join(dir, fmt.Sprintf("%s.v%d.go", t.Name(), v)), formatted, 0o644) + if err != nil { + return fmt.Errorf("write to file for ver %d: %w", v, err) + } + } + + return nil +} diff --git a/venus-devtool/compatible/actors/templates.go b/venus-devtool/compatible/actors/templates.go new file mode 100644 index 0000000000..d48d9679fb --- /dev/null +++ b/venus-devtool/compatible/actors/templates.go @@ -0,0 +1,169 @@ +package main + +import ( + "bytes" + "fmt" + "go/build" + "io" + "log" + "os" + "path/filepath" + "strings" +) + +const ( + goSourceCodeExt = ".go" + + goTemplateExt = ".go.template" + goTemplateExtLen = len(goTemplateExt) + + separatedGoTemplateExt = ".sep.go.template" + separatedGoTemplateExtLen = len(separatedGoTemplateExt) +) + +var separatedSuffixes = []string{ + "state.go.template", + "message.go.template", +} + +var replacers = [][2]string{ + { + "\"github.com/filecoin-project/lotus/chain/types\"", + "types \"github.com/filecoin-project/venus/venus-shared/internal\"", + }, + { + 
"github.com/filecoin-project/lotus/chain/actors", + "github.com/filecoin-project/venus/venus-shared/actors", + }, + { + "\"github.com/filecoin-project/lotus/node/modules/dtypes\"", + "", + }, + { + "dtypes.NetworkName", + "string", + }, + {"\"golang.org/x/xerrors\"", "\"fmt\""}, + {"xerrors.Errorf", "fmt.Errorf"}, + // fixed: actors/builtin/miner/state.v9.go:61:10: fmt.Errorf format %w has arg r of wrong type interface{} + {"failed to get available balance: %w", "failed to get available balance: %v"}, +} + +func findActorsPkgDir() (string, error) { + pkg, err := build.Import("github.com/filecoin-project/lotus/chain/actors", ".", build.FindOnly) + if err != nil { + return "", fmt.Errorf("find local build path for louts: %w", err) + } + + return pkg.Dir, nil +} + +func fetch(src, dst string, paths []string) error { + err := os.MkdirAll(dst, 0o755) + if err != nil { + return fmt.Errorf("mkdir-all for %s: %w", dst, err) + } + + for _, rel := range paths { + if err := fetchOne(src, dst, rel, replacers); err != nil { + return fmt.Errorf("fetch template for %s: %w", rel, err) + } + + log.Printf("\t%s done", rel) + } + + return nil +} + +func filterSamePkg(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + lineLen := len(lines) + buf := &bytes.Buffer{} + pkgs := make(map[string]struct{}) + var start, end bool + for i, line := range lines { + str := strings.TrimSpace(string(line)) + if str == "import (" { + start = true + } + if start && str == ")" { + end = true + start = false + } + if start && !end { + pkg := strings.TrimSpace(string(line)) + if _, ok := pkgs[pkg]; ok && strings.HasPrefix(pkg, "\"") { + continue + } else { + pkgs[pkg] = struct{}{} + } + } + buf.Write(line) + if i == lineLen-1 && len(line) == 0 { + } else { + buf.WriteString("\n") + } + } + + return buf.Bytes() +} + +func fetchOne(srcDir, dstDir string, rel string, replacers [][2]string) error { + dstRel := rel + for _, suffix := range separatedSuffixes { + if strings.HasSuffix(rel, 
suffix) { + dstRel = strings.ReplaceAll(rel, goTemplateExt, separatedGoTemplateExt) + break + } + } + + fsrc, err := os.Open(filepath.Join(srcDir, rel)) + if err != nil { + return fmt.Errorf("open source file: %w", err) + } + + defer fsrc.Close() // nolint: errcheck + + dstPath := filepath.Join(dstDir, dstRel) + err = os.MkdirAll(filepath.Dir(dstPath), 0o755) + if err != nil { + return fmt.Errorf("mkdir for %s: %w", dstPath, err) + } + + fdst, err := os.OpenFile(dstPath, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o644) + if err != nil { + return fmt.Errorf("open dst file: %w", err) + } + + defer fdst.Close() // nolint: errcheck + + var buf bytes.Buffer + + if _, err := buf.WriteString(fmt.Sprintf("// FETCHED FROM LOTUS: %s\n\n", rel)); err != nil { + return fmt.Errorf("write file header: %w", err) + } + + _, err = io.Copy(&buf, fsrc) + if err != nil { + return fmt.Errorf("copy to buffer: %w", err) + } + + data := buf.Bytes() + for _, replacer := range replacers { + data = bytes.ReplaceAll(data, []byte(replacer[0]), []byte(replacer[1])) + } + + data = filterSamePkg(data) + + _, err = io.Copy(fdst, bytes.NewReader(data)) + if err != nil { + return fmt.Errorf("copy to dst file: %w", err) + } + + err = fdst.Sync() + if err != nil { + return fmt.Errorf("dst file sync: %w", err) + } + + return nil +} diff --git a/venus-devtool/compatible/apis/checksum.go b/venus-devtool/compatible/apis/checksum.go new file mode 100644 index 0000000000..0f97a50925 --- /dev/null +++ b/venus-devtool/compatible/apis/checksum.go @@ -0,0 +1,83 @@ +package main + +import ( + "bytes" + "crypto/md5" + "fmt" + "reflect" + "strings" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/venus/venus-devtool/util" +) + +var checksumCmd = &cli.Command{ + Name: "checksum", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + var buf bytes.Buffer + for _, pair := range util.ChainAPIPairs { + rt := pair.Lotus.Type + fmt.Printf("%s:\n", rt) + for mi := 0; mi < rt.NumMethod(); mi++ { + 
buf.Reset() + meth := rt.Method(mi) + numIn := meth.Type.NumIn() + numOut := meth.Type.NumOut() + + for ii := 0; ii < numIn; ii++ { + inTyp := meth.Type.In(ii) + fmt.Fprintf(&buf, "\tIn: %s\n", formatType(inTyp)) // nolint + } + + for oi := 0; oi < numOut; oi++ { + outTyp := meth.Type.Out(oi) + fmt.Fprintf(&buf, "\tOut: %s\n", formatType(outTyp)) // nolint + } + + fmt.Printf("\t%s:\tIn=%d,\tOut=%d,\tCheckSum=%x\n", meth.Name, numIn, numOut, md5.Sum(buf.Bytes())) + } + fmt.Println() + } + return nil + }, +} + +func formatType(rt reflect.Type) string { + switch rt.Kind() { + case reflect.Array: + return fmt.Sprintf("[%d]%s", rt.Len(), formatType(rt.Elem())) + + case reflect.Chan: + return fmt.Sprintf("%s %s", rt.ChanDir(), formatType(rt.Elem())) + + case reflect.Func: + ins := make([]string, rt.NumIn()) + outs := make([]string, rt.NumOut()) + for i := range ins { + ins[i] = formatType(rt.In(i)) + } + + for i := range outs { + outs[i] = formatType(rt.Out(i)) + } + + return fmt.Sprintf("func(%s) (%s)", strings.Join(ins, ", "), strings.Join(outs, ", ")) + + case reflect.Map: + return fmt.Sprintf("map[%s]%s", formatType(rt.Key()), formatType(rt.Elem())) + + case reflect.Ptr: + return fmt.Sprintf("*%s", formatType(rt.Elem())) + + case reflect.Slice: + return fmt.Sprintf("[]%s", formatType(rt.Elem())) + + default: + if p := rt.PkgPath(); p != "" { + return p + "." 
+ rt.Name() + } + return rt.Name() + } +} diff --git a/venus-devtool/compatible/apis/diff.go b/venus-devtool/compatible/apis/diff.go new file mode 100644 index 0000000000..0b26bdb61b --- /dev/null +++ b/venus-devtool/compatible/apis/diff.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "reflect" + "sort" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/venus/venus-shared/typeutil" + + "github.com/filecoin-project/venus/venus-devtool/util" +) + +var diffCmd = &cli.Command{ + Name: "diff", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + for _, pair := range util.ChainAPIPairs { + showDiff(pair.Venus.Type, pair.Lotus.Type) + } + return nil + }, +} + +type methDiff struct { + typ string + name string + desc string +} + +func showDiff(impl, origin reflect.Type) { + fmt.Printf("%s <> %s:\n", formatType(impl), formatType(origin)) + implMethods := typeutil.ExportedMethods(impl) + originMethods := typeutil.ExportedMethods(origin) + + implMap := map[string]int{} + originMap := map[string]int{} + diffs := make([]methDiff, 0, len(implMethods)+len(originMethods)) + + for ii := range implMethods { + implMap[implMethods[ii].Name] = ii + } + + for oi := range originMethods { + methName := originMethods[oi].Name + originMap[methName] = oi + + ii, has := implMap[methName] + if !has { + // + diffs = append(diffs, methDiff{ + name: methName, + typ: "-", + }) + continue + } + + similar, reason := typeutil.Similar(implMethods[ii].Type, originMethods[oi].Type, typeutil.CodecJSON|typeutil.CodecCbor, typeutil.StructFieldsOrdered|typeutil.StructFieldTagsMatch) + if similar { + continue + } + + diffs = append(diffs, methDiff{ + typ: ">", + name: methName, + desc: reason.Error(), + }) + } + + for ii := range implMethods { + methName := implMethods[ii].Name + if _, has := originMap[methName]; !has { + diffs = append(diffs, methDiff{ + name: methName, + typ: "+", + }) + } + } + + sort.Slice(diffs, func(i, j int) bool { + return diffs[i].name < 
diffs[j].name + }) + + for _, d := range diffs { + if d.desc == "" { + fmt.Printf("\t%s %s\n", d.typ, d.name) + continue + } + + fmt.Printf("\t%s %s %s\n", d.typ, d.name, d.desc) + } + + fmt.Println() +} diff --git a/venus-devtool/compatible/apis/main.go b/venus-devtool/compatible/apis/main.go new file mode 100644 index 0000000000..5b0fc4cd39 --- /dev/null +++ b/venus-devtool/compatible/apis/main.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli/v2" +) + +func main() { + app := &cli.App{ + Name: "apis", + Usage: "devtool for api compatible checks between lotus & venus", + EnableBashCompletion: true, + Flags: []cli.Flag{}, + Commands: []*cli.Command{ + checksumCmd, + diffCmd, + permCmd, + }, + } + + app.Setup() + + if err := app.Run(os.Args); err != nil { + fmt.Fprintf(os.Stderr, "ERR: %v\n", err) // nolint: errcheck + } +} diff --git a/venus-devtool/compatible/apis/perm.go b/venus-devtool/compatible/apis/perm.go new file mode 100644 index 0000000000..3fc3ceb1a3 --- /dev/null +++ b/venus-devtool/compatible/apis/perm.go @@ -0,0 +1,83 @@ +package main + +import ( + "fmt" + "log" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/venus/venus-devtool/util" +) + +var permCmd = &cli.Command{ + Name: "perm", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + if err := util.LoadExtraInterfaceMeta(); err != nil { + return err + } + for _, pair := range util.ChainAPIPairs { + originMetas, err := parsePermMetas(pair.Lotus.ParseOpt) + if err != nil { + log.Fatalln("parse lotus api interfaces:", err) + } + + targetMetas, err := parsePermMetas(pair.Venus.ParseOpt) + if err != nil { + log.Fatalln("parse venus chain api interfaces:", err) + } + + originMap := map[string]permMeta{} + for _, om := range originMetas { + if om.perm != "" { + originMap[om.meth] = om + } + } + + fmt.Printf("v%d: %s <> %s\n", pair.Ver, pair.Venus.ParseOpt.ImportPath, pair.Lotus.ParseOpt.ImportPath) + for _, tm := range targetMetas { + om, 
has := originMap[tm.meth] + if !has { + fmt.Printf("\t- %s.%s\n", tm.iface, tm.meth) + continue + } + + if tm.perm != om.perm { + fmt.Printf("\t> %s.%s: %s <> %s.%s: %s\n", tm.iface, tm.meth, tm.perm, om.iface, om.meth, om.perm) + } + } + + fmt.Println() + } + + return nil + }, +} + +type permMeta struct { + pkg string + iface string + meth string + perm string +} + +func parsePermMetas(opt util.InterfaceParseOption) ([]permMeta, error) { + ifaceMetas, _, err := util.ParseInterfaceMetas(opt) + if err != nil { + return nil, err + } + + var permMetas []permMeta + for _, iface := range ifaceMetas { + for _, ifMeth := range iface.Defined { + permMetas = append(permMetas, permMeta{ + pkg: opt.ImportPath, + iface: iface.Name, + meth: ifMeth.Name, + perm: util.GetAPIMethodPerm(ifMeth), + }) + } + } + + return permMetas, nil +} diff --git a/venus-devtool/compatible/main.go b/venus-devtool/compatible/main.go new file mode 100644 index 0000000000..c7ba71a5b7 --- /dev/null +++ b/venus-devtool/compatible/main.go @@ -0,0 +1,92 @@ +package main + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "log" + "path/filepath" + + _ "github.com/filecoin-project/lotus/build" +) + +func main() { + pkg, err := build.Import("github.com/filecoin-project/lotus/gen", ".", build.FindOnly) + if err != nil { + log.Fatalln("find pkg", err) + } + + targetFile := filepath.Join(pkg.Dir, "main.go") + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, targetFile, nil, 0) + if err != nil { + log.Fatalln("parse file", err) + } + + ast.Inspect(f, func(n ast.Node) bool { + if expr, ok := n.(*ast.CallExpr); ok { + if fn, ok := expr.Fun.(*ast.SelectorExpr); ok { + if fn.Sel != nil && fn.Sel.Name == "WriteMapEncodersToFile" { + ci, err := parseGenCallExpr(expr) + if err != nil { + log.Fatalln(err) + } + + fmt.Printf("%s: %s\n", ci.pkgName, filepath.Dir(filepath.Join("github.com/filecoin-project/lotus", ci.path))) + for ti := range ci.typeNames { + fmt.Printf("\t%s\n", 
ci.typeNames[ti]) + } + + fmt.Println("") + } + } + } + return true + }) +} + +type callInfo struct { + path string + pkgName string + typeNames []string +} + +func parseGenCallExpr(expr *ast.CallExpr) (*callInfo, error) { + if numIn := len(expr.Args); numIn < 3 { + return nil, fmt.Errorf("not enough args, got %d", numIn) + } + + first, ok := expr.Args[0].(*ast.BasicLit) + if !ok || first.Kind != token.STRING { + return nil, fmt.Errorf("1st arg should be a string, got %T", first) + } + + second, ok := expr.Args[1].(*ast.BasicLit) + if !ok || second.Kind != token.STRING { + return nil, fmt.Errorf("2nd arg should be a string, got %T", second) + } + + typeNames := make([]string, 0, len(expr.Args)-2) + + for _, typArg := range expr.Args[2:] { + lit, ok := typArg.(*ast.CompositeLit) + if !ok { + return nil, fmt.Errorf("should be CompositeLit, got %T", typArg) + } + + sel, ok := lit.Type.(*ast.SelectorExpr) + if !ok || sel.Sel == nil { + return nil, fmt.Errorf("unexpected literal type: %T", sel) + } + + typeNames = append(typeNames, sel.Sel.Name) + } + + return &callInfo{ + path: first.Value[1 : len(first.Value)-1], + pkgName: second.Value[1 : len(second.Value)-1], + typeNames: typeNames, + }, nil +} diff --git a/venus-devtool/go.mod b/venus-devtool/go.mod new file mode 100644 index 0000000000..b3df7271ad --- /dev/null +++ b/venus-devtool/go.mod @@ -0,0 +1,218 @@ +module github.com/filecoin-project/venus/venus-devtool + +go 1.18 + +require ( + github.com/filecoin-project/go-address v1.0.0 + github.com/filecoin-project/go-bitfield v0.2.4 + github.com/filecoin-project/go-data-transfer v1.15.2 + github.com/filecoin-project/go-fil-markets v1.24.1-rc1 + github.com/filecoin-project/go-jsonrpc v0.1.8 + github.com/filecoin-project/go-state-types v0.9.8 + github.com/filecoin-project/lotus v1.18.0 + github.com/filecoin-project/venus v0.0.0-00010101000000-000000000000 + github.com/ipfs/go-block-format v0.0.3 + github.com/ipfs/go-cid v0.2.0 + github.com/ipfs/go-graphsync v0.13.1 + 
github.com/ipld/go-ipld-selector-text-lite v0.0.1 + github.com/libp2p/go-libp2p v0.22.0 + github.com/libp2p/go-libp2p-pubsub v0.8.0 + github.com/multiformats/go-multiaddr v0.6.0 + github.com/urfave/cli/v2 v2.8.1 + github.com/whyrusleeping/cbor-gen v0.0.0-20220514204315-f29c37e9c44c + golang.org/x/tools v0.1.12 +) + +require ( + contrib.go.opencensus.io/exporter/jaeger v0.2.1 // indirect + contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect + github.com/BurntSushi/toml v1.2.0 // indirect + github.com/DataDog/zstd v1.4.1 // indirect + github.com/GeertJohan/go.incremental v1.0.0 // indirect + github.com/GeertJohan/go.rice v1.0.2 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect + github.com/ahmetb/go-linq/v3 v3.2.0 // indirect + github.com/akavel/rsrc v0.8.0 // indirect + github.com/awnumar/memcall v0.0.0-20191004114545-73db50fd9f80 // indirect + github.com/awnumar/memguard v0.22.2 // indirect + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect + github.com/daaku/go.zipexe v1.0.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/dgraph-io/badger/v2 v2.2007.3 // indirect + github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/drand/drand v1.3.0 // indirect + github.com/drand/kyber v1.1.7 // indirect + github.com/drand/kyber-bls12381 v0.2.1 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f // 
indirect + github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect + github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 // indirect + github.com/filecoin-project/go-cbor-util v0.0.1 // indirect + github.com/filecoin-project/go-commp-utils v0.1.3 // indirect + github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 // indirect + github.com/filecoin-project/go-fil-commcid v0.1.0 // indirect + github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect + github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect + github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-padreader v0.0.1 // indirect + github.com/filecoin-project/go-statestore v0.2.0 // indirect + github.com/filecoin-project/pubsub v1.0.0 // indirect + github.com/filecoin-project/specs-actors v0.9.15 // indirect + github.com/filecoin-project/specs-actors/v2 v2.3.6 // indirect + github.com/filecoin-project/specs-actors/v3 v3.1.2 // indirect + github.com/filecoin-project/specs-actors/v4 v4.0.2 // indirect + github.com/filecoin-project/specs-actors/v5 v5.0.6 // indirect + github.com/filecoin-project/specs-actors/v6 v6.0.2 // indirect + github.com/filecoin-project/specs-actors/v7 v7.0.1 // indirect + github.com/filecoin-project/specs-actors/v8 v8.0.1 // indirect + github.com/filecoin-project/specs-storage v0.4.1 // indirect + github.com/fxamacker/cbor/v2 v2.4.0 // indirect + github.com/gbrlsnchs/jwt/v3 v3.0.1 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.0 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/glog v1.0.0 // indirect + github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 // indirect + github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 // indirect + github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/go-blockservice v0.4.0 // indirect + github.com/ipfs/go-datastore v0.5.1 // indirect + github.com/ipfs/go-ds-badger2 v0.1.2 // indirect + github.com/ipfs/go-fs-lock v0.0.7 // indirect + github.com/ipfs/go-ipfs-blockstore v1.2.0 // indirect + github.com/ipfs/go-ipfs-cmds v0.8.1 // indirect + github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect + github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect + github.com/ipfs/go-ipfs-files v0.1.1 // indirect + github.com/ipfs/go-ipfs-http-client v0.4.0 // indirect + github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipld-cbor v0.0.6 // indirect + github.com/ipfs/go-ipld-format v0.4.0 // indirect + github.com/ipfs/go-ipld-legacy v0.1.1 // indirect + github.com/ipfs/go-ipns v0.2.0 // indirect + github.com/ipfs/go-log v1.0.5 // indirect + github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/ipfs/go-merkledag v0.8.1 // indirect + github.com/ipfs/go-metrics-interface v0.0.1 // indirect + github.com/ipfs/go-path v0.3.0 // indirect + github.com/ipfs/go-unixfs v0.3.1 // indirect + github.com/ipfs/go-verifcid v0.0.1 // indirect + github.com/ipfs/interface-go-ipfs-core v0.7.0 // indirect + github.com/ipld/go-car v0.4.0 // 
indirect + github.com/ipld/go-car/v2 v2.4.1 // indirect + github.com/ipld/go-codec-dagpb v1.3.2 // indirect + github.com/ipld/go-ipld-prime v0.17.0 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/jessevdk/go-flags v1.4.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 // indirect + github.com/klauspost/cpuid/v2 v2.1.0 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p-core v0.20.0 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.18.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect + github.com/libp2p/go-libp2p-record v0.2.0 // indirect + github.com/libp2p/go-msgio v0.2.0 // indirect + github.com/libp2p/go-netroute v0.2.0 // indirect + github.com/libp2p/go-openssl v0.1.0 // indirect + github.com/magefile/mage v1.13.0 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-pointer v0.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/miekg/dns v1.1.50 // indirect + github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.0.4 // indirect + github.com/multiformats/go-base36 v0.1.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multibase v0.1.1 // indirect + github.com/multiformats/go-multicodec v0.5.0 // indirect + github.com/multiformats/go-multihash v0.2.1 // indirect + github.com/multiformats/go-varint v0.0.6 // indirect + github.com/nikkolasg/hexjson 
v0.0.0-20181101101858-78e39397e00c // indirect + github.com/nkovacs/streamquote v1.0.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/pborman/uuid v1.2.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect + github.com/prometheus/client_golang v1.12.1 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/statsd_exporter v0.21.0 // indirect + github.com/raulk/clock v1.1.0 // indirect + github.com/rs/cors v1.7.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/shirou/gopsutil v2.18.12+incompatible // indirect + github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/stretchr/testify v1.8.0 // indirect + github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.1.0 // indirect + github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/whyrusleeping/go-logging v0.0.1 // indirect + github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect + go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect + go.opentelemetry.io/otel/trace v1.7.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.22.0 // indirect + go4.org 
v0.0.0-20200411211856-f5505b9728dd // indirect + golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect + golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + google.golang.org/api v0.81.0 // indirect + google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect + google.golang.org/grpc v1.46.2 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/blake3 v1.1.7 // indirect +) + +replace ( + github.com/filecoin-project/filecoin-ffi => .././extern/filecoin-ffi + github.com/filecoin-project/venus => ../ +) diff --git a/venus-devtool/go.sum b/venus-devtool/go.sum new file mode 100644 index 0000000000..04dfdeb39c --- /dev/null +++ b/venus-devtool/go.sum @@ -0,0 +1,2324 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod 
h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= 
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= +contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod 
h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= +contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= +github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod 
h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.2 h1:PtRw+Tg3oa3HYwiDBZyvOJ8LdIyf6lAovJJtr7YOAYk= +github.com/GeertJohan/go.rice v1.0.2/go.mod h1:af5vUNlDNkCjOZeSGFgIJxDje9qdjsO6hshx0gTmZt4= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ahmetb/go-linq/v3 v3.2.0 h1:BEuMfp+b59io8g5wYzNoFe9pWPalRklhlhbiU3hYZDE= +github.com/ahmetb/go-linq/v3 v3.2.0/go.mod h1:haQ3JfOeWK8HpVxMtHHEMPVgBKiYyQ+f1/kLZh/cj9U= +github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= +github.com/akavel/rsrc v0.8.0/go.mod 
h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= 
+github.com/awnumar/memcall v0.0.0-20191004114545-73db50fd9f80 h1:8kObYoBO4LNmQ+fLiScBfxEdxF1w2MHlvH/lr9MLaTg= +github.com/awnumar/memcall v0.0.0-20191004114545-73db50fd9f80/go.mod h1:S911igBPR9CThzd/hYQQmTc9SWNu3ZHIlCGaWsWsoJo= +github.com/awnumar/memguard v0.22.2 h1:tMxcq1WamhG13gigK8Yaj9i/CHNUO3fFlpS9ABBQAxw= +github.com/awnumar/memguard v0.22.2/go.mod h1:33OwJBHC+T4eEfFcDrQb78TMlBMBvcOPCXWU9xE34gM= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833 h1:yCfXxYaelOyqnia8F/Yng47qhmfC9nKTRIbYRrRueq4= +github.com/bluele/gcache 
v0.0.0-20190518031135-bc40bd653833/go.mod h1:8c4/i2VlovMO2gBnHGQPN5EJw+H0lx1u/5p+cgsXtCk= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go 
v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell 
v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd 
v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/daaku/go.zipexe v1.0.0 
h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/badger/v2 v2.2007.3 h1:Sl9tQWz92WCbVSe8pj04Tkqlm2boW+KAxd+XSs58SQI= +github.com/dgraph-io/badger/v2 
v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= +github.com/drand/drand v1.3.0 h1:k/w/PtHzmlU6OmfoAqgirWyrJ4FZH8ESlJrsKF20UkM= +github.com/drand/drand v1.3.0/go.mod h1:D6kAVlxufq1gi71YCGfzN455JrXF4Q272ZJEG975fzo= +github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= +github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= +github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber v1.1.7 h1:YnOshFoGYSOdhf4K8BiDw4XL/l6caL92vsodAsVQbJI= +github.com/drand/kyber v1.1.7/go.mod h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= +github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= +github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= 
+github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/filecoin-project/dagstore v0.5.2 h1:Nd6oXdnolbbVhpMpkYT5PJHOjQp4OBSntHpMV5pxj3c= +github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.6/go.mod h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE= +github.com/filecoin-project/go-address v1.0.0 h1:IrexI0kpADLaPP+CdmU3CVAUqnW/FQC0KTmz4lVKiFU= +github.com/filecoin-project/go-address v1.0.0/go.mod h1:5t3z6qPmIADZBtuE9EIzi0EwzcRy2nVhpo0I/c1r0OA= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= +github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= +github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 h1:XM81BJ4/6h3FV0WfFjh74cIDIgqMbJsMBLM0fIuLUUk= 
+github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE= +github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= +github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= +github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= +github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= +github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 h1:4cITW0pwgvqLs86Q9bWQa34+jBfR1V687bDkmv2DgnA= +github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837/go.mod h1:e2YBjSblNVoBckkbv3PPqsq71q98oFkFqL7s1etViGo= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= +github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-data-transfer v1.15.2 h1:PzqsFr2Q/onMGKrGh7TtRT0dKsJcVJrioJJnjnKmxlk= +github.com/filecoin-project/go-data-transfer v1.15.2/go.mod h1:qXOJ3IF5dEJQHykXXTwcaRxu17bXAxr+LglXzkL6bZQ= +github.com/filecoin-project/go-ds-versioning v0.1.1 h1:JiyBqaQlwC+UM0WhcBtVEeT3XrX59mQhT8U3p7nu86o= 
+github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= +github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= +github.com/filecoin-project/go-fil-markets v1.24.1-rc1 h1:aT+A+8jg/FPBaPvMh830BZVetAJlwGulYSFjfohpvmk= +github.com/filecoin-project/go-fil-markets v1.24.1-rc1/go.mod h1:7PB9QdyLZY4V6W8QjN4C/VGHzD6qdLEuwkemO9TdSXQ= +github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= +github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= +github.com/filecoin-project/go-jsonrpc v0.1.8 h1:uXX/ikAk3Q4f/k8DRd9Zw+fWnfiYb5I+UI1tzlQgHog= +github.com/filecoin-project/go-jsonrpc v0.1.8/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= +github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= +github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= +github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= +github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod 
h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= +github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.4/go.mod h1:xCA/WfKlC2zcn3fUmDv4IrzznwS98X5XW/irUP3Lhxg= +github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= +github.com/filecoin-project/go-state-types v0.1.8/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= +github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= +github.com/filecoin-project/go-state-types v0.9.8 h1:xkdITiR7h691z1tWOhNCJxHI+cq+Mq7ATkpHQ7f1gu8= +github.com/filecoin-project/go-state-types v0.9.8/go.mod h1:+HCZifUV+e8TlQkgll22Ucuiq8OrVJkK+4Kh4u75iiw= +github.com/filecoin-project/go-statemachine v1.0.2 h1:421SSWBk8GIoCoWYYTE/d+qCWccgmRH0uXotXRDjUbc= +github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNdofHZoGPjfNaAo5Q= +github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= +github.com/filecoin-project/index-provider v0.8.1 h1:ggoBWvMSWR91HZQCWfv8SZjoTGNyJBwNMLuN9bJZrbU= +github.com/filecoin-project/lotus v1.18.0 h1:HxdShHMEZT703n9KlQTgPVoUF/ocidMC/d3TzwxzTP8= +github.com/filecoin-project/lotus v1.18.0/go.mod h1:jJih5ApnJZssc/wWsLJm+IWnfy8YaCyaDbvs/wTIVDk= +github.com/filecoin-project/pubsub v1.0.0 h1:ZTmT27U07e54qV1mMiQo4HDr0buo8I1LDHBYLXlsNXM= +github.com/filecoin-project/pubsub v1.0.0/go.mod h1:GkpB33CcUtUNrLPhJgfdy4FDx4OMNR9k+46DHx/Lqrg= +github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors 
v0.9.15-0.20220514164640-94e0d5e123bd/go.mod h1:pjGEe3QlWtK20ju/aFRsiArbMX6Cn8rqEhhsiCM9xYE= +github.com/filecoin-project/specs-actors v0.9.15 h1:3VpKP5/KaDUHQKAMOg4s35g/syDaEBueKLws0vbsjMc= +github.com/filecoin-project/specs-actors v0.9.15/go.mod h1:pjGEe3QlWtK20ju/aFRsiArbMX6Cn8rqEhhsiCM9xYE= +github.com/filecoin-project/specs-actors/v2 v2.3.6 h1:UxnWTfQd7JsOae39/aHCK0m1IBjdcyymCJfqxuSkn+g= +github.com/filecoin-project/specs-actors/v2 v2.3.6/go.mod h1:DJMpxVRXvev9t8P0XWA26RmTzN+MHiL9IlItVLT0zUc= +github.com/filecoin-project/specs-actors/v3 v3.1.2 h1:Gq3gAbvdGLA/D0GKz1IJfewt9Fh7gA32TPt46Xv+1Cw= +github.com/filecoin-project/specs-actors/v3 v3.1.2/go.mod h1:uOJn+m6W8OW/1mdWMEvxeM1cjQPxmps7s1Z4bJ9V4kY= +github.com/filecoin-project/specs-actors/v4 v4.0.2 h1:VTsv30kIf1Keo8Jlu6Omco+2Ud0pG4EN5UAzyYCibh8= +github.com/filecoin-project/specs-actors/v4 v4.0.2/go.mod h1:zT0GVFxwFS93prGK0b/rMd1sePjRQKfAuodQ9DFAd6Y= +github.com/filecoin-project/specs-actors/v5 v5.0.6 h1:TLtA9hT3pHQF5vB83GmB+m6anw9u6MjdT+VVn/lyC+c= +github.com/filecoin-project/specs-actors/v5 v5.0.6/go.mod h1:myb/UGwESp0V1f1BACXSUrFgTWLvGUoG0ZZH7eqriFM= +github.com/filecoin-project/specs-actors/v6 v6.0.2 h1:K1xPRJoW5PBvb08QF9+4w1AjcnqwR6BjTmeltQFCvWo= +github.com/filecoin-project/specs-actors/v6 v6.0.2/go.mod h1:wnfVvPnYmzPZilNvSqCSSA/ZQX3rdV/U/Vf9EIoQhrI= +github.com/filecoin-project/specs-actors/v7 v7.0.1 h1:w72xCxijK7xs1qzmJiw+WYJaVt2EPHN8oiwpA1Ay3/4= +github.com/filecoin-project/specs-actors/v7 v7.0.1/go.mod h1:tPLEYXoXhcpyLh69Ccq91SOuLXsPWjHiY27CzawjUEk= +github.com/filecoin-project/specs-actors/v8 v8.0.1 h1:4u0tIRJeT5G7F05lwLRIsDnsrN+bJ5Ixj6h49Q7uE2Y= +github.com/filecoin-project/specs-actors/v8 v8.0.1/go.mod h1:UYIPg65iPWoFw5NEftREdJwv9b/5yaLKdCgTvNI/2FA= +github.com/filecoin-project/specs-storage v0.4.1 h1:yvLEaLZj8f+uByhNC4mFOtCUyL2wQku+NGBp6hjTe9M= +github.com/filecoin-project/specs-storage v0.4.1/go.mod h1:Z2eK6uMwAOSLjek6+sy0jNV2DSsMEENziMUz0GHRFBw= +github.com/filecoin-project/storetheindex v0.4.17 
h1:w0dVc954TGPukoVbidlYvn9Xt+wVhk5vBvrqeJiRo8I= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= +github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gbrlsnchs/jwt/v3 v3.0.1 h1:lbUmgAKpxnClrKloyIwpxm4OuWeDl5wLk52G91ODPw4= +github.com/gbrlsnchs/jwt/v3 v3.0.1/go.mod h1:AncDcjXz18xetI3A6STfXq2w+LuTx8pQ8bGEwRN8zVM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod 
h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 h1:s+PDl6lozQ+dEUtUtQnO7+A2iPG3sK1pI4liU+jxn90= +github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= +github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod 
h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= +github.com/hannahhoward/go-pubsub 
v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= +github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 h1:9tcYMdi+7Rb1y0E9Del1DRHui7Ne3za5lLw6CjMJv/M= +github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94/go.mod h1:GYeBD1CF7AqnKZK+UCytLcY3G+UKo0ByXX/3xfdNyqQ= +github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6 h1:8UsGZ2rr2ksmEru6lToqnXgA8Mz1DP11X4zSJ159C3k= +github.com/icza/mighty 
v0.0.0-20180919140131-cfd07d671de6/go.mod h1:xQig96I1VNBDIWGCdTt54nHt6EeI639SmHycLYL7FkA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitfield v1.0.0 h1:y/XHm2GEmD9wKngheWNNCNL0pzrWXZwCdQGv1ikXknQ= +github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= +github.com/ipfs/go-bitswap v0.10.2 h1:B81RIwkTnIvSYT1ZCzxjYTeF0Ek88xa9r1AMpTfk+9Q= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= +github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= +github.com/ipfs/go-blockservice v0.4.0 h1:7MUijAW5SqdsqEW/EhnNFRJXVF8mGU5aGhZ3CQaCWbY= +github.com/ipfs/go-blockservice v0.4.0/go.mod h1:kRjO3wlGW9mS1aKuiCeGhx9K1DagQ10ACpVO59qgAx4= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod 
h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= +github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0= +github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore 
v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= +github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-badger2 v0.1.2 h1:sQc2q1gaXrv8YFNeUtxil0neuyDf9hnVHfLsi7lpXfE= +github.com/ipfs/go-ds-badger2 v0.1.2/go.mod h1:3FtQmDv6fMubygEfU43bsFelYpIiXX/XEYA54l9eCwg= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-fetcher v1.6.1/go.mod h1:27d/xMV8bodjVs9pugh/RCjjK2OZ68UgAMspMdingNo= +github.com/ipfs/go-filestore v1.2.0 h1:O2wg7wdibwxkEDcl7xkuQsPvJFRBVgVSsOJ/GP6z3yU= +github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= +github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= +github.com/ipfs/go-graphsync v0.13.1 h1:lWiP/WLycoPUYyj3IDEi1GJNP30kFuYOvimcfeuZyQs= +github.com/ipfs/go-graphsync v0.13.1/go.mod h1:y8e8G6CmZeL9Srvx1l15CtGiRdf3h5JdQuqPz/iYL0A= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore 
v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= +github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= +github.com/ipfs/go-ipfs-cmds v0.8.1 h1:El661DBWqdqwgz7B9xwKyUpigwqk6BBBHb5B8DfJP00= +github.com/ipfs/go-ipfs-cmds v0.8.1/go.mod h1:y0bflH6m4g6ary4HniYt98UqbrVnRxmRarzeMdLIUn0= +github.com/ipfs/go-ipfs-config v0.18.0 h1:Ta1aNGNEq6RIvzbw7dqzCVZJKb7j+Dd35JFnAOCpT8g= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= 
+github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= +github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y= +github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= +github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-files v0.1.1 h1:/MbEowmpLo9PJTEQk16m9rKzUHjeP4KRU9nWJyJO324= +github.com/ipfs/go-ipfs-files v0.1.1/go.mod h1:8xkIrMWH+Y5P7HvJ4Yc5XWwIW2e52dyXUiC0tZyjDbM= +github.com/ipfs/go-ipfs-http-client v0.4.0 h1:LNuVbFoKfCohCmcNImml3byM3PpTxTT7RPrv/UoDFkI= +github.com/ipfs/go-ipfs-http-client v0.4.0/go.mod h1:NXzPUKt/QVCuR74a8angJCGOSLPImNi5LqaTxIep/70= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= 
+github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= +github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ= +github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= +github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= +github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= +github.com/ipfs/go-ipns v0.2.0 h1:BgmNtQhqOw5XEZ8RAfWEpK4DhqaYiuP6h71MhIp7xXU= +github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= +github.com/ipfs/go-log v0.0.1/go.mod 
h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.4.0/go.mod h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= +github.com/ipfs/go-merkledag v0.8.1 
h1:N3yrqSre/ffvdwtHL4MXy0n7XH+VzN8DlzDrJySPa94= +github.com/ipfs/go-merkledag v0.8.1/go.mod h1:uYUlWE34GhbcTjGuUDEcdPzsEtOdnOupL64NgSRjmWI= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-path v0.3.0 h1:tkjga3MtpXyM5v+3EbRvOHEoo+frwi4oumw5K+KYWyA= +github.com/ipfs/go-path v0.3.0/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.1 h1:7PLjon3RZwRQMgOTvYccZ+mjzkmds/7YzSWKFlBAypE= +github.com/ipfs/go-peertaskqueue v0.7.1/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-unixfs v0.3.1 h1:LrfED0OGfG98ZEegO4/xiprx2O+yS+krCMQSp7zLVv8= +github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= +github.com/ipfs/go-unixfsnode v1.1.2/go.mod h1:5dcE2x03pyjHk4JjamXmunTMzz+VUtqvPwZjIEkfV6s= +github.com/ipfs/go-unixfsnode v1.4.0 h1:9BUxHBXrbNi8mWHc6j+5C580WJqtVw9uoeEKn4tMhwA= +github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= +github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipfs/interface-go-ipfs-core v0.7.0 h1:7tb+2upz8oCcjIyjo1atdMk+P+u7wPmI+GksBlLE8js= +github.com/ipfs/interface-go-ipfs-core v0.7.0/go.mod h1:lF27E/nnSPbylPqKVXGZghal2hzifs3MmjyiEjnc9FY= +github.com/ipfs/iptb v1.4.0 h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo= 
+github.com/ipfs/iptb-plugins v0.3.0 h1:C1rpq1o5lUZtaAOkLIox5akh6ba4uk/3RwWc6ttVxw0= +github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= +github.com/ipld/go-car v0.4.0 h1:U6W7F1aKF/OJMHovnOVdst2cpQE5GhmHibQkAixgNcQ= +github.com/ipld/go-car v0.4.0/go.mod h1:Uslcn4O9cBKK9wqHm/cLTFacg6RAPv6LZx2mxd2Ypl4= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.4.1 h1:9S+FYbQzQJ/XzsdiOV13W5Iu/i+gUnr6csbSD9laFEg= +github.com/ipld/go-car/v2 v2.4.1/go.mod h1:zjpRf0Jew9gHqSvjsKVyoq9OY9SWoEKdYCQUKVaaPT0= +github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= +github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= +github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY= +github.com/ipld/go-codec-dagpb v1.3.2 h1:MZQUIjanHXXfDuYmtWYT8nFbqfFsZuyHClj6VDmSXr4= +github.com/ipld/go-codec-dagpb v1.3.2/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= +github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= +github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= +github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.16.0/go.mod h1:axSCuOCBPqrH+gvXr2w9uAOulJqBPhHPT2PjoiiU1qA= +github.com/ipld/go-ipld-prime v0.17.0 h1:+U2peiA3aQsE7mrXjD2nYZaZrCcakoz2Wge8K42Ld8g= +github.com/ipld/go-ipld-prime v0.17.0/go.mod h1:aYcKm5TIvGfY8P3QBKz/2gKcLxzJ1zDaD+o0bOowhgs= +github.com/ipld/go-ipld-prime-proto 
v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= +github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= +github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod 
h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3/go.mod h1:BYpt4ufZiIGv2nXn4gMxnfKV306n3mWXgNu/d2TqdTU= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 h1:51kHw7l/dUDdOdW06AlUGT5jnpj6nqQSILebcsikSjA= +github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kisielk/errcheck 
v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= +github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJuqPYs= +github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= +github.com/libp2p/go-eventbus v0.1.0/go.mod 
h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= +github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= +github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= +github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= +github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= +github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= +github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= +github.com/libp2p/go-libp2p v0.22.0 h1:2Tce0kHOp5zASFKJbNzRElvh0iZwdtG5uZheNW8chIw= +github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4= +github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= +github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= +github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod 
h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= +github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= +github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= +github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= +github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= +github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= +github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= +github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= +github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= +github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-core v0.0.4/go.mod 
h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= +github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= +github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= +github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= +github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= +github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= 
+github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.13.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.20.0 h1:PGKM74+T+O/FaZNARNW32i90RMBHCcgd/hkum2UQ5eY= +github.com/libp2p/go-libp2p-core v0.20.0/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= +github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= +github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= +github.com/libp2p/go-libp2p-kad-dht v0.18.0 h1:akqO3gPMwixR7qFSFq70ezRun97g5hrA/lBW9jrjUYM= +github.com/libp2p/go-libp2p-kad-dht v0.18.0/go.mod h1:Gb92MYIPm3K2pJLGn8wl0m8wiKDvHrYpg+rOd0GzzPA= +github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= +github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= +github.com/libp2p/go-libp2p-mplex 
v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= +github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= +github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= +github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= +github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= +github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= +github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod 
h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= +github.com/libp2p/go-libp2p-pubsub v0.8.0 h1:KygfDpaa9AeUPGCVcpVenpXNFauDn+5kBYu3EjcL3Tg= +github.com/libp2p/go-libp2p-pubsub v0.8.0/go.mod h1:e4kT+DYjzPUYGZeWk4I+oxCSYTXizzXii5LDRRhjKSw= +github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= +github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= +github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= +github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= +github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= +github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= +github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= +github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= +github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= +github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod 
h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= +github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= +github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= +github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= +github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= +github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= 
+github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= +github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= +github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= +github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= +github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= +github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= +github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= +github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= 
+github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= +github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= +github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= +github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= +github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= +github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= +github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= +github.com/libp2p/go-netroute 
v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= +github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= +github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= +github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= +github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= +github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= +github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= 
+github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= +github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= +github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= +github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= +github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= +github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= +github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= 
+github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= +github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= +github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= +github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= +github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.13.0 h1:XtLJl8bcCM7EFoO8FyH8XK3t7G5hQAeK+i4tq+veT9M= +github.com/magefile/mage v1.13.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= 
+github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= +github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= +github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= +github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= +github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 
h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= +github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= 
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.0.4 
h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= +github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= +github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= +github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= +github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= +github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg= +github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod 
h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= +github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= +github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod 
h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs= +github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= +github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= +github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= +github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= 
+github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= +github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= +github.com/nkovacs/streamquote v1.0.0 h1:PmVIV08Zlx2lZK5fFZlMZ04eHcDTIFJCv/5/0twVUow= +github.com/nkovacs/streamquote v1.0.0/go.mod 
h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/op/go-logging 
v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df h1:vdYtBU6zvL7v+Tr+0xFM/qhahw/EvY8DMMunZHKH6eE= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= 
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 
h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= 
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= +github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= +github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= +github.com/raulk/go-watchdog v1.2.0/go.mod 
h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod 
h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= +github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM= +github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod 
h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 
h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod 
h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod 
h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4= +github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4= +github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.10.0 h1:E86YlUMYfwIacEsQGlnTvjk1IgYkyTGjPhF0RnwTCmw= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/weaveworks/common 
v0.0.0-20200512154658-384f10054ec5 h1:EYxr08r8x6r/5fLEAMMkida1BVgxVXE4LfZv/XV+znU= +github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= +github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= +github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= +github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= 
+github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20220514204315-f29c37e9c44c h1:6VPKXBDRt7mDUyiHx9X8ROnPYFDf3L7OfEuKCI5dZDI= +github.com/whyrusleeping/cbor-gen v0.0.0-20220514204315-f29c37e9c44c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-logging v0.0.1 h1:fwpzlmT0kRC/Fmd0MdmGgJG/CXIZ6gFq46FQZjprUcc= +github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod 
h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= +github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= +go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= +go.dedis.ch/kyber/v3 
v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= +go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= +go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= +go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= +go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= +go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.7.0 
h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= +go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak 
v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= +go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +golang.org/x/build 
v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto 
v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod 
h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod 
h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.81.0 h1:o8WF5AvfidafWbFjsRyupxyEQJNUWxLZJCK5NXrxZZ8= +google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto 
v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod 
h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= +google.golang.org/genproto 
v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 
v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= 
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/venus-devtool/inline-gen/inlinegen-data.json b/venus-devtool/inline-gen/inlinegen-data.json new file mode 100644 index 0000000000..a55b82c483 --- /dev/null +++ b/venus-devtool/inline-gen/inlinegen-data.json @@ -0,0 +1,7 @@ +{ + "actorVersions": [0, 2, 3, 4, 5, 6, 7, 8, 9], + "latestActorsVersion": 9, + + "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], + "latestNetworkVersion": 17 +} \ No newline at end of file diff --git a/venus-devtool/inline-gen/main.go b/venus-devtool/inline-gen/main.go new file mode 100644 index 0000000000..de4d77a70d --- /dev/null +++ b/venus-devtool/inline-gen/main.go @@ -0,0 +1,128 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + "text/template" + + "github.com/filecoin-project/venus/venus-devtool/util" +) + +const ( + stateGlobal = iota + stateTemplate + stateGen +) + +func main() { + db, err := os.ReadFile(os.Args[2]) + if err != nil { + panic(err) + } + var data map[string]interface{} + if err := json.Unmarshal(db, &data); err != nil { + panic(err) + } + + err = filepath.WalkDir(os.Args[1], func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + if filepath.Ext(path) != ".go" { + return nil + } + fb, err := os.ReadFile(path) + if err != nil { + return err + } + + lines := strings.Split(string(fb), "\n") + + outLines := make([]string, 0, len(lines)) + var templateLines []string + + state := stateGlobal + + rewrite := false + + for i, line := range lines { + ln := i + 1 + switch state { + case stateGlobal: + outLines = append(outLines, line) + if strings.TrimSpace(line) == `/* inline-gen template` { + state = stateTemplate + fmt.Printf("template 
section start %s:%d\n", path, ln) + } + case stateTemplate: + outLines = append(outLines, line) // output all template lines + + if strings.TrimSpace(line) == `/* inline-gen start */` { + state = stateGen + fmt.Printf("generated section start %s:%d\n", path, ln) + continue + } + templateLines = append(templateLines, line) + case stateGen: + if strings.TrimSpace(line) != `/* inline-gen end */` { // empty line for goimports check + continue + } + fmt.Printf("generated section end %s:%d\n", path, ln) + + state = stateGlobal + rewrite = true + + tpl, err := template.New("").Funcs(template.FuncMap{ + "import": func(v float64) string { + if v == 0 { + return "/" + } + return fmt.Sprintf("/v%d/", int(v)) + }, + "add": func(a, b float64) float64 { + return a + b + }, + }).Parse(strings.Join(templateLines, "\n")) + if err != nil { + fmt.Printf("%s:%d: parsing template: %s\n", path, ln, err) + os.Exit(1) + } + + var b bytes.Buffer + err = tpl.Execute(&b, data) + if err != nil { + fmt.Printf("%s:%d: executing template: %s\n", path, ln, err) + os.Exit(1) + } + + outLines = append(outLines, strings.Split(b.String(), "\n")...) 
+ outLines = append(outLines, line) + templateLines = nil + } + } + + if rewrite { + fmt.Printf("write %s\n", path) + formatted, err := util.FmtFile("", []byte(strings.Join(outLines, "\n"))) + if err != nil { + return err + } + if err := os.WriteFile(path, formatted, 0o664); err != nil { + return err + } + } + + return nil + }) + if err != nil { + panic(err) + } +} diff --git a/venus-devtool/state-type-gen/main.go b/venus-devtool/state-type-gen/main.go new file mode 100644 index 0000000000..d12482faaa --- /dev/null +++ b/venus-devtool/state-type-gen/main.go @@ -0,0 +1,238 @@ +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/fs" + "os" + "sort" + "strings" + + "github.com/filecoin-project/venus/venus-devtool/util" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/urfave/cli/v2" +) + +func main() { + app := &cli.App{ + Name: "state-type-gen", + Usage: "generate types related codes for go-state-types", + EnableBashCompletion: true, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "dst", + }, + }, + Action: run, + } + + app.Setup() + + if err := app.Run(os.Args); err != nil { + fmt.Fprintf(os.Stderr, "ERR: %v\n", err) // nolint: errcheck + } +} + +var prePath = "github.com/filecoin-project/go-state-types/builtin" + +type pendingPkg struct { + name string + path string + ver actors.Version +} + +var pendingPkgs = func() map[string]*pendingPkg { + pkgs := make(map[string]*pendingPkg, 4) + list := []string{"market", "miner", "verifreg"} + pkgs["paych"] = &pendingPkg{ + name: "paych", + ver: actors.Version8, + path: fmt.Sprintf("%s/v%v/%s", prePath, actors.Version8, "paych"), + } + for _, pkgName := range list { + pkgs[pkgName] = &pendingPkg{ + name: pkgName, + ver: actors.Version(actors.LatestVersion), + path: fmt.Sprintf("%s/v%v/%s", prePath, actors.LatestVersion, pkgName), + } + } + + return pkgs +}() + +var ( + skips = map[string]struct{}{ + "State": {}, + "MinerInfo": {}, + "ConstructState": {}, + "Partition": 
{}, + "Deadline": {}, + } + skipFuncs = map[string]struct{}{ + "ConstructState": {}, + } + alias = map[string][]struct { + pkgName string + newName string + }{ + "WithdrawBalanceParams": { + {pkgName: "market", newName: "MarketWithdrawBalanceParams"}, + {pkgName: "miner", newName: "MinerWithdrawBalanceParams"}, + }, + } +) + +func run(cctx *cli.Context) error { + metas := make([]*metaVisitor, 0, len(pendingPkgs)) + for _, pkg := range toList(pendingPkgs) { + location, err := util.FindPackageLocation(pkg.path) + if err != nil { + return err + } + + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, location, filter, parser.AllErrors|parser.ParseComments) + if err != nil { + return err + } + + visitor := &metaVisitor{ + pkgName: pkg.name, + } + for _, pkg := range pkgs { + for _, file := range pkg.Files { + ast.Walk(visitor, file) + } + } + + sort.Slice(visitor.f, func(i, j int) bool { + return visitor.f[i] < visitor.f[j] + }) + sort.Slice(visitor.t, func(i, j int) bool { + return visitor.t[i] < visitor.t[j] + }) + sort.Slice(visitor.con, func(i, j int) bool { + return visitor.con[i] < visitor.con[j] + }) + metas = append(metas, visitor) + } + + return writeFile(cctx.String("dst"), metas) +} + +func toList(pkgs map[string]*pendingPkg) []*pendingPkg { + list := make([]*pendingPkg, 0, len(pkgs)) + for _, pkg := range pkgs { + list = append(list, pkg) + } + sort.Slice(list, func(i, j int) bool { + return list[i].name < list[j].name + }) + + return list +} + +func filter(fi fs.FileInfo) bool { + if strings.Contains(fi.Name(), "cbor_gen.go") { + return false + } + if strings.Contains(fi.Name(), "_test.go") { + return false + } + if strings.Contains(fi.Name(), "invariants.go") { + return false + } + if strings.Contains(fi.Name(), "methods.go") { + return false + } + return true +} + +type metaVisitor struct { + pkgName string + f []string // function + t []string // type + con []string // const +} + +func (v *metaVisitor) Visit(node ast.Node) (w ast.Visitor) { 
+ if st, ok := node.(*ast.TypeSpec); ok { + if !st.Name.IsExported() { + return v + } + + name := st.Name.Name + _, ok = st.Type.(*ast.StructType) + _, ok3 := st.Type.(*ast.Ident) + if _, ok2 := skips[name]; !ok2 && (ok || ok3) { + v.t = append(v.t, name) + } + } else if ft, ok := node.(*ast.FuncDecl); ok { + if !ft.Name.IsExported() || ft.Recv != nil { + return v + } + + name := ft.Name.Name + if _, ok := skipFuncs[name]; !ok { + v.f = append(v.f, name) + } + } else if vt, ok := node.(*ast.ValueSpec); ok { + if !vt.Names[0].IsExported() || len(vt.Names) == 0 { + return v + } + if vt.Names[0].Obj != nil && vt.Names[0].Obj.Kind == ast.Con { + v.con = append(v.con, vt.Names[0].Name) + } + } + + return v +} + +func writeFile(dst string, metas []*metaVisitor) error { + var fileBuffer bytes.Buffer + fmt.Fprintf(&fileBuffer, "// Code generated by github.com/filecoin-project/venus/venus-devtool/state-type-gen. DO NOT EDIT.\npackage %s\n\n", "types") + + // write import + fmt.Fprintln(&fileBuffer, "import (") + for _, meta := range metas { + fmt.Fprintf(&fileBuffer, "\"%v\"\n", pendingPkgs[meta.pkgName].path) + } + fmt.Fprintln(&fileBuffer, ")\n") + + for _, meta := range metas { + fmt.Fprintf(&fileBuffer, "////////// %s //////////\n", meta.pkgName) + genDetail(&fileBuffer, meta.con, "const", meta.pkgName) + genDetail(&fileBuffer, meta.t, "type", meta.pkgName) + genDetail(&fileBuffer, meta.f, "var", meta.pkgName) + fmt.Fprintln(&fileBuffer, "\n") + } + + formatedBuf, err := util.FmtFile("", fileBuffer.Bytes()) + if err != nil { + return err + } + + return os.WriteFile(dst, formatedBuf, 0o755) +} + +func genDetail(buf *bytes.Buffer, list []string, typ string, pkgName string) { + if len(list) == 0 { + return + } + fmt.Fprintf(buf, "%s (\n", typ) + for _, one := range list { + if vals, ok := alias[one]; ok { + for _, val := range vals { + if val.pkgName == pkgName { + fmt.Fprintf(buf, "\t%s = %s.%s\n", val.newName, pkgName, one) + } + } + } else { + fmt.Fprintf(buf, "\t%s = 
%s.%s\n", one, pkgName, one) + } + } + fmt.Fprintln(buf, ")") +} diff --git a/venus-devtool/util/api_meta.go b/venus-devtool/util/api_meta.go new file mode 100644 index 0000000000..a43640adec --- /dev/null +++ b/venus-devtool/util/api_meta.go @@ -0,0 +1,94 @@ +package util + +import ( + "reflect" + "strings" + + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/api/v1api" + + v0 "github.com/filecoin-project/venus/venus-shared/api/chain/v0" + v1 "github.com/filecoin-project/venus/venus-shared/api/chain/v1" +) + +var ChainAPIPairs = []struct { + Ver int + Lotus APIMeta + Venus APIMeta +}{ + { + Ver: 0, + Lotus: APIMeta{ + Type: reflect.TypeOf((*v0api.FullNode)(nil)).Elem(), + ParseOpt: InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/lotus/api/v0api", + Included: []string{"FullNode", "Common", "Net"}, + }, + }, + Venus: APIMeta{ + Type: reflect.TypeOf((*v0.FullNode)(nil)).Elem(), + ParseOpt: InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api/chain/v0", + IncludeAll: true, + }, + RPCMeta: RPCMeta{ + Version: 0, + }, + }, + }, + { + Ver: 1, + Lotus: APIMeta{ + Type: reflect.TypeOf((*v1api.FullNode)(nil)).Elem(), + ParseOpt: InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/lotus/api", + Included: []string{"FullNode", "Common", "Net"}, + }, + }, + Venus: APIMeta{ + Type: reflect.TypeOf((*v1.FullNode)(nil)).Elem(), + ParseOpt: InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api/chain/v1", + IncludeAll: true, + }, + RPCMeta: RPCMeta{ + Version: 1, + }, + }, + }, +} + +var LatestChainAPIPair = ChainAPIPairs[len(ChainAPIPairs)-1] + +type RPCMeta struct { + Version uint32 + Namespace string + MethodNamespace string +} + +type APIMeta struct { + Type reflect.Type + ParseOpt InterfaceParseOption + RPCMeta +} + +func GetAPIMethodPerm(m InterfaceMethodMeta) string { + permStr := "" + + if cmtNum := len(m.Comments); cmtNum > 0 { + if itemNum := 
len(m.Comments[cmtNum-1].List); itemNum > 0 { + if strings.HasPrefix(m.Comments[cmtNum-1].List[0].Text, "//") { + permStr = m.Comments[cmtNum-1].List[0].Text[2:] + } + } + } + + for _, piece := range strings.Split(permStr, " ") { + trimmed := strings.TrimSpace(piece) + if strings.HasPrefix(trimmed, "perm:") { + return trimmed[5:] + } + } + + return "" +} diff --git a/venus-devtool/util/fmt.go b/venus-devtool/util/fmt.go new file mode 100644 index 0000000000..d47f51db50 --- /dev/null +++ b/venus-devtool/util/fmt.go @@ -0,0 +1,7 @@ +package util + +import "golang.org/x/tools/imports" + +func FmtFile(path string, src []byte) ([]byte, error) { + return imports.Process(path, src, nil) +} diff --git a/venus-devtool/util/import.go b/venus-devtool/util/import.go new file mode 100644 index 0000000000..906c73b795 --- /dev/null +++ b/venus-devtool/util/import.go @@ -0,0 +1,48 @@ +package util + +import ( + "go/build" + "sync" +) + +type PackageBuildInfo struct { + *build.Package + Err error +} + +var pkgCache = struct { + sync.RWMutex + found map[string]PackageBuildInfo +}{ + found: map[string]PackageBuildInfo{}, +} + +func FindPackage(importPath string) PackageBuildInfo { + pkgCache.RLock() + found, ok := pkgCache.found[importPath] + pkgCache.RUnlock() + + if !ok { + pkgCache.Lock() + pkg, err := build.Import(importPath, ".", 0) + + found = PackageBuildInfo{ + Package: pkg, + Err: err, + } + + pkgCache.found[importPath] = found + pkgCache.Unlock() + } + + return found +} + +func FindPackageLocation(importPath string) (string, error) { + found := FindPackage(importPath) + if found.Err != nil { + return "", found.Err + } + + return found.Dir, nil +} diff --git a/venus-devtool/util/interface.go b/venus-devtool/util/interface.go new file mode 100644 index 0000000000..659db4e183 --- /dev/null +++ b/venus-devtool/util/interface.go @@ -0,0 +1,248 @@ +package util + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strings" +) + +type ASTMeta struct { + Location 
string + *token.FileSet +} + +type InterfaceParseOption struct { + ImportPath string + IncludeAll bool + Included []string + ResolveImports bool +} + +type PackageMeta struct { + Name string + *ast.Package +} + +type ImportMeta struct { + Path string + IsStd bool +} + +type FileMeta struct { + Name string + *ast.File + Imports map[string]ImportMeta +} + +type InterfaceMeta struct { + Pkg PackageMeta + File FileMeta + Name string + Defined []InterfaceMethodMeta + Nested []string +} + +type InterfaceMethodMeta struct { + Name string + Node ast.Node + FuncType *ast.FuncType + Comments []*ast.CommentGroup +} + +type ifaceMetaVisitor struct { + pkg PackageMeta + file FileMeta + included map[string]struct{} + includAll bool + comments ast.CommentMap + ifaces []*InterfaceMeta + ifaceIdxes map[string]int +} + +func (iv *ifaceMetaVisitor) Visit(node ast.Node) ast.Visitor { + st, ok := node.(*ast.TypeSpec) + if !ok { + return iv + } + + iface, ok := st.Type.(*ast.InterfaceType) + if !ok { + return iv + } + + if _, yes := iv.included[st.Name.Name]; !yes && !iv.includAll { + return iv + } + + ifaceIdx, ok := iv.ifaceIdxes[st.Name.Name] + if !ok { + ifaceIdx = len(iv.ifaces) + iv.ifaces = append(iv.ifaces, &InterfaceMeta{ + Pkg: iv.pkg, + File: iv.file, + Name: st.Name.Name, + }) + } + + ifaceMeta := iv.ifaces[ifaceIdx] + + for _, m := range iface.Methods.List { + switch meth := m.Type.(type) { + case *ast.Ident: + ifaceMeta.Nested = append(ifaceMeta.Nested, meth.Name) + + case *ast.FuncType: + ifaceMeta.Defined = append(ifaceMeta.Defined, InterfaceMethodMeta{ + Name: m.Names[0].Name, + Node: m, + FuncType: meth, + Comments: iv.comments.Filter(m).Comments(), + }) + case *ast.SelectorExpr: + methodName := meth.Sel.Name + methodMeta := extraInterfaceMethodMeta[methodName] + ifaceMeta.Defined = append(ifaceMeta.Defined, InterfaceMethodMeta{ + Name: methodName, + Node: m, + FuncType: methodMeta.FuncType, + Comments: methodMeta.Comments, + }) + for k, importMeta := range 
extraImports[methodName] { + ifaceMeta.File.Imports[k] = importMeta + } + } + } + + return iv +} + +func genFileMeta(name string, file *ast.File, resolveImports bool) (FileMeta, error) { + imports := map[string]ImportMeta{} + if resolveImports { + for _, imp := range file.Imports { + importPath := imp.Path.Value[1 : len(imp.Path.Value)-1] + found := FindPackage(importPath) + if found.Err != nil { + return FileMeta{}, fmt.Errorf("find package for %s: %w", importPath, found.Err) + } + + importMeta := ImportMeta{ + Path: importPath, + IsStd: strings.HasPrefix(found.Dir, found.SrcRoot), + } + + if imp.Name != nil && imp.Name.Name != "" { + imports[imp.Name.Name] = importMeta + } else { + imports[found.Name] = importMeta + } + } + } + + return FileMeta{ + Name: name, + File: file, + Imports: imports, + }, nil +} + +func ParseInterfaceMetas(opt InterfaceParseOption) ([]*InterfaceMeta, *ASTMeta, error) { + location, err := FindPackageLocation(opt.ImportPath) + if err != nil { + return nil, nil, err + } + + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, location, nil, parser.AllErrors|parser.ParseComments) + if err != nil { + return nil, nil, err + } + + var metas []*InterfaceMeta + + included := map[string]struct{}{} + for _, one := range opt.Included { + included[one] = struct{}{} + } + + for pname, pkg := range pkgs { + if strings.HasSuffix(pname, "_test") { + continue + } + + visitor := &ifaceMetaVisitor{ + pkg: PackageMeta{ + Name: pname, + Package: pkg, + }, + included: included, + includAll: opt.IncludeAll, + ifaceIdxes: map[string]int{}, + } + + for fname, file := range pkg.Files { + fileMeta, err := genFileMeta(fname, file, opt.ResolveImports) + if err != nil { + return nil, nil, fmt.Errorf("gen file meta for %s: %w", fname, err) + } + + visitor.file = fileMeta + visitor.comments = ast.NewCommentMap(fset, file, file.Comments) + ast.Walk(visitor, file) + } + + metas = append(metas, visitor.ifaces...) 
+ } + + sort.Slice(metas, func(i, j int) bool { + if metas[i].Pkg != metas[j].Pkg { + return metas[i].Pkg.Name < metas[j].Pkg.Name + } + + if metas[i].File.Name != metas[j].File.Name { + return metas[i].File.Name < metas[j].File.Name + } + + return metas[i].Name < metas[j].Name + }) + + for mi := range metas { + sort.Slice(metas[mi].Defined, func(i, j int) bool { + return metas[mi].Defined[i].Name < metas[mi].Defined[j].Name + }) + } + + return metas, &ASTMeta{ + Location: location, + FileSet: fset, + }, nil +} + +var ( + extraInterfaceMethodMeta = make(map[string]InterfaceMethodMeta) + extraImports = make(map[string]map[string]ImportMeta) +) + +func LoadExtraInterfaceMeta() error { + opt := InterfaceParseOption{ + ImportPath: "github.com/filecoin-project/venus/venus-shared/api", + Included: []string{"Version"}, + ResolveImports: true, + } + + ifaceMetas, _, err := ParseInterfaceMetas(opt) + if err != nil { + return err + } + for _, meta := range ifaceMetas { + for _, methodMeta := range meta.Defined { + extraInterfaceMethodMeta[methodMeta.Name] = methodMeta + extraImports[methodMeta.Name] = meta.File.Imports + } + } + + return nil +} diff --git a/venus-shared/TODO.md b/venus-shared/TODO.md new file mode 100644 index 0000000000..5cce174343 --- /dev/null +++ b/venus-shared/TODO.md @@ -0,0 +1,6 @@ +- [ ] tests in actors +- [ ] venus-market + - [ ] 类型考虑再细分到不同的命名空间下 + - [ ] 对于 `venus-messager`、`venus-gateway` 的部分接口转发需要考虑更优雅的方式 + +- [ ] venus-devtool 中的 `lotus` 需要保持更新 diff --git a/venus-shared/actors/actor_cids.go b/venus-shared/actors/actor_cids.go new file mode 100644 index 0000000000..4be79b6eab --- /dev/null +++ b/venus-shared/actors/actor_cids.go @@ -0,0 +1,330 @@ +package actors + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 
"github.com/filecoin-project/specs-actors/v3/actors/builtin" + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" +) + +// GetActorCodeID looks up a builtin actor's code CID by actor version and canonical actor name. +func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) { + + // Actors V8 and above + if av >= actorstypes.Version8 { + if cids, ok := GetActorCodeIDsFromManifest(av); ok { + c, ok := cids[name] + return c, ok + } + } + + // Actors V7 and lower + switch name { + + case AccountKey: + switch av { + + case actorstypes.Version0: + return builtin0.AccountActorCodeID, true + + case actorstypes.Version2: + return builtin2.AccountActorCodeID, true + + case actorstypes.Version3: + return builtin3.AccountActorCodeID, true + + case actorstypes.Version4: + return builtin4.AccountActorCodeID, true + + case actorstypes.Version5: + return builtin5.AccountActorCodeID, true + + case actorstypes.Version6: + return builtin6.AccountActorCodeID, true + + case actorstypes.Version7: + return builtin7.AccountActorCodeID, true + } + + case CronKey: + switch av { + + case actorstypes.Version0: + return builtin0.CronActorCodeID, true + + case actorstypes.Version2: + return builtin2.CronActorCodeID, true + + case actorstypes.Version3: + return builtin3.CronActorCodeID, true + + case actorstypes.Version4: + return builtin4.CronActorCodeID, true + + case actorstypes.Version5: + return builtin5.CronActorCodeID, true + + case actorstypes.Version6: + return builtin6.CronActorCodeID, true + + case actorstypes.Version7: + return builtin7.CronActorCodeID, true + } + + case InitKey: + switch av { + + case actorstypes.Version0: + return builtin0.InitActorCodeID, true + + case actorstypes.Version2: + return builtin2.InitActorCodeID, true + + 
case actorstypes.Version3: + return builtin3.InitActorCodeID, true + + case actorstypes.Version4: + return builtin4.InitActorCodeID, true + + case actorstypes.Version5: + return builtin5.InitActorCodeID, true + + case actorstypes.Version6: + return builtin6.InitActorCodeID, true + + case actorstypes.Version7: + return builtin7.InitActorCodeID, true + } + + case MarketKey: + switch av { + + case actorstypes.Version0: + return builtin0.StorageMarketActorCodeID, true + + case actorstypes.Version2: + return builtin2.StorageMarketActorCodeID, true + + case actorstypes.Version3: + return builtin3.StorageMarketActorCodeID, true + + case actorstypes.Version4: + return builtin4.StorageMarketActorCodeID, true + + case actorstypes.Version5: + return builtin5.StorageMarketActorCodeID, true + + case actorstypes.Version6: + return builtin6.StorageMarketActorCodeID, true + + case actorstypes.Version7: + return builtin7.StorageMarketActorCodeID, true + } + + case MinerKey: + switch av { + + case actorstypes.Version0: + return builtin0.StorageMinerActorCodeID, true + + case actorstypes.Version2: + return builtin2.StorageMinerActorCodeID, true + + case actorstypes.Version3: + return builtin3.StorageMinerActorCodeID, true + + case actorstypes.Version4: + return builtin4.StorageMinerActorCodeID, true + + case actorstypes.Version5: + return builtin5.StorageMinerActorCodeID, true + + case actorstypes.Version6: + return builtin6.StorageMinerActorCodeID, true + + case actorstypes.Version7: + return builtin7.StorageMinerActorCodeID, true + } + + case MultisigKey: + switch av { + + case actorstypes.Version0: + return builtin0.MultisigActorCodeID, true + + case actorstypes.Version2: + return builtin2.MultisigActorCodeID, true + + case actorstypes.Version3: + return builtin3.MultisigActorCodeID, true + + case actorstypes.Version4: + return builtin4.MultisigActorCodeID, true + + case actorstypes.Version5: + return builtin5.MultisigActorCodeID, true + + case actorstypes.Version6: + return 
builtin6.MultisigActorCodeID, true + + case actorstypes.Version7: + return builtin7.MultisigActorCodeID, true + } + + case PaychKey: + switch av { + + case actorstypes.Version0: + return builtin0.PaymentChannelActorCodeID, true + + case actorstypes.Version2: + return builtin2.PaymentChannelActorCodeID, true + + case actorstypes.Version3: + return builtin3.PaymentChannelActorCodeID, true + + case actorstypes.Version4: + return builtin4.PaymentChannelActorCodeID, true + + case actorstypes.Version5: + return builtin5.PaymentChannelActorCodeID, true + + case actorstypes.Version6: + return builtin6.PaymentChannelActorCodeID, true + + case actorstypes.Version7: + return builtin7.PaymentChannelActorCodeID, true + } + + case PowerKey: + switch av { + + case actorstypes.Version0: + return builtin0.StoragePowerActorCodeID, true + + case actorstypes.Version2: + return builtin2.StoragePowerActorCodeID, true + + case actorstypes.Version3: + return builtin3.StoragePowerActorCodeID, true + + case actorstypes.Version4: + return builtin4.StoragePowerActorCodeID, true + + case actorstypes.Version5: + return builtin5.StoragePowerActorCodeID, true + + case actorstypes.Version6: + return builtin6.StoragePowerActorCodeID, true + + case actorstypes.Version7: + return builtin7.StoragePowerActorCodeID, true + } + + case RewardKey: + switch av { + + case actorstypes.Version0: + return builtin0.RewardActorCodeID, true + + case actorstypes.Version2: + return builtin2.RewardActorCodeID, true + + case actorstypes.Version3: + return builtin3.RewardActorCodeID, true + + case actorstypes.Version4: + return builtin4.RewardActorCodeID, true + + case actorstypes.Version5: + return builtin5.RewardActorCodeID, true + + case actorstypes.Version6: + return builtin6.RewardActorCodeID, true + + case actorstypes.Version7: + return builtin7.RewardActorCodeID, true + } + + case SystemKey: + switch av { + + case actorstypes.Version0: + return builtin0.SystemActorCodeID, true + + case actorstypes.Version2: + 
return builtin2.SystemActorCodeID, true + + case actorstypes.Version3: + return builtin3.SystemActorCodeID, true + + case actorstypes.Version4: + return builtin4.SystemActorCodeID, true + + case actorstypes.Version5: + return builtin5.SystemActorCodeID, true + + case actorstypes.Version6: + return builtin6.SystemActorCodeID, true + + case actorstypes.Version7: + return builtin7.SystemActorCodeID, true + } + + case VerifregKey: + switch av { + + case actorstypes.Version0: + return builtin0.VerifiedRegistryActorCodeID, true + + case actorstypes.Version2: + return builtin2.VerifiedRegistryActorCodeID, true + + case actorstypes.Version3: + return builtin3.VerifiedRegistryActorCodeID, true + + case actorstypes.Version4: + return builtin4.VerifiedRegistryActorCodeID, true + + case actorstypes.Version5: + return builtin5.VerifiedRegistryActorCodeID, true + + case actorstypes.Version6: + return builtin6.VerifiedRegistryActorCodeID, true + + case actorstypes.Version7: + return builtin7.VerifiedRegistryActorCodeID, true + } + } + + return cid.Undef, false +} + +// GetActorCodeIDs looks up all builtin actor's code CIDs by actor version. 
+func GetActorCodeIDs(av actorstypes.Version) (map[string]cid.Cid, error) { + cids, ok := GetActorCodeIDsFromManifest(av) + if ok { + return cids, nil + } + + actorsKeys := GetBuiltinActorsKeys(av) + synthCids := make(map[string]cid.Cid) + + for _, key := range actorsKeys { + c, ok := GetActorCodeID(av, key) + if !ok { + return nil, fmt.Errorf("could not find builtin actor cids for Actors version %d", av) + } + synthCids[key] = c + } + + return synthCids, nil +} diff --git a/venus-shared/actors/adt/adt.go b/venus-shared/actors/adt/adt.go new file mode 100644 index 0000000000..6cebb1e0e6 --- /dev/null +++ b/venus-shared/actors/adt/adt.go @@ -0,0 +1,31 @@ +// FETCHED FROM LOTUS: adt/adt.go + +package adt + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" +) + +type Map interface { + Root() (cid.Cid, error) + + Put(k abi.Keyer, v cbor.Marshaler) error + Get(k abi.Keyer, v cbor.Unmarshaler) (bool, error) + Delete(k abi.Keyer) error + + ForEach(v cbor.Unmarshaler, fn func(key string) error) error +} + +type Array interface { + Root() (cid.Cid, error) + + Set(idx uint64, v cbor.Marshaler) error + Get(idx uint64, v cbor.Unmarshaler) (bool, error) + Delete(idx uint64) error + Length() uint64 + + ForEach(v cbor.Unmarshaler, fn func(idx int64) error) error +} diff --git a/venus-shared/actors/adt/diff_adt.go b/venus-shared/actors/adt/diff_adt.go new file mode 100644 index 0000000000..3b7795a186 --- /dev/null +++ b/venus-shared/actors/adt/diff_adt.go @@ -0,0 +1,125 @@ +// FETCHED FROM LOTUS: adt/diff_adt.go + +package adt + +import ( + "bytes" + + typegen "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" +) + +// AdtArrayDiff generalizes adt.Array diffing by accepting a Deferred type that can unmarshalled to its corresponding struct +// in an interface implantation. 
+// Add should be called when a new k,v is added to the array +// Modify should be called when a value is modified in the array +// Remove should be called when a value is removed from the array +type AdtArrayDiff interface { + Add(key uint64, val *typegen.Deferred) error + Modify(key uint64, from, to *typegen.Deferred) error + Remove(key uint64, val *typegen.Deferred) error +} + +// TODO Performance can be improved by diffing the underlying IPLD graph, e.g. https://github.com/ipfs/go-merkledag/blob/749fd8717d46b4f34c9ce08253070079c89bc56d/dagutils/diff.go#L104 +// CBOR Marshaling will likely be the largest performance bottleneck here. + +// DiffAdtArray accepts two *adt.Array's and an AdtArrayDiff implementation. It does the following: +// - All values that exist in preArr and not in curArr are passed to AdtArrayDiff.Remove() +// - All values that exist in curArr nnd not in prevArr are passed to adtArrayDiff.Add() +// - All values that exist in preArr and in curArr are passed to AdtArrayDiff.Modify() +// - It is the responsibility of AdtArrayDiff.Modify() to determine if the values it was passed have been modified. 
+func DiffAdtArray(preArr, curArr Array, out AdtArrayDiff) error { + notNew := make(map[int64]struct{}, curArr.Length()) + prevVal := new(typegen.Deferred) + if err := preArr.ForEach(prevVal, func(i int64) error { + curVal := new(typegen.Deferred) + found, err := curArr.Get(uint64(i), curVal) + if err != nil { + return err + } + if !found { + if err := out.Remove(uint64(i), prevVal); err != nil { + return err + } + return nil + } + + // no modification + if !bytes.Equal(prevVal.Raw, curVal.Raw) { + if err := out.Modify(uint64(i), prevVal, curVal); err != nil { + return err + } + } + notNew[i] = struct{}{} + return nil + }); err != nil { + return err + } + + curVal := new(typegen.Deferred) + return curArr.ForEach(curVal, func(i int64) error { + if _, ok := notNew[i]; ok { + return nil + } + return out.Add(uint64(i), curVal) + }) +} + +// TODO Performance can be improved by diffing the underlying IPLD graph, e.g. https://github.com/ipfs/go-merkledag/blob/749fd8717d46b4f34c9ce08253070079c89bc56d/dagutils/diff.go#L104 +// CBOR Marshaling will likely be the largest performance bottleneck here. + +// AdtMapDiff generalizes adt.Map diffing by accepting a Deferred type that can unmarshalled to its corresponding struct +// in an interface implantation. 
+// AsKey should return the Keyer implementation specific to the map +// Add should be called when a new k,v is added to the map +// Modify should be called when a value is modified in the map +// Remove should be called when a value is removed from the map +type AdtMapDiff interface { + AsKey(key string) (abi.Keyer, error) + Add(key string, val *typegen.Deferred) error + Modify(key string, from, to *typegen.Deferred) error + Remove(key string, val *typegen.Deferred) error +} + +func DiffAdtMap(preMap, curMap Map, out AdtMapDiff) error { + notNew := make(map[string]struct{}) + prevVal := new(typegen.Deferred) + if err := preMap.ForEach(prevVal, func(key string) error { + curVal := new(typegen.Deferred) + k, err := out.AsKey(key) + if err != nil { + return err + } + + found, err := curMap.Get(k, curVal) + if err != nil { + return err + } + if !found { + if err := out.Remove(key, prevVal); err != nil { + return err + } + return nil + } + + // no modification + if !bytes.Equal(prevVal.Raw, curVal.Raw) { + if err := out.Modify(key, prevVal, curVal); err != nil { + return err + } + } + notNew[key] = struct{}{} + return nil + }); err != nil { + return err + } + + curVal := new(typegen.Deferred) + return curMap.ForEach(curVal, func(key string) error { + if _, ok := notNew[key]; ok { + return nil + } + return out.Add(key, curVal) + }) +} diff --git a/venus-shared/actors/adt/store.go b/venus-shared/actors/adt/store.go new file mode 100644 index 0000000000..722d76b69e --- /dev/null +++ b/venus-shared/actors/adt/store.go @@ -0,0 +1,20 @@ +// FETCHED FROM LOTUS: adt/store.go + +package adt + +import ( + "context" + + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +type Store interface { + Context() context.Context + cbor.IpldStore +} + +func WrapStore(ctx context.Context, store cbor.IpldStore) Store { + return adt.WrapStore(ctx, store) +} diff --git a/venus-shared/actors/aerrors/error.go 
b/venus-shared/actors/aerrors/error.go new file mode 100644 index 0000000000..df5f23a817 --- /dev/null +++ b/venus-shared/actors/aerrors/error.go @@ -0,0 +1,71 @@ +// FETCHED FROM LOTUS: aerrors/error.go + +package aerrors + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/exitcode" + "golang.org/x/xerrors" +) + +func IsFatal(err ActorError) bool { + return err != nil && err.IsFatal() +} +func RetCode(err ActorError) exitcode.ExitCode { + if err == nil { + return 0 + } + return err.RetCode() +} + +type internalActorError interface { + ActorError + FormatError(p xerrors.Printer) (next error) + Unwrap() error +} + +type ActorError interface { + error + IsFatal() bool + RetCode() exitcode.ExitCode +} + +type actorError struct { + fatal bool + retCode exitcode.ExitCode + + msg string + frame xerrors.Frame + err error +} + +func (e *actorError) IsFatal() bool { + return e.fatal +} + +func (e *actorError) RetCode() exitcode.ExitCode { + return e.retCode +} + +func (e *actorError) Error() string { + return fmt.Sprint(e) +} +func (e *actorError) Format(s fmt.State, v rune) { xerrors.FormatError(e, s, v) } +func (e *actorError) FormatError(p xerrors.Printer) (next error) { + p.Print(e.msg) + if e.fatal { + p.Print(" (FATAL)") + } else { + p.Printf(" (RetCode=%d)", e.retCode) + } + + e.frame.Format(p) + return e.err +} + +func (e *actorError) Unwrap() error { + return e.err +} + +var _ internalActorError = (*actorError)(nil) diff --git a/venus-shared/actors/aerrors/wrap.go b/venus-shared/actors/aerrors/wrap.go new file mode 100644 index 0000000000..70d6ceb224 --- /dev/null +++ b/venus-shared/actors/aerrors/wrap.go @@ -0,0 +1,205 @@ +// FETCHED FROM LOTUS: aerrors/wrap.go + +package aerrors + +import ( + "errors" + "fmt" + + "github.com/filecoin-project/go-state-types/exitcode" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" +) + +// New creates a new non-fatal error +func New(retCode exitcode.ExitCode, message string) ActorError { + if retCode 
== 0 { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried creating an error and setting RetCode to 0", + frame: xerrors.Caller(1), + err: errors.New(message), + } + } + return &actorError{ + retCode: retCode, + + msg: message, + frame: xerrors.Caller(1), + } +} + +// Newf creates a new non-fatal error +func Newf(retCode exitcode.ExitCode, format string, args ...interface{}) ActorError { + if retCode == 0 { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried creating an error and setting RetCode to 0", + frame: xerrors.Caller(1), + err: fmt.Errorf(format, args...), + } + } + return &actorError{ + retCode: retCode, + + msg: fmt.Sprintf(format, args...), + frame: xerrors.Caller(1), + } +} + +// todo: bit hacky + +func NewfSkip(skip int, retCode exitcode.ExitCode, format string, args ...interface{}) ActorError { + if retCode == 0 { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried creating an error and setting RetCode to 0", + frame: xerrors.Caller(skip), + err: fmt.Errorf(format, args...), + } + } + return &actorError{ + retCode: retCode, + + msg: fmt.Sprintf(format, args...), + frame: xerrors.Caller(skip), + } +} + +func Fatal(message string, args ...interface{}) ActorError { + return &actorError{ + fatal: true, + msg: message, + frame: xerrors.Caller(1), + } +} + +func Fatalf(format string, args ...interface{}) ActorError { + return &actorError{ + fatal: true, + msg: fmt.Sprintf(format, args...), + frame: xerrors.Caller(1), + } +} + +// Wrap extens chain of errors with a message +func Wrap(err ActorError, message string) ActorError { + if err == nil { + return nil + } + return &actorError{ + fatal: IsFatal(err), + retCode: RetCode(err), + + msg: message, + frame: xerrors.Caller(1), + err: err, + } +} + +// Wrapf extens chain of errors with a message +func Wrapf(err ActorError, format string, args ...interface{}) ActorError { + if err == nil { + return nil + } + return &actorError{ + fatal: IsFatal(err), + retCode: 
RetCode(err), + + msg: fmt.Sprintf(format, args...), + frame: xerrors.Caller(1), + err: err, + } +} + +// Absorb takes and error and makes in not fatal ActorError +func Absorb(err error, retCode exitcode.ExitCode, msg string) ActorError { + if err == nil { + return nil + } + if aerr, ok := err.(ActorError); ok && IsFatal(aerr) { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried absorbing an error that is already a fatal error", + frame: xerrors.Caller(1), + err: err, + } + } + if retCode == 0 { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried absorbing an error and setting RetCode to 0", + frame: xerrors.Caller(1), + err: err, + } + } + + return &actorError{ + fatal: false, + retCode: retCode, + + msg: msg, + frame: xerrors.Caller(1), + err: err, + } +} + +// Escalate takes and error and escalates it into a fatal error +func Escalate(err error, msg string) ActorError { + if err == nil { + return nil + } + return &actorError{ + fatal: true, + + msg: msg, + frame: xerrors.Caller(1), + err: err, + } +} + +func HandleExternalError(err error, msg string) ActorError { + if err == nil { + return nil + } + + if aerr, ok := err.(ActorError); ok { + return &actorError{ + fatal: IsFatal(aerr), + retCode: RetCode(aerr), + + msg: msg, + frame: xerrors.Caller(1), + err: aerr, + } + } + + if errors.Is(err, &cbor.SerializationError{}) { + return &actorError{ + fatal: false, + retCode: 253, + msg: msg, + frame: xerrors.Caller(1), + err: err, + } + } + + return &actorError{ + fatal: false, + retCode: 219, + + msg: msg, + frame: xerrors.Caller(1), + err: err, + } +} diff --git a/venus-shared/actors/builtin-actors-code/README.md b/venus-shared/actors/builtin-actors-code/README.md new file mode 100644 index 0000000000..981ad726a1 --- /dev/null +++ b/venus-shared/actors/builtin-actors-code/README.md @@ -0,0 +1,26 @@ +# Bundles + +This directory includes the actors bundles for each release. 
Each actor bundle is a zstd compressed +tarfile containing one bundle per network type. These tarfiles are subsequently embedded in the +lotus binary. + +## Updating + +To update, run the `./pack.sh` script. For example, the following will pack the [builtin actors release](https://github.com/filecoin-project/builtin-actors/releases) `dev/20220602` into the `v8` tarfile. + +```bash +./pack.sh v8 dev/20220602 +``` + +This will: + +1. Download the actors bundles and pack them into the appropriate tarfile (`$VERSION.tar.zst`). +2. Run `make bundle-gen` in the top-level directory to regenerate the bundle metadata file for _all_ network versions (all `*.tar.zst` files in this directory). + +## Overriding + +To build a bundle, but specify a different release/tag for a specific network, append `$network=$alternative_release` on the command line. For example: + +```bash +./pack.sh v8 dev/20220602 mainnet=v8.0.0 calibrationnet=v8.0.0-rc.1 +``` diff --git a/venus-shared/actors/builtin-actors-code/pack.sh b/venus-shared/actors/builtin-actors-code/pack.sh new file mode 100755 index 0000000000..8c6512c8b3 --- /dev/null +++ b/venus-shared/actors/builtin-actors-code/pack.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +NETWORKS=(devnet mainnet caterpillarnet butterflynet testing testing-fake-proofs calibrationnet) + +set -e + +if [[ $# -lt 2 ]]; then + echo "Usage: $0 VERSION RELEASE [NETWORK=RELEASE_OVERRIDE]..." >&2 + echo "expected at least two arguments, an actors version (e.g., v8), an actors release, and any number of release overrides." 
>&2 + exit 1 +fi + +VERSION="$1" # actors version +RELEASE="$2" # actors release name +RELEASE_OVERRIDES=("${@:3}") + +echo "Downloading bundles for actors version ${VERSION} release ${RELEASE}" +echo "With release overrides ${RELEASE_OVERRIDES[*]}" + +TARGET_FILE="$(pwd)/${VERSION}.tar.zst" +WORKDIR=$(mktemp -d -t "actor-bundles-${VERSION}.XXXXXXXXXX") +trap 'rm -rf -- "$WORKDIR"' EXIT + +encode_release() { + jq -rn --arg release "$1" '$release | @uri' +} + +pushd "${WORKDIR}" +encoded_release="$(jq -rn --arg release "$RELEASE" '$release | @uri')" +for network in "${NETWORKS[@]}"; do + release="$RELEASE" + # Ideally, we'd use an associative array (map). But that's not supported on macos. + for override in "${RELEASE_OVERRIDES[@]}"; do + if [[ "${network}" = "${override%%=*}" ]]; then + release="${override#*=}" + break + fi + done + encoded_release="$(encode_release "$release")" + echo "Downloading $release for network $network." + wget "https://github.com/filecoin-project/builtin-actors/releases/download/${encoded_release}/builtin-actors-${network}"{.car,.sha256} +done + +echo "Checking the checksums..." + +sha256sum -c -- *.sha256 + + +echo "Packing..." + +rm -f -- "$TARGET_FILE" +tar -cf "$TARGET_FILE" --use-compress-program "zstd -19" -- *.car +popd + +echo "Generating metadata..." 
+ +make -C ../../../ bundle-gen diff --git a/venus-shared/actors/builtin-actors-code/v8.tar.zst b/venus-shared/actors/builtin-actors-code/v8.tar.zst new file mode 100644 index 0000000000..c4eb857b9f Binary files /dev/null and b/venus-shared/actors/builtin-actors-code/v8.tar.zst differ diff --git a/venus-shared/actors/builtin-actors-code/v9.tar.zst b/venus-shared/actors/builtin-actors-code/v9.tar.zst new file mode 100644 index 0000000000..95b887312f Binary files /dev/null and b/venus-shared/actors/builtin-actors-code/v9.tar.zst differ diff --git a/venus-shared/actors/builtin/README.md b/venus-shared/actors/builtin/README.md new file mode 100644 index 0000000000..b47dd68fed --- /dev/null +++ b/venus-shared/actors/builtin/README.md @@ -0,0 +1,31 @@ +// FETCHED FROM LOTUS: builtin/README.md + +# Actors + +This package contains shims for abstracting over different actor versions. + +## Design + +Shims in this package follow a few common design principles. + +### Structure Agnostic + +Shims interfaces defined in this package should (ideally) not change even if the +structure of the underlying data changes. For example: + +* All shims store an internal "store" object. That way, state can be moved into + a separate object without needing to add a store to the function signature. +* All functions must return an error, even if unused for now. + +### Minimal + +These interfaces should be expanded only as necessary to reduce maintenance burden. + +### Queries, not field assessors. + +When possible, functions should query the state instead of simply acting as +field assessors. These queries are more likely to remain stable across +specs-actor upgrades than specific state fields. + +Note: there is a trade-off here. Avoid implementing _complicated_ query logic +inside these shims, as it will need to be replicated in every shim. 
diff --git a/venus-shared/actors/builtin/account/actor.go b/venus-shared/actors/builtin/account/actor.go new file mode 100644 index 0000000000..b6ec5b9619 --- /dev/null +++ b/venus-shared/actors/builtin/account/actor.go @@ -0,0 +1,120 @@ +// FETCHED FROM LOTUS: builtin/account/actor.go.template + +package account + +import ( + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/venus-shared/actors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/cbor" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" +) + +var Methods = builtin9.MethodsAccount + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.AccountKey { + return nil, fmt.Errorf("actor code is not account: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.AccountActorCodeID: + return load0(store, act.Head) + + case builtin2.AccountActorCodeID: + return load2(store, act.Head) + + case builtin3.AccountActorCodeID: + return load3(store, act.Head) + + case builtin4.AccountActorCodeID: + return load4(store, act.Head) + + 
case builtin5.AccountActorCodeID: + return load5(store, act.Head) + + case builtin6.AccountActorCodeID: + return load6(store, act.Head) + + case builtin7.AccountActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (State, error) { + switch av { + + case actorstypes.Version0: + return make0(store, addr) + + case actorstypes.Version2: + return make2(store, addr) + + case actorstypes.Version3: + return make3(store, addr) + + case actorstypes.Version4: + return make4(store, addr) + + case actorstypes.Version5: + return make5(store, addr) + + case actorstypes.Version6: + return make6(store, addr) + + case actorstypes.Version7: + return make7(store, addr) + + case actorstypes.Version8: + return make8(store, addr) + + case actorstypes.Version9: + return make9(store, addr) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + PubkeyAddress() (address.Address, error) + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/account/actor.go.template b/venus-shared/actors/builtin/account/actor.go.template new file mode 100644 index 0000000000..04cbe1e4ce --- /dev/null +++ b/venus-shared/actors/builtin/account/actor.go.template @@ -0,0 +1,69 @@ +// FETCHED FROM LOTUS: builtin/account/actor.go.template + +package account + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/venus-shared/actors" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/cbor" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + +{{range .versions}} + {{if (le . 
7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin" +) + +var Methods = builtin{{.latestVersion}}.MethodsAccount + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.AccountKey { + return nil, fmt.Errorf("actor code is not account: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 7)}} + case builtin{{.}}.AccountActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store, addr) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + PubkeyAddress() (address.Address, error) + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/account/state.sep.go.template b/venus-shared/actors/builtin/account/state.sep.go.template new file mode 100644 index 0000000000..248d2c9552 --- /dev/null +++ b/venus-shared/actors/builtin/account/state.sep.go.template @@ -0,0 +1,46 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + +{{if (le .v 7)}} + account{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/account" +{{else}} + account{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}account" +{{end}} +) + +var _ State = (*state{{.v}})(nil) 
+ +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, addr address.Address) (State, error) { + out := state{{.v}}{store: store} + out.State = account{{.v}}.State{Address:addr} + return &out, nil +} + +type state{{.v}} struct { + account{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/account/state.v0.go b/venus-shared/actors/builtin/account/state.v0.go new file mode 100644 index 0000000000..e3f442b83f --- /dev/null +++ b/venus-shared/actors/builtin/account/state.v0.go @@ -0,0 +1,42 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + account0 "github.com/filecoin-project/specs-actors/actors/builtin/account" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store, addr address.Address) (State, error) { + out := state0{store: store} + out.State = account0.State{Address: addr} + return &out, nil +} + +type state0 struct { + account0.State + store adt.Store +} + +func (s *state0) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/account/state.v2.go b/venus-shared/actors/builtin/account/state.v2.go new file mode 100644 index 0000000000..27e3082740 --- /dev/null +++ 
b/venus-shared/actors/builtin/account/state.v2.go @@ -0,0 +1,42 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + account2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/account" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store, addr address.Address) (State, error) { + out := state2{store: store} + out.State = account2.State{Address: addr} + return &out, nil +} + +type state2 struct { + account2.State + store adt.Store +} + +func (s *state2) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/account/state.v3.go b/venus-shared/actors/builtin/account/state.v3.go new file mode 100644 index 0000000000..0d51da82b6 --- /dev/null +++ b/venus-shared/actors/builtin/account/state.v3.go @@ -0,0 +1,42 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + account3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/account" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, addr address.Address) (State, error) { + out := state3{store: store} + out.State = account3.State{Address: addr} + return &out, nil +} + +type state3 struct { + account3.State + store 
adt.Store +} + +func (s *state3) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/account/state.v4.go b/venus-shared/actors/builtin/account/state.v4.go new file mode 100644 index 0000000000..20c752440f --- /dev/null +++ b/venus-shared/actors/builtin/account/state.v4.go @@ -0,0 +1,42 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + account4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/account" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, addr address.Address) (State, error) { + out := state4{store: store} + out.State = account4.State{Address: addr} + return &out, nil +} + +type state4 struct { + account4.State + store adt.Store +} + +func (s *state4) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/account/state.v5.go b/venus-shared/actors/builtin/account/state.v5.go new file mode 100644 index 0000000000..a70c10aba0 --- /dev/null +++ b/venus-shared/actors/builtin/account/state.v5.go @@ -0,0 +1,42 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + account5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/account" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := 
state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, addr address.Address) (State, error) { + out := state5{store: store} + out.State = account5.State{Address: addr} + return &out, nil +} + +type state5 struct { + account5.State + store adt.Store +} + +func (s *state5) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/account/state.v6.go b/venus-shared/actors/builtin/account/state.v6.go new file mode 100644 index 0000000000..6dff2bc031 --- /dev/null +++ b/venus-shared/actors/builtin/account/state.v6.go @@ -0,0 +1,42 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + account6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/account" +) + +var _ State = (*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store, addr address.Address) (State, error) { + out := state6{store: store} + out.State = account6.State{Address: addr} + return &out, nil +} + +type state6 struct { + account6.State + store adt.Store +} + +func (s *state6) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state6) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/account/state.v7.go b/venus-shared/actors/builtin/account/state.v7.go new file mode 100644 index 0000000000..00141f18f5 --- /dev/null +++ b/venus-shared/actors/builtin/account/state.v7.go @@ -0,0 +1,42 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + 
+package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + account7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/account" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, addr address.Address) (State, error) { + out := state7{store: store} + out.State = account7.State{Address: addr} + return &out, nil +} + +type state7 struct { + account7.State + store adt.Store +} + +func (s *state7) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/account/state.v8.go b/venus-shared/actors/builtin/account/state.v8.go new file mode 100644 index 0000000000..e12e83aac1 --- /dev/null +++ b/venus-shared/actors/builtin/account/state.v8.go @@ -0,0 +1,42 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + account8 "github.com/filecoin-project/go-state-types/builtin/v8/account" +) + +var _ State = (*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store, addr address.Address) (State, error) { + out := state8{store: store} + out.State = account8.State{Address: addr} + return &out, nil +} + +type state8 struct { + account8.State + store adt.Store +} + +func (s *state8) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state8) GetState() 
interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/account/state.v9.go b/venus-shared/actors/builtin/account/state.v9.go new file mode 100644 index 0000000000..f62aa8640a --- /dev/null +++ b/venus-shared/actors/builtin/account/state.v9.go @@ -0,0 +1,42 @@ +// FETCHED FROM LOTUS: builtin/account/state.go.template + +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + account9 "github.com/filecoin-project/go-state-types/builtin/v9/account" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store, addr address.Address) (State, error) { + out := state9{store: store} + out.State = account9.State{Address: addr} + return &out, nil +} + +type state9 struct { + account9.State + store adt.Store +} + +func (s *state9) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state9) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/builtin.go b/venus-shared/actors/builtin/builtin.go new file mode 100644 index 0000000000..4efea24de4 --- /dev/null +++ b/venus-shared/actors/builtin/builtin.go @@ -0,0 +1,293 @@ +// FETCHED FROM LOTUS: builtin/builtin.go.template + +package builtin + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 
"github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/proof" + + "github.com/filecoin-project/venus/venus-shared/actors" + + smoothingtypes "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing" + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" +) + +var SystemActorAddr = builtin.SystemActorAddr +var BurntFundsActorAddr = builtin.BurntFundsActorAddr +var CronActorAddr = builtin.CronActorAddr +var SaftAddress = makeAddress("t0122") +var ReserveAddress = makeAddress("t090") +var RootVerifierAddress = makeAddress("t080") + +var ( + ExpectedLeadersPerEpoch = builtin.ExpectedLeadersPerEpoch +) + +const ( + EpochDurationSeconds = builtin.EpochDurationSeconds + EpochsInDay = builtin.EpochsInDay + SecondsInDay = builtin.SecondsInDay +) + +const ( + MethodSend = builtin.MethodSend + MethodConstructor = builtin.MethodConstructor +) + +// These are all just type aliases across actor versions. In the future, that might change +// and we might need to do something fancier. 
+type SectorInfo = proof.SectorInfo +type ExtendedSectorInfo = proof.ExtendedSectorInfo +type PoStProof = proof.PoStProof +type FilterEstimate = smoothingtypes.FilterEstimate + +func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { + return minertypes.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) +} + +func ActorNameByCode(c cid.Cid) string { + if name, version, ok := actors.GetActorMetaByCode(c); ok { + return fmt.Sprintf("fil/%d/%s", version, name) + } + + switch { + + case builtin0.IsBuiltinActor(c): + return builtin0.ActorNameByCode(c) + + case builtin2.IsBuiltinActor(c): + return builtin2.ActorNameByCode(c) + + case builtin3.IsBuiltinActor(c): + return builtin3.ActorNameByCode(c) + + case builtin4.IsBuiltinActor(c): + return builtin4.ActorNameByCode(c) + + case builtin5.IsBuiltinActor(c): + return builtin5.ActorNameByCode(c) + + case builtin6.IsBuiltinActor(c): + return builtin6.ActorNameByCode(c) + + case builtin7.IsBuiltinActor(c): + return builtin7.ActorNameByCode(c) + + default: + return "" + } +} + +func IsBuiltinActor(c cid.Cid) bool { + _, _, ok := actors.GetActorMetaByCode(c) + if ok { + return true + } + + if builtin0.IsBuiltinActor(c) { + return true + } + + if builtin2.IsBuiltinActor(c) { + return true + } + + if builtin3.IsBuiltinActor(c) { + return true + } + + if builtin4.IsBuiltinActor(c) { + return true + } + + if builtin5.IsBuiltinActor(c) { + return true + } + + if builtin6.IsBuiltinActor(c) { + return true + } + + if builtin7.IsBuiltinActor(c) { + return true + } + + return false +} + +func IsAccountActor(c cid.Cid) bool { + name, _, ok := actors.GetActorMetaByCode(c) + if ok { + return name == "account" + } + + if c == builtin0.AccountActorCodeID { + return true + } + + if c == builtin2.AccountActorCodeID { + return true + } + + if c == builtin3.AccountActorCodeID { + return true + } + + if c == builtin4.AccountActorCodeID { + return true + } + + if c 
== builtin5.AccountActorCodeID { + return true + } + + if c == builtin6.AccountActorCodeID { + return true + } + + if c == builtin7.AccountActorCodeID { + return true + } + + return false +} + +func IsStorageMinerActor(c cid.Cid) bool { + name, _, ok := actors.GetActorMetaByCode(c) + if ok { + return name == actors.MinerKey + } + + if c == builtin0.StorageMinerActorCodeID { + return true + } + + if c == builtin2.StorageMinerActorCodeID { + return true + } + + if c == builtin3.StorageMinerActorCodeID { + return true + } + + if c == builtin4.StorageMinerActorCodeID { + return true + } + + if c == builtin5.StorageMinerActorCodeID { + return true + } + + if c == builtin6.StorageMinerActorCodeID { + return true + } + + if c == builtin7.StorageMinerActorCodeID { + return true + } + + return false +} + +func IsMultisigActor(c cid.Cid) bool { + name, _, ok := actors.GetActorMetaByCode(c) + if ok { + return name == actors.MultisigKey + } + + if c == builtin0.MultisigActorCodeID { + return true + } + + if c == builtin2.MultisigActorCodeID { + return true + } + + if c == builtin3.MultisigActorCodeID { + return true + } + + if c == builtin4.MultisigActorCodeID { + return true + } + + if c == builtin5.MultisigActorCodeID { + return true + } + + if c == builtin6.MultisigActorCodeID { + return true + } + + if c == builtin7.MultisigActorCodeID { + return true + } + + return false +} + +func IsPaymentChannelActor(c cid.Cid) bool { + name, _, ok := actors.GetActorMetaByCode(c) + if ok { + return name == "paymentchannel" + } + + if c == builtin0.PaymentChannelActorCodeID { + return true + } + + if c == builtin2.PaymentChannelActorCodeID { + return true + } + + if c == builtin3.PaymentChannelActorCodeID { + return true + } + + if c == builtin4.PaymentChannelActorCodeID { + return true + } + + if c == builtin5.PaymentChannelActorCodeID { + return true + } + + if c == builtin6.PaymentChannelActorCodeID { + return true + } + + if c == builtin7.PaymentChannelActorCodeID { + return true + 
} + + return false +} + +func makeAddress(addr string) address.Address { + ret, err := address.NewFromString(addr) + if err != nil { + panic(err) + } + + return ret +} diff --git a/venus-shared/actors/builtin/builtin.go.template b/venus-shared/actors/builtin/builtin.go.template new file mode 100644 index 0000000000..69c3e584df --- /dev/null +++ b/venus-shared/actors/builtin/builtin.go.template @@ -0,0 +1,164 @@ +// FETCHED FROM LOTUS: builtin/builtin.go.template + +package builtin + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + +{{range .versions}} + {{if (le . 7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/proof" + "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/venus/venus-shared/actors" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + smoothingtypes "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing" +) + +var SystemActorAddr = builtin.SystemActorAddr +var BurntFundsActorAddr = builtin.BurntFundsActorAddr +var CronActorAddr = builtin.CronActorAddr +var SaftAddress = makeAddress("t0122") +var ReserveAddress = makeAddress("t090") +var RootVerifierAddress = makeAddress("t080") + +var ( + ExpectedLeadersPerEpoch = builtin.ExpectedLeadersPerEpoch +) + +const ( + EpochDurationSeconds = builtin.EpochDurationSeconds + EpochsInDay = builtin.EpochsInDay + SecondsInDay = builtin.SecondsInDay +) + +const ( + MethodSend = builtin.MethodSend + MethodConstructor = builtin.MethodConstructor +) + +// These are all just type aliases across actor versions. In the future, that might change +// and we might need to do something fancier. 
+type SectorInfo = proof.SectorInfo +type ExtendedSectorInfo = proof.ExtendedSectorInfo +type PoStProof = proof.PoStProof +type FilterEstimate = smoothingtypes.FilterEstimate + +func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { + return minertypes.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) +} + +func ActorNameByCode(c cid.Cid) string { + if name, version, ok := actors.GetActorMetaByCode(c); ok { + return fmt.Sprintf("fil/%d/%s", version, name) + } + + switch { + {{range .versions}} + {{if (le . 7)}} + case builtin{{.}}.IsBuiltinActor(c): + return builtin{{.}}.ActorNameByCode(c) + {{end}} + {{end}} + default: + return "" + } +} + +func IsBuiltinActor(c cid.Cid) bool { + _, _, ok := actors.GetActorMetaByCode(c) + if ok { + return true + } + + {{range .versions}} + {{if (le . 7)}} + if builtin{{.}}.IsBuiltinActor(c) { + return true + } + {{end}} + {{end}} + return false +} + +func IsAccountActor(c cid.Cid) bool { + name, _, ok := actors.GetActorMetaByCode(c) + if ok { + return name == "account" + } + + {{range .versions}} + {{if (le . 7)}} + if c == builtin{{.}}.AccountActorCodeID { + return true + } + {{end}} + {{end}} + return false +} + +func IsStorageMinerActor(c cid.Cid) bool { + name, _, ok := actors.GetActorMetaByCode(c) + if ok { + return name == actors.MinerKey + } + + {{range .versions}} + {{if (le . 7)}} + if c == builtin{{.}}.StorageMinerActorCodeID { + return true + } + {{end}} + {{end}} + return false +} + +func IsMultisigActor(c cid.Cid) bool { + name, _, ok := actors.GetActorMetaByCode(c) + if ok { + return name == actors.MultisigKey + } + + {{range .versions}} + {{if (le . 7)}} + if c == builtin{{.}}.MultisigActorCodeID { + return true + } + {{end}} + {{end}} + return false +} + +func IsPaymentChannelActor(c cid.Cid) bool { + name, _, ok := actors.GetActorMetaByCode(c) + if ok { + return name == "paymentchannel" + } + + {{range .versions}} + {{if (le . 
7)}} + if c == builtin{{.}}.PaymentChannelActorCodeID { + return true + } + {{end}} + {{end}} + return false +} + +func makeAddress(addr string) address.Address { + ret, err := address.NewFromString(addr) + if err != nil { + panic(err) + } + + return ret +} diff --git a/venus-shared/actors/builtin/cron/actor.go b/venus-shared/actors/builtin/cron/actor.go new file mode 100644 index 0000000000..a12ac348f0 --- /dev/null +++ b/venus-shared/actors/builtin/cron/actor.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/cron/actor.go.template + +package cron + +import ( + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.CronKey { + return nil, fmt.Errorf("actor code is not cron: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.CronActorCodeID: + return load0(store, act.Head) + + case builtin2.CronActorCodeID: + return load2(store, act.Head) + + case builtin3.CronActorCodeID: + return 
load3(store, act.Head) + + case builtin4.CronActorCodeID: + return load4(store, act.Head) + + case builtin5.CronActorCodeID: + return load5(store, act.Head) + + case builtin6.CronActorCodeID: + return load6(store, act.Head) + + case builtin7.CronActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version) (State, error) { + switch av { + + case actorstypes.Version0: + return make0(store) + + case actorstypes.Version2: + return make2(store) + + case actorstypes.Version3: + return make3(store) + + case actorstypes.Version4: + return make4(store) + + case actorstypes.Version5: + return make5(store) + + case actorstypes.Version6: + return make6(store) + + case actorstypes.Version7: + return make7(store) + + case actorstypes.Version8: + return make8(store) + + case actorstypes.Version9: + return make9(store) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +var ( + Address = builtin9.CronActorAddr + Methods = builtin9.MethodsCron +) + +type State interface { + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/cron/actor.go.template b/venus-shared/actors/builtin/cron/actor.go.template new file mode 100644 index 0000000000..5a9fcdead6 --- /dev/null +++ b/venus-shared/actors/builtin/cron/actor.go.template @@ -0,0 +1,66 @@ +// FETCHED FROM LOTUS: builtin/cron/actor.go.template + +package cron + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "fmt" + types "github.com/filecoin-project/venus/venus-shared/internal" + +{{range .versions}} + {{if (le . 
7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin" +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.CronKey { + return nil, fmt.Errorf("actor code is not cron: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 7)}} + case builtin{{.}}.CronActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +var ( + Address = builtin{{.latestVersion}}.CronActorAddr + Methods = builtin{{.latestVersion}}.MethodsCron +) + + +type State interface { + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/cron/state.sep.go.template b/venus-shared/actors/builtin/cron/state.sep.go.template new file mode 100644 index 0000000000..967be20164 --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.sep.go.template @@ -0,0 +1,41 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + +{{if (le .v 7)}} + cron{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/cron" +{{else}} + cron{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}cron" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := 
store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + out.State = *cron{{.v}}.ConstructState(cron{{.v}}.BuiltInEntries()) + return &out, nil +} + +type state{{.v}} struct { + cron{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/cron/state.v0.go b/venus-shared/actors/builtin/cron/state.v0.go new file mode 100644 index 0000000000..ba0f37d5b6 --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.v0.go @@ -0,0 +1,37 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + cron0 "github.com/filecoin-project/specs-actors/actors/builtin/cron" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = *cron0.ConstructState(cron0.BuiltInEntries()) + return &out, nil +} + +type state0 struct { + cron0.State + store adt.Store +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/cron/state.v2.go b/venus-shared/actors/builtin/cron/state.v2.go new file mode 100644 index 0000000000..aff1f50282 --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.v2.go @@ -0,0 +1,37 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + cron2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/cron" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) 
(State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = *cron2.ConstructState(cron2.BuiltInEntries()) + return &out, nil +} + +type state2 struct { + cron2.State + store adt.Store +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/cron/state.v3.go b/venus-shared/actors/builtin/cron/state.v3.go new file mode 100644 index 0000000000..b6a4a96121 --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.v3.go @@ -0,0 +1,37 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + cron3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/cron" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = *cron3.ConstructState(cron3.BuiltInEntries()) + return &out, nil +} + +type state3 struct { + cron3.State + store adt.Store +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/cron/state.v4.go b/venus-shared/actors/builtin/cron/state.v4.go new file mode 100644 index 0000000000..0cd6d2c75e --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.v4.go @@ -0,0 +1,37 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + cron4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/cron" +) + +var _ State = (*state4)(nil) + +func load4(store 
adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = *cron4.ConstructState(cron4.BuiltInEntries()) + return &out, nil +} + +type state4 struct { + cron4.State + store adt.Store +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/cron/state.v5.go b/venus-shared/actors/builtin/cron/state.v5.go new file mode 100644 index 0000000000..12cf83a649 --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.v5.go @@ -0,0 +1,37 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + cron5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/cron" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = *cron5.ConstructState(cron5.BuiltInEntries()) + return &out, nil +} + +type state5 struct { + cron5.State + store adt.Store +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/cron/state.v6.go b/venus-shared/actors/builtin/cron/state.v6.go new file mode 100644 index 0000000000..7fe9cf8a2a --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.v6.go @@ -0,0 +1,37 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + cron6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/cron" +) + +var _ State = (*state6)(nil) + 
+func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store) (State, error) { + out := state6{store: store} + out.State = *cron6.ConstructState(cron6.BuiltInEntries()) + return &out, nil +} + +type state6 struct { + cron6.State + store adt.Store +} + +func (s *state6) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/cron/state.v7.go b/venus-shared/actors/builtin/cron/state.v7.go new file mode 100644 index 0000000000..a1352ab5c9 --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.v7.go @@ -0,0 +1,37 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + cron7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/cron" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = *cron7.ConstructState(cron7.BuiltInEntries()) + return &out, nil +} + +type state7 struct { + cron7.State + store adt.Store +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/cron/state.v8.go b/venus-shared/actors/builtin/cron/state.v8.go new file mode 100644 index 0000000000..2473905d97 --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.v8.go @@ -0,0 +1,37 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + cron8 "github.com/filecoin-project/go-state-types/builtin/v8/cron" +) + +var _ State = 
(*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store) (State, error) { + out := state8{store: store} + out.State = *cron8.ConstructState(cron8.BuiltInEntries()) + return &out, nil +} + +type state8 struct { + cron8.State + store adt.Store +} + +func (s *state8) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/cron/state.v9.go b/venus-shared/actors/builtin/cron/state.v9.go new file mode 100644 index 0000000000..cb340945e3 --- /dev/null +++ b/venus-shared/actors/builtin/cron/state.v9.go @@ -0,0 +1,37 @@ +// FETCHED FROM LOTUS: builtin/cron/state.go.template + +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + cron9 "github.com/filecoin-project/go-state-types/builtin/v9/cron" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store) (State, error) { + out := state9{store: store} + out.State = *cron9.ConstructState(cron9.BuiltInEntries()) + return &out, nil +} + +type state9 struct { + cron9.State + store adt.Store +} + +func (s *state9) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/datacap/actor.go b/venus-shared/actors/builtin/datacap/actor.go new file mode 100644 index 0000000000..9d3f724a98 --- /dev/null +++ b/venus-shared/actors/builtin/datacap/actor.go @@ -0,0 +1,60 @@ +// FETCHED FROM LOTUS: builtin/datacap/actor.go.template + +package datacap + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes 
"github.com/filecoin-project/go-state-types/actors" + builtin9 "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/cbor" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +var ( + Address = builtin9.DatacapActorAddr + Methods = builtin9.MethodsDatacap +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.DatacapKey { + return nil, fmt.Errorf("actor code is not datacap: %s", name) + } + + switch av { + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, governor address.Address, bitwidth uint64) (State, error) { + switch av { + + case actorstypes.Version9: + return make9(store, governor, bitwidth) + + default: + return nil, fmt.Errorf("datacap actor only valid for actors v9 and above, got %d", av) + + } +} + +type State interface { + cbor.Marshaler + + ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error + VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) + Governor() (address.Address, error) + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/datacap/actor.go.template b/venus-shared/actors/builtin/datacap/actor.go.template new file mode 100644 index 0000000000..42ed36bcbd --- /dev/null +++ b/venus-shared/actors/builtin/datacap/actor.go.template @@ -0,0 +1,59 @@ +// FETCHED FROM LOTUS: builtin/datacap/actor.go.template + +package datacap + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin{{.latestVersion}} 
"github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/cbor" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +var ( + Address = builtin9.DatacapActorAddr + Methods = builtin9.MethodsDatacap +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.DatacapKey { + return nil, fmt.Errorf("actor code is not datacap: %s", name) + } + + switch av { + {{range .versions}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + } + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, governor address.Address, bitwidth uint64) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store, governor, bitwidth) + + default: return nil, fmt.Errorf("datacap actor only valid for actors v9 and above, got %d", av) +{{end}} + } +} + +type State interface { + cbor.Marshaler + + ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error + VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) + Governor() (address.Address, error) + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/datacap/state.sep.go.template b/venus-shared/actors/builtin/datacap/state.sep.go.template new file mode 100644 index 0000000000..8af101b336 --- /dev/null +++ b/venus-shared/actors/builtin/datacap/state.sep.go.template @@ -0,0 +1,63 @@ +// FETCHED FROM LOTUS: builtin/datacap/state.go.template + +package datacap + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors" + 
"github.com/filecoin-project/venus/venus-shared/actors/adt" + + datacap{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}datacap" + adt{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}util/adt" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, governor address.Address, bitwidth uint64) (State, error) { + out := state{{.v}}{store: store} + s, err := datacap{{.v}}.ConstructState(store, governor, bitwidth) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state{{.v}} struct { + datacap{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) Governor() (address.Address, error) { + return s.State.Governor, nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + +func (s *state{{.v}}) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachClient(s.store, actors.Version{{.v}}, s.verifiedClients, cb) +} + +func (s *state{{.v}}) verifiedClients() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.Token.Balances, int(s.Token.HamtBitWidth)) +} + +func (s *state{{.v}}) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version{{.v}}, s.verifiedClients, addr) +} diff --git a/venus-shared/actors/builtin/datacap/state.v9.go b/venus-shared/actors/builtin/datacap/state.v9.go new file mode 100644 index 0000000000..d658d2ab53 --- /dev/null +++ b/venus-shared/actors/builtin/datacap/state.v9.go @@ -0,0 +1,63 @@ +// FETCHED FROM LOTUS: builtin/datacap/state.go.template + +package datacap + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + 
"github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + datacap9 "github.com/filecoin-project/go-state-types/builtin/v9/datacap" + adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store, governor address.Address, bitwidth uint64) (State, error) { + out := state9{store: store} + s, err := datacap9.ConstructState(store, governor, bitwidth) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state9 struct { + datacap9.State + store adt.Store +} + +func (s *state9) Governor() (address.Address, error) { + return s.State.Governor, nil +} + +func (s *state9) GetState() interface{} { + return &s.State +} + +func (s *state9) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachClient(s.store, actors.Version9, s.verifiedClients, cb) +} + +func (s *state9) verifiedClients() (adt.Map, error) { + return adt9.AsMap(s.store, s.Token.Balances, int(s.Token.HamtBitWidth)) +} + +func (s *state9) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version9, s.verifiedClients, addr) +} diff --git a/venus-shared/actors/builtin/datacap/util.go b/venus-shared/actors/builtin/datacap/util.go new file mode 100644 index 0000000000..0610cda82f --- /dev/null +++ b/venus-shared/actors/builtin/datacap/util.go @@ -0,0 +1,66 @@ +// FETCHED FROM LOTUS: builtin/datacap/util.go + +package datacap + +import ( + "fmt" + + "github.com/multiformats/go-varint" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + 
"github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" +) + +// taking this as a function instead of asking the caller to call it helps reduce some of the error +// checking boilerplate. +// +// "go made me do it" +type rootFunc func() (adt.Map, error) + +func getDataCap(store adt.Store, ver actors.Version, root rootFunc, addr address.Address) (bool, abi.StoragePower, error) { + if addr.Protocol() != address.ID { + return false, big.Zero(), fmt.Errorf("can only look up ID addresses") + } + vh, err := root() + if err != nil { + return false, big.Zero(), fmt.Errorf("loading datacap actor: %w", err) + } + + var dcap abi.StoragePower + if found, err := vh.Get(abi.IdAddrKey(addr), &dcap); err != nil { + return false, big.Zero(), fmt.Errorf("looking up addr: %w", err) + } else if !found { + return false, big.Zero(), nil + } + + return true, big.Div(dcap, verifreg.DataCapGranularity), nil +} + +func forEachClient(store adt.Store, ver actors.Version, root rootFunc, cb func(addr address.Address, dcap abi.StoragePower) error) error { + vh, err := root() + if err != nil { + return fmt.Errorf("loading verified clients: %w", err) + } + var dcap abi.StoragePower + return vh.ForEach(&dcap, func(key string) error { + id, n, err := varint.FromUvarint([]byte(key)) + if n != len([]byte(key)) { + return fmt.Errorf("could not get varint from address string") + } + if err != nil { + return err + } + + a, err := address.NewIDAddress(id) + if err != nil { + return fmt.Errorf("creating ID address from actor ID: %w", err) + } + + return cb(a, big.Div(dcap, verifreg.DataCapGranularity)) + }) +} diff --git a/venus-shared/actors/builtin/init/actor.go b/venus-shared/actors/builtin/init/actor.go new file mode 100644 index 0000000000..7bf73d25e9 --- /dev/null +++ b/venus-shared/actors/builtin/init/actor.go @@ -0,0 +1,145 @@ +// FETCHED FROM LOTUS: 
builtin/init/actor.go.template + +package init + +import ( + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/venus-shared/actors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" +) + +var ( + Address = builtin9.InitActorAddr + Methods = builtin9.MethodsInit +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.InitKey { + return nil, fmt.Errorf("actor code is not init: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.InitActorCodeID: + return load0(store, act.Head) + + case builtin2.InitActorCodeID: + return load2(store, act.Head) + + case builtin3.InitActorCodeID: + return load3(store, act.Head) + + case builtin4.InitActorCodeID: + return load4(store, act.Head) + + case builtin5.InitActorCodeID: + return load5(store, act.Head) + + case builtin6.InitActorCodeID: + return load6(store, act.Head) + + case builtin7.InitActorCodeID: + 
return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, networkName string) (State, error) { + switch av { + + case actorstypes.Version0: + return make0(store, networkName) + + case actorstypes.Version2: + return make2(store, networkName) + + case actorstypes.Version3: + return make3(store, networkName) + + case actorstypes.Version4: + return make4(store, networkName) + + case actorstypes.Version5: + return make5(store, networkName) + + case actorstypes.Version6: + return make6(store, networkName) + + case actorstypes.Version7: + return make7(store, networkName) + + case actorstypes.Version8: + return make8(store, networkName) + + case actorstypes.Version9: + return make9(store, networkName) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + ResolveAddress(address address.Address) (address.Address, bool, error) + MapAddressToNewID(address address.Address) (address.Address, error) + NetworkName() (string, error) + + ForEachActor(func(id abi.ActorID, address address.Address) error) error + + // Remove exists to support tooling that manipulates state for testing. + // It should not be used in production code, as init actor entries are + // immutable. + Remove(addrs ...address.Address) error + + // Sets the network's name. This should only be used on upgrade/fork. + SetNetworkName(name string) error + + // Sets the next ID for the init actor. This should only be used for testing. + SetNextID(id abi.ActorID) error + + // Sets the address map for the init actor. This should only be used for testing. 
+ SetAddressMap(mcid cid.Cid) error + + AddressMap() (adt.Map, error) + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/init/actor.go.template b/venus-shared/actors/builtin/init/actor.go.template new file mode 100644 index 0000000000..0bbfe0c24d --- /dev/null +++ b/venus-shared/actors/builtin/init/actor.go.template @@ -0,0 +1,94 @@ +// FETCHED FROM LOTUS: builtin/init/actor.go.template + +package init + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/venus-shared/actors" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + +{{range .versions}} + {{if (le . 7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin" +) + +var ( + Address = builtin{{.latestVersion}}.InitActorAddr + Methods = builtin{{.latestVersion}}.MethodsInit +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.InitKey { + return nil, fmt.Errorf("actor code is not init: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 
7)}} + case builtin{{.}}.InitActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, networkName string) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store, networkName) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + ResolveAddress(address address.Address) (address.Address, bool, error) + MapAddressToNewID(address address.Address) (address.Address, error) + NetworkName() (string, error) + + ForEachActor(func(id abi.ActorID, address address.Address) error) error + + // Remove exists to support tooling that manipulates state for testing. + // It should not be used in production code, as init actor entries are + // immutable. + Remove(addrs ...address.Address) error + + // Sets the network's name. This should only be used on upgrade/fork. + SetNetworkName(name string) error + + // Sets the next ID for the init actor. This should only be used for testing. + SetNextID(id abi.ActorID) error + + // Sets the address map for the init actor. This should only be used for testing. 
+ SetAddressMap(mcid cid.Cid) error + + AddressMap() (adt.Map, error) + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/init/diff.go b/venus-shared/actors/builtin/init/diff.go new file mode 100644 index 0000000000..e283ca0865 --- /dev/null +++ b/venus-shared/actors/builtin/init/diff.go @@ -0,0 +1,155 @@ +// FETCHED FROM LOTUS: builtin/init/diff.go + +package init + +import ( + "bytes" + + typegen "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" +) + +func DiffAddressMap(pre, cur State) (*AddressMapChanges, error) { + prem, err := pre.AddressMap() + if err != nil { + return nil, err + } + + curm, err := cur.AddressMap() + if err != nil { + return nil, err + } + + preRoot, err := prem.Root() + if err != nil { + return nil, err + } + + curRoot, err := curm.Root() + if err != nil { + return nil, err + } + + results := new(AddressMapChanges) + // no change. 
+ if curRoot.Equals(preRoot) { + return results, nil + } + + err = adt.DiffAdtMap(prem, curm, &addressMapDiffer{results, pre, cur}) + if err != nil { + return nil, err + } + + return results, nil +} + +type addressMapDiffer struct { + Results *AddressMapChanges + pre, adter State +} + +type AddressMapChanges struct { + Added []AddressPair + Modified []AddressChange + Removed []AddressPair +} + +func (i *addressMapDiffer) AsKey(key string) (abi.Keyer, error) { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return nil, err + } + return abi.AddrKey(addr), nil +} + +func (i *addressMapDiffer) Add(key string, val *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + id := new(typegen.CborInt) + if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return err + } + idAddr, err := address.NewIDAddress(uint64(*id)) + if err != nil { + return err + } + i.Results.Added = append(i.Results.Added, AddressPair{ + ID: idAddr, + PK: pkAddr, + }) + return nil +} + +func (i *addressMapDiffer) Modify(key string, from, to *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + + fromID := new(typegen.CborInt) + if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil { + return err + } + fromIDAddr, err := address.NewIDAddress(uint64(*fromID)) + if err != nil { + return err + } + + toID := new(typegen.CborInt) + if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil { + return err + } + toIDAddr, err := address.NewIDAddress(uint64(*toID)) + if err != nil { + return err + } + + i.Results.Modified = append(i.Results.Modified, AddressChange{ + From: AddressPair{ + ID: fromIDAddr, + PK: pkAddr, + }, + To: AddressPair{ + ID: toIDAddr, + PK: pkAddr, + }, + }) + return nil +} + +func (i *addressMapDiffer) Remove(key string, val *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err 
!= nil { + return err + } + id := new(typegen.CborInt) + if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return err + } + idAddr, err := address.NewIDAddress(uint64(*id)) + if err != nil { + return err + } + i.Results.Removed = append(i.Results.Removed, AddressPair{ + ID: idAddr, + PK: pkAddr, + }) + return nil +} + +type AddressChange struct { + From AddressPair + To AddressPair +} + +type AddressPair struct { + ID address.Address + PK address.Address +} diff --git a/venus-shared/actors/builtin/init/state.sep.go.template b/venus-shared/actors/builtin/init/state.sep.go.template new file mode 100644 index 0000000000..17aa65b9cd --- /dev/null +++ b/venus-shared/actors/builtin/init/state.sep.go.template @@ -0,0 +1,130 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + +{{if (le .v 7)}} + {{if (ge .v 3)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" + {{end}} + init{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/init" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +{{else}} + init{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}init" + adt{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}util/adt" + builtin{{.v}} "github.com/filecoin-project/go-state-types/builtin" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, networkName string) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 
2)}} + mr, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *init{{.v}}.ConstructState(mr, networkName) + {{else}} + s, err := init{{.v}}.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + return &out, nil +} + +type state{{.v}} struct { + init{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state{{.v}}) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state{{.v}}) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state{{.v}}) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state{{.v}}) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state{{.v}}) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state{{.v}}) Remove(addrs ...address.Address) (err error) { + m, err := adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + 
return nil +} + +func (s *state{{.v}}) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state{{.v}}) AddressMap() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/init/state.v0.go b/venus-shared/actors/builtin/init/state.v0.go new file mode 100644 index 0000000000..52fb6f7fa5 --- /dev/null +++ b/venus-shared/actors/builtin/init/state.v0.go @@ -0,0 +1,114 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store, networkName string) (State, error) { + out := state0{store: store} + + mr, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *init0.ConstructState(mr, networkName) + + return &out, nil +} + +type state0 struct { + init0.State + store adt.Store +} + +func (s *state0) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state0) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state0) ForEachActor(cb func(id abi.ActorID, address address.Address) error) 
error { + addrs, err := adt0.AsMap(s.store, s.State.AddressMap) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state0) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state0) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state0) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state0) Remove(addrs ...address.Address) (err error) { + m, err := adt0.AsMap(s.store, s.State.AddressMap) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state0) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state0) AddressMap() (adt.Map, error) { + return adt0.AsMap(s.store, s.State.AddressMap) +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/init/state.v2.go b/venus-shared/actors/builtin/init/state.v2.go new file mode 100644 index 0000000000..61ef5d2f0e --- /dev/null +++ b/venus-shared/actors/builtin/init/state.v2.go @@ -0,0 +1,114 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + adt2 
"github.com/filecoin-project/specs-actors/v2/actors/util/adt" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store, networkName string) (State, error) { + out := state2{store: store} + + mr, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *init2.ConstructState(mr, networkName) + + return &out, nil +} + +type state2 struct { + init2.State + store adt.Store +} + +func (s *state2) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state2) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state2) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt2.AsMap(s.store, s.State.AddressMap) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state2) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state2) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state2) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state2) Remove(addrs ...address.Address) (err error) { + m, err := adt2.AsMap(s.store, s.State.AddressMap) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return 
fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state2) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state2) AddressMap() (adt.Map, error) { + return adt2.AsMap(s.store, s.State.AddressMap) +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/init/state.v3.go b/venus-shared/actors/builtin/init/state.v3.go new file mode 100644 index 0000000000..4ea03f994a --- /dev/null +++ b/venus-shared/actors/builtin/init/state.v3.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, networkName string) (State, error) { + out := state3{store: store} + + s, err := init3.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state3 struct { + init3.State + store adt.Store +} + +func (s *state3) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state3) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state3) 
ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state3) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state3) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state3) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state3) Remove(addrs ...address.Address) (err error) { + m, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state3) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state3) AddressMap() (adt.Map, error) { + return adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/init/state.v4.go b/venus-shared/actors/builtin/init/state.v4.go new file mode 100644 index 0000000000..95b393e952 --- /dev/null +++ b/venus-shared/actors/builtin/init/state.v4.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg 
"github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + init4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/init" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, networkName string) (State, error) { + out := state4{store: store} + + s, err := init4.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state4 struct { + init4.State + store adt.Store +} + +func (s *state4) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state4) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state4) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state4) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state4) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state4) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state4) Remove(addrs ...address.Address) (err error) { + m, err := adt4.AsMap(s.store, s.State.AddressMap, 
builtin4.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state4) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state4) AddressMap() (adt.Map, error) { + return adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/init/state.v5.go b/venus-shared/actors/builtin/init/state.v5.go new file mode 100644 index 0000000000..cf85332c27 --- /dev/null +++ b/venus-shared/actors/builtin/init/state.v5.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, networkName string) (State, error) { + out := state5{store: store} + + s, err := init5.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + init5.State + store adt.Store +} 
+ +func (s *state5) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state5) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state5) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state5) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state5) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state5) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state5) Remove(addrs ...address.Address) (err error) { + m, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state5) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state5) AddressMap() (adt.Map, error) { + return adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/init/state.v6.go b/venus-shared/actors/builtin/init/state.v6.go new file mode 100644 index 0000000000..a1b895011e --- /dev/null +++ 
b/venus-shared/actors/builtin/init/state.v6.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + init6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/init" + adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt" +) + +var _ State = (*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store, networkName string) (State, error) { + out := state6{store: store} + + s, err := init6.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state6 struct { + init6.State + store adt.Store +} + +func (s *state6) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state6) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state6) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt6.AsMap(s.store, s.State.AddressMap, builtin6.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state6) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state6) SetNetworkName(name 
string) error { + s.State.NetworkName = name + return nil +} + +func (s *state6) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state6) Remove(addrs ...address.Address) (err error) { + m, err := adt6.AsMap(s.store, s.State.AddressMap, builtin6.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state6) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state6) AddressMap() (adt.Map, error) { + return adt6.AsMap(s.store, s.State.AddressMap, builtin6.DefaultHamtBitwidth) +} + +func (s *state6) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/init/state.v7.go b/venus-shared/actors/builtin/init/state.v7.go new file mode 100644 index 0000000000..b670323197 --- /dev/null +++ b/venus-shared/actors/builtin/init/state.v7.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store 
adt.Store, networkName string) (State, error) { + out := state7{store: store} + + s, err := init7.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + init7.State + store adt.Store +} + +func (s *state7) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state7) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state7) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state7) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state7) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state7) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state7) Remove(addrs ...address.Address) (err error) { + m, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state7) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state7) AddressMap() (adt.Map, error) { + return adt7.AsMap(s.store, 
s.State.AddressMap, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/init/state.v8.go b/venus-shared/actors/builtin/init/state.v8.go new file mode 100644 index 0000000000..3944f0eee5 --- /dev/null +++ b/venus-shared/actors/builtin/init/state.v8.go @@ -0,0 +1,115 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin8 "github.com/filecoin-project/go-state-types/builtin" + init8 "github.com/filecoin-project/go-state-types/builtin/v8/init" + adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" +) + +var _ State = (*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store, networkName string) (State, error) { + out := state8{store: store} + + s, err := init8.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state8 struct { + init8.State + store adt.Store +} + +func (s *state8) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state8) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state8) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt8.AsMap(s.store, s.State.AddressMap, builtin8.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key 
string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state8) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state8) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state8) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state8) Remove(addrs ...address.Address) (err error) { + m, err := adt8.AsMap(s.store, s.State.AddressMap, builtin8.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state8) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state8) AddressMap() (adt.Map, error) { + return adt8.AsMap(s.store, s.State.AddressMap, builtin8.DefaultHamtBitwidth) +} + +func (s *state8) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/init/state.v9.go b/venus-shared/actors/builtin/init/state.v9.go new file mode 100644 index 0000000000..7de9d41845 --- /dev/null +++ b/venus-shared/actors/builtin/init/state.v9.go @@ -0,0 +1,115 @@ +// FETCHED FROM LOTUS: builtin/init/state.go.template + +package init + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" + init9 "github.com/filecoin-project/go-state-types/builtin/v9/init" + adt9 
"github.com/filecoin-project/go-state-types/builtin/v9/util/adt" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store, networkName string) (State, error) { + out := state9{store: store} + + s, err := init9.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state9 struct { + init9.State + store adt.Store +} + +func (s *state9) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state9) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state9) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt9.AsMap(s.store, s.State.AddressMap, builtin9.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state9) NetworkName() (string, error) { + return string(s.State.NetworkName), nil +} + +func (s *state9) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state9) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state9) Remove(addrs ...address.Address) (err error) { + m, err := adt9.AsMap(s.store, s.State.AddressMap, builtin9.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return fmt.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if 
err != nil { + return fmt.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state9) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state9) AddressMap() (adt.Map, error) { + return adt9.AsMap(s.store, s.State.AddressMap, builtin9.DefaultHamtBitwidth) +} + +func (s *state9) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/market/actor.go b/venus-shared/actors/builtin/market/actor.go new file mode 100644 index 0000000000..3a93479ea0 --- /dev/null +++ b/venus-shared/actors/builtin/market/actor.go @@ -0,0 +1,279 @@ +// FETCHED FROM LOTUS: builtin/market/actor.go.template + +package market + +import ( + "unicode/utf8" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + + "fmt" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + cbg "github.com/whyrusleeping/cbor-gen" + + markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/venus/venus-shared/actors" + 
"github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +var ( + Address = builtintypes.StorageMarketActorAddr + Methods = builtintypes.MethodsMarket +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.MarketKey { + return nil, fmt.Errorf("actor code is not market: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.StorageMarketActorCodeID: + return load0(store, act.Head) + + case builtin2.StorageMarketActorCodeID: + return load2(store, act.Head) + + case builtin3.StorageMarketActorCodeID: + return load3(store, act.Head) + + case builtin4.StorageMarketActorCodeID: + return load4(store, act.Head) + + case builtin5.StorageMarketActorCodeID: + return load5(store, act.Head) + + case builtin6.StorageMarketActorCodeID: + return load6(store, act.Head) + + case builtin7.StorageMarketActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version) (State, error) { + switch av { + + case actorstypes.Version0: + return make0(store) + + case actorstypes.Version2: + return make2(store) + + case actorstypes.Version3: + return make3(store) + + case actorstypes.Version4: + return make4(store) + + case actorstypes.Version5: + return make5(store) + + case actorstypes.Version6: + return make6(store) + + case actorstypes.Version7: + return make7(store) + + case actorstypes.Version8: + return make8(store) + + case actorstypes.Version9: + return make9(store) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + BalancesChanged(State) (bool, error) + EscrowTable() (BalanceTable, error) + 
LockedTable() (BalanceTable, error) + TotalLocked() (abi.TokenAmount, error) + StatesChanged(State) (bool, error) + States() (DealStates, error) + ProposalsChanged(State) (bool, error) + Proposals() (DealProposals, error) + VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, + ) (weight, verifiedWeight abi.DealWeight, err error) + NextID() (abi.DealID, error) + GetState() interface{} + GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) +} + +type BalanceTable interface { + ForEach(cb func(address.Address, abi.TokenAmount) error) error + Get(key address.Address) (abi.TokenAmount, error) +} + +type DealStates interface { + ForEach(cb func(id abi.DealID, ds DealState) error) error + Get(id abi.DealID) (*DealState, bool, error) + + array() adt.Array + decode(*cbg.Deferred) (*DealState, error) +} + +type DealProposals interface { + ForEach(cb func(id abi.DealID, dp markettypes.DealProposal) error) error + Get(id abi.DealID) (*markettypes.DealProposal, bool, error) + + array() adt.Array + decode(*cbg.Deferred) (*markettypes.DealProposal, error) +} + +type PublishStorageDealsReturn interface { + DealIDs() ([]abi.DealID, error) + // Note that this index is based on the batch of deals that were published, NOT the DealID + IsDealValid(index uint64) (bool, int, error) +} + +func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStorageDealsReturn, error) { + av, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return nil, err + } + + switch av { + + case actorstypes.Version0: + return decodePublishStorageDealsReturn0(b) + + case actorstypes.Version2: + return decodePublishStorageDealsReturn2(b) + + case actorstypes.Version3: + return decodePublishStorageDealsReturn3(b) + + case actorstypes.Version4: + return decodePublishStorageDealsReturn4(b) + + case actorstypes.Version5: + return decodePublishStorageDealsReturn5(b) + + case actorstypes.Version6: 
+ return decodePublishStorageDealsReturn6(b) + + case actorstypes.Version7: + return decodePublishStorageDealsReturn7(b) + + case actorstypes.Version8: + return decodePublishStorageDealsReturn8(b) + + case actorstypes.Version9: + return decodePublishStorageDealsReturn9(b) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type DealProposal = markettypes.DealProposal +type DealLabel = markettypes.DealLabel + +type DealState = markettypes.DealState + +type DealStateChanges struct { + Added []DealIDState + Modified []DealStateChange + Removed []DealIDState +} + +type DealIDState struct { + ID abi.DealID + Deal DealState +} + +// DealStateChange is a change in deal state from -> to +type DealStateChange struct { + ID abi.DealID + From *DealState + To *DealState +} + +type DealProposalChanges struct { + Added []ProposalIDState + Removed []ProposalIDState +} + +type ProposalIDState struct { + ID abi.DealID + Proposal markettypes.DealProposal +} + +func EmptyDealState() *DealState { + return &DealState{ + SectorStartEpoch: -1, + SlashEpoch: -1, + LastUpdatedEpoch: -1, + } +} + +// returns the earned fees and pending fees for a given deal +func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) { + tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch))) + + ef := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(height-deal.StartEpoch))) + if ef.LessThan(big.Zero()) { + ef = big.Zero() + } + + if ef.GreaterThan(tf) { + ef = tf + } + + return ef, big.Sub(tf, ef) +} + +func IsDealActive(state markettypes.DealState) bool { + return state.SectorStartEpoch > -1 && state.SlashEpoch == -1 +} + +func labelFromGoString(s string) (markettypes.DealLabel, error) { + if utf8.ValidString(s) { + return markettypes.NewLabelFromString(s) + } else { + return markettypes.NewLabelFromBytes([]byte(s)) + } +} diff --git a/venus-shared/actors/builtin/market/actor.go.template 
b/venus-shared/actors/builtin/market/actor.go.template new file mode 100644 index 0000000000..81a6b3e232 --- /dev/null +++ b/venus-shared/actors/builtin/market/actor.go.template @@ -0,0 +1,204 @@ +// FETCHED FROM LOTUS: builtin/market/actor.go.template + +package market + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "unicode/utf8" + + "github.com/filecoin-project/go-state-types/network" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + cbg "github.com/whyrusleeping/cbor-gen" + + markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +{{range .versions}} + {{if (le . 7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +var ( + Address = builtintypes.StorageMarketActorAddr + Methods = builtintypes.MethodsMarket +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.MarketKey { + return nil, fmt.Errorf("actor code is not market: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 
7)}} + case builtin{{.}}.StorageMarketActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + BalancesChanged(State) (bool, error) + EscrowTable() (BalanceTable, error) + LockedTable() (BalanceTable, error) + TotalLocked() (abi.TokenAmount, error) + StatesChanged(State) (bool, error) + States() (DealStates, error) + ProposalsChanged(State) (bool, error) + Proposals() (DealProposals, error) + VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, + ) (weight, verifiedWeight abi.DealWeight, err error) + NextID() (abi.DealID, error) + GetState() interface{} + GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) +} + +type BalanceTable interface { + ForEach(cb func(address.Address, abi.TokenAmount) error) error + Get(key address.Address) (abi.TokenAmount, error) +} + +type DealStates interface { + ForEach(cb func(id abi.DealID, ds DealState) error) error + Get(id abi.DealID) (*DealState, bool, error) + + array() adt.Array + decode(*cbg.Deferred) (*DealState, error) +} + +type DealProposals interface { + ForEach(cb func(id abi.DealID, dp markettypes.DealProposal) error) error + Get(id abi.DealID) (*markettypes.DealProposal, bool, error) + + array() adt.Array + decode(*cbg.Deferred) (*markettypes.DealProposal, error) +} + + +type PublishStorageDealsReturn interface { + DealIDs() ([]abi.DealID, error) + // Note that this index is based on the batch of deals that were published, NOT the DealID + IsDealValid(index uint64) (bool, int, error) +} + +func DecodePublishStorageDealsReturn(b []byte, nv network.Version) 
(PublishStorageDealsReturn, error) { + av, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return nil, err + } + + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return decodePublishStorageDealsReturn{{.}}(b) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type DealProposal = markettypes.DealProposal +type DealLabel = markettypes.DealLabel + +type DealState = markettypes.DealState + +type DealStateChanges struct { + Added []DealIDState + Modified []DealStateChange + Removed []DealIDState +} + +type DealIDState struct { + ID abi.DealID + Deal DealState +} + +// DealStateChange is a change in deal state from -> to +type DealStateChange struct { + ID abi.DealID + From *DealState + To *DealState +} + +type DealProposalChanges struct { + Added []ProposalIDState + Removed []ProposalIDState +} + +type ProposalIDState struct { + ID abi.DealID + Proposal markettypes.DealProposal +} + +func EmptyDealState() *DealState { + return &DealState{ + SectorStartEpoch: -1, + SlashEpoch: -1, + LastUpdatedEpoch: -1, + } +} + +// returns the earned fees and pending fees for a given deal +func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) { + tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch))) + + ef := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(height-deal.StartEpoch))) + if ef.LessThan(big.Zero()) { + ef = big.Zero() + } + + if ef.GreaterThan(tf) { + ef = tf + } + + return ef, big.Sub(tf, ef) +} + +func IsDealActive(state markettypes.DealState) bool { + return state.SectorStartEpoch > -1 && state.SlashEpoch == -1 +} + +func labelFromGoString(s string) (markettypes.DealLabel, error) { + if utf8.ValidString(s) { + return markettypes.NewLabelFromString(s) + } else { + return markettypes.NewLabelFromBytes([]byte(s)) + } +} diff --git a/venus-shared/actors/builtin/market/diff.go b/venus-shared/actors/builtin/market/diff.go new 
file mode 100644 index 0000000000..894dac6f65 --- /dev/null +++ b/venus-shared/actors/builtin/market/diff.go @@ -0,0 +1,95 @@ +// FETCHED FROM LOTUS: builtin/market/diff.go + +package market + +import ( + "fmt" + + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" +) + +func DiffDealProposals(pre, cur DealProposals) (*DealProposalChanges, error) { + results := new(DealProposalChanges) + if err := adt.DiffAdtArray(pre.array(), cur.array(), &marketProposalsDiffer{results, pre, cur}); err != nil { + return nil, fmt.Errorf("diffing deal states: %w", err) + } + return results, nil +} + +type marketProposalsDiffer struct { + Results *DealProposalChanges + pre, cur DealProposals +} + +func (d *marketProposalsDiffer) Add(key uint64, val *cbg.Deferred) error { + dp, err := d.cur.decode(val) + if err != nil { + return err + } + d.Results.Added = append(d.Results.Added, ProposalIDState{abi.DealID(key), *dp}) + return nil +} + +func (d *marketProposalsDiffer) Modify(key uint64, from, to *cbg.Deferred) error { + // short circuit, DealProposals are static + return nil +} + +func (d *marketProposalsDiffer) Remove(key uint64, val *cbg.Deferred) error { + dp, err := d.pre.decode(val) + if err != nil { + return err + } + d.Results.Removed = append(d.Results.Removed, ProposalIDState{abi.DealID(key), *dp}) + return nil +} + +func DiffDealStates(pre, cur DealStates) (*DealStateChanges, error) { + results := new(DealStateChanges) + if err := adt.DiffAdtArray(pre.array(), cur.array(), &marketStatesDiffer{results, pre, cur}); err != nil { + return nil, fmt.Errorf("diffing deal states: %w", err) + } + return results, nil +} + +type marketStatesDiffer struct { + Results *DealStateChanges + pre, cur DealStates +} + +func (d *marketStatesDiffer) Add(key uint64, val *cbg.Deferred) error { + ds, err := d.cur.decode(val) + if err != nil { + return err + } + d.Results.Added = 
append(d.Results.Added, DealIDState{abi.DealID(key), *ds}) + return nil +} + +func (d *marketStatesDiffer) Modify(key uint64, from, to *cbg.Deferred) error { + dsFrom, err := d.pre.decode(from) + if err != nil { + return err + } + dsTo, err := d.cur.decode(to) + if err != nil { + return err + } + if *dsFrom != *dsTo { + d.Results.Modified = append(d.Results.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo}) + } + return nil +} + +func (d *marketStatesDiffer) Remove(key uint64, val *cbg.Deferred) error { + ds, err := d.pre.decode(val) + if err != nil { + return err + } + d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), *ds}) + return nil +} diff --git a/venus-shared/actors/builtin/market/state.sep.go.template b/venus-shared/actors/builtin/market/state.sep.go.template new file mode 100644 index 0000000000..992a9ac643 --- /dev/null +++ b/venus-shared/actors/builtin/market/state.sep.go.template @@ -0,0 +1,391 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "fmt" + {{if (ge .v 6)}} + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-bitfield" + {{end}} + + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + +{{if (le .v 7)}} + market{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/market" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +{{else}} + market{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}market" + markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" + adt{{.v}} 
"github.com/filecoin-project/go-state-types/builtin{{.import}}util/adt" +{{end}} +{{if (ge .v 9)}} + "github.com/filecoin-project/go-state-types/builtin" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + ea, err := adt{{.v}}.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *market{{.v}}.ConstructState(ea, em, em) + {{else}} + s, err := market{{.v}}.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + return &out, nil +} + +type state{{.v}} struct { + market{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state{{.v}}) BalancesChanged(otherState State) (bool, error) { + otherState{{.v}}, ok := otherState.(*state{{.v}}) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState{{.v}}.State.EscrowTable) || !s.State.LockedTable.Equals(otherState{{.v}}.State.LockedTable), nil +} + +func (s *state{{.v}}) StatesChanged(otherState State) (bool, error) { + otherState{{.v}}, ok := otherState.(*state{{.v}}) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return 
!s.State.States.Equals(otherState{{.v}}.State.States), nil +} + +func (s *state{{.v}}) States() (DealStates, error) { + stateArray, err := adt{{.v}}.AsArray(s.store, s.State.States{{if (ge .v 3)}}, market{{.v}}.StatesAmtBitwidth{{end}}) + if err != nil { + return nil, err + } + return &dealStates{{.v}}{stateArray}, nil +} + +func (s *state{{.v}}) ProposalsChanged(otherState State) (bool, error) { + otherState{{.v}}, ok := otherState.(*state{{.v}}) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState{{.v}}.State.Proposals), nil +} + +func (s *state{{.v}}) Proposals() (DealProposals, error) { + proposalArray, err := adt{{.v}}.AsArray(s.store, s.State.Proposals{{if (ge .v 3)}}, market{{.v}}.ProposalsAmtBitwidth{{end}}) + if err != nil { + return nil, err + } + return &dealProposals{{.v}}{proposalArray}, nil +} + +func (s *state{{.v}}) EscrowTable() (BalanceTable, error) { + bt, err := adt{{.v}}.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable{{.v}}{bt}, nil +} + +func (s *state{{.v}}) LockedTable() (BalanceTable, error) { + bt, err := adt{{.v}}.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable{{.v}}{bt}, nil +} + +func (s *state{{.v}}) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw{{if (ge .v 2)}}, _{{end}}, err := market{{.v}}.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state{{.v}}) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable{{.v}} struct { + *adt{{.v}}.BalanceTable +} + +func (bt *balanceTable{{.v}}) ForEach(cb func(address.Address, 
abi.TokenAmount) error) error { + asMap := (*adt{{.v}}.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates{{.v}} struct { + adt.Array +} + +func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal{{.v}} market{{.v}}.DealState + found, err := s.Array.Get(uint64(dealID), &deal{{.v}}) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV{{.v}}DealState(deal{{.v}}) + return &deal, true, nil +} + +func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds{{.v}} market{{.v}}.DealState + return s.Array.ForEach(&ds{{.v}}, func(idx int64) error { + return cb(abi.DealID(idx), fromV{{.v}}DealState(ds{{.v}})) + }) +} + +func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (*DealState, error) { + var ds{{.v}} market{{.v}}.DealState + if err := ds{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV{{.v}}DealState(ds{{.v}}) + return &ds, nil +} + +func (s *dealStates{{.v}}) array() adt.Array { + return s.Array +} + +func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState { + {{if (le .v 8)}} + return DealState{ + SectorStartEpoch: v{{.v}}.SectorStartEpoch, + LastUpdatedEpoch: v{{.v}}.LastUpdatedEpoch, + SlashEpoch: v{{.v}}.SlashEpoch, + VerifiedClaim: 0, + } + {{else}} + return (DealState)(v{{.v}}) + {{end}} +} + +type dealProposals{{.v}} struct { + adt.Array +} + +func (s *dealProposals{{.v}}) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal{{.v}} market{{.v}}.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal{{.v}}) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV{{.v}}DealProposal(proposal{{.v}}) + if err != nil { + return 
nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals{{.v}}) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp{{.v}} market{{.v}}.DealProposal + return s.Array.ForEach(&dp{{.v}}, func(idx int64) error { + dp, err := fromV{{.v}}DealProposal(dp{{.v}}) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals{{.v}}) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp{{.v}} market{{.v}}.DealProposal + if err := dp{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV{{.v}}DealProposal(dp{{.v}}) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals{{.v}}) array() adt.Array { + return s.Array +} + +func fromV{{.v}}DealProposal(v{{.v}} market{{.v}}.DealProposal) (DealProposal, error) { + {{if (le .v 7)}} + label, err := labelFromGoString(v{{.v}}.Label) + {{else}} + label, err := fromV{{.v}}Label(v{{.v}}.Label) + {{end}} + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v{{.v}}.PieceCID, + PieceSize: v{{.v}}.PieceSize, + VerifiedDeal: v{{.v}}.VerifiedDeal, + Client: v{{.v}}.Client, + Provider: v{{.v}}.Provider, + + Label: label, + + StartEpoch: v{{.v}}.StartEpoch, + EndEpoch: v{{.v}}.EndEpoch, + StoragePricePerEpoch: v{{.v}}.StoragePricePerEpoch, + + ProviderCollateral: v{{.v}}.ProviderCollateral, + ClientCollateral: v{{.v}}.ClientCollateral, + }, nil +} + +{{if (ge .v 8)}} + func fromV{{.v}}Label(v{{.v}} market{{.v}}.DealLabel) (DealLabel, error) { + if v{{.v}}.IsString() { + str, err := v{{.v}}.ToString() + if err != nil { + return markettypes.EmptyDealLabel, fmt.Errorf("failed to convert string label to string: %w", err) + } + return markettypes.NewLabelFromString(str) + } + + bs, err := v{{.v}}.ToBytes() + if err != nil { + 
return markettypes.EmptyDealLabel, fmt.Errorf("failed to convert bytes label to bytes: %w", err) + } + return markettypes.NewLabelFromBytes(bs) + } +{{end}} + + + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn{{.v}})(nil) + +func decodePublishStorageDealsReturn{{.v}}(b []byte) (PublishStorageDealsReturn, error) { + var retval market{{.v}}.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn{{.v}}{retval}, nil +} + +type publishStorageDealsReturn{{.v}} struct { + market{{.v}}.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn{{.v}}) IsDealValid(index uint64) (bool, int, error) { + {{if (ge .v 6)}} + set, err := r.ValidDeals.IsSet(index) + if err != nil || !set { + return false, -1, err + } + maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}}) + if err != nil { + return false, -1, err + } + before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals) + if err != nil { + return false, -1, err + } + outIdx, err := before.Count() + if err != nil { + return false, -1, err + } + return set, int(outIdx), nil + {{else}} + // PublishStorageDeals only succeeded if all deals were valid in this version of actors + return true, int(index), nil + {{end}} +} + +func (r *publishStorageDealsReturn{{.v}}) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state{{.v}}) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { +{{if (le .v 8)}} + return verifregtypes.NoAllocationID, fmt.Errorf("unsupported before actors v9") +{{else}} + allocations, err := adt9.AsMap(s.store, s.PendingDealAllocationIds, builtin.DefaultHamtBitwidth) + if err != nil { + return verifregtypes.NoAllocationID, 
fmt.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + + var allocationId cbg.CborInt + found, err := allocations.Get(abi.UIntKey(uint64(dealId)), &allocationId) + if err != nil { + return verifregtypes.NoAllocationID, fmt.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + if !found { + return verifregtypes.NoAllocationID, nil + } + + return verifregtypes.AllocationId(allocationId), nil +{{end}} +} diff --git a/venus-shared/actors/builtin/market/state.v0.go b/venus-shared/actors/builtin/market/state.v0.go new file mode 100644 index 0000000000..9a7a35b83e --- /dev/null +++ b/venus-shared/actors/builtin/market/state.v0.go @@ -0,0 +1,312 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + + ea, err := adt0.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *market0.ConstructState(ea, em, em) + + return &out, nil +} + +type state0 struct { + market0.State + store adt.Store +} + +func (s *state0) TotalLocked() (abi.TokenAmount, error) { + 
fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state0) BalancesChanged(otherState State) (bool, error) { + otherState0, ok := otherState.(*state0) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState0.State.EscrowTable) || !s.State.LockedTable.Equals(otherState0.State.LockedTable), nil +} + +func (s *state0) StatesChanged(otherState State) (bool, error) { + otherState0, ok := otherState.(*state0) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState0.State.States), nil +} + +func (s *state0) States() (DealStates, error) { + stateArray, err := adt0.AsArray(s.store, s.State.States) + if err != nil { + return nil, err + } + return &dealStates0{stateArray}, nil +} + +func (s *state0) ProposalsChanged(otherState State) (bool, error) { + otherState0, ok := otherState.(*state0) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState0.State.Proposals), nil +} + +func (s *state0) Proposals() (DealProposals, error) { + proposalArray, err := adt0.AsArray(s.store, s.State.Proposals) + if err != nil { + return nil, err + } + return &dealProposals0{proposalArray}, nil +} + +func (s *state0) EscrowTable() (BalanceTable, error) { + bt, err := adt0.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable0{bt}, nil +} + +func (s *state0) LockedTable() (BalanceTable, error) { + bt, err := adt0.AsBalanceTable(s.store, s.State.LockedTable) + if err != 
nil { + return nil, err + } + return &balanceTable0{bt}, nil +} + +func (s *state0) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, err := market0.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state0) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable0 struct { + *adt0.BalanceTable +} + +func (bt *balanceTable0) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt0.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates0 struct { + adt.Array +} + +func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal0 market0.DealState + found, err := s.Array.Get(uint64(dealID), &deal0) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV0DealState(deal0) + return &deal, true, nil +} + +func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds0 market0.DealState + return s.Array.ForEach(&ds0, func(idx int64) error { + return cb(abi.DealID(idx), fromV0DealState(ds0)) + }) +} + +func (s *dealStates0) decode(val *cbg.Deferred) (*DealState, error) { + var ds0 market0.DealState + if err := ds0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV0DealState(ds0) + return &ds, nil +} + +func (s *dealStates0) array() adt.Array { + return s.Array +} + +func fromV0DealState(v0 market0.DealState) DealState { + + return DealState{ + SectorStartEpoch: v0.SectorStartEpoch, + LastUpdatedEpoch: v0.LastUpdatedEpoch, + SlashEpoch: v0.SlashEpoch, + VerifiedClaim: 0, + } + +} + +type dealProposals0 struct { + 
adt.Array +} + +func (s *dealProposals0) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal0 market0.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal0) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV0DealProposal(proposal0) + if err != nil { + return nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals0) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp0 market0.DealProposal + return s.Array.ForEach(&dp0, func(idx int64) error { + dp, err := fromV0DealProposal(dp0) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals0) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp0 market0.DealProposal + if err := dp0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV0DealProposal(dp0) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals0) array() adt.Array { + return s.Array +} + +func fromV0DealProposal(v0 market0.DealProposal) (DealProposal, error) { + + label, err := labelFromGoString(v0.Label) + + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v0.PieceCID, + PieceSize: v0.PieceSize, + VerifiedDeal: v0.VerifiedDeal, + Client: v0.Client, + Provider: v0.Provider, + + Label: label, + + StartEpoch: v0.StartEpoch, + EndEpoch: v0.EndEpoch, + StoragePricePerEpoch: v0.StoragePricePerEpoch, + + ProviderCollateral: v0.ProviderCollateral, + ClientCollateral: v0.ClientCollateral, + }, nil +} + +func (s *state0) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn0)(nil) + +func decodePublishStorageDealsReturn0(b []byte) (PublishStorageDealsReturn, error) { + var retval 
market0.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn0{retval}, nil +} + +type publishStorageDealsReturn0 struct { + market0.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn0) IsDealValid(index uint64) (bool, int, error) { + + // PublishStorageDeals only succeeded if all deals were valid in this version of actors + return true, int(index), nil + +} + +func (r *publishStorageDealsReturn0) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state0) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + return verifregtypes.NoAllocationID, fmt.Errorf("unsupported before actors v9") + +} diff --git a/venus-shared/actors/builtin/market/state.v2.go b/venus-shared/actors/builtin/market/state.v2.go new file mode 100644 index 0000000000..fa8a3d1d9c --- /dev/null +++ b/venus-shared/actors/builtin/market/state.v2.go @@ -0,0 +1,312 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + 
out := state2{store: store} + + ea, err := adt2.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *market2.ConstructState(ea, em, em) + + return &out, nil +} + +type state2 struct { + market2.State + store adt.Store +} + +func (s *state2) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state2) BalancesChanged(otherState State) (bool, error) { + otherState2, ok := otherState.(*state2) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState2.State.EscrowTable) || !s.State.LockedTable.Equals(otherState2.State.LockedTable), nil +} + +func (s *state2) StatesChanged(otherState State) (bool, error) { + otherState2, ok := otherState.(*state2) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState2.State.States), nil +} + +func (s *state2) States() (DealStates, error) { + stateArray, err := adt2.AsArray(s.store, s.State.States) + if err != nil { + return nil, err + } + return &dealStates2{stateArray}, nil +} + +func (s *state2) ProposalsChanged(otherState State) (bool, error) { + otherState2, ok := otherState.(*state2) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState2.State.Proposals), nil +} + +func (s *state2) Proposals() (DealProposals, error) { + proposalArray, err := adt2.AsArray(s.store, s.State.Proposals) + if err != nil 
{ + return nil, err + } + return &dealProposals2{proposalArray}, nil +} + +func (s *state2) EscrowTable() (BalanceTable, error) { + bt, err := adt2.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable2{bt}, nil +} + +func (s *state2) LockedTable() (BalanceTable, error) { + bt, err := adt2.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable2{bt}, nil +} + +func (s *state2) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market2.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state2) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable2 struct { + *adt2.BalanceTable +} + +func (bt *balanceTable2) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt2.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates2 struct { + adt.Array +} + +func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal2 market2.DealState + found, err := s.Array.Get(uint64(dealID), &deal2) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV2DealState(deal2) + return &deal, true, nil +} + +func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds2 market2.DealState + return s.Array.ForEach(&ds2, func(idx int64) error { + return cb(abi.DealID(idx), fromV2DealState(ds2)) + }) +} + +func (s *dealStates2) decode(val *cbg.Deferred) (*DealState, error) { + var ds2 market2.DealState + if err := ds2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err 
!= nil { + return nil, err + } + ds := fromV2DealState(ds2) + return &ds, nil +} + +func (s *dealStates2) array() adt.Array { + return s.Array +} + +func fromV2DealState(v2 market2.DealState) DealState { + + return DealState{ + SectorStartEpoch: v2.SectorStartEpoch, + LastUpdatedEpoch: v2.LastUpdatedEpoch, + SlashEpoch: v2.SlashEpoch, + VerifiedClaim: 0, + } + +} + +type dealProposals2 struct { + adt.Array +} + +func (s *dealProposals2) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal2 market2.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal2) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV2DealProposal(proposal2) + if err != nil { + return nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals2) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp2 market2.DealProposal + return s.Array.ForEach(&dp2, func(idx int64) error { + dp, err := fromV2DealProposal(dp2) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals2) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp2 market2.DealProposal + if err := dp2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV2DealProposal(dp2) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals2) array() adt.Array { + return s.Array +} + +func fromV2DealProposal(v2 market2.DealProposal) (DealProposal, error) { + + label, err := labelFromGoString(v2.Label) + + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v2.PieceCID, + PieceSize: v2.PieceSize, + VerifiedDeal: v2.VerifiedDeal, + Client: v2.Client, + Provider: v2.Provider, + + Label: label, + + StartEpoch: v2.StartEpoch, + EndEpoch: 
v2.EndEpoch, + StoragePricePerEpoch: v2.StoragePricePerEpoch, + + ProviderCollateral: v2.ProviderCollateral, + ClientCollateral: v2.ClientCollateral, + }, nil +} + +func (s *state2) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn2)(nil) + +func decodePublishStorageDealsReturn2(b []byte) (PublishStorageDealsReturn, error) { + var retval market2.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn2{retval}, nil +} + +type publishStorageDealsReturn2 struct { + market2.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn2) IsDealValid(index uint64) (bool, int, error) { + + // PublishStorageDeals only succeeded if all deals were valid in this version of actors + return true, int(index), nil + +} + +func (r *publishStorageDealsReturn2) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state2) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + return verifregtypes.NoAllocationID, fmt.Errorf("unsupported before actors v9") + +} diff --git a/venus-shared/actors/builtin/market/state.v3.go b/venus-shared/actors/builtin/market/state.v3.go new file mode 100644 index 0000000000..ededb11042 --- /dev/null +++ b/venus-shared/actors/builtin/market/state.v3.go @@ -0,0 +1,307 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + market3 
"github.com/filecoin-project/specs-actors/v3/actors/builtin/market" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + + s, err := market3.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state3 struct { + market3.State + store adt.Store +} + +func (s *state3) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state3) BalancesChanged(otherState State) (bool, error) { + otherState3, ok := otherState.(*state3) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState3.State.EscrowTable) || !s.State.LockedTable.Equals(otherState3.State.LockedTable), nil +} + +func (s *state3) StatesChanged(otherState State) (bool, error) { + otherState3, ok := otherState.(*state3) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState3.State.States), nil +} + +func (s *state3) States() (DealStates, error) { + stateArray, err := adt3.AsArray(s.store, s.State.States, market3.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates3{stateArray}, nil +} + +func (s *state3) ProposalsChanged(otherState State) (bool, error) { + otherState3, ok := otherState.(*state3) + if !ok { + // there's no way to compare 
different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState3.State.Proposals), nil +} + +func (s *state3) Proposals() (DealProposals, error) { + proposalArray, err := adt3.AsArray(s.store, s.State.Proposals, market3.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals3{proposalArray}, nil +} + +func (s *state3) EscrowTable() (BalanceTable, error) { + bt, err := adt3.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable3{bt}, nil +} + +func (s *state3) LockedTable() (BalanceTable, error) { + bt, err := adt3.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable3{bt}, nil +} + +func (s *state3) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market3.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state3) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable3 struct { + *adt3.BalanceTable +} + +func (bt *balanceTable3) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt3.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates3 struct { + adt.Array +} + +func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal3 market3.DealState + found, err := s.Array.Get(uint64(dealID), &deal3) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV3DealState(deal3) + return &deal, true, nil +} + +func (s *dealStates3) 
ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds3 market3.DealState + return s.Array.ForEach(&ds3, func(idx int64) error { + return cb(abi.DealID(idx), fromV3DealState(ds3)) + }) +} + +func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) { + var ds3 market3.DealState + if err := ds3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV3DealState(ds3) + return &ds, nil +} + +func (s *dealStates3) array() adt.Array { + return s.Array +} + +func fromV3DealState(v3 market3.DealState) DealState { + + return DealState{ + SectorStartEpoch: v3.SectorStartEpoch, + LastUpdatedEpoch: v3.LastUpdatedEpoch, + SlashEpoch: v3.SlashEpoch, + VerifiedClaim: 0, + } + +} + +type dealProposals3 struct { + adt.Array +} + +func (s *dealProposals3) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal3 market3.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal3) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV3DealProposal(proposal3) + if err != nil { + return nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals3) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp3 market3.DealProposal + return s.Array.ForEach(&dp3, func(idx int64) error { + dp, err := fromV3DealProposal(dp3) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals3) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp3 market3.DealProposal + if err := dp3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV3DealProposal(dp3) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals3) array() adt.Array { + return s.Array +} + +func fromV3DealProposal(v3 market3.DealProposal) (DealProposal, error) { + + 
label, err := labelFromGoString(v3.Label) + + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v3.PieceCID, + PieceSize: v3.PieceSize, + VerifiedDeal: v3.VerifiedDeal, + Client: v3.Client, + Provider: v3.Provider, + + Label: label, + + StartEpoch: v3.StartEpoch, + EndEpoch: v3.EndEpoch, + StoragePricePerEpoch: v3.StoragePricePerEpoch, + + ProviderCollateral: v3.ProviderCollateral, + ClientCollateral: v3.ClientCollateral, + }, nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn3)(nil) + +func decodePublishStorageDealsReturn3(b []byte) (PublishStorageDealsReturn, error) { + var retval market3.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn3{retval}, nil +} + +type publishStorageDealsReturn3 struct { + market3.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn3) IsDealValid(index uint64) (bool, int, error) { + + // PublishStorageDeals only succeeded if all deals were valid in this version of actors + return true, int(index), nil + +} + +func (r *publishStorageDealsReturn3) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state3) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + return verifregtypes.NoAllocationID, fmt.Errorf("unsupported before actors v9") + +} diff --git a/venus-shared/actors/builtin/market/state.v4.go b/venus-shared/actors/builtin/market/state.v4.go new file mode 100644 index 0000000000..e194f7fd5e --- /dev/null +++ b/venus-shared/actors/builtin/market/state.v4.go @@ -0,0 +1,307 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "fmt" + + "github.com/filecoin-project/go-address" + 
"github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + + s, err := market4.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state4 struct { + market4.State + store adt.Store +} + +func (s *state4) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state4) BalancesChanged(otherState State) (bool, error) { + otherState4, ok := otherState.(*state4) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState4.State.EscrowTable) || !s.State.LockedTable.Equals(otherState4.State.LockedTable), nil +} + +func (s *state4) StatesChanged(otherState State) (bool, error) { + otherState4, ok := otherState.(*state4) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState4.State.States), nil +} + +func (s *state4) States() (DealStates, 
error) { + stateArray, err := adt4.AsArray(s.store, s.State.States, market4.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates4{stateArray}, nil +} + +func (s *state4) ProposalsChanged(otherState State) (bool, error) { + otherState4, ok := otherState.(*state4) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState4.State.Proposals), nil +} + +func (s *state4) Proposals() (DealProposals, error) { + proposalArray, err := adt4.AsArray(s.store, s.State.Proposals, market4.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals4{proposalArray}, nil +} + +func (s *state4) EscrowTable() (BalanceTable, error) { + bt, err := adt4.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable4{bt}, nil +} + +func (s *state4) LockedTable() (BalanceTable, error) { + bt, err := adt4.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable4{bt}, nil +} + +func (s *state4) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market4.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state4) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable4 struct { + *adt4.BalanceTable +} + +func (bt *balanceTable4) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt4.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates4 struct { + adt.Array +} + 
+func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal4 market4.DealState + found, err := s.Array.Get(uint64(dealID), &deal4) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV4DealState(deal4) + return &deal, true, nil +} + +func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds4 market4.DealState + return s.Array.ForEach(&ds4, func(idx int64) error { + return cb(abi.DealID(idx), fromV4DealState(ds4)) + }) +} + +func (s *dealStates4) decode(val *cbg.Deferred) (*DealState, error) { + var ds4 market4.DealState + if err := ds4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV4DealState(ds4) + return &ds, nil +} + +func (s *dealStates4) array() adt.Array { + return s.Array +} + +func fromV4DealState(v4 market4.DealState) DealState { + + return DealState{ + SectorStartEpoch: v4.SectorStartEpoch, + LastUpdatedEpoch: v4.LastUpdatedEpoch, + SlashEpoch: v4.SlashEpoch, + VerifiedClaim: 0, + } + +} + +type dealProposals4 struct { + adt.Array +} + +func (s *dealProposals4) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal4 market4.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal4) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV4DealProposal(proposal4) + if err != nil { + return nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals4) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp4 market4.DealProposal + return s.Array.ForEach(&dp4, func(idx int64) error { + dp, err := fromV4DealProposal(dp4) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals4) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp4 market4.DealProposal + if err 
:= dp4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV4DealProposal(dp4) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals4) array() adt.Array { + return s.Array +} + +func fromV4DealProposal(v4 market4.DealProposal) (DealProposal, error) { + + label, err := labelFromGoString(v4.Label) + + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v4.PieceCID, + PieceSize: v4.PieceSize, + VerifiedDeal: v4.VerifiedDeal, + Client: v4.Client, + Provider: v4.Provider, + + Label: label, + + StartEpoch: v4.StartEpoch, + EndEpoch: v4.EndEpoch, + StoragePricePerEpoch: v4.StoragePricePerEpoch, + + ProviderCollateral: v4.ProviderCollateral, + ClientCollateral: v4.ClientCollateral, + }, nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn4)(nil) + +func decodePublishStorageDealsReturn4(b []byte) (PublishStorageDealsReturn, error) { + var retval market4.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn4{retval}, nil +} + +type publishStorageDealsReturn4 struct { + market4.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn4) IsDealValid(index uint64) (bool, int, error) { + + // PublishStorageDeals only succeeded if all deals were valid in this version of actors + return true, int(index), nil + +} + +func (r *publishStorageDealsReturn4) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state4) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + return verifregtypes.NoAllocationID, fmt.Errorf("unsupported before actors v9") + +} diff --git a/venus-shared/actors/builtin/market/state.v5.go 
b/venus-shared/actors/builtin/market/state.v5.go new file mode 100644 index 0000000000..de4a0f35d5 --- /dev/null +++ b/venus-shared/actors/builtin/market/state.v5.go @@ -0,0 +1,307 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + + s, err := market5.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + market5.State + store adt.Store +} + +func (s *state5) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state5) BalancesChanged(otherState State) (bool, error) { + otherState5, ok := otherState.(*state5) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState5.State.EscrowTable) || !s.State.LockedTable.Equals(otherState5.State.LockedTable), nil +} + +func (s *state5) StatesChanged(otherState 
State) (bool, error) { + otherState5, ok := otherState.(*state5) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState5.State.States), nil +} + +func (s *state5) States() (DealStates, error) { + stateArray, err := adt5.AsArray(s.store, s.State.States, market5.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates5{stateArray}, nil +} + +func (s *state5) ProposalsChanged(otherState State) (bool, error) { + otherState5, ok := otherState.(*state5) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState5.State.Proposals), nil +} + +func (s *state5) Proposals() (DealProposals, error) { + proposalArray, err := adt5.AsArray(s.store, s.State.Proposals, market5.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals5{proposalArray}, nil +} + +func (s *state5) EscrowTable() (BalanceTable, error) { + bt, err := adt5.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable5{bt}, nil +} + +func (s *state5) LockedTable() (BalanceTable, error) { + bt, err := adt5.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable5{bt}, nil +} + +func (s *state5) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market5.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state5) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable5 struct { + *adt5.BalanceTable +} + +func (bt *balanceTable5) 
ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt5.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates5 struct { + adt.Array +} + +func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal5 market5.DealState + found, err := s.Array.Get(uint64(dealID), &deal5) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV5DealState(deal5) + return &deal, true, nil +} + +func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds5 market5.DealState + return s.Array.ForEach(&ds5, func(idx int64) error { + return cb(abi.DealID(idx), fromV5DealState(ds5)) + }) +} + +func (s *dealStates5) decode(val *cbg.Deferred) (*DealState, error) { + var ds5 market5.DealState + if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV5DealState(ds5) + return &ds, nil +} + +func (s *dealStates5) array() adt.Array { + return s.Array +} + +func fromV5DealState(v5 market5.DealState) DealState { + + return DealState{ + SectorStartEpoch: v5.SectorStartEpoch, + LastUpdatedEpoch: v5.LastUpdatedEpoch, + SlashEpoch: v5.SlashEpoch, + VerifiedClaim: 0, + } + +} + +type dealProposals5 struct { + adt.Array +} + +func (s *dealProposals5) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal5 market5.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal5) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV5DealProposal(proposal5) + if err != nil { + return nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals5) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp5 
market5.DealProposal + return s.Array.ForEach(&dp5, func(idx int64) error { + dp, err := fromV5DealProposal(dp5) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals5) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp5 market5.DealProposal + if err := dp5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV5DealProposal(dp5) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals5) array() adt.Array { + return s.Array +} + +func fromV5DealProposal(v5 market5.DealProposal) (DealProposal, error) { + + label, err := labelFromGoString(v5.Label) + + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v5.PieceCID, + PieceSize: v5.PieceSize, + VerifiedDeal: v5.VerifiedDeal, + Client: v5.Client, + Provider: v5.Provider, + + Label: label, + + StartEpoch: v5.StartEpoch, + EndEpoch: v5.EndEpoch, + StoragePricePerEpoch: v5.StoragePricePerEpoch, + + ProviderCollateral: v5.ProviderCollateral, + ClientCollateral: v5.ClientCollateral, + }, nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn5)(nil) + +func decodePublishStorageDealsReturn5(b []byte) (PublishStorageDealsReturn, error) { + var retval market5.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn5{retval}, nil +} + +type publishStorageDealsReturn5 struct { + market5.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn5) IsDealValid(index uint64) (bool, int, error) { + + // PublishStorageDeals only succeeded if all deals were valid in this version of actors + return true, int(index), nil + +} + +func (r 
*publishStorageDealsReturn5) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state5) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + return verifregtypes.NoAllocationID, fmt.Errorf("unsupported before actors v9") + +} diff --git a/venus-shared/actors/builtin/market/state.v6.go b/venus-shared/actors/builtin/market/state.v6.go new file mode 100644 index 0000000000..bf1c8d3b57 --- /dev/null +++ b/venus-shared/actors/builtin/market/state.v6.go @@ -0,0 +1,326 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-bitfield" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + market6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/market" + adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt" +) + +var _ State = (*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store) (State, error) { + out := state6{store: store} + + s, err := market6.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state6 struct { + market6.State + store adt.Store +} + +func (s *state6) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s 
*state6) BalancesChanged(otherState State) (bool, error) { + otherState6, ok := otherState.(*state6) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState6.State.EscrowTable) || !s.State.LockedTable.Equals(otherState6.State.LockedTable), nil +} + +func (s *state6) StatesChanged(otherState State) (bool, error) { + otherState6, ok := otherState.(*state6) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState6.State.States), nil +} + +func (s *state6) States() (DealStates, error) { + stateArray, err := adt6.AsArray(s.store, s.State.States, market6.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates6{stateArray}, nil +} + +func (s *state6) ProposalsChanged(otherState State) (bool, error) { + otherState6, ok := otherState.(*state6) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState6.State.Proposals), nil +} + +func (s *state6) Proposals() (DealProposals, error) { + proposalArray, err := adt6.AsArray(s.store, s.State.Proposals, market6.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals6{proposalArray}, nil +} + +func (s *state6) EscrowTable() (BalanceTable, error) { + bt, err := adt6.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable6{bt}, nil +} + +func (s *state6) LockedTable() (BalanceTable, error) { + bt, err := adt6.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable6{bt}, nil +} + +func (s *state6) VerifyDealsForActivation( + 
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market6.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state6) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable6 struct { + *adt6.BalanceTable +} + +func (bt *balanceTable6) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt6.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates6 struct { + adt.Array +} + +func (s *dealStates6) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal6 market6.DealState + found, err := s.Array.Get(uint64(dealID), &deal6) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV6DealState(deal6) + return &deal, true, nil +} + +func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds6 market6.DealState + return s.Array.ForEach(&ds6, func(idx int64) error { + return cb(abi.DealID(idx), fromV6DealState(ds6)) + }) +} + +func (s *dealStates6) decode(val *cbg.Deferred) (*DealState, error) { + var ds6 market6.DealState + if err := ds6.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV6DealState(ds6) + return &ds, nil +} + +func (s *dealStates6) array() adt.Array { + return s.Array +} + +func fromV6DealState(v6 market6.DealState) DealState { + + return DealState{ + SectorStartEpoch: v6.SectorStartEpoch, + LastUpdatedEpoch: v6.LastUpdatedEpoch, + SlashEpoch: v6.SlashEpoch, + VerifiedClaim: 0, + } + +} + +type dealProposals6 struct { + adt.Array +} + +func (s *dealProposals6) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal6 
market6.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal6) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV6DealProposal(proposal6) + if err != nil { + return nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals6) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp6 market6.DealProposal + return s.Array.ForEach(&dp6, func(idx int64) error { + dp, err := fromV6DealProposal(dp6) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals6) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp6 market6.DealProposal + if err := dp6.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV6DealProposal(dp6) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals6) array() adt.Array { + return s.Array +} + +func fromV6DealProposal(v6 market6.DealProposal) (DealProposal, error) { + + label, err := labelFromGoString(v6.Label) + + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v6.PieceCID, + PieceSize: v6.PieceSize, + VerifiedDeal: v6.VerifiedDeal, + Client: v6.Client, + Provider: v6.Provider, + + Label: label, + + StartEpoch: v6.StartEpoch, + EndEpoch: v6.EndEpoch, + StoragePricePerEpoch: v6.StoragePricePerEpoch, + + ProviderCollateral: v6.ProviderCollateral, + ClientCollateral: v6.ClientCollateral, + }, nil +} + +func (s *state6) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn6)(nil) + +func decodePublishStorageDealsReturn6(b []byte) (PublishStorageDealsReturn, error) { + var retval market6.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, 
fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn6{retval}, nil +} + +type publishStorageDealsReturn6 struct { + market6.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn6) IsDealValid(index uint64) (bool, int, error) { + + set, err := r.ValidDeals.IsSet(index) + if err != nil || !set { + return false, -1, err + } + maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}}) + if err != nil { + return false, -1, err + } + before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals) + if err != nil { + return false, -1, err + } + outIdx, err := before.Count() + if err != nil { + return false, -1, err + } + return set, int(outIdx), nil + +} + +func (r *publishStorageDealsReturn6) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state6) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + return verifregtypes.NoAllocationID, fmt.Errorf("unsupported before actors v9") + +} diff --git a/venus-shared/actors/builtin/market/state.v7.go b/venus-shared/actors/builtin/market/state.v7.go new file mode 100644 index 0000000000..3ba876794f --- /dev/null +++ b/venus-shared/actors/builtin/market/state.v7.go @@ -0,0 +1,326 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-bitfield" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + market7 
"github.com/filecoin-project/specs-actors/v7/actors/builtin/market" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + + s, err := market7.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + market7.State + store adt.Store +} + +func (s *state7) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state7) BalancesChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState7.State.EscrowTable) || !s.State.LockedTable.Equals(otherState7.State.LockedTable), nil +} + +func (s *state7) StatesChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState7.State.States), nil +} + +func (s *state7) States() (DealStates, error) { + stateArray, err := adt7.AsArray(s.store, s.State.States, market7.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates7{stateArray}, nil +} + +func (s *state7) ProposalsChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare 
different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState7.State.Proposals), nil +} + +func (s *state7) Proposals() (DealProposals, error) { + proposalArray, err := adt7.AsArray(s.store, s.State.Proposals, market7.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals7{proposalArray}, nil +} + +func (s *state7) EscrowTable() (BalanceTable, error) { + bt, err := adt7.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable7{bt}, nil +} + +func (s *state7) LockedTable() (BalanceTable, error) { + bt, err := adt7.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable7{bt}, nil +} + +func (s *state7) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market7.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state7) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable7 struct { + *adt7.BalanceTable +} + +func (bt *balanceTable7) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt7.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates7 struct { + adt.Array +} + +func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal7 market7.DealState + found, err := s.Array.Get(uint64(dealID), &deal7) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV7DealState(deal7) + return &deal, true, nil +} + +func (s *dealStates7) 
ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds7 market7.DealState + return s.Array.ForEach(&ds7, func(idx int64) error { + return cb(abi.DealID(idx), fromV7DealState(ds7)) + }) +} + +func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) { + var ds7 market7.DealState + if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV7DealState(ds7) + return &ds, nil +} + +func (s *dealStates7) array() adt.Array { + return s.Array +} + +func fromV7DealState(v7 market7.DealState) DealState { + + return DealState{ + SectorStartEpoch: v7.SectorStartEpoch, + LastUpdatedEpoch: v7.LastUpdatedEpoch, + SlashEpoch: v7.SlashEpoch, + VerifiedClaim: 0, + } + +} + +type dealProposals7 struct { + adt.Array +} + +func (s *dealProposals7) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal7 market7.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal7) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV7DealProposal(proposal7) + if err != nil { + return nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals7) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp7 market7.DealProposal + return s.Array.ForEach(&dp7, func(idx int64) error { + dp, err := fromV7DealProposal(dp7) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals7) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp7 market7.DealProposal + if err := dp7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV7DealProposal(dp7) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals7) array() adt.Array { + return s.Array +} + +func fromV7DealProposal(v7 market7.DealProposal) (DealProposal, error) { + + 
label, err := labelFromGoString(v7.Label) + + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v7.PieceCID, + PieceSize: v7.PieceSize, + VerifiedDeal: v7.VerifiedDeal, + Client: v7.Client, + Provider: v7.Provider, + + Label: label, + + StartEpoch: v7.StartEpoch, + EndEpoch: v7.EndEpoch, + StoragePricePerEpoch: v7.StoragePricePerEpoch, + + ProviderCollateral: v7.ProviderCollateral, + ClientCollateral: v7.ClientCollateral, + }, nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn7)(nil) + +func decodePublishStorageDealsReturn7(b []byte) (PublishStorageDealsReturn, error) { + var retval market7.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn7{retval}, nil +} + +type publishStorageDealsReturn7 struct { + market7.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn7) IsDealValid(index uint64) (bool, int, error) { + + set, err := r.ValidDeals.IsSet(index) + if err != nil || !set { + return false, -1, err + } + maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}}) + if err != nil { + return false, -1, err + } + before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals) + if err != nil { + return false, -1, err + } + outIdx, err := before.Count() + if err != nil { + return false, -1, err + } + return set, int(outIdx), nil + +} + +func (r *publishStorageDealsReturn7) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state7) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + return verifregtypes.NoAllocationID, fmt.Errorf("unsupported before actors v9") + +} diff --git 
a/venus-shared/actors/builtin/market/state.v8.go b/venus-shared/actors/builtin/market/state.v8.go new file mode 100644 index 0000000000..87fc6c559e --- /dev/null +++ b/venus-shared/actors/builtin/market/state.v8.go @@ -0,0 +1,343 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-bitfield" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" + adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" + markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" +) + +var _ State = (*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store) (State, error) { + out := state8{store: store} + + s, err := market8.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state8 struct { + market8.State + store adt.Store +} + +func (s *state8) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state8) BalancesChanged(otherState State) (bool, error) { + otherState8, ok := otherState.(*state8) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state 
of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState8.State.EscrowTable) || !s.State.LockedTable.Equals(otherState8.State.LockedTable), nil +} + +func (s *state8) StatesChanged(otherState State) (bool, error) { + otherState8, ok := otherState.(*state8) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState8.State.States), nil +} + +func (s *state8) States() (DealStates, error) { + stateArray, err := adt8.AsArray(s.store, s.State.States, market8.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates8{stateArray}, nil +} + +func (s *state8) ProposalsChanged(otherState State) (bool, error) { + otherState8, ok := otherState.(*state8) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState8.State.Proposals), nil +} + +func (s *state8) Proposals() (DealProposals, error) { + proposalArray, err := adt8.AsArray(s.store, s.State.Proposals, market8.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals8{proposalArray}, nil +} + +func (s *state8) EscrowTable() (BalanceTable, error) { + bt, err := adt8.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable8{bt}, nil +} + +func (s *state8) LockedTable() (BalanceTable, error) { + bt, err := adt8.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable8{bt}, nil +} + +func (s *state8) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market8.ValidateDealsForActivation(&s.State, s.store, 
deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state8) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable8 struct { + *adt8.BalanceTable +} + +func (bt *balanceTable8) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt8.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates8 struct { + adt.Array +} + +func (s *dealStates8) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal8 market8.DealState + found, err := s.Array.Get(uint64(dealID), &deal8) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV8DealState(deal8) + return &deal, true, nil +} + +func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds8 market8.DealState + return s.Array.ForEach(&ds8, func(idx int64) error { + return cb(abi.DealID(idx), fromV8DealState(ds8)) + }) +} + +func (s *dealStates8) decode(val *cbg.Deferred) (*DealState, error) { + var ds8 market8.DealState + if err := ds8.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV8DealState(ds8) + return &ds, nil +} + +func (s *dealStates8) array() adt.Array { + return s.Array +} + +func fromV8DealState(v8 market8.DealState) DealState { + + return DealState{ + SectorStartEpoch: v8.SectorStartEpoch, + LastUpdatedEpoch: v8.LastUpdatedEpoch, + SlashEpoch: v8.SlashEpoch, + VerifiedClaim: 0, + } + +} + +type dealProposals8 struct { + adt.Array +} + +func (s *dealProposals8) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal8 market8.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal8) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV8DealProposal(proposal8) 
+ if err != nil { + return nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals8) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp8 market8.DealProposal + return s.Array.ForEach(&dp8, func(idx int64) error { + dp, err := fromV8DealProposal(dp8) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals8) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp8 market8.DealProposal + if err := dp8.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV8DealProposal(dp8) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals8) array() adt.Array { + return s.Array +} + +func fromV8DealProposal(v8 market8.DealProposal) (DealProposal, error) { + + label, err := fromV8Label(v8.Label) + + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v8.PieceCID, + PieceSize: v8.PieceSize, + VerifiedDeal: v8.VerifiedDeal, + Client: v8.Client, + Provider: v8.Provider, + + Label: label, + + StartEpoch: v8.StartEpoch, + EndEpoch: v8.EndEpoch, + StoragePricePerEpoch: v8.StoragePricePerEpoch, + + ProviderCollateral: v8.ProviderCollateral, + ClientCollateral: v8.ClientCollateral, + }, nil +} + +func fromV8Label(v8 market8.DealLabel) (DealLabel, error) { + if v8.IsString() { + str, err := v8.ToString() + if err != nil { + return markettypes.EmptyDealLabel, fmt.Errorf("failed to convert string label to string: %w", err) + } + return markettypes.NewLabelFromString(str) + } + + bs, err := v8.ToBytes() + if err != nil { + return markettypes.EmptyDealLabel, fmt.Errorf("failed to convert bytes label to bytes: %w", err) + } + return markettypes.NewLabelFromBytes(bs) +} + +func (s *state8) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = 
(*publishStorageDealsReturn8)(nil) + +func decodePublishStorageDealsReturn8(b []byte) (PublishStorageDealsReturn, error) { + var retval market8.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn8{retval}, nil +} + +type publishStorageDealsReturn8 struct { + market8.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn8) IsDealValid(index uint64) (bool, int, error) { + + set, err := r.ValidDeals.IsSet(index) + if err != nil || !set { + return false, -1, err + } + maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}}) + if err != nil { + return false, -1, err + } + before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals) + if err != nil { + return false, -1, err + } + outIdx, err := before.Count() + if err != nil { + return false, -1, err + } + return set, int(outIdx), nil + +} + +func (r *publishStorageDealsReturn8) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state8) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + return verifregtypes.NoAllocationID, fmt.Errorf("unsupported before actors v9") + +} diff --git a/venus-shared/actors/builtin/market/state.v9.go b/venus-shared/actors/builtin/market/state.v9.go new file mode 100644 index 0000000000..f2b60cd261 --- /dev/null +++ b/venus-shared/actors/builtin/market/state.v9.go @@ -0,0 +1,354 @@ +// FETCHED FROM LOTUS: builtin/market/state.go.template + +package market + +import ( + "bytes" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-bitfield" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + + verifregtypes 
"github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + market9 "github.com/filecoin-project/go-state-types/builtin/v9/market" + markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" + adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" + + "github.com/filecoin-project/go-state-types/builtin" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store) (State, error) { + out := state9{store: store} + + s, err := market9.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state9 struct { + market9.State + store adt.Store +} + +func (s *state9) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state9) BalancesChanged(otherState State) (bool, error) { + otherState9, ok := otherState.(*state9) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState9.State.EscrowTable) || !s.State.LockedTable.Equals(otherState9.State.LockedTable), nil +} + +func (s *state9) StatesChanged(otherState State) (bool, error) { + otherState9, ok := otherState.(*state9) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState9.State.States), nil +} + +func (s *state9) States() (DealStates, 
error) { + stateArray, err := adt9.AsArray(s.store, s.State.States, market9.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates9{stateArray}, nil +} + +func (s *state9) ProposalsChanged(otherState State) (bool, error) { + otherState9, ok := otherState.(*state9) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState9.State.Proposals), nil +} + +func (s *state9) Proposals() (DealProposals, error) { + proposalArray, err := adt9.AsArray(s.store, s.State.Proposals, market9.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals9{proposalArray}, nil +} + +func (s *state9) EscrowTable() (BalanceTable, error) { + bt, err := adt9.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable9{bt}, nil +} + +func (s *state9) LockedTable() (BalanceTable, error) { + bt, err := adt9.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable9{bt}, nil +} + +func (s *state9) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market9.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state9) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable9 struct { + *adt9.BalanceTable +} + +func (bt *balanceTable9) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt9.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates9 struct { + adt.Array +} + 
+func (s *dealStates9) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal9 market9.DealState + found, err := s.Array.Get(uint64(dealID), &deal9) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV9DealState(deal9) + return &deal, true, nil +} + +func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds9 market9.DealState + return s.Array.ForEach(&ds9, func(idx int64) error { + return cb(abi.DealID(idx), fromV9DealState(ds9)) + }) +} + +func (s *dealStates9) decode(val *cbg.Deferred) (*DealState, error) { + var ds9 market9.DealState + if err := ds9.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV9DealState(ds9) + return &ds, nil +} + +func (s *dealStates9) array() adt.Array { + return s.Array +} + +func fromV9DealState(v9 market9.DealState) DealState { + + return (DealState)(v9) + +} + +type dealProposals9 struct { + adt.Array +} + +func (s *dealProposals9) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal9 market9.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal9) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV9DealProposal(proposal9) + if err != nil { + return nil, true, fmt.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals9) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp9 market9.DealProposal + return s.Array.ForEach(&dp9, func(idx int64) error { + dp, err := fromV9DealProposal(dp9) + if err != nil { + return fmt.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals9) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp9 market9.DealProposal + if err := dp9.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV9DealProposal(dp9) + if 
err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals9) array() adt.Array { + return s.Array +} + +func fromV9DealProposal(v9 market9.DealProposal) (DealProposal, error) { + + label, err := fromV9Label(v9.Label) + + if err != nil { + return DealProposal{}, fmt.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v9.PieceCID, + PieceSize: v9.PieceSize, + VerifiedDeal: v9.VerifiedDeal, + Client: v9.Client, + Provider: v9.Provider, + + Label: label, + + StartEpoch: v9.StartEpoch, + EndEpoch: v9.EndEpoch, + StoragePricePerEpoch: v9.StoragePricePerEpoch, + + ProviderCollateral: v9.ProviderCollateral, + ClientCollateral: v9.ClientCollateral, + }, nil +} + +func fromV9Label(v9 market9.DealLabel) (DealLabel, error) { + if v9.IsString() { + str, err := v9.ToString() + if err != nil { + return markettypes.EmptyDealLabel, fmt.Errorf("failed to convert string label to string: %w", err) + } + return markettypes.NewLabelFromString(str) + } + + bs, err := v9.ToBytes() + if err != nil { + return markettypes.EmptyDealLabel, fmt.Errorf("failed to convert bytes label to bytes: %w", err) + } + return markettypes.NewLabelFromBytes(bs) +} + +func (s *state9) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn9)(nil) + +func decodePublishStorageDealsReturn9(b []byte) (PublishStorageDealsReturn, error) { + var retval market9.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn9{retval}, nil +} + +type publishStorageDealsReturn9 struct { + market9.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn9) IsDealValid(index uint64) (bool, int, error) { + + set, err := r.ValidDeals.IsSet(index) + if err != nil || !set { + return false, -1, err + } + maskBf, err := 
bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}}) + if err != nil { + return false, -1, err + } + before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals) + if err != nil { + return false, -1, err + } + outIdx, err := before.Count() + if err != nil { + return false, -1, err + } + return set, int(outIdx), nil + +} + +func (r *publishStorageDealsReturn9) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state9) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + allocations, err := adt9.AsMap(s.store, s.PendingDealAllocationIds, builtin.DefaultHamtBitwidth) + if err != nil { + return verifregtypes.NoAllocationID, fmt.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + + var allocationId cbg.CborInt + found, err := allocations.Get(abi.UIntKey(uint64(dealId)), &allocationId) + if err != nil { + return verifregtypes.NoAllocationID, fmt.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + if !found { + return verifregtypes.NoAllocationID, nil + } + + return verifregtypes.AllocationId(allocationId), nil + +} diff --git a/venus-shared/actors/builtin/miner/actor.go b/venus-shared/actors/builtin/miner/actor.go new file mode 100644 index 0000000000..bb7e4d2300 --- /dev/null +++ b/venus-shared/actors/builtin/miner/actor.go @@ -0,0 +1,297 @@ +// FETCHED FROM LOTUS: builtin/miner/actor.go.template + +package miner + +import ( + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/venus-shared/actors" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/dline" + 
"github.com/filecoin-project/go-state-types/proof" + + miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.MinerKey { + return nil, fmt.Errorf("actor code is not miner: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.StorageMinerActorCodeID: + return load0(store, act.Head) + + case builtin2.StorageMinerActorCodeID: + return load2(store, act.Head) + + case builtin3.StorageMinerActorCodeID: + return load3(store, act.Head) + + case builtin4.StorageMinerActorCodeID: + return load4(store, act.Head) + + case builtin5.StorageMinerActorCodeID: + return load5(store, act.Head) + + case builtin6.StorageMinerActorCodeID: + return load6(store, act.Head) + + case builtin7.StorageMinerActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + 
case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + case actors.Version6: + return make6(store) + + case actors.Version7: + return make7(store) + + case actors.Version8: + return make8(store) + + case actors.Version9: + return make9(store) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + // Total available balance to spend. + AvailableBalance(abi.TokenAmount) (abi.TokenAmount, error) + // Funds that will vest by the given epoch. + VestedFunds(abi.ChainEpoch) (abi.TokenAmount, error) + // Funds locked for various reasons. + LockedFunds() (LockedFunds, error) + FeeDebt() (abi.TokenAmount, error) + + GetSector(abi.SectorNumber) (*SectorOnChainInfo, error) + FindSector(abi.SectorNumber) (*SectorLocation, error) + GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error) + GetPrecommittedSector(abi.SectorNumber) (*miner9.SectorPreCommitOnChainInfo, error) + ForEachPrecommittedSector(func(miner9.SectorPreCommitOnChainInfo) error) error + LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error) + NumLiveSectors() (uint64, error) + IsAllocated(abi.SectorNumber) (bool, error) + // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than + // count if there aren't enough). 
+ UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) + GetAllocatedSectors() (*bitfield.BitField, error) + + // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors + GetProvingPeriodStart() (abi.ChainEpoch, error) + // Testing only + EraseAllUnproven() error + + LoadDeadline(idx uint64) (Deadline, error) + ForEachDeadline(cb func(idx uint64, dl Deadline) error) error + NumDeadlines() (uint64, error) + DeadlinesChanged(State) (bool, error) + + Info() (MinerInfo, error) + MinerInfoChanged(State) (bool, error) + + DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) + DeadlineCronActive() (bool, error) + + // Diff helpers. Used by Diff* functions internally. + sectors() (adt.Array, error) + decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error) + precommits() (adt.Map, error) + decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (miner9.SectorPreCommitOnChainInfo, error) + GetState() interface{} +} + +type Deadline interface { + LoadPartition(idx uint64) (Partition, error) + ForEachPartition(cb func(idx uint64, part Partition) error) error + PartitionsPoSted() (bitfield.BitField, error) + + PartitionsChanged(Deadline) (bool, error) + DisputableProofCount() (uint64, error) +} + +type Partition interface { + // AllSectors returns all sector numbers in this partition, including faulty, unproven, and terminated sectors + AllSectors() (bitfield.BitField, error) + + // Subset of sectors detected/declared faulty and not yet recovered (excl. from PoSt). + // Faults ∩ Terminated = ∅ + FaultySectors() (bitfield.BitField, error) + + // Subset of faulty sectors expected to recover on next PoSt + // Recoveries ∩ Terminated = ∅ + RecoveringSectors() (bitfield.BitField, error) + + // Live sectors are those that are not terminated (but may be faulty). + LiveSectors() (bitfield.BitField, error) + + // Active sectors are those that are neither terminated nor faulty nor unproven, i.e. actively contributing power. 
+ ActiveSectors() (bitfield.BitField, error) + + // Unproven sectors in this partition. This bitfield will be cleared on + // a successful window post (or at the end of the partition's next + // deadline). At that time, any still unproven sectors will be added to + // the faulty sector bitfield. + UnprovenSectors() (bitfield.BitField, error) +} + +type SectorOnChainInfo = miner9.SectorOnChainInfo + +func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { + // We added support for the new proofs in network version 7, and removed support for the old + // ones in network version 8. + if nver < network.Version7 { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil + default: + return -1, fmt.Errorf("unrecognized window post type: %d", proof) + } + } + + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return -1, fmt.Errorf("unrecognized window 
post type: %d", proof) + } +} + +func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredPoStProof, error) { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, nil + default: + return -1, fmt.Errorf("unknown proof type %d", proof) + } +} + +type MinerInfo = miner9.MinerInfo +type WorkerKeyChange = miner9.WorkerKeyChange +type SectorPreCommitOnChainInfo = miner9.SectorPreCommitOnChainInfo +type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo + +type SectorExpiration struct { + OnTime abi.ChainEpoch + + // non-zero if sector is faulty, epoch at which it will be permanently + // removed if it doesn't recover + Early abi.ChainEpoch +} + +type SectorLocation struct { + Deadline uint64 + Partition uint64 +} + +type SectorChanges struct { + Added []SectorOnChainInfo + Extended []SectorExtensions + Removed []SectorOnChainInfo +} + +type SectorExtensions struct { + From SectorOnChainInfo + To SectorOnChainInfo +} + +type PreCommitChanges struct { + Added []miner9.SectorPreCommitOnChainInfo + Removed []miner9.SectorPreCommitOnChainInfo +} + +type LockedFunds struct { + VestingFunds abi.TokenAmount + InitialPledgeRequirement abi.TokenAmount + PreCommitDeposits abi.TokenAmount +} + +func (lf LockedFunds) TotalLockedFunds() abi.TokenAmount { + return big.Add(lf.VestingFunds, big.Add(lf.InitialPledgeRequirement, lf.PreCommitDeposits)) +} diff --git 
a/venus-shared/actors/builtin/miner/actor.go.template b/venus-shared/actors/builtin/miner/actor.go.template new file mode 100644 index 0000000000..0d84d17fb3 --- /dev/null +++ b/venus-shared/actors/builtin/miner/actor.go.template @@ -0,0 +1,247 @@ +// FETCHED FROM LOTUS: builtin/miner/actor.go.template + +package miner + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/venus-shared/actors" + cbg "github.com/whyrusleeping/cbor-gen" + "fmt" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/proof" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + miner{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin{{import .latestVersion}}miner" + +{{range .versions}} + {{if (le . 7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.MinerKey { + return nil, fmt.Errorf("actor code is not miner: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 
7)}} + case builtin{{.}}.StorageMinerActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + // Total available balance to spend. + AvailableBalance(abi.TokenAmount) (abi.TokenAmount, error) + // Funds that will vest by the given epoch. + VestedFunds(abi.ChainEpoch) (abi.TokenAmount, error) + // Funds locked for various reasons. + LockedFunds() (LockedFunds, error) + FeeDebt() (abi.TokenAmount, error) + + GetSector(abi.SectorNumber) (*SectorOnChainInfo, error) + FindSector(abi.SectorNumber) (*SectorLocation, error) + GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error) + GetPrecommittedSector(abi.SectorNumber) (*miner{{.latestVersion}}.SectorPreCommitOnChainInfo, error) + ForEachPrecommittedSector(func(miner{{.latestVersion}}.SectorPreCommitOnChainInfo) error) error + LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error) + NumLiveSectors() (uint64, error) + IsAllocated(abi.SectorNumber) (bool, error) + // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than + // count if there aren't enough). 
+ UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) + GetAllocatedSectors() (*bitfield.BitField, error) + + // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors + GetProvingPeriodStart() (abi.ChainEpoch, error) + // Testing only + EraseAllUnproven() error + + LoadDeadline(idx uint64) (Deadline, error) + ForEachDeadline(cb func(idx uint64, dl Deadline) error) error + NumDeadlines() (uint64, error) + DeadlinesChanged(State) (bool, error) + + Info() (MinerInfo, error) + MinerInfoChanged(State) (bool, error) + + DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) + DeadlineCronActive() (bool, error) + + // Diff helpers. Used by Diff* functions internally. + sectors() (adt.Array, error) + decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error) + precommits() (adt.Map, error) + decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (miner{{.latestVersion}}.SectorPreCommitOnChainInfo, error) + GetState() interface{} +} + +type Deadline interface { + LoadPartition(idx uint64) (Partition, error) + ForEachPartition(cb func(idx uint64, part Partition) error) error + PartitionsPoSted() (bitfield.BitField, error) + + PartitionsChanged(Deadline) (bool, error) + DisputableProofCount() (uint64, error) +} + +type Partition interface { + // AllSectors returns all sector numbers in this partition, including faulty, unproven, and terminated sectors + AllSectors() (bitfield.BitField, error) + + // Subset of sectors detected/declared faulty and not yet recovered (excl. from PoSt). + // Faults ∩ Terminated = ∅ + FaultySectors() (bitfield.BitField, error) + + // Subset of faulty sectors expected to recover on next PoSt + // Recoveries ∩ Terminated = ∅ + RecoveringSectors() (bitfield.BitField, error) + + // Live sectors are those that are not terminated (but may be faulty). + LiveSectors() (bitfield.BitField, error) + + // Active sectors are those that are neither terminated nor faulty nor unproven, i.e. 
actively contributing power. + ActiveSectors() (bitfield.BitField, error) + + // Unproven sectors in this partition. This bitfield will be cleared on + // a successful window post (or at the end of the partition's next + // deadline). At that time, any still unproven sectors will be added to + // the faulty sector bitfield. + UnprovenSectors() (bitfield.BitField, error) +} + +type SectorOnChainInfo = miner{{.latestVersion}}.SectorOnChainInfo + +func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { + // We added support for the new proofs in network version 7, and removed support for the old + // ones in network version 8. + if nver < network.Version7 { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil + default: + return -1, fmt.Errorf("unrecognized window post type: %d", proof) + } + } + + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + 
default: + return -1, fmt.Errorf("unrecognized window post type: %d", proof) + } +} + +func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredPoStProof, error) { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, nil + default: + return -1, fmt.Errorf("unknown proof type %d", proof) + } +} + +type MinerInfo = miner{{.latestVersion}}.MinerInfo +type WorkerKeyChange = miner{{.latestVersion}}.WorkerKeyChange +type SectorPreCommitOnChainInfo = miner{{.latestVersion}}.SectorPreCommitOnChainInfo +type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo + +type SectorExpiration struct { + OnTime abi.ChainEpoch + + // non-zero if sector is faulty, epoch at which it will be permanently + // removed if it doesn't recover + Early abi.ChainEpoch +} + +type SectorLocation struct { + Deadline uint64 + Partition uint64 +} + +type SectorChanges struct { + Added []SectorOnChainInfo + Extended []SectorExtensions + Removed []SectorOnChainInfo +} + +type SectorExtensions struct { + From SectorOnChainInfo + To SectorOnChainInfo +} + +type PreCommitChanges struct { + Added []miner{{.latestVersion}}.SectorPreCommitOnChainInfo + Removed []miner{{.latestVersion}}.SectorPreCommitOnChainInfo +} + +type LockedFunds struct { + VestingFunds abi.TokenAmount + InitialPledgeRequirement abi.TokenAmount + PreCommitDeposits abi.TokenAmount +} + +func (lf LockedFunds) TotalLockedFunds() 
abi.TokenAmount { + return big.Add(lf.VestingFunds, big.Add(lf.InitialPledgeRequirement, lf.PreCommitDeposits)) +} diff --git a/venus-shared/actors/builtin/miner/diff.go b/venus-shared/actors/builtin/miner/diff.go new file mode 100644 index 0000000000..f40fd163f8 --- /dev/null +++ b/venus-shared/actors/builtin/miner/diff.go @@ -0,0 +1,129 @@ +// FETCHED FROM LOTUS: builtin/miner/diff.go + +package miner + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + cbg "github.com/whyrusleeping/cbor-gen" +) + +func DiffPreCommits(pre, cur State) (*PreCommitChanges, error) { + results := new(PreCommitChanges) + + prep, err := pre.precommits() + if err != nil { + return nil, err + } + + curp, err := cur.precommits() + if err != nil { + return nil, err + } + + err = adt.DiffAdtMap(prep, curp, &preCommitDiffer{results, pre, cur}) + if err != nil { + return nil, err + } + + return results, nil +} + +type preCommitDiffer struct { + Results *PreCommitChanges + pre, after State +} + +func (m *preCommitDiffer) AsKey(key string) (abi.Keyer, error) { + sector, err := abi.ParseUIntKey(key) + if err != nil { + return nil, err + } + return abi.UIntKey(sector), nil +} + +func (m *preCommitDiffer) Add(key string, val *cbg.Deferred) error { + sp, err := m.after.decodeSectorPreCommitOnChainInfo(val) + if err != nil { + return err + } + m.Results.Added = append(m.Results.Added, sp) + return nil +} + +func (m *preCommitDiffer) Modify(key string, from, to *cbg.Deferred) error { + return nil +} + +func (m *preCommitDiffer) Remove(key string, val *cbg.Deferred) error { + sp, err := m.pre.decodeSectorPreCommitOnChainInfo(val) + if err != nil { + return err + } + m.Results.Removed = append(m.Results.Removed, sp) + return nil +} + +func DiffSectors(pre, cur State) (*SectorChanges, error) { + results := new(SectorChanges) + + pres, err := pre.sectors() + if err != nil { + return nil, err + } + + curs, err := cur.sectors() + if 
err != nil { + return nil, err + } + + err = adt.DiffAdtArray(pres, curs, §orDiffer{results, pre, cur}) + if err != nil { + return nil, err + } + + return results, nil +} + +type sectorDiffer struct { + Results *SectorChanges + pre, after State +} + +func (m *sectorDiffer) Add(key uint64, val *cbg.Deferred) error { + si, err := m.after.decodeSectorOnChainInfo(val) + if err != nil { + return err + } + m.Results.Added = append(m.Results.Added, si) + return nil +} + +func (m *sectorDiffer) Modify(key uint64, from, to *cbg.Deferred) error { + siFrom, err := m.pre.decodeSectorOnChainInfo(from) + if err != nil { + return err + } + + siTo, err := m.after.decodeSectorOnChainInfo(to) + if err != nil { + return err + } + + if siFrom.Expiration != siTo.Expiration { + m.Results.Extended = append(m.Results.Extended, SectorExtensions{ + From: siFrom, + To: siTo, + }) + } + return nil +} + +func (m *sectorDiffer) Remove(key uint64, val *cbg.Deferred) error { + si, err := m.pre.decodeSectorOnChainInfo(val) + if err != nil { + return err + } + m.Results.Removed = append(m.Results.Removed, si) + return nil +} diff --git a/venus-shared/actors/builtin/miner/diff_deadlines.go b/venus-shared/actors/builtin/miner/diff_deadlines.go new file mode 100644 index 0000000000..af39d81cc0 --- /dev/null +++ b/venus-shared/actors/builtin/miner/diff_deadlines.go @@ -0,0 +1,178 @@ +// FETCHED FROM LOTUS: builtin/miner/diff_deadlines.go + +package miner + +import ( + "errors" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/exitcode" +) + +type DeadlinesDiff map[uint64]DeadlineDiff + +func DiffDeadlines(pre, cur State) (DeadlinesDiff, error) { + changed, err := pre.DeadlinesChanged(cur) + if err != nil { + return nil, err + } + if !changed { + return nil, nil + } + + dlDiff := make(DeadlinesDiff) + if err := pre.ForEachDeadline(func(idx uint64, preDl Deadline) error { + curDl, err := cur.LoadDeadline(idx) + if err != nil { + return err + } + + diff, err := 
DiffDeadline(preDl, curDl) + if err != nil { + return err + } + + dlDiff[idx] = diff + return nil + }); err != nil { + return nil, err + } + return dlDiff, nil +} + +type DeadlineDiff map[uint64]*PartitionDiff + +func DiffDeadline(pre, cur Deadline) (DeadlineDiff, error) { + changed, err := pre.PartitionsChanged(cur) + if err != nil { + return nil, err + } + if !changed { + return nil, nil + } + + partDiff := make(DeadlineDiff) + if err := pre.ForEachPartition(func(idx uint64, prePart Partition) error { + // try loading current partition at this index + curPart, err := cur.LoadPartition(idx) + if err != nil { + if errors.Is(err, exitcode.ErrNotFound) { + // TODO correctness? + return nil // the partition was removed. + } + return err + } + + // compare it with the previous partition + diff, err := DiffPartition(prePart, curPart) + if err != nil { + return err + } + + partDiff[idx] = diff + return nil + }); err != nil { + return nil, err + } + + // all previous partitions have been walked. + // all partitions in cur and not in prev are new... can they be faulty already? + // TODO is this correct? 
+ if err := cur.ForEachPartition(func(idx uint64, curPart Partition) error { + if _, found := partDiff[idx]; found { + return nil + } + faults, err := curPart.FaultySectors() + if err != nil { + return err + } + recovering, err := curPart.RecoveringSectors() + if err != nil { + return err + } + partDiff[idx] = &PartitionDiff{ + Removed: bitfield.New(), + Recovered: bitfield.New(), + Faulted: faults, + Recovering: recovering, + } + + return nil + }); err != nil { + return nil, err + } + + return partDiff, nil +} + +type PartitionDiff struct { + Removed bitfield.BitField + Recovered bitfield.BitField + Faulted bitfield.BitField + Recovering bitfield.BitField +} + +func DiffPartition(pre, cur Partition) (*PartitionDiff, error) { + prevLiveSectors, err := pre.LiveSectors() + if err != nil { + return nil, err + } + curLiveSectors, err := cur.LiveSectors() + if err != nil { + return nil, err + } + + removed, err := bitfield.SubtractBitField(prevLiveSectors, curLiveSectors) + if err != nil { + return nil, err + } + + prevRecoveries, err := pre.RecoveringSectors() + if err != nil { + return nil, err + } + + curRecoveries, err := cur.RecoveringSectors() + if err != nil { + return nil, err + } + + recovering, err := bitfield.SubtractBitField(curRecoveries, prevRecoveries) + if err != nil { + return nil, err + } + + prevFaults, err := pre.FaultySectors() + if err != nil { + return nil, err + } + + curFaults, err := cur.FaultySectors() + if err != nil { + return nil, err + } + + faulted, err := bitfield.SubtractBitField(curFaults, prevFaults) + if err != nil { + return nil, err + } + + // all current good sectors + curActiveSectors, err := cur.ActiveSectors() + if err != nil { + return nil, err + } + + // sectors that were previously fault and are now currently active are considered recovered. 
+ recovered, err := bitfield.IntersectBitField(prevFaults, curActiveSectors) + if err != nil { + return nil, err + } + + return &PartitionDiff{ + Removed: removed, + Recovered: recovered, + Faulted: faulted, + Recovering: recovering, + }, nil +} diff --git a/venus-shared/actors/builtin/miner/state.sep.go.template b/venus-shared/actors/builtin/miner/state.sep.go.template new file mode 100644 index 0000000000..b1ea7b3e67 --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.sep.go.template @@ -0,0 +1,606 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" +{{if (le .v 1)}} + "github.com/filecoin-project/go-state-types/big" +{{end}} + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + minertypes "github.com/filecoin-project/go-state-types/builtin/v{{.latestVersion}}/miner" + +{{if (le .v 7)}} + {{if (ge .v 3)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" + {{end}} + miner{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/miner" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +{{else}} + miner{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}miner" + adt{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}util/adt" + builtin{{.v}} "github.com/filecoin-project/go-state-types/builtin" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + 
out := state{{.v}}{store: store} + out.State = miner{{.v}}.State{} + return &out, nil +} + +type state{{.v}} struct { + miner{{.v}}.State + store adt.Store +} + +type deadline{{.v}} struct { + miner{{.v}}.Deadline + store adt.Store +} + +type partition{{.v}} struct { + miner{{.v}}.Partition + store adt.Store +} + +func (s *state{{.v}}) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state{{.v}}) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state{{.v}}) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge{{if (le .v 1)}}Requirement{{end}}, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state{{.v}}) FeeDebt() (abi.TokenAmount, error) { + return {{if (ge .v 2)}}s.State.FeeDebt{{else}}big.Zero(){{end}}, nil +} + +func (s *state{{.v}}) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge{{if (le .v 1)}}Requirement{{end}}, nil +} + +func (s *state{{.v}}) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state{{.v}}) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV{{.v}}SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state{{.v}}) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return 
&SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state{{.v}}) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state{{.v}}) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. +{{if (ge .v 7) -}} + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). +{{- else -}} + // 1. If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. +{{- end}} +{{if (ge .v 6) -}} + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. +{{- else -}} + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. 
+{{- end}} + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner{{.v}}.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner{{.v}}.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant{{if (ge .v 3)}}, miner{{.v}}.PartitionExpirationAmtBitwidth{{end}}) + if err != nil { + return err + } + var exp miner{{.v}}.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state{{.v}}) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV{{.v}}SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state{{.v}}) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) error) error { +{{if (ge .v 3) -}} + precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors, 
builtin{{.v}}.DefaultHamtBitwidth) +{{- else -}} + precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors) +{{- end}} + if err != nil { + return err + } + + var info miner{{.v}}.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV{{.v}}SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner{{.v}}.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info{{.v}} miner{{.v}}.SectorOnChainInfo + if err := sectors.ForEach(&info{{.v}}, func(_ int64) error { + info := fromV{{.v}}SectorOnChainInfo(info{{.v}}) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos{{.v}}, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos{{.v}})) + for i, info{{.v}} := range infos{{.v}} { + info := fromV{{.v}}SectorOnChainInfo(*info{{.v}}) + infos[i] = &info + } + return infos, nil +} + +func (s *state{{.v}}) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state{{.v}}) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state{{.v}}) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state{{.v}}) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{ {Val: true, Len: abi.MaxSectorNumber} }}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state{{.v}}) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state{{.v}}) 
LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline{{.v}}{*dl, s.store}, nil +} + +func (s *state{{.v}}) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner{{.v}}.Deadline) error { + return cb(i, &deadline{{.v}}{*dl, s.store}) + }) +} + +func (s *state{{.v}}) NumDeadlines() (uint64, error) { + return miner{{.v}}.WPoStPeriodDeadlines, nil +} + +func (s *state{{.v}}) DeadlinesChanged(other State) (bool, error) { + other{{.v}}, ok := other.(*state{{.v}}) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other{{.v}}.Deadlines), nil +} + +func (s *state{{.v}}) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state{{.v}}) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state{{.v}}) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + +{{if (le .v 2)}} + wpp, err := info.SealProofType.RegisteredWindowPoStProof() + if err != nil { + return MinerInfo{}, err + } +{{end}} + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: {{if (ge .v 3)}}info.WindowPoStProofType{{else}}wpp{{end}}, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: {{if (ge .v 2)}}info.ConsensusFaultElapsed{{else}}-1{{end}}, + {{if (ge .v 9)}} + Beneficiary: info.Beneficiary, + 
BeneficiaryTerm: info.BeneficiaryTerm, + PendingBeneficiaryTerm: info.PendingBeneficiaryTerm,{{end}} + } + + return mi, nil +} + +func (s *state{{.v}}) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.{{if (ge .v 4)}}Recorded{{end}}DeadlineInfo(epoch), nil +} + +func (s *state{{.v}}) DeadlineCronActive() (bool, error) { + return {{if (ge .v 4)}}s.State.DeadlineCronActive{{else}}true{{end}}, nil{{if (lt .v 4)}} // always active in this version{{end}} +} + +func (s *state{{.v}}) sectors() (adt.Array, error) { + return adt{{.v}}.AsArray(s.store, s.Sectors{{if (ge .v 3)}}, miner{{.v}}.SectorsAmtBitwidth{{end}}) +} + +func (s *state{{.v}}) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner{{.v}}.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV{{.v}}SectorOnChainInfo(si), nil +} + +func (s *state{{.v}}) precommits() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.PreCommittedSectors{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner{{.v}}.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV{{.v}}SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state{{.v}}) EraseAllUnproven() error { + {{if (ge .v 2)}} + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner{{.v}}.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner{{.v}}.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + 
+ dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + {{else}} + // field doesn't exist until v2 + return nil + {{end}} +} + +func (d *deadline{{.v}}) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition{{.v}}{*p, d.store}, nil +} + +func (d *deadline{{.v}}) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner{{.v}}.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition{{.v}}{part, d.store}) + }) +} + +func (d *deadline{{.v}}) PartitionsChanged(other Deadline) (bool, error) { + other{{.v}}, ok := other.(*deadline{{.v}}) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other{{.v}}.Deadline.Partitions), nil +} + +func (d *deadline{{.v}}) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.{{if (ge .v 3)}}PartitionsPoSted{{else}}PostSubmissions{{end}}, nil +} + +func (d *deadline{{.v}}) DisputableProofCount() (uint64, error) { +{{if (ge .v 3)}} + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil +{{else}} + // field doesn't exist until v3 + return 0, nil +{{end}} +} + +func (p *partition{{.v}}) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition{{.v}}) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition{{.v}}) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition{{.v}}) UnprovenSectors() (bitfield.BitField, error) { + return {{if (ge .v 
2)}}p.Partition.Unproven{{else}}bitfield.New(){{end}}, nil +} + +func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v{{.v}}.SectorNumber, + SealProof: v{{.v}}.SealProof, + SealedCID: v{{.v}}.SealedCID, + DealIDs: v{{.v}}.DealIDs, + Activation: v{{.v}}.Activation, + Expiration: v{{.v}}.Expiration, + DealWeight: v{{.v}}.DealWeight, + VerifiedDealWeight: v{{.v}}.VerifiedDealWeight, + InitialPledge: v{{.v}}.InitialPledge, + ExpectedDayReward: v{{.v}}.ExpectedDayReward, + ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge, + {{if (ge .v 7)}} + SectorKeyCID: v{{.v}}.SectorKeyCID, + {{end}} + } + return info +} + +func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + {{if (le .v 8)}}return minertypes.SectorPreCommitOnChainInfo{ + Info: minertypes.SectorPreCommitInfo{ + SealProof: v{{.v}}.Info.SealProof, + SectorNumber: v{{.v}}.Info.SectorNumber, + SealedCID: v{{.v}}.Info.SealedCID, + SealRandEpoch: v{{.v}}.Info.SealRandEpoch, + DealIDs: v{{.v}}.Info.DealIDs, + Expiration: v{{.v}}.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v{{.v}}.PreCommitDeposit, + PreCommitEpoch: v{{.v}}.PreCommitEpoch, + }{{else}}return v{{.v}}{{end}} +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/miner/state.v0.go b/venus-shared/actors/builtin/miner/state.v0.go new file mode 100644 index 0000000000..349b39f2e0 --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.v0.go @@ -0,0 +1,534 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" + + "github.com/filecoin-project/go-state-types/big" + + "fmt" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = miner0.State{} + return &out, nil +} + +type state0 struct { + miner0.State + store adt.Store +} + +type deadline0 struct { + miner0.Deadline + store adt.Store +} + +type partition0 struct { + miner0.Partition + store adt.Store +} + +func (s *state0) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state0) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state0) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledgeRequirement, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state0) FeeDebt() (abi.TokenAmount, error) { + return big.Zero(), nil +} + +func (s *state0) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledgeRequirement, nil +} + +func (s *state0) PreCommitDeposits() (abi.TokenAmount, error) { + 
return s.State.PreCommitDeposits, nil +} + +func (s *state0) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV0SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state0) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state0) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. 
+ + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner0.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner0.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant) + if err != nil { + return err + } + var exp miner0.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state0) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV0SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state0) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt0.AsMap(s.store, s.State.PreCommittedSectors) + if err != nil { + return err + } + + var info miner0.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + 
return cb(fromV0SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner0.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info0 miner0.SectorOnChainInfo + if err := sectors.ForEach(&info0, func(_ int64) error { + info := fromV0SectorOnChainInfo(info0) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. + infos0, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos0)) + for i, info0 := range infos0 { + info := fromV0SectorOnChainInfo(*info0) + infos[i] = &info + } + return infos, nil +} + +func (s *state0) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state0) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state0) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + 
iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state0) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state0) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline0{*dl, s.store}, nil +} + +func (s *state0) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner0.Deadline) error { + return cb(i, &deadline0{*dl, s.store}) + }) +} + +func (s *state0) NumDeadlines() (uint64, error) { + return miner0.WPoStPeriodDeadlines, nil +} + +func (s *state0) DeadlinesChanged(other State) (bool, error) { + other0, ok := other.(*state0) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other0.Deadlines), nil +} + +func (s *state0) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state0) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state0) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + wpp, err := info.SealProofType.RegisteredWindowPoStProof() + if err != nil { + return MinerInfo{}, err + } + + mi := 
MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: wpp, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: -1, + } + + return mi, nil +} + +func (s *state0) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.DeadlineInfo(epoch), nil +} + +func (s *state0) DeadlineCronActive() (bool, error) { + return true, nil // always active in this version +} + +func (s *state0) sectors() (adt.Array, error) { + return adt0.AsArray(s.store, s.Sectors) +} + +func (s *state0) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner0.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV0SectorOnChainInfo(si), nil +} + +func (s *state0) precommits() (adt.Map, error) { + return adt0.AsMap(s.store, s.PreCommittedSectors) +} + +func (s *state0) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner0.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV0SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state0) EraseAllUnproven() error { + + // field doesn't exist until v2 + return nil + +} + +func (d *deadline0) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition0{*p, d.store}, nil +} + +func (d *deadline0) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner0.Partition + return ps.ForEach(&part, func(i 
int64) error { + return cb(uint64(i), &partition0{part, d.store}) + }) +} + +func (d *deadline0) PartitionsChanged(other Deadline) (bool, error) { + other0, ok := other.(*deadline0) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil +} + +func (d *deadline0) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PostSubmissions, nil +} + +func (d *deadline0) DisputableProofCount() (uint64, error) { + + // field doesn't exist until v3 + return 0, nil + +} + +func (p *partition0) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition0) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition0) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition0) UnprovenSectors() (bitfield.BitField, error) { + return bitfield.New(), nil +} + +func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v0.SectorNumber, + SealProof: v0.SealProof, + SealedCID: v0.SealedCID, + DealIDs: v0.DealIDs, + Activation: v0.Activation, + Expiration: v0.Expiration, + DealWeight: v0.DealWeight, + VerifiedDealWeight: v0.VerifiedDealWeight, + InitialPledge: v0.InitialPledge, + ExpectedDayReward: v0.ExpectedDayReward, + ExpectedStoragePledge: v0.ExpectedStoragePledge, + } + return info +} + +func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + return minertypes.SectorPreCommitOnChainInfo{ + Info: minertypes.SectorPreCommitInfo{ + SealProof: v0.Info.SealProof, + SectorNumber: v0.Info.SectorNumber, + SealedCID: v0.Info.SealedCID, + SealRandEpoch: v0.Info.SealRandEpoch, + DealIDs: v0.Info.DealIDs, + Expiration: v0.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v0.PreCommitDeposit, + PreCommitEpoch: 
v0.PreCommitEpoch, + } +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/miner/state.v2.go b/venus-shared/actors/builtin/miner/state.v2.go new file mode 100644 index 0000000000..74e551737b --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.v2.go @@ -0,0 +1,564 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" + + "fmt" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = miner2.State{} + return &out, nil +} + +type state2 struct { + miner2.State + store adt.Store +} + +type deadline2 struct { + miner2.Deadline + store adt.Store +} + +type partition2 struct { + miner2.Partition + store adt.Store +} + +func (s *state2) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state2) 
VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state2) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state2) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state2) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state2) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state2) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV2SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state2) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state2) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner2.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state2) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. 
If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner2.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner2.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner2.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant) + if err != nil { + return err + } + var exp miner2.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state2) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV2SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s 
*state2) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt2.AsMap(s.store, s.State.PreCommittedSectors) + if err != nil { + return err + } + + var info miner2.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV2SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner2.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info2 miner2.SectorOnChainInfo + if err := sectors.ForEach(&info2, func(_ int64) error { + info := fromV2SectorOnChainInfo(info2) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos2, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos2)) + for i, info2 := range infos2 { + info := fromV2SectorOnChainInfo(*info2) + infos[i] = &info + } + return infos, nil +} + +func (s *state2) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state2) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state2) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state2) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state2) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline2{*dl, s.store}, nil +} + +func (s *state2) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner2.Deadline) error { + return cb(i, &deadline2{*dl, s.store}) + }) +} + +func (s *state2) NumDeadlines() (uint64, error) { + return miner2.WPoStPeriodDeadlines, nil +} + +func (s *state2) DeadlinesChanged(other State) (bool, error) { + other2, ok := other.(*state2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other2.Deadlines), nil +} + +func (s *state2) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state2) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + wpp, err := info.SealProofType.RegisteredWindowPoStProof() + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: wpp, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + return mi, nil +} + +func (s *state2) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.DeadlineInfo(epoch), nil +} + +func (s *state2) DeadlineCronActive() (bool, error) { + return true, nil // always active in this version +} + +func (s *state2) sectors() (adt.Array, 
error) { + return adt2.AsArray(s.store, s.Sectors) +} + +func (s *state2) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner2.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV2SectorOnChainInfo(si), nil +} + +func (s *state2) precommits() (adt.Map, error) { + return adt2.AsMap(s.store, s.PreCommittedSectors) +} + +func (s *state2) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner2.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV2SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state2) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner2.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner2.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline2) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition2{*p, d.store}, nil +} + +func (d *deadline2) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner2.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition2{part, d.store}) + }) +} + 
+func (d *deadline2) PartitionsChanged(other Deadline) (bool, error) { + other2, ok := other.(*deadline2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil +} + +func (d *deadline2) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PostSubmissions, nil +} + +func (d *deadline2) DisputableProofCount() (uint64, error) { + + // field doesn't exist until v3 + return 0, nil + +} + +func (p *partition2) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition2) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition2) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition2) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v2.SectorNumber, + SealProof: v2.SealProof, + SealedCID: v2.SealedCID, + DealIDs: v2.DealIDs, + Activation: v2.Activation, + Expiration: v2.Expiration, + DealWeight: v2.DealWeight, + VerifiedDealWeight: v2.VerifiedDealWeight, + InitialPledge: v2.InitialPledge, + ExpectedDayReward: v2.ExpectedDayReward, + ExpectedStoragePledge: v2.ExpectedStoragePledge, + } + return info +} + +func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + return minertypes.SectorPreCommitOnChainInfo{ + Info: minertypes.SectorPreCommitInfo{ + SealProof: v2.Info.SealProof, + SectorNumber: v2.Info.SectorNumber, + SealedCID: v2.Info.SealedCID, + SealRandEpoch: v2.Info.SealRandEpoch, + DealIDs: v2.Info.DealIDs, + Expiration: v2.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v2.PreCommitDeposit, + PreCommitEpoch: v2.PreCommitEpoch, + } +} + +func (s *state2) GetState() interface{} { + 
return &s.State +} diff --git a/venus-shared/actors/builtin/miner/state.v3.go b/venus-shared/actors/builtin/miner/state.v3.go new file mode 100644 index 0000000000..b764b65e3c --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.v3.go @@ -0,0 +1,565 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" + + "fmt" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = miner3.State{} + return &out, nil +} + +type state3 struct { + miner3.State + store adt.Store +} + +type deadline3 struct { + miner3.Deadline + store adt.Store +} + +type partition3 struct { + miner3.Partition + store adt.Store +} + +func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state3) 
VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state3) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state3) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state3) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state3) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state3) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV3SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state3) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state3) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. 
If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner3.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner3.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner3.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner3.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state3) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV3SectorPreCommitOnChainInfo(*info) 
+ + return &ret, nil +} + +func (s *state3) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt3.AsMap(s.store, s.State.PreCommittedSectors, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner3.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV3SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner3.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info3 miner3.SectorOnChainInfo + if err := sectors.ForEach(&info3, func(_ int64) error { + info := fromV3SectorOnChainInfo(info3) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos3, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos3)) + for i, info3 := range infos3 { + info := fromV3SectorOnChainInfo(*info3) + infos[i] = &info + } + return infos, nil +} + +func (s *state3) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state3) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state3) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state3) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state3) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline3{*dl, s.store}, nil +} + +func (s *state3) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner3.Deadline) error { + return cb(i, &deadline3{*dl, s.store}) + }) +} + +func (s *state3) NumDeadlines() (uint64, error) { + return miner3.WPoStPeriodDeadlines, nil +} + +func (s *state3) DeadlinesChanged(other State) (bool, error) { + other3, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other3.Deadlines), nil +} + +func (s *state3) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state3) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + return mi, nil +} + +func (s *state3) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.DeadlineInfo(epoch), nil +} + +func (s *state3) DeadlineCronActive() (bool, error) { + return true, nil // always active in this version +} + +func (s *state3) sectors() (adt.Array, error) { + return adt3.AsArray(s.store, s.Sectors, miner3.SectorsAmtBitwidth) +} + +func (s 
*state3) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner3.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV3SectorOnChainInfo(si), nil +} + +func (s *state3) precommits() (adt.Map, error) { + return adt3.AsMap(s.store, s.PreCommittedSectors, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner3.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV3SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state3) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner3.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner3.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline3) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition3{*p, d.store}, nil +} + +func (d *deadline3) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner3.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition3{part, d.store}) + }) +} + +func (d *deadline3) 
PartitionsChanged(other Deadline) (bool, error) { + other3, ok := other.(*deadline3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other3.Deadline.Partitions), nil +} + +func (d *deadline3) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline3) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition3) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition3) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition3) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition3) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v3.SectorNumber, + SealProof: v3.SealProof, + SealedCID: v3.SealedCID, + DealIDs: v3.DealIDs, + Activation: v3.Activation, + Expiration: v3.Expiration, + DealWeight: v3.DealWeight, + VerifiedDealWeight: v3.VerifiedDealWeight, + InitialPledge: v3.InitialPledge, + ExpectedDayReward: v3.ExpectedDayReward, + ExpectedStoragePledge: v3.ExpectedStoragePledge, + } + return info +} + +func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + return minertypes.SectorPreCommitOnChainInfo{ + Info: minertypes.SectorPreCommitInfo{ + SealProof: v3.Info.SealProof, + SectorNumber: v3.Info.SectorNumber, + SealedCID: v3.Info.SealedCID, + SealRandEpoch: v3.Info.SealRandEpoch, + DealIDs: v3.Info.DealIDs, + Expiration: v3.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v3.PreCommitDeposit, + PreCommitEpoch: v3.PreCommitEpoch, + } +} 
+ +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/miner/state.v4.go b/venus-shared/actors/builtin/miner/state.v4.go new file mode 100644 index 0000000000..0c99ff9b0d --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.v4.go @@ -0,0 +1,565 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" + + "fmt" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = miner4.State{} + return &out, nil +} + +type state4 struct { + miner4.State + store adt.Store +} + +type deadline4 struct { + miner4.Deadline + store adt.Store +} + +type partition4 struct { + miner4.Partition + store adt.Store +} + +func (s *state4) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) 
+ return available, err +} + +func (s *state4) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state4) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state4) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state4) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state4) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state4) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV4SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state4) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state4) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state4) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. 
If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner4.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner4.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner4.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner4.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state4) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV4SectorPreCommitOnChainInfo(*info) 
+ + return &ret, nil +} + +func (s *state4) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt4.AsMap(s.store, s.State.PreCommittedSectors, builtin4.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner4.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV4SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner4.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info4 miner4.SectorOnChainInfo + if err := sectors.ForEach(&info4, func(_ int64) error { + info := fromV4SectorOnChainInfo(info4) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos4, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos4)) + for i, info4 := range infos4 { + info := fromV4SectorOnChainInfo(*info4) + infos[i] = &info + } + return infos, nil +} + +func (s *state4) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state4) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state4) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state4) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state4) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state4) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline4{*dl, s.store}, nil +} + +func (s *state4) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner4.Deadline) error { + return cb(i, &deadline4{*dl, s.store}) + }) +} + +func (s *state4) NumDeadlines() (uint64, error) { + return miner4.WPoStPeriodDeadlines, nil +} + +func (s *state4) DeadlinesChanged(other State) (bool, error) { + other4, ok := other.(*state4) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other4.Deadlines), nil +} + +func (s *state4) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state4) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state4) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + return mi, nil +} + +func (s *state4) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state4) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state4) sectors() (adt.Array, error) { + return adt4.AsArray(s.store, s.Sectors, miner4.SectorsAmtBitwidth) +} + +func (s 
*state4) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner4.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV4SectorOnChainInfo(si), nil +} + +func (s *state4) precommits() (adt.Map, error) { + return adt4.AsMap(s.store, s.PreCommittedSectors, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner4.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV4SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state4) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner4.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner4.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline4) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition4{*p, d.store}, nil +} + +func (d *deadline4) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner4.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition4{part, d.store}) + }) +} + +func (d *deadline4) 
PartitionsChanged(other Deadline) (bool, error) { + other4, ok := other.(*deadline4) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other4.Deadline.Partitions), nil +} + +func (d *deadline4) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline4) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition4) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition4) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition4) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition4) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v4.SectorNumber, + SealProof: v4.SealProof, + SealedCID: v4.SealedCID, + DealIDs: v4.DealIDs, + Activation: v4.Activation, + Expiration: v4.Expiration, + DealWeight: v4.DealWeight, + VerifiedDealWeight: v4.VerifiedDealWeight, + InitialPledge: v4.InitialPledge, + ExpectedDayReward: v4.ExpectedDayReward, + ExpectedStoragePledge: v4.ExpectedStoragePledge, + } + return info +} + +func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + return minertypes.SectorPreCommitOnChainInfo{ + Info: minertypes.SectorPreCommitInfo{ + SealProof: v4.Info.SealProof, + SectorNumber: v4.Info.SectorNumber, + SealedCID: v4.Info.SealedCID, + SealRandEpoch: v4.Info.SealRandEpoch, + DealIDs: v4.Info.DealIDs, + Expiration: v4.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v4.PreCommitDeposit, + PreCommitEpoch: v4.PreCommitEpoch, + } +} 
+ +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/miner/state.v5.go b/venus-shared/actors/builtin/miner/state.v5.go new file mode 100644 index 0000000000..bbe73b6609 --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.v5.go @@ -0,0 +1,565 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" + + "fmt" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = miner5.State{} + return &out, nil +} + +type state5 struct { + miner5.State + store adt.Store +} + +type deadline5 struct { + miner5.Deadline + store adt.Store +} + +type partition5 struct { + miner5.Partition + store adt.Store +} + +func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) 
+ return available, err +} + +func (s *state5) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state5) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state5) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state5) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state5) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state5) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV5SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state5) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state5) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. 
If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner5.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner5.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner5.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner5.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state5) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV5SectorPreCommitOnChainInfo(*info) 
+ + return &ret, nil +} + +func (s *state5) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt5.AsMap(s.store, s.State.PreCommittedSectors, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner5.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV5SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner5.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info5 miner5.SectorOnChainInfo + if err := sectors.ForEach(&info5, func(_ int64) error { + info := fromV5SectorOnChainInfo(info5) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos5, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos5)) + for i, info5 := range infos5 { + info := fromV5SectorOnChainInfo(*info5) + infos[i] = &info + } + return infos, nil +} + +func (s *state5) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state5) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state5) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state5) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state5) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline5{*dl, s.store}, nil +} + +func (s *state5) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner5.Deadline) error { + return cb(i, &deadline5{*dl, s.store}) + }) +} + +func (s *state5) NumDeadlines() (uint64, error) { + return miner5.WPoStPeriodDeadlines, nil +} + +func (s *state5) DeadlinesChanged(other State) (bool, error) { + other5, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other5.Deadlines), nil +} + +func (s *state5) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state5) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + return mi, nil +} + +func (s *state5) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state5) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state5) sectors() (adt.Array, error) { + return adt5.AsArray(s.store, s.Sectors, miner5.SectorsAmtBitwidth) +} + +func (s 
*state5) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner5.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV5SectorOnChainInfo(si), nil +} + +func (s *state5) precommits() (adt.Map, error) { + return adt5.AsMap(s.store, s.PreCommittedSectors, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner5.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV5SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state5) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner5.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner5.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline5) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition5{*p, d.store}, nil +} + +func (d *deadline5) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner5.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition5{part, d.store}) + }) +} + +func (d *deadline5) 
PartitionsChanged(other Deadline) (bool, error) { + other5, ok := other.(*deadline5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other5.Deadline.Partitions), nil +} + +func (d *deadline5) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline5) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition5) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition5) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition5) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition5) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v5.SectorNumber, + SealProof: v5.SealProof, + SealedCID: v5.SealedCID, + DealIDs: v5.DealIDs, + Activation: v5.Activation, + Expiration: v5.Expiration, + DealWeight: v5.DealWeight, + VerifiedDealWeight: v5.VerifiedDealWeight, + InitialPledge: v5.InitialPledge, + ExpectedDayReward: v5.ExpectedDayReward, + ExpectedStoragePledge: v5.ExpectedStoragePledge, + } + return info +} + +func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + return minertypes.SectorPreCommitOnChainInfo{ + Info: minertypes.SectorPreCommitInfo{ + SealProof: v5.Info.SealProof, + SectorNumber: v5.Info.SectorNumber, + SealedCID: v5.Info.SealedCID, + SealRandEpoch: v5.Info.SealRandEpoch, + DealIDs: v5.Info.DealIDs, + Expiration: v5.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v5.PreCommitDeposit, + PreCommitEpoch: v5.PreCommitEpoch, + } +} 
+ +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/miner/state.v6.go b/venus-shared/actors/builtin/miner/state.v6.go new file mode 100644 index 0000000000..c903e6e2d5 --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.v6.go @@ -0,0 +1,565 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" + + "fmt" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" + adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt" +) + +var _ State = (*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store) (State, error) { + out := state6{store: store} + out.State = miner6.State{} + return &out, nil +} + +type state6 struct { + miner6.State + store adt.Store +} + +type deadline6 struct { + miner6.Deadline + store adt.Store +} + +type partition6 struct { + miner6.Partition + store adt.Store +} + +func (s *state6) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) 
+ return available, err +} + +func (s *state6) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state6) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state6) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state6) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state6) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state6) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV6SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state6) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state6) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner6.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state6) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. 
If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner6.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner6.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner6.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner6.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner6.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state6) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV6SectorPreCommitOnChainInfo(*info) 
+ + return &ret, nil +} + +func (s *state6) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt6.AsMap(s.store, s.State.PreCommittedSectors, builtin6.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner6.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV6SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state6) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner6.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info6 miner6.SectorOnChainInfo + if err := sectors.ForEach(&info6, func(_ int64) error { + info := fromV6SectorOnChainInfo(info6) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos6, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos6)) + for i, info6 := range infos6 { + info := fromV6SectorOnChainInfo(*info6) + infos[i] = &info + } + return infos, nil +} + +func (s *state6) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state6) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state6) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state6) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state6) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state6) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline6{*dl, s.store}, nil +} + +func (s *state6) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner6.Deadline) error { + return cb(i, &deadline6{*dl, s.store}) + }) +} + +func (s *state6) NumDeadlines() (uint64, error) { + return miner6.WPoStPeriodDeadlines, nil +} + +func (s *state6) DeadlinesChanged(other State) (bool, error) { + other6, ok := other.(*state6) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other6.Deadlines), nil +} + +func (s *state6) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state6) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state6) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + return mi, nil +} + +func (s *state6) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state6) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state6) sectors() (adt.Array, error) { + return adt6.AsArray(s.store, s.Sectors, miner6.SectorsAmtBitwidth) +} + +func (s 
*state6) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner6.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV6SectorOnChainInfo(si), nil +} + +func (s *state6) precommits() (adt.Map, error) { + return adt6.AsMap(s.store, s.PreCommittedSectors, builtin6.DefaultHamtBitwidth) +} + +func (s *state6) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner6.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV6SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state6) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner6.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner6.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline6) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition6{*p, d.store}, nil +} + +func (d *deadline6) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner6.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition6{part, d.store}) + }) +} + +func (d *deadline6) 
PartitionsChanged(other Deadline) (bool, error) { + other6, ok := other.(*deadline6) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other6.Deadline.Partitions), nil +} + +func (d *deadline6) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline6) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition6) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition6) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition6) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition6) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v6.SectorNumber, + SealProof: v6.SealProof, + SealedCID: v6.SealedCID, + DealIDs: v6.DealIDs, + Activation: v6.Activation, + Expiration: v6.Expiration, + DealWeight: v6.DealWeight, + VerifiedDealWeight: v6.VerifiedDealWeight, + InitialPledge: v6.InitialPledge, + ExpectedDayReward: v6.ExpectedDayReward, + ExpectedStoragePledge: v6.ExpectedStoragePledge, + } + return info +} + +func fromV6SectorPreCommitOnChainInfo(v6 miner6.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + return minertypes.SectorPreCommitOnChainInfo{ + Info: minertypes.SectorPreCommitInfo{ + SealProof: v6.Info.SealProof, + SectorNumber: v6.Info.SectorNumber, + SealedCID: v6.Info.SealedCID, + SealRandEpoch: v6.Info.SealRandEpoch, + DealIDs: v6.Info.DealIDs, + Expiration: v6.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v6.PreCommitDeposit, + PreCommitEpoch: v6.PreCommitEpoch, + } +} 
+ +func (s *state6) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/miner/state.v7.go b/venus-shared/actors/builtin/miner/state.v7.go new file mode 100644 index 0000000000..0fda2f2dd1 --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.v7.go @@ -0,0 +1,566 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" + + "fmt" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = miner7.State{} + return &out, nil +} + +type state7 struct { + miner7.State + store adt.Store +} + +type deadline7 struct { + miner7.Deadline + store adt.Store +} + +type partition7 struct { + miner7.Partition + store adt.Store +} + +func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) 
+ return available, err +} + +func (s *state7) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state7) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state7) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state7) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state7) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state7) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV7SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state7) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state7) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state7) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. 
If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner7.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner7.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner7.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner7.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state7) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV7SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state7) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) 
error) error { + precommitted, err := adt7.AsMap(s.store, s.State.PreCommittedSectors, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner7.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV7SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state7) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner7.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info7 miner7.SectorOnChainInfo + if err := sectors.ForEach(&info7, func(_ int64) error { + info := fromV7SectorOnChainInfo(info7) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. + infos7, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos7)) + for i, info7 := range infos7 { + info := fromV7SectorOnChainInfo(*info7) + infos[i] = &info + } + return infos, nil +} + +func (s *state7) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state7) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state7) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state7) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := 
allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state7) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state7) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline7{*dl, s.store}, nil +} + +func (s *state7) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner7.Deadline) error { + return cb(i, &deadline7{*dl, s.store}) + }) +} + +func (s *state7) NumDeadlines() (uint64, error) { + return miner7.WPoStPeriodDeadlines, nil +} + +func (s *state7) DeadlinesChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other7.Deadlines), nil +} + +func (s *state7) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + 
+func (s *state7) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + return mi, nil +} + +func (s *state7) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state7) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state7) sectors() (adt.Array, error) { + return adt7.AsArray(s.store, s.Sectors, miner7.SectorsAmtBitwidth) +} + +func (s *state7) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner7.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV7SectorOnChainInfo(si), nil +} + +func (s *state7) precommits() (adt.Map, error) { + return adt7.AsMap(s.store, s.PreCommittedSectors, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner7.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV7SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state7) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner7.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var 
part miner7.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline7) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition7{*p, d.store}, nil +} + +func (d *deadline7) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner7.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition7{part, d.store}) + }) +} + +func (d *deadline7) PartitionsChanged(other Deadline) (bool, error) { + other7, ok := other.(*deadline7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other7.Deadline.Partitions), nil +} + +func (d *deadline7) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline7) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition7) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition7) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition7) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition7) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV7SectorOnChainInfo(v7 miner7.SectorOnChainInfo) SectorOnChainInfo { + 
info := SectorOnChainInfo{ + SectorNumber: v7.SectorNumber, + SealProof: v7.SealProof, + SealedCID: v7.SealedCID, + DealIDs: v7.DealIDs, + Activation: v7.Activation, + Expiration: v7.Expiration, + DealWeight: v7.DealWeight, + VerifiedDealWeight: v7.VerifiedDealWeight, + InitialPledge: v7.InitialPledge, + ExpectedDayReward: v7.ExpectedDayReward, + ExpectedStoragePledge: v7.ExpectedStoragePledge, + + SectorKeyCID: v7.SectorKeyCID, + } + return info +} + +func fromV7SectorPreCommitOnChainInfo(v7 miner7.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + return minertypes.SectorPreCommitOnChainInfo{ + Info: minertypes.SectorPreCommitInfo{ + SealProof: v7.Info.SealProof, + SectorNumber: v7.Info.SectorNumber, + SealedCID: v7.Info.SealedCID, + SealRandEpoch: v7.Info.SealRandEpoch, + DealIDs: v7.Info.DealIDs, + Expiration: v7.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v7.PreCommitDeposit, + PreCommitEpoch: v7.PreCommitEpoch, + } +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/miner/state.v8.go b/venus-shared/actors/builtin/miner/state.v8.go new file mode 100644 index 0000000000..da78fb4fdb --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.v8.go @@ -0,0 +1,565 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" + + "fmt" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin8 "github.com/filecoin-project/go-state-types/builtin" + miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner" + adt8 
"github.com/filecoin-project/go-state-types/builtin/v8/util/adt" +) + +var _ State = (*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store) (State, error) { + out := state8{store: store} + out.State = miner8.State{} + return &out, nil +} + +type state8 struct { + miner8.State + store adt.Store +} + +type deadline8 struct { + miner8.Deadline + store adt.Store +} + +type partition8 struct { + miner8.Partition + store adt.Store +} + +func (s *state8) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state8) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state8) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state8) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state8) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state8) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state8) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV8SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state8) FindSector(num abi.SectorNumber) 
(*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state8) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner8.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state8) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. 
+ + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner8.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner8.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner8.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner8.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner8.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state8) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV8SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state8) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt8.AsMap(s.store, s.State.PreCommittedSectors, builtin8.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner8.SectorPreCommitOnChainInfo 
+ if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV8SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state8) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner8.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info8 miner8.SectorOnChainInfo + if err := sectors.ForEach(&info8, func(_ int64) error { + info := fromV8SectorOnChainInfo(info8) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. + infos8, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos8)) + for i, info8 := range infos8 { + info := fromV8SectorOnChainInfo(*info8) + infos[i] = &info + } + return infos, nil +} + +func (s *state8) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state8) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state8) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state8) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + 
allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state8) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state8) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline8{*dl, s.store}, nil +} + +func (s *state8) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner8.Deadline) error { + return cb(i, &deadline8{*dl, s.store}) + }) +} + +func (s *state8) NumDeadlines() (uint64, error) { + return miner8.WPoStPeriodDeadlines, nil +} + +func (s *state8) DeadlinesChanged(other State) (bool, error) { + other8, ok := other.(*state8) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other8.Deadlines), nil +} + +func (s *state8) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state8) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state8) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + 
ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + return mi, nil +} + +func (s *state8) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state8) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state8) sectors() (adt.Array, error) { + return adt8.AsArray(s.store, s.Sectors, miner8.SectorsAmtBitwidth) +} + +func (s *state8) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner8.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV8SectorOnChainInfo(si), nil +} + +func (s *state8) precommits() (adt.Map, error) { + return adt8.AsMap(s.store, s.PreCommittedSectors, builtin8.DefaultHamtBitwidth) +} + +func (s *state8) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner8.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV8SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state8) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner8.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner8.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + 
dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline8) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition8{*p, d.store}, nil +} + +func (d *deadline8) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner8.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition8{part, d.store}) + }) +} + +func (d *deadline8) PartitionsChanged(other Deadline) (bool, error) { + other8, ok := other.(*deadline8) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other8.Deadline.Partitions), nil +} + +func (d *deadline8) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline8) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition8) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition8) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition8) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition8) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV8SectorOnChainInfo(v8 miner8.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v8.SectorNumber, + SealProof: v8.SealProof, + SealedCID: v8.SealedCID, + DealIDs: v8.DealIDs, + Activation: v8.Activation, + Expiration: v8.Expiration, + 
DealWeight: v8.DealWeight, + VerifiedDealWeight: v8.VerifiedDealWeight, + InitialPledge: v8.InitialPledge, + ExpectedDayReward: v8.ExpectedDayReward, + ExpectedStoragePledge: v8.ExpectedStoragePledge, + + SectorKeyCID: v8.SectorKeyCID, + } + return info +} + +func fromV8SectorPreCommitOnChainInfo(v8 miner8.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + return minertypes.SectorPreCommitOnChainInfo{ + Info: minertypes.SectorPreCommitInfo{ + SealProof: v8.Info.SealProof, + SectorNumber: v8.Info.SectorNumber, + SealedCID: v8.Info.SealedCID, + SealRandEpoch: v8.Info.SealRandEpoch, + DealIDs: v8.Info.DealIDs, + Expiration: v8.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v8.PreCommitDeposit, + PreCommitEpoch: v8.PreCommitEpoch, + } +} + +func (s *state8) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/miner/state.v9.go b/venus-shared/actors/builtin/miner/state.v9.go new file mode 100644 index 0000000000..53b7147753 --- /dev/null +++ b/venus-shared/actors/builtin/miner/state.v9.go @@ -0,0 +1,557 @@ +// FETCHED FROM LOTUS: builtin/miner/state.go.template + +package miner + +import ( + "bytes" + "errors" + + "fmt" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" + miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner" + adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if 
err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store) (State, error) { + out := state9{store: store} + out.State = miner9.State{} + return &out, nil +} + +type state9 struct { + miner9.State + store adt.Store +} + +type deadline9 struct { + miner9.Deadline + store adt.Store +} + +type partition9 struct { + miner9.Partition + store adt.Store +} + +func (s *state9) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to get available balance: %v", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state9) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state9) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state9) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state9) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state9) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state9) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV9SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state9) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state9) NumLiveSectors() (uint64, 
error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner9.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state9) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner9.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner9.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner9.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner9.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner9.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { 
+ out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, fmt.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state9) GetPrecommittedSector(num abi.SectorNumber) (*minertypes.SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV9SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state9) ForEachPrecommittedSector(cb func(minertypes.SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt9.AsMap(s.store, s.State.PreCommittedSectors, builtin9.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner9.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV9SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state9) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner9.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info9 miner9.SectorOnChainInfo + if err := sectors.ForEach(&info9, func(_ int64) error { + info := fromV9SectorOnChainInfo(info9) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos9, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos9)) + for i, info9 := range infos9 { + info := fromV9SectorOnChainInfo(*info9) + infos[i] = &info + } + return infos, nil +} + +func (s *state9) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state9) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state9) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state9) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state9) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state9) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline9{*dl, s.store}, nil +} + +func (s *state9) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner9.Deadline) error { + return cb(i, &deadline9{*dl, s.store}) + }) +} + +func (s *state9) NumDeadlines() (uint64, error) { + return miner9.WPoStPeriodDeadlines, nil +} + +func (s *state9) DeadlinesChanged(other State) (bool, error) { + other9, ok := other.(*state9) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other9.Deadlines), nil +} + +func (s *state9) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state9) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state9) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + + Beneficiary: info.Beneficiary, + BeneficiaryTerm: info.BeneficiaryTerm, + PendingBeneficiaryTerm: info.PendingBeneficiaryTerm, + } + + return mi, nil +} + +func (s *state9) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state9) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + 
+func (s *state9) sectors() (adt.Array, error) { + return adt9.AsArray(s.store, s.Sectors, miner9.SectorsAmtBitwidth) +} + +func (s *state9) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner9.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV9SectorOnChainInfo(si), nil +} + +func (s *state9) precommits() (adt.Map, error) { + return adt9.AsMap(s.store, s.PreCommittedSectors, builtin9.DefaultHamtBitwidth) +} + +func (s *state9) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (minertypes.SectorPreCommitOnChainInfo, error) { + var sp miner9.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return minertypes.SectorPreCommitOnChainInfo{}, err + } + + return fromV9SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state9) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner9.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner9.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline9) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition9{*p, d.store}, nil +} + +func (d *deadline9) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner9.Partition + return 
ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition9{part, d.store}) + }) +} + +func (d *deadline9) PartitionsChanged(other Deadline) (bool, error) { + other9, ok := other.(*deadline9) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other9.Deadline.Partitions), nil +} + +func (d *deadline9) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline9) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition9) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition9) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition9) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition9) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV9SectorOnChainInfo(v9 miner9.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v9.SectorNumber, + SealProof: v9.SealProof, + SealedCID: v9.SealedCID, + DealIDs: v9.DealIDs, + Activation: v9.Activation, + Expiration: v9.Expiration, + DealWeight: v9.DealWeight, + VerifiedDealWeight: v9.VerifiedDealWeight, + InitialPledge: v9.InitialPledge, + ExpectedDayReward: v9.ExpectedDayReward, + ExpectedStoragePledge: v9.ExpectedStoragePledge, + + SectorKeyCID: v9.SectorKeyCID, + } + return info +} + +func fromV9SectorPreCommitOnChainInfo(v9 miner9.SectorPreCommitOnChainInfo) minertypes.SectorPreCommitOnChainInfo { + return v9 +} + +func (s *state9) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/miner/utils.go b/venus-shared/actors/builtin/miner/utils.go new file mode 100644 index 0000000000..354bffbad2 --- 
/dev/null +++ b/venus-shared/actors/builtin/miner/utils.go @@ -0,0 +1,90 @@ +// FETCHED FROM LOTUS: builtin/miner/utils.go + +package miner + +import ( + "fmt" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" +) + +func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) { + var parts []bitfield.BitField + + err := mas.ForEachDeadline(func(dlidx uint64, dl Deadline) error { + return dl.ForEachPartition(func(partidx uint64, part Partition) error { + s, err := sget(part) + if err != nil { + return fmt.Errorf("getting sector list (dl: %d, part %d): %w", dlidx, partidx, err) + } + + parts = append(parts, s) + return nil + }) + }) + if err != nil { + return bitfield.BitField{}, err + } + + return bitfield.MultiMerge(parts...) +} + +// SealProofTypeFromSectorSize returns preferred seal proof type for creating +// new miner actors and new sectors +func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) { + switch { + case nv < network.Version7: + switch ssize { + case 2 << 10: + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil + case 8 << 20: + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil + case 512 << 20: + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil + case 32 << 30: + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil + case 64 << 30: + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil + default: + return 0, fmt.Errorf("unsupported sector size for miner: %v", ssize) + } + case nv >= network.Version7: + switch ssize { + case 2 << 10: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case 8 << 20: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case 512 << 20: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case 32 << 30: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case 64 << 30: + return 
abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return 0, fmt.Errorf("unsupported sector size for miner: %v", ssize) + } + } + + return 0, fmt.Errorf("unsupported network version") +} + +// WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating +// new miner actors and new sectors +func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredPoStProof, error) { + switch ssize { + case 2 << 10: + return abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, nil + case 8 << 20: + return abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, nil + case 512 << 20: + return abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, nil + case 32 << 30: + return abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, nil + case 64 << 30: + return abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, nil + default: + return 0, fmt.Errorf("unsupported sector size for miner: %v", ssize) + } +} diff --git a/venus-shared/actors/builtin/multisig/actor.go b/venus-shared/actors/builtin/multisig/actor.go new file mode 100644 index 0000000000..fe779be592 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/actor.go @@ -0,0 +1,219 @@ +// FETCHED FROM LOTUS: builtin/multisig/actor.go.template + +package multisig + +import ( + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + + "github.com/minio/blake2b-simd" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + + msig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + 
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.MultisigKey { + return nil, fmt.Errorf("actor code is not multisig: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.MultisigActorCodeID: + return load0(store, act.Head) + + case builtin2.MultisigActorCodeID: + return load2(store, act.Head) + + case builtin3.MultisigActorCodeID: + return load3(store, act.Head) + + case builtin4.MultisigActorCodeID: + return load4(store, act.Head) + + case builtin5.MultisigActorCodeID: + return load5(store, act.Head) + + case builtin6.MultisigActorCodeID: + return load6(store, act.Head) + + case builtin7.MultisigActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + switch av { + + case actorstypes.Version0: + return make0(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actorstypes.Version2: + return make2(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actorstypes.Version3: + return make3(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actorstypes.Version4: + return 
make4(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actorstypes.Version5: + return make5(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actorstypes.Version6: + return make6(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actorstypes.Version7: + return make7(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actorstypes.Version8: + return make8(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actorstypes.Version9: + return make9(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error) + StartEpoch() (abi.ChainEpoch, error) + UnlockDuration() (abi.ChainEpoch, error) + InitialBalance() (abi.TokenAmount, error) + Threshold() (uint64, error) + Signers() ([]address.Address, error) + + ForEachPendingTxn(func(id int64, txn Transaction) error) error + PendingTxnChanged(State) (bool, error) + + transactions() (adt.Map, error) + decodeTransaction(val *cbg.Deferred) (Transaction, error) + GetState() interface{} +} + +type Transaction = msig9.Transaction + +var Methods = builtintypes.MethodsMultisig + +func Message(version actorstypes.Version, from address.Address) MessageBuilder { + switch version { + + case actorstypes.Version0: + return message0{from} + + case actorstypes.Version2: + return message2{message0{from}} + + case actorstypes.Version3: + return message3{message0{from}} + + case actorstypes.Version4: + return message4{message0{from}} + + case actorstypes.Version5: + return message5{message0{from}} + + case actorstypes.Version6: + return message6{message0{from}} + + case actorstypes.Version7: + return message7{message0{from}} + + case actorstypes.Version8: + return message8{message0{from}} + + case actorstypes.Version9: + 
return message9{message0{from}} + default: + panic(fmt.Sprintf("unsupported actors version: %d", version)) + } +} + +type MessageBuilder interface { + // Create a new multisig with the specified parameters. + Create(signers []address.Address, threshold uint64, + vestingStart, vestingDuration abi.ChainEpoch, + initialAmount abi.TokenAmount) (*types.Message, error) + + // Propose a transaction to the given multisig. + Propose(msig, target address.Address, amt abi.TokenAmount, + method abi.MethodNum, params []byte) (*types.Message, error) + + // Approve a multisig transaction. The "hash" is optional. + Approve(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) + + // Cancel a multisig transaction. The "hash" is optional. + Cancel(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) +} + +// this type is the same between v0 and v2 +type ProposalHashData = msig9.ProposalHashData +type ProposeReturn = msig9.ProposeReturn +type ProposeParams = msig9.ProposeParams +type ApproveReturn = msig9.ApproveReturn + +func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { + params := msig9.TxnIDParams{ID: msig9.TxnID(id)} + if data != nil { + if data.Requester.Protocol() != address.ID { + return nil, fmt.Errorf("proposer address must be an ID address, was %s", data.Requester) + } + if data.Value.Sign() == -1 { + return nil, fmt.Errorf("proposal value must be non-negative, was %s", data.Value) + } + if data.To == address.Undef { + return nil, fmt.Errorf("proposed destination address must be set") + } + pser, err := data.Serialize() + if err != nil { + return nil, err + } + hash := blake2b.Sum256(pser) + params.ProposalHash = hash[:] + } + + return actors.SerializeParams(¶ms) +} diff --git a/venus-shared/actors/builtin/multisig/actor.go.template b/venus-shared/actors/builtin/multisig/actor.go.template new file mode 100644 index 0000000000..a73b4e55c9 --- /dev/null +++ 
b/venus-shared/actors/builtin/multisig/actor.go.template @@ -0,0 +1,144 @@ +// FETCHED FROM LOTUS: builtin/multisig/actor.go.template + +package multisig + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "fmt" + + "github.com/minio/blake2b-simd" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + + msig{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin{{import .latestVersion}}multisig" +{{range .versions}} + {{if (le . 7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.MultisigKey { + return nil, fmt.Errorf("actor code is not multisig: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 
7)}} + case builtin{{.}}.MultisigActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store, signers, threshold, startEpoch, unlockDuration, initialBalance) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error) + StartEpoch() (abi.ChainEpoch, error) + UnlockDuration() (abi.ChainEpoch, error) + InitialBalance() (abi.TokenAmount, error) + Threshold() (uint64, error) + Signers() ([]address.Address, error) + + ForEachPendingTxn(func(id int64, txn Transaction) error) error + PendingTxnChanged(State) (bool, error) + + transactions() (adt.Map, error) + decodeTransaction(val *cbg.Deferred) (Transaction, error) + GetState() interface{} +} + +type Transaction = msig{{.latestVersion}}.Transaction + +var Methods = builtintypes.MethodsMultisig + +func Message(version actorstypes.Version, from address.Address) MessageBuilder { + switch version { +{{range .versions}} + case actorstypes.Version{{.}}: + return message{{.}}{{"{"}}{{if (ge . 2)}}message0{from}{{else}}from{{end}}} +{{end}} default: + panic(fmt.Sprintf("unsupported actors version: %d", version)) + } +} + +type MessageBuilder interface { + // Create a new multisig with the specified parameters. + Create(signers []address.Address, threshold uint64, + vestingStart, vestingDuration abi.ChainEpoch, + initialAmount abi.TokenAmount) (*types.Message, error) + + // Propose a transaction to the given multisig. 
+ Propose(msig, target address.Address, amt abi.TokenAmount, + method abi.MethodNum, params []byte) (*types.Message, error) + + // Approve a multisig transaction. The "hash" is optional. + Approve(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) + + // Cancel a multisig transaction. The "hash" is optional. + Cancel(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) +} + +// this type is the same between v0 and v2 +type ProposalHashData = msig{{.latestVersion}}.ProposalHashData +type ProposeReturn = msig{{.latestVersion}}.ProposeReturn +type ProposeParams = msig{{.latestVersion}}.ProposeParams +type ApproveReturn = msig{{.latestVersion}}.ApproveReturn + +func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { + params := msig{{.latestVersion}}.TxnIDParams{ID: msig{{.latestVersion}}.TxnID(id)} + if data != nil { + if data.Requester.Protocol() != address.ID { + return nil, fmt.Errorf("proposer address must be an ID address, was %s", data.Requester) + } + if data.Value.Sign() == -1 { + return nil, fmt.Errorf("proposal value must be non-negative, was %s", data.Value) + } + if data.To == address.Undef { + return nil, fmt.Errorf("proposed destination address must be set") + } + pser, err := data.Serialize() + if err != nil { + return nil, err + } + hash := blake2b.Sum256(pser) + params.ProposalHash = hash[:] + } + + return actors.SerializeParams(¶ms) +} diff --git a/venus-shared/actors/builtin/multisig/diff.go b/venus-shared/actors/builtin/multisig/diff.go new file mode 100644 index 0000000000..5cd8edd5d9 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/diff.go @@ -0,0 +1,136 @@ +// FETCHED FROM LOTUS: builtin/multisig/diff.go + +package multisig + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" +) + +type 
PendingTransactionChanges struct { + Added []TransactionChange + Modified []TransactionModification + Removed []TransactionChange +} + +type TransactionChange struct { + TxID int64 + Tx Transaction +} + +type TransactionModification struct { + TxID int64 + From Transaction + To Transaction +} + +func DiffPendingTransactions(pre, cur State) (*PendingTransactionChanges, error) { + results := new(PendingTransactionChanges) + if changed, err := pre.PendingTxnChanged(cur); err != nil { + return nil, err + } else if !changed { // if nothing has changed then return an empty result and bail. + return results, nil + } + + pret, err := pre.transactions() + if err != nil { + return nil, err + } + + curt, err := cur.transactions() + if err != nil { + return nil, err + } + + if err := adt.DiffAdtMap(pret, curt, &transactionDiffer{results, pre, cur}); err != nil { + return nil, err + } + return results, nil +} + +type transactionDiffer struct { + Results *PendingTransactionChanges + pre, after State +} + +func (t *transactionDiffer) AsKey(key string) (abi.Keyer, error) { + txID, err := abi.ParseIntKey(key) + if err != nil { + return nil, err + } + return abi.IntKey(txID), nil +} + +func (t *transactionDiffer) Add(key string, val *cbg.Deferred) error { + txID, err := abi.ParseIntKey(key) + if err != nil { + return err + } + tx, err := t.after.decodeTransaction(val) + if err != nil { + return err + } + t.Results.Added = append(t.Results.Added, TransactionChange{ + TxID: txID, + Tx: tx, + }) + return nil +} + +func (t *transactionDiffer) Modify(key string, from, to *cbg.Deferred) error { + txID, err := abi.ParseIntKey(key) + if err != nil { + return err + } + + txFrom, err := t.pre.decodeTransaction(from) + if err != nil { + return err + } + + txTo, err := t.after.decodeTransaction(to) + if err != nil { + return err + } + + if approvalsChanged(txFrom.Approved, txTo.Approved) { + t.Results.Modified = append(t.Results.Modified, TransactionModification{ + TxID: txID, + From: txFrom, + 
To: txTo, + }) + } + + return nil +} + +func approvalsChanged(from, to []address.Address) bool { + if len(from) != len(to) { + return true + } + for idx := range from { + if from[idx] != to[idx] { + return true + } + } + return false +} + +func (t *transactionDiffer) Remove(key string, val *cbg.Deferred) error { + txID, err := abi.ParseIntKey(key) + if err != nil { + return err + } + tx, err := t.pre.decodeTransaction(val) + if err != nil { + return err + } + t.Results.Removed = append(t.Results.Removed, TransactionChange{ + TxID: txID, + Tx: tx, + }) + return nil +} diff --git a/venus-shared/actors/builtin/multisig/message.sep.go.template b/venus-shared/actors/builtin/multisig/message.sep.go.template new file mode 100644 index 0000000000..c709234b01 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/message.sep.go.template @@ -0,0 +1,168 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + {{if (le .v 7)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" + init{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/init" + multisig{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/multisig" + {{else}} + actorstypes "github.com/filecoin-project/go-state-types/actors" + multisig{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}multisig" + init{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin/v{{.latestVersion}}/init" + {{end}} + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message{{.v}} struct{ {{if (ge .v 2)}}message0{{else}}from address.Address{{end}} } + +func (m 
message{{.v}}) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } +{{if (le .v 1)}} + if unlockStart != 0 { + return nil, fmt.Errorf("actors v0 does not support a non-zero vesting start time") + } +{{end}} + // Set up constructor parameters for multisig + msigParams := &multisig{{.v}}.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration,{{if (ge .v 2)}} + StartEpoch: unlockStart,{{end}} + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + {{if (le .v 7)}} + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init{{.v}}.ExecParams{ + CodeCID: builtin{{.v}}.MultisigActorCodeID, + ConstructorParams: enc, + } + {{else}} + code, ok := actors.GetActorCodeID(actorstypes.Version{{.v}}, actors.MultisigKey) + if !ok { + return nil, fmt.Errorf("failed to get multisig code ID") + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init{{.latestVersion}}.ExecParams{ + CodeCID: code, + ConstructorParams: enc, + } + {{end}} + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} + +{{if (le .v 1)}} + +func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount, + method abi.MethodNum, params []byte) (*types.Message, error) { + + if 
msig == address.Undef { + return nil, fmt.Errorf("must provide a multisig address for proposal") + } + + if to == address.Undef { + return nil, fmt.Errorf("must provide a target address for proposal") + } + + if amt.Sign() == -1 { + return nil, fmt.Errorf("must provide a non-negative amount for proposed send") + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + enc, actErr := actors.SerializeParams(&multisig0.ProposeParams{ + To: to, + Value: amt, + Method: method, + Params: params, + }) + if actErr != nil { + return nil, fmt.Errorf("failed to serialize parameters: %w", actErr) + } + + return &types.Message{ + To: msig, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin0.MethodsMultisig.Propose, + Params: enc, + }, nil +} + +func (m message0) Approve(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) { + enc, err := txnParams(txID, hashData) + if err != nil { + return nil, err + } + + return &types.Message{ + To: msig, + From: m.from, + Value: types.NewInt(0), + Method: builtin0.MethodsMultisig.Approve, + Params: enc, + }, nil +} + +func (m message0) Cancel(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) { + enc, err := txnParams(txID, hashData) + if err != nil { + return nil, err + } + + return &types.Message{ + To: msig, + From: m.from, + Value: types.NewInt(0), + Method: builtin0.MethodsMultisig.Cancel, + Params: enc, + }, nil +} +{{end}} diff --git a/venus-shared/actors/builtin/multisig/message.v0.go b/venus-shared/actors/builtin/multisig/message.v0.go new file mode 100644 index 0000000000..bff3932c93 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/message.v0.go @@ -0,0 +1,145 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin0 
"github.com/filecoin-project/specs-actors/actors/builtin" + init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" + multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message0 struct{ from address.Address } + +func (m message0) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + if unlockStart != 0 { + return nil, fmt.Errorf("actors v0 does not support a non-zero vesting start time") + } + + // Set up constructor parameters for multisig + msigParams := &multisig0.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init0.ExecParams{ + CodeCID: builtin0.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} + +func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount, + method abi.MethodNum, params 
[]byte) (*types.Message, error) { + + if msig == address.Undef { + return nil, fmt.Errorf("must provide a multisig address for proposal") + } + + if to == address.Undef { + return nil, fmt.Errorf("must provide a target address for proposal") + } + + if amt.Sign() == -1 { + return nil, fmt.Errorf("must provide a non-negative amount for proposed send") + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + enc, actErr := actors.SerializeParams(&multisig0.ProposeParams{ + To: to, + Value: amt, + Method: method, + Params: params, + }) + if actErr != nil { + return nil, fmt.Errorf("failed to serialize parameters: %w", actErr) + } + + return &types.Message{ + To: msig, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin0.MethodsMultisig.Propose, + Params: enc, + }, nil +} + +func (m message0) Approve(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) { + enc, err := txnParams(txID, hashData) + if err != nil { + return nil, err + } + + return &types.Message{ + To: msig, + From: m.from, + Value: types.NewInt(0), + Method: builtin0.MethodsMultisig.Approve, + Params: enc, + }, nil +} + +func (m message0) Cancel(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) { + enc, err := txnParams(txID, hashData) + if err != nil { + return nil, err + } + + return &types.Message{ + To: msig, + From: m.from, + Value: types.NewInt(0), + Method: builtin0.MethodsMultisig.Cancel, + Params: enc, + }, nil +} diff --git a/venus-shared/actors/builtin/multisig/message.v2.go b/venus-shared/actors/builtin/multisig/message.v2.go new file mode 100644 index 0000000000..0bbec6fc15 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/message.v2.go @@ -0,0 +1,74 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin2 
"github.com/filecoin-project/specs-actors/v2/actors/builtin" + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message2 struct{ message0 } + +func (m message2) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig2.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init2.ExecParams{ + CodeCID: builtin2.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/venus-shared/actors/builtin/multisig/message.v3.go b/venus-shared/actors/builtin/multisig/message.v3.go new file mode 100644 index 0000000000..301b7d8cdb --- /dev/null +++ 
b/venus-shared/actors/builtin/multisig/message.v3.go @@ -0,0 +1,74 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init" + multisig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message3 struct{ message0 } + +func (m message3) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig3.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init3.ExecParams{ + CodeCID: builtin3.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: 
builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/venus-shared/actors/builtin/multisig/message.v4.go b/venus-shared/actors/builtin/multisig/message.v4.go new file mode 100644 index 0000000000..f63fc4c021 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/message.v4.go @@ -0,0 +1,74 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + init4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/init" + multisig4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message4 struct{ message0 } + +func (m message4) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig4.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := 
&init4.ExecParams{ + CodeCID: builtin4.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/venus-shared/actors/builtin/multisig/message.v5.go b/venus-shared/actors/builtin/multisig/message.v5.go new file mode 100644 index 0000000000..cc5eedb3e0 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/message.v5.go @@ -0,0 +1,74 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init" + multisig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message5 struct{ message0 } + +func (m message5) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig5.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + 
UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init5.ExecParams{ + CodeCID: builtin5.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/venus-shared/actors/builtin/multisig/message.v6.go b/venus-shared/actors/builtin/multisig/message.v6.go new file mode 100644 index 0000000000..e98c5c0ea7 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/message.v6.go @@ -0,0 +1,74 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + init6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/init" + multisig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message6 struct{ message0 } + +func (m message6) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = 
lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig6.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init6.ExecParams{ + CodeCID: builtin6.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/venus-shared/actors/builtin/multisig/message.v7.go b/venus-shared/actors/builtin/multisig/message.v7.go new file mode 100644 index 0000000000..749b54c0b1 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/message.v7.go @@ -0,0 +1,74 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + multisig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message7 struct{ message0 } + +func (m message7) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + 
initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig7.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init7.ExecParams{ + CodeCID: builtin7.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/venus-shared/actors/builtin/multisig/message.v8.go b/venus-shared/actors/builtin/multisig/message.v8.go new file mode 100644 index 0000000000..659447be9f --- /dev/null +++ b/venus-shared/actors/builtin/multisig/message.v8.go @@ -0,0 +1,79 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + multisig8 "github.com/filecoin-project/go-state-types/builtin/v8/multisig" + init9 "github.com/filecoin-project/go-state-types/builtin/v9/init" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ 
"github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message8 struct{ message0 } + +func (m message8) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig8.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + code, ok := actors.GetActorCodeID(actorstypes.Version8, actors.MultisigKey) + if !ok { + return nil, fmt.Errorf("failed to get multisig code ID") + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init9.ExecParams{ + CodeCID: code, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/venus-shared/actors/builtin/multisig/message.v9.go b/venus-shared/actors/builtin/multisig/message.v9.go new file mode 100644 index 0000000000..35e8873998 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/message.v9.go @@ -0,0 +1,79 @@ +// FETCHED FROM LOTUS: builtin/multisig/message.go.template + +package multisig + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + 
"github.com/filecoin-project/go-state-types/abi" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + init9 "github.com/filecoin-project/go-state-types/builtin/v9/init" + multisig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig" + + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message9 struct{ message0 } + +func (m message9) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, fmt.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, fmt.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig9.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + code, ok := actors.GetActorCodeID(actorstypes.Version9, actors.MultisigKey) + if !ok { + return nil, fmt.Errorf("failed to get multisig code ID") + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init9.ExecParams{ + CodeCID: code, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git 
a/venus-shared/actors/builtin/multisig/state.sep.go.template b/venus-shared/actors/builtin/multisig/state.sep.go.template new file mode 100644 index 0000000000..bdd7b86c6f --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.sep.go.template @@ -0,0 +1,135 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package multisig + +import ( + "bytes" + "encoding/binary" + + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + +{{if (le .v 7)}} + {{if (ge .v 3)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" + {{end}} + msig{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/multisig" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +{{else}} + msig{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}multisig" + adt{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}util/adt" + builtin{{.v}} "github.com/filecoin-project/go-state-types/builtin" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state{{.v}}{store: store} + out.State = msig{{.v}}.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + {{if (le .v 2)}} + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, 
err + } + + out.State.PendingTxns = em + {{else}} + em, err := adt{{.v}}.StoreEmptyMap(store, builtin{{.v}}.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + {{end}} + return &out, nil +} + +type state{{.v}} struct { + msig{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state{{.v}}) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state{{.v}}) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state{{.v}}) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state{{.v}}) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state{{.v}}) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state{{.v}}) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt{{.v}}.AsMap(s.store, s.State.PendingTxns{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) + if err != nil { + return err + } + var out msig{{.v}}.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state{{.v}}) PendingTxnChanged(other State) (bool, error) { + other{{.v}}, ok := other.(*state{{.v}}) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other{{.v}}.PendingTxns), nil +} + +func (s *state{{.v}}) transactions() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.PendingTxns{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) decodeTransaction(val *cbg.Deferred) 
(Transaction, error) { + var tx msig{{.v}}.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/multisig/state.v0.go b/venus-shared/actors/builtin/multisig/state.v0.go new file mode 100644 index 0000000000..f522309bbb --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.v0.go @@ -0,0 +1,119 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package multisig + +import ( + "bytes" + "encoding/binary" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state0{store: store} + out.State = msig0.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state0 struct { + msig0.State + store adt.Store +} + +func (s *state0) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return 
s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state0) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state0) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state0) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state0) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state0) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state0) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt0.AsMap(s.store, s.State.PendingTxns) + if err != nil { + return err + } + var out msig0.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state0) PendingTxnChanged(other State) (bool, error) { + other0, ok := other.(*state0) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other0.PendingTxns), nil +} + +func (s *state0) transactions() (adt.Map, error) { + return adt0.AsMap(s.store, s.PendingTxns) +} + +func (s *state0) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig0.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/multisig/state.v2.go b/venus-shared/actors/builtin/multisig/state.v2.go new file mode 100644 index 0000000000..8711f5c12e --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.v2.go @@ -0,0 +1,119 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package multisig + +import ( + "bytes" + 
"encoding/binary" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + msig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state2{store: store} + out.State = msig2.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state2 struct { + msig2.State + store adt.Store +} + +func (s *state2) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state2) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state2) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state2) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state2) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state2) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state2) ForEachPendingTxn(cb func(id int64, txn 
Transaction) error) error { + arr, err := adt2.AsMap(s.store, s.State.PendingTxns) + if err != nil { + return err + } + var out msig2.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state2) PendingTxnChanged(other State) (bool, error) { + other2, ok := other.(*state2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other2.PendingTxns), nil +} + +func (s *state2) transactions() (adt.Map, error) { + return adt2.AsMap(s.store, s.PendingTxns) +} + +func (s *state2) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig2.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/multisig/state.v3.go b/venus-shared/actors/builtin/multisig/state.v3.go new file mode 100644 index 0000000000..7d268a79f4 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.v3.go @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package multisig + +import ( + "bytes" + "encoding/binary" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + msig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err 
:= store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state3{store: store} + out.State = msig3.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt3.StoreEmptyMap(store, builtin3.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state3 struct { + msig3.State + store adt.Store +} + +func (s *state3) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state3) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state3) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state3) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state3) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state3) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state3) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt3.AsMap(s.store, s.State.PendingTxns, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig3.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state3) PendingTxnChanged(other State) (bool, error) { + other3, ok := other.(*state3) + if 
!ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other3.PendingTxns), nil +} + +func (s *state3) transactions() (adt.Map, error) { + return adt3.AsMap(s.store, s.PendingTxns, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig3.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/multisig/state.v4.go b/venus-shared/actors/builtin/multisig/state.v4.go new file mode 100644 index 0000000000..ca68431738 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.v4.go @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package multisig + +import ( + "bytes" + "encoding/binary" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + msig4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state4{store: store} + out.State = msig4.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + 
out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt4.StoreEmptyMap(store, builtin4.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state4 struct { + msig4.State + store adt.Store +} + +func (s *state4) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state4) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state4) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state4) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state4) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state4) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state4) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt4.AsMap(s.store, s.State.PendingTxns, builtin4.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig4.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state4) PendingTxnChanged(other State) (bool, error) { + other4, ok := other.(*state4) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other4.PendingTxns), nil +} + +func (s *state4) transactions() (adt.Map, error) { + return adt4.AsMap(s.store, s.PendingTxns, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig4.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return 
Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/multisig/state.v5.go b/venus-shared/actors/builtin/multisig/state.v5.go new file mode 100644 index 0000000000..da2ca284ce --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.v5.go @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package multisig + +import ( + "bytes" + "encoding/binary" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state5{store: store} + out.State = msig5.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt5.StoreEmptyMap(store, builtin5.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state5 struct { + msig5.State + store adt.Store +} + +func (s *state5) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - 
s.State.StartEpoch), nil +} + +func (s *state5) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state5) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state5) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state5) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state5) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state5) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt5.AsMap(s.store, s.State.PendingTxns, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig5.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state5) PendingTxnChanged(other State) (bool, error) { + other5, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other5.PendingTxns), nil +} + +func (s *state5) transactions() (adt.Map, error) { + return adt5.AsMap(s.store, s.PendingTxns, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig5.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/multisig/state.v6.go b/venus-shared/actors/builtin/multisig/state.v6.go new file mode 100644 index 0000000000..e6e29fc4d0 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.v6.go @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package 
multisig + +import ( + "bytes" + "encoding/binary" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + msig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig" + adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt" +) + +var _ State = (*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state6{store: store} + out.State = msig6.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt6.StoreEmptyMap(store, builtin6.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state6 struct { + msig6.State + store adt.Store +} + +func (s *state6) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state6) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state6) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state6) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state6) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state6) 
Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state6) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt6.AsMap(s.store, s.State.PendingTxns, builtin6.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig6.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state6) PendingTxnChanged(other State) (bool, error) { + other6, ok := other.(*state6) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other6.PendingTxns), nil +} + +func (s *state6) transactions() (adt.Map, error) { + return adt6.AsMap(s.store, s.PendingTxns, builtin6.DefaultHamtBitwidth) +} + +func (s *state6) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig6.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state6) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/multisig/state.v7.go b/venus-shared/actors/builtin/multisig/state.v7.go new file mode 100644 index 0000000000..c190bcc159 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.v7.go @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package multisig + +import ( + "bytes" + "encoding/binary" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" + adt7 
"github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state7{store: store} + out.State = msig7.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt7.StoreEmptyMap(store, builtin7.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state7 struct { + msig7.State + store adt.Store +} + +func (s *state7) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state7) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state7) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state7) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state7) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state7) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state7) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt7.AsMap(s.store, s.State.PendingTxns, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig7.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending 
transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state7) PendingTxnChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other7.PendingTxns), nil +} + +func (s *state7) transactions() (adt.Map, error) { + return adt7.AsMap(s.store, s.PendingTxns, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig7.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/multisig/state.v8.go b/venus-shared/actors/builtin/multisig/state.v8.go new file mode 100644 index 0000000000..2718a22521 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.v8.go @@ -0,0 +1,120 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package multisig + +import ( + "bytes" + "encoding/binary" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin8 "github.com/filecoin-project/go-state-types/builtin" + msig8 "github.com/filecoin-project/go-state-types/builtin/v8/multisig" + adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" +) + +var _ State = (*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) 
(State, error) { + out := state8{store: store} + out.State = msig8.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt8.StoreEmptyMap(store, builtin8.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state8 struct { + msig8.State + store adt.Store +} + +func (s *state8) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state8) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state8) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state8) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state8) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state8) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state8) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt8.AsMap(s.store, s.State.PendingTxns, builtin8.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig8.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state8) PendingTxnChanged(other State) (bool, error) { + other8, ok := other.(*state8) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other8.PendingTxns), nil +} + +func (s *state8) transactions() (adt.Map, error) { + return adt8.AsMap(s.store, s.PendingTxns, builtin8.DefaultHamtBitwidth) +} + 
+func (s *state8) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig8.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state8) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/multisig/state.v9.go b/venus-shared/actors/builtin/multisig/state.v9.go new file mode 100644 index 0000000000..38ddcd4372 --- /dev/null +++ b/venus-shared/actors/builtin/multisig/state.v9.go @@ -0,0 +1,120 @@ +// FETCHED FROM LOTUS: builtin/multisig/state.go.template + +package multisig + +import ( + "bytes" + "encoding/binary" + + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" + msig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig" + adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state9{store: store} + out.State = msig9.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt9.StoreEmptyMap(store, builtin9.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state9 struct { + 
msig9.State + store adt.Store +} + +func (s *state9) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state9) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state9) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state9) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state9) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state9) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state9) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt9.AsMap(s.store, s.State.PendingTxns, builtin9.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig9.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return fmt.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state9) PendingTxnChanged(other State) (bool, error) { + other9, ok := other.(*state9) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other9.PendingTxns), nil +} + +func (s *state9) transactions() (adt.Map, error) { + return adt9.AsMap(s.store, s.PendingTxns, builtin9.DefaultHamtBitwidth) +} + +func (s *state9) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig9.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state9) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/paych/actor.go b/venus-shared/actors/builtin/paych/actor.go new file mode 100644 index 0000000000..ba2e16ae80 --- /dev/null 
+++ b/venus-shared/actors/builtin/paych/actor.go @@ -0,0 +1,186 @@ +// FETCHED FROM LOTUS: builtin/paych/actor.go.template + +package paych + +import ( + "encoding/base64" + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + ipldcbor "github.com/ipfs/go-ipld-cbor" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +// Load returns an abstract copy of payment channel state, irregardless of actor version +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.PaychKey { + return nil, fmt.Errorf("actor code is not paych: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.PaymentChannelActorCodeID: + return load0(store, act.Head) + + case builtin2.PaymentChannelActorCodeID: + return load2(store, act.Head) + + case 
builtin3.PaymentChannelActorCodeID: + return load3(store, act.Head) + + case builtin4.PaymentChannelActorCodeID: + return load4(store, act.Head) + + case builtin5.PaymentChannelActorCodeID: + return load5(store, act.Head) + + case builtin6.PaymentChannelActorCodeID: + return load6(store, act.Head) + + case builtin7.PaymentChannelActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +// State is an abstract version of payment channel state that works across +// versions +type State interface { + cbor.Marshaler + // Channel owner, who has funded the actor + From() (address.Address, error) + // Recipient of payouts from channel + To() (address.Address, error) + + // Height at which the channel can be `Collected` + SettlingAt() (abi.ChainEpoch, error) + + // Amount successfully redeemed through the payment channel, paid out on `Collect()` + ToSend() (abi.TokenAmount, error) + + // Get total number of lanes + LaneCount() (uint64, error) + + // Iterate lane states + ForEachLaneState(cb func(idx uint64, dl LaneState) error) error + + GetState() interface{} +} + +// LaneState is an abstract copy of the state of a single lane +type LaneState interface { + Redeemed() (big.Int, error) + Nonce() (uint64, error) +} + +// DecodeSignedVoucher decodes base64 encoded signed voucher. 
+func DecodeSignedVoucher(s string) (*paychtypes.SignedVoucher, error) { + data, err := base64.RawURLEncoding.DecodeString(s) + if err != nil { + return nil, err + } + + var sv paychtypes.SignedVoucher + if err := ipldcbor.DecodeInto(data, &sv); err != nil { + return nil, err + } + + return &sv, nil +} + +func Message(version actorstypes.Version, from address.Address) MessageBuilder { + switch version { + + case actorstypes.Version0: + return message0{from} + + case actorstypes.Version2: + return message2{from} + + case actorstypes.Version3: + return message3{from} + + case actorstypes.Version4: + return message4{from} + + case actorstypes.Version5: + return message5{from} + + case actorstypes.Version6: + return message6{from} + + case actorstypes.Version7: + return message7{from} + + case actorstypes.Version8: + return message8{from} + + case actorstypes.Version9: + return message9{from} + + default: + panic(fmt.Sprintf("unsupported actors version: %d", version)) + } +} + +type MessageBuilder interface { + Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) + Update(paych address.Address, voucher *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) + Settle(paych address.Address) (*types.Message, error) + Collect(paych address.Address) (*types.Message, error) +} + +func toV0SignedVoucher(sv paychtypes.SignedVoucher) paych0.SignedVoucher { + return paych0.SignedVoucher{ + ChannelAddr: sv.ChannelAddr, + TimeLockMin: sv.TimeLockMin, + TimeLockMax: sv.TimeLockMax, + SecretPreimage: sv.SecretHash, + Extra: (*paych0.ModVerifyParams)(sv.Extra), + Lane: sv.Lane, + Nonce: sv.Nonce, + Amount: sv.Amount, + MinSettleHeight: sv.MinSettleHeight, + Merges: nil, + Signature: sv.Signature, + } +} diff --git a/venus-shared/actors/builtin/paych/actor.go.template b/venus-shared/actors/builtin/paych/actor.go.template new file mode 100644 index 0000000000..8d9e98b308 --- /dev/null +++ b/venus-shared/actors/builtin/paych/actor.go.template @@ 
-0,0 +1,136 @@ +// FETCHED FROM LOTUS: builtin/paych/actor.go.template + +package paych + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "encoding/base64" + "fmt" + + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + ipldcbor "github.com/ipfs/go-ipld-cbor" + + paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" +{{range .versions}} + {{if (le . 7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +// Load returns an abstract copy of payment channel state, irregardless of actor version +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.PaychKey { + return nil, fmt.Errorf("actor code is not paych: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 
7)}} + case builtin{{.}}.PaymentChannelActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +// State is an abstract version of payment channel state that works across +// versions +type State interface { + cbor.Marshaler + // Channel owner, who has funded the actor + From() (address.Address, error) + // Recipient of payouts from channel + To() (address.Address, error) + + // Height at which the channel can be `Collected` + SettlingAt() (abi.ChainEpoch, error) + + // Amount successfully redeemed through the payment channel, paid out on `Collect()` + ToSend() (abi.TokenAmount, error) + + // Get total number of lanes + LaneCount() (uint64, error) + + // Iterate lane states + ForEachLaneState(cb func(idx uint64, dl LaneState) error) error + + GetState() interface{} +} + +// LaneState is an abstract copy of the state of a single lane +type LaneState interface { + Redeemed() (big.Int, error) + Nonce() (uint64, error) +} + +// DecodeSignedVoucher decodes base64 encoded signed voucher. 
+func DecodeSignedVoucher(s string) (*paychtypes.SignedVoucher, error) { + data, err := base64.RawURLEncoding.DecodeString(s) + if err != nil { + return nil, err + } + + var sv paychtypes.SignedVoucher + if err := ipldcbor.DecodeInto(data, &sv); err != nil { + return nil, err + } + + return &sv, nil +} + +func Message(version actorstypes.Version, from address.Address) MessageBuilder { + switch version { +{{range .versions}} + case actorstypes.Version{{.}}: + return message{{.}}{from} +{{end}} + default: + panic(fmt.Sprintf("unsupported actors version: %d", version)) + } +} + +type MessageBuilder interface { + Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) + Update(paych address.Address, voucher *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) + Settle(paych address.Address) (*types.Message, error) + Collect(paych address.Address) (*types.Message, error) +} + +func toV0SignedVoucher(sv paychtypes.SignedVoucher) paych0.SignedVoucher { + return paych0.SignedVoucher{ + ChannelAddr: sv.ChannelAddr, + TimeLockMin: sv.TimeLockMin, + TimeLockMax: sv.TimeLockMax, + SecretPreimage: sv.SecretHash, + Extra: (*paych0.ModVerifyParams)(sv.Extra), + Lane: sv.Lane, + Nonce: sv.Nonce, + Amount: sv.Amount, + MinSettleHeight: sv.MinSettleHeight, + Merges: nil, + Signature: sv.Signature, + } +} diff --git a/venus-shared/actors/builtin/paych/message.sep.go.template b/venus-shared/actors/builtin/paych/message.sep.go.template new file mode 100644 index 0000000000..62ac6108ff --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.sep.go.template @@ -0,0 +1,130 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + {{if (ge .v 8)}} + "fmt" + {{end}} + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + {{if (le .v 7)}} + builtin{{.v}} 
"github.com/filecoin-project/specs-actors{{.import}}actors/builtin" + init{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/init" + paych{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/paych" + {{else}} + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin{{.v}} "github.com/filecoin-project/go-state-types/builtin" + paych{{.v}} "github.com/filecoin-project/go-state-types/builtin/v{{.v}}/paych" + init{{.v}} "github.com/filecoin-project/go-state-types/builtin/v{{.v}}/init" + {{end}} + + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message{{.v}} struct{ from address.Address } + +func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + {{if (le .v 7)}} + actorCodeID := builtin{{.v}}.PaymentChannelActorCodeID + {{else}} + actorCodeID, ok := actors.GetActorCodeID(actorstypes.Version{{.v}}, "paymentchannel") + if !ok { + return nil, fmt.Errorf("error getting actor paymentchannel code id for actor version %d", {{.v}}) + } + {{end}} + + params, aerr := actors.SerializeParams(&paych{{.v}}.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init{{.v}}.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin{{.v}}.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message{{.v}}) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych{{.v}}.UpdateChannelStateParams{ + {{if (le .v 6)}} + Sv: toV0SignedVoucher(*sv), + {{else if (le .v 8)}} + Sv: *sv, + {{else}} + Sv: 
toV{{.v}}SignedVoucher(*sv), + {{end}} + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin{{.v}}.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +{{if (ge .v 9)}} + func toV{{.v}}SignedVoucher(sv paychtypes.SignedVoucher) paych{{.v}}.SignedVoucher { + merges := make([]paych{{.v}}.Merge, len(sv.Merges)) + for i := range sv.Merges { + merges[i] = paych{{.v}}.Merge{ + Lane: sv.Merges[i].Lane, + Nonce: sv.Merges[i].Nonce, + } + } + + return paych{{.v}}.SignedVoucher{ + ChannelAddr: sv.ChannelAddr, + TimeLockMin: sv.TimeLockMin, + TimeLockMax: sv.TimeLockMax, + SecretHash: sv.SecretHash, + Extra: (*paych{{.v}}.ModVerifyParams)(sv.Extra), + Lane: sv.Lane, + Nonce: sv.Nonce, + Amount: sv.Amount, + MinSettleHeight: sv.MinSettleHeight, + Merges: merges, + Signature: sv.Signature, + } + } +{{end}} + +func (m message{{.v}}) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin{{.v}}.MethodsPaych.Settle, + }, nil +} + +func (m message{{.v}}) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin{{.v}}.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/message.v0.go b/venus-shared/actors/builtin/paych/message.v0.go new file mode 100644 index 0000000000..367711d11d --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.v0.go @@ -0,0 +1,83 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + init0 
"github.com/filecoin-project/specs-actors/actors/builtin/init" + paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message0 struct{ from address.Address } + +func (m message0) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + actorCodeID := builtin0.PaymentChannelActorCodeID + + params, aerr := actors.SerializeParams(&paych0.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init0.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin0.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message0) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych0.UpdateChannelStateParams{ + + Sv: toV0SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin0.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message0) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin0.MethodsPaych.Settle, + }, nil +} + +func (m message0) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin0.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/message.v2.go 
b/venus-shared/actors/builtin/paych/message.v2.go new file mode 100644 index 0000000000..c95974bca9 --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.v2.go @@ -0,0 +1,83 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message2 struct{ from address.Address } + +func (m message2) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + actorCodeID := builtin2.PaymentChannelActorCodeID + + params, aerr := actors.SerializeParams(&paych2.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init2.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin2.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message2) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych2.UpdateChannelStateParams{ + + Sv: toV0SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin2.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m 
message2) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin2.MethodsPaych.Settle, + }, nil +} + +func (m message2) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin2.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/message.v3.go b/venus-shared/actors/builtin/paych/message.v3.go new file mode 100644 index 0000000000..e133db4a4f --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.v3.go @@ -0,0 +1,83 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init" + paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message3 struct{ from address.Address } + +func (m message3) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + actorCodeID := builtin3.PaymentChannelActorCodeID + + params, aerr := actors.SerializeParams(&paych3.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init3.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin3.MethodsInit.Exec, + Params: enc, + }, 
nil +} + +func (m message3) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych3.UpdateChannelStateParams{ + + Sv: toV0SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin3.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message3) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin3.MethodsPaych.Settle, + }, nil +} + +func (m message3) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin3.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/message.v4.go b/venus-shared/actors/builtin/paych/message.v4.go new file mode 100644 index 0000000000..cf442dd476 --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.v4.go @@ -0,0 +1,83 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + init4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/init" + paych4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/paych" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message4 struct{ from address.Address } + +func (m message4) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + 
actorCodeID := builtin4.PaymentChannelActorCodeID + + params, aerr := actors.SerializeParams(&paych4.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init4.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin4.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message4) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych4.UpdateChannelStateParams{ + + Sv: toV0SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin4.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message4) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin4.MethodsPaych.Settle, + }, nil +} + +func (m message4) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin4.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/message.v5.go b/venus-shared/actors/builtin/paych/message.v5.go new file mode 100644 index 0000000000..277c07533e --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.v5.go @@ -0,0 +1,83 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + init5 
"github.com/filecoin-project/specs-actors/v5/actors/builtin/init" + paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message5 struct{ from address.Address } + +func (m message5) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + actorCodeID := builtin5.PaymentChannelActorCodeID + + params, aerr := actors.SerializeParams(&paych5.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init5.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin5.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message5) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych5.UpdateChannelStateParams{ + + Sv: toV0SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin5.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message5) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin5.MethodsPaych.Settle, + }, nil +} + +func (m message5) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin5.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/message.v6.go 
b/venus-shared/actors/builtin/paych/message.v6.go new file mode 100644 index 0000000000..e203a22059 --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.v6.go @@ -0,0 +1,83 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + init6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/init" + paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message6 struct{ from address.Address } + +func (m message6) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + actorCodeID := builtin6.PaymentChannelActorCodeID + + params, aerr := actors.SerializeParams(&paych6.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init6.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin6.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message6) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych6.UpdateChannelStateParams{ + + Sv: toV0SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin6.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m 
message6) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin6.MethodsPaych.Settle, + }, nil +} + +func (m message6) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin6.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/message.v7.go b/venus-shared/actors/builtin/paych/message.v7.go new file mode 100644 index 0000000000..210e8001b3 --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.v7.go @@ -0,0 +1,83 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message7 struct{ from address.Address } + +func (m message7) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + actorCodeID := builtin7.PaymentChannelActorCodeID + + params, aerr := actors.SerializeParams(&paych7.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init7.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin7.MethodsInit.Exec, + Params: enc, + }, 
nil +} + +func (m message7) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych7.UpdateChannelStateParams{ + + Sv: *sv, + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message7) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.Settle, + }, nil +} + +func (m message7) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/message.v8.go b/venus-shared/actors/builtin/paych/message.v8.go new file mode 100644 index 0000000000..146c989c24 --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.v8.go @@ -0,0 +1,89 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin8 "github.com/filecoin-project/go-state-types/builtin" + init8 "github.com/filecoin-project/go-state-types/builtin/v8/init" + paych8 "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message8 struct{ from address.Address } + +func (m message8) Create(to address.Address, initialAmount 
abi.TokenAmount) (*types.Message, error) { + + actorCodeID, ok := actors.GetActorCodeID(actorstypes.Version8, "paymentchannel") + if !ok { + return nil, fmt.Errorf("error getting actor paymentchannel code id for actor version %d", 8) + } + + params, aerr := actors.SerializeParams(&paych8.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init8.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin8.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message8) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych8.UpdateChannelStateParams{ + + Sv: *sv, + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin8.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message8) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin8.MethodsPaych.Settle, + }, nil +} + +func (m message8) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin8.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/message.v9.go b/venus-shared/actors/builtin/paych/message.v9.go new file mode 100644 index 0000000000..a588ea34c8 --- /dev/null +++ b/venus-shared/actors/builtin/paych/message.v9.go @@ -0,0 +1,113 @@ +// FETCHED FROM LOTUS: builtin/paych/message.go.template + +package paych + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + 
+ paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin9 "github.com/filecoin-project/go-state-types/builtin" + init9 "github.com/filecoin-project/go-state-types/builtin/v9/init" + paych9 "github.com/filecoin-project/go-state-types/builtin/v9/paych" + + "github.com/filecoin-project/venus/venus-shared/actors" + init_ "github.com/filecoin-project/venus/venus-shared/actors/builtin/init" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +type message9 struct{ from address.Address } + +func (m message9) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + actorCodeID, ok := actors.GetActorCodeID(actorstypes.Version9, "paymentchannel") + if !ok { + return nil, fmt.Errorf("error getting actor paymentchannel code id for actor version %d", 9) + } + + params, aerr := actors.SerializeParams(&paych9.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init9.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin9.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message9) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych9.UpdateChannelStateParams{ + + Sv: toV9SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin9.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func toV9SignedVoucher(sv paychtypes.SignedVoucher) paych9.SignedVoucher { + merges := make([]paych9.Merge, len(sv.Merges)) + for i := range sv.Merges { + merges[i] = paych9.Merge{ + 
Lane: sv.Merges[i].Lane, + Nonce: sv.Merges[i].Nonce, + } + } + + return paych9.SignedVoucher{ + ChannelAddr: sv.ChannelAddr, + TimeLockMin: sv.TimeLockMin, + TimeLockMax: sv.TimeLockMax, + SecretHash: sv.SecretHash, + Extra: (*paych9.ModVerifyParams)(sv.Extra), + Lane: sv.Lane, + Nonce: sv.Nonce, + Amount: sv.Amount, + MinSettleHeight: sv.MinSettleHeight, + Merges: merges, + Signature: sv.Signature, + } +} + +func (m message9) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin9.MethodsPaych.Settle, + }, nil +} + +func (m message9) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin9.MethodsPaych.Collect, + }, nil +} diff --git a/venus-shared/actors/builtin/paych/mock/mock.go b/venus-shared/actors/builtin/paych/mock/mock.go new file mode 100644 index 0000000000..087c34e4a4 --- /dev/null +++ b/venus-shared/actors/builtin/paych/mock/mock.go @@ -0,0 +1,94 @@ +// FETCHED FROM LOTUS: builtin/paych/mock/mock.go + +package mock + +import ( + "io" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/paych" +) + +type mockState struct { + from address.Address + to address.Address + settlingAt abi.ChainEpoch + toSend abi.TokenAmount + lanes map[uint64]paych.LaneState +} + +func (ms *mockState) GetState() interface{} { + panic("implement me") +} + +type mockLaneState struct { + redeemed big.Int + nonce uint64 +} + +// NewMockPayChState constructs a state for a payment channel with the set fixed values +// that satisfies the paych.State interface. 
+func NewMockPayChState(from address.Address, + to address.Address, + settlingAt abi.ChainEpoch, + lanes map[uint64]paych.LaneState, +) paych.State { + return &mockState{from: from, to: to, settlingAt: settlingAt, toSend: big.NewInt(0), lanes: lanes} +} + +// NewMockLaneState constructs a state for a payment channel lane with the set fixed values +// that satisfies the paych.LaneState interface. Useful for populating lanes when +// calling NewMockPayChState +func NewMockLaneState(redeemed big.Int, nonce uint64) paych.LaneState { + return &mockLaneState{redeemed, nonce} +} + +func (ms *mockState) MarshalCBOR(io.Writer) error { + panic("not implemented") +} + +// Channel owner, who has funded the actor +func (ms *mockState) From() (address.Address, error) { + return ms.from, nil +} + +// Recipient of payouts from channel +func (ms *mockState) To() (address.Address, error) { + return ms.to, nil +} + +// Height at which the channel can be `Collected` +func (ms *mockState) SettlingAt() (abi.ChainEpoch, error) { + return ms.settlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (ms *mockState) ToSend() (abi.TokenAmount, error) { + return ms.toSend, nil +} + +// Get total number of lanes +func (ms *mockState) LaneCount() (uint64, error) { + return uint64(len(ms.lanes)), nil +} + +// Iterate lane states +func (ms *mockState) ForEachLaneState(cb func(idx uint64, dl paych.LaneState) error) error { + var lastErr error + for lane, state := range ms.lanes { + if err := cb(lane, state); err != nil { + lastErr = err + } + } + return lastErr +} + +func (mls *mockLaneState) Redeemed() (big.Int, error) { + return mls.redeemed, nil +} + +func (mls *mockLaneState) Nonce() (uint64, error) { + return mls.nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.sep.go.template b/venus-shared/actors/builtin/paych/state.sep.go.template new file mode 100644 index 0000000000..2419de3b4a --- /dev/null +++ 
b/venus-shared/actors/builtin/paych/state.sep.go.template @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + +{{if (le .v 7)}} + paych{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/paych" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +{{else}} + paych{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}paych" + adt{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}util/adt" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + out.State = paych{{.v}}.State{} + return &out, nil +} + +type state{{.v}} struct { + paych{{.v}}.State + store adt.Store + lsAmt *adt{{.v}}.Array +} + +// Channel owner, who has funded the actor +func (s *state{{.v}}) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state{{.v}}) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state{{.v}}) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state{{.v}}) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state{{.v}}) getOrLoadLsAmt() (*adt{{.v}}.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from 
the chain + lsamt, err := adt{{.v}}.AsArray(s.store, s.State.LaneStates{{if (ge .v 3)}}, paych{{.v}}.LaneStatesAmtBitwidth{{end}}) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state{{.v}}) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state{{.v}}) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. + var ls paych{{.v}}.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState{{.v}}{ls}) + }) +} + +type laneState{{.v}} struct { + paych{{.v}}.LaneState +} + +func (ls *laneState{{.v}}) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState{{.v}}) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.v0.go b/venus-shared/actors/builtin/paych/state.v0.go new file mode 100644 index 0000000000..0ce32fa29f --- /dev/null +++ b/venus-shared/actors/builtin/paych/state.v0.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) 
+ +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = paych0.State{} + return &out, nil +} + +type state0 struct { + paych0.State + store adt.Store + lsAmt *adt0.Array +} + +// Channel owner, who has funded the actor +func (s *state0) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state0) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state0) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state0) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state0) getOrLoadLsAmt() (*adt0.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt0.AsArray(s.store, s.State.LaneStates) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state0) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state0) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state0) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych0.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState0{ls}) + }) +} + +type laneState0 struct { + paych0.LaneState +} + +func (ls *laneState0) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState0) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.v2.go b/venus-shared/actors/builtin/paych/state.v2.go new file mode 100644 index 0000000000..5591b1bf24 --- /dev/null +++ b/venus-shared/actors/builtin/paych/state.v2.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = paych2.State{} + return &out, nil +} + +type state2 struct { + paych2.State + store adt.Store + lsAmt *adt2.Array +} + +// Channel owner, who has funded the actor +func (s *state2) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state2) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state2) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func 
(s *state2) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state2) getOrLoadLsAmt() (*adt2.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt2.AsArray(s.store, s.State.LaneStates) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state2) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state2) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state2) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych2.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState2{ls}) + }) +} + +type laneState2 struct { + paych2.LaneState +} + +func (ls *laneState2) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState2) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.v3.go b/venus-shared/actors/builtin/paych/state.v3.go new file mode 100644 index 0000000000..6bf17f1d10 --- /dev/null +++ b/venus-shared/actors/builtin/paych/state.v3.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = paych3.State{} + return &out, nil +} + +type state3 struct { + paych3.State + store adt.Store + lsAmt *adt3.Array +} + +// Channel owner, who has funded the actor +func (s *state3) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state3) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state3) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func 
(s *state3) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state3) getOrLoadLsAmt() (*adt3.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt3.AsArray(s.store, s.State.LaneStates, paych3.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state3) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state3) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych3.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState3{ls}) + }) +} + +type laneState3 struct { + paych3.LaneState +} + +func (ls *laneState3) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState3) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.v4.go b/venus-shared/actors/builtin/paych/state.v4.go new file mode 100644 index 0000000000..915ca202ef --- /dev/null +++ b/venus-shared/actors/builtin/paych/state.v4.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + paych4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/paych" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = paych4.State{} + return &out, nil +} + +type state4 struct { + paych4.State + store adt.Store + lsAmt *adt4.Array +} + +// Channel owner, who has funded the actor +func (s *state4) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state4) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state4) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func 
(s *state4) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state4) getOrLoadLsAmt() (*adt4.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt4.AsArray(s.store, s.State.LaneStates, paych4.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state4) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state4) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych4.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState4{ls}) + }) +} + +type laneState4 struct { + paych4.LaneState +} + +func (ls *laneState4) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState4) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.v5.go b/venus-shared/actors/builtin/paych/state.v5.go new file mode 100644 index 0000000000..9448fde2da --- /dev/null +++ b/venus-shared/actors/builtin/paych/state.v5.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = paych5.State{} + return &out, nil +} + +type state5 struct { + paych5.State + store adt.Store + lsAmt *adt5.Array +} + +// Channel owner, who has funded the actor +func (s *state5) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state5) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state5) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func 
(s *state5) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state5) getOrLoadLsAmt() (*adt5.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt5.AsArray(s.store, s.State.LaneStates, paych5.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state5) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state5) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych5.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState5{ls}) + }) +} + +type laneState5 struct { + paych5.LaneState +} + +func (ls *laneState5) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState5) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.v6.go b/venus-shared/actors/builtin/paych/state.v6.go new file mode 100644 index 0000000000..fb9448a3af --- /dev/null +++ b/venus-shared/actors/builtin/paych/state.v6.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych" + adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt" +) + +var _ State = (*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store) (State, error) { + out := state6{store: store} + out.State = paych6.State{} + return &out, nil +} + +type state6 struct { + paych6.State + store adt.Store + lsAmt *adt6.Array +} + +// Channel owner, who has funded the actor +func (s *state6) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state6) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state6) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func 
(s *state6) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state6) getOrLoadLsAmt() (*adt6.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt6.AsArray(s.store, s.State.LaneStates, paych6.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state6) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state6) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state6) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych6.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState6{ls}) + }) +} + +type laneState6 struct { + paych6.LaneState +} + +func (ls *laneState6) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState6) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.v7.go b/venus-shared/actors/builtin/paych/state.v7.go new file mode 100644 index 0000000000..eca0c46fb6 --- /dev/null +++ b/venus-shared/actors/builtin/paych/state.v7.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = paych7.State{} + return &out, nil +} + +type state7 struct { + paych7.State + store adt.Store + lsAmt *adt7.Array +} + +// Channel owner, who has funded the actor +func (s *state7) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state7) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state7) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func 
(s *state7) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state7) getOrLoadLsAmt() (*adt7.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt7.AsArray(s.store, s.State.LaneStates, paych7.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state7) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state7) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych7.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState7{ls}) + }) +} + +type laneState7 struct { + paych7.LaneState +} + +func (ls *laneState7) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState7) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.v8.go b/venus-shared/actors/builtin/paych/state.v8.go new file mode 100644 index 0000000000..9f0151761b --- /dev/null +++ b/venus-shared/actors/builtin/paych/state.v8.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + paych8 "github.com/filecoin-project/go-state-types/builtin/v8/paych" + adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" +) + +var _ State = (*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store) (State, error) { + out := state8{store: store} + out.State = paych8.State{} + return &out, nil +} + +type state8 struct { + paych8.State + store adt.Store + lsAmt *adt8.Array +} + +// Channel owner, who has funded the actor +func (s *state8) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state8) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state8) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s 
*state8) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state8) getOrLoadLsAmt() (*adt8.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt8.AsArray(s.store, s.State.LaneStates, paych8.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state8) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state8) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state8) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych8.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState8{ls}) + }) +} + +type laneState8 struct { + paych8.LaneState +} + +func (ls *laneState8) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState8) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/paych/state.v9.go b/venus-shared/actors/builtin/paych/state.v9.go new file mode 100644 index 0000000000..b34c7c5022 --- /dev/null +++ b/venus-shared/actors/builtin/paych/state.v9.go @@ -0,0 +1,116 @@ +// FETCHED FROM LOTUS: builtin/paych/state.go.template + +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + paych9 "github.com/filecoin-project/go-state-types/builtin/v9/paych" + adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store) (State, error) { + out := state9{store: store} + out.State = paych9.State{} + return &out, nil +} + +type state9 struct { + paych9.State + store adt.Store + lsAmt *adt9.Array +} + +// Channel owner, who has funded the actor +func (s *state9) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state9) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state9) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s 
*state9) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state9) getOrLoadLsAmt() (*adt9.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt9.AsArray(s.store, s.State.LaneStates, paych9.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state9) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state9) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state9) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych9.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState9{ls}) + }) +} + +type laneState9 struct { + paych9.LaneState +} + +func (ls *laneState9) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState9) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/venus-shared/actors/builtin/power/actor.go b/venus-shared/actors/builtin/power/actor.go new file mode 100644 index 0000000000..6608884d59 --- /dev/null +++ b/venus-shared/actors/builtin/power/actor.go @@ -0,0 +1,164 @@ +// FETCHED FROM LOTUS: builtin/power/actor.go.template + +package power + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/venus-shared/actors" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + types "github.com/filecoin-project/venus/venus-shared/internal" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" +) + +var ( + Address = builtin9.StoragePowerActorAddr + Methods = builtin9.MethodsPower +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok 
:= actors.GetActorMetaByCode(act.Code); ok { + if name != actors.PowerKey { + return nil, fmt.Errorf("actor code is not power: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.StoragePowerActorCodeID: + return load0(store, act.Head) + + case builtin2.StoragePowerActorCodeID: + return load2(store, act.Head) + + case builtin3.StoragePowerActorCodeID: + return load3(store, act.Head) + + case builtin4.StoragePowerActorCodeID: + return load4(store, act.Head) + + case builtin5.StoragePowerActorCodeID: + return load5(store, act.Head) + + case builtin6.StoragePowerActorCodeID: + return load6(store, act.Head) + + case builtin7.StoragePowerActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version) (State, error) { + switch av { + + case actorstypes.Version0: + return make0(store) + + case actorstypes.Version2: + return make2(store) + + case actorstypes.Version3: + return make3(store) + + case actorstypes.Version4: + return make4(store) + + case actorstypes.Version5: + return make5(store) + + case actorstypes.Version6: + return make6(store) + + case actorstypes.Version7: + return make7(store) + + case actorstypes.Version8: + return make8(store) + + case actorstypes.Version9: + return make9(store) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + TotalLocked() (abi.TokenAmount, error) + TotalPower() (Claim, error) + TotalCommitted() (Claim, error) + TotalPowerSmoothed() (builtin.FilterEstimate, error) + GetState() interface{} + + // MinerCounts returns the number of miners. Participating is the number + // with power above the minimum miner threshold. 
+ MinerCounts() (participating, total uint64, err error) + MinerPower(address.Address) (Claim, bool, error) + MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error) + ListAllMiners() ([]address.Address, error) + ForEachClaim(func(miner address.Address, claim Claim) error) error + ClaimsChanged(State) (bool, error) + + // Testing or genesis setup only + SetTotalQualityAdjPower(abi.StoragePower) error + SetTotalRawBytePower(abi.StoragePower) error + SetThisEpochQualityAdjPower(abi.StoragePower) error + SetThisEpochRawBytePower(abi.StoragePower) error + + // Diff helpers. Used by Diff* functions internally. + claims() (adt.Map, error) + decodeClaim(*cbg.Deferred) (Claim, error) +} + +type Claim struct { + // Sum of raw byte power for a miner's sectors. + RawBytePower abi.StoragePower + + // Sum of quality adjusted power for a miner's sectors. + QualityAdjPower abi.StoragePower +} + +func AddClaims(a Claim, b Claim) Claim { + return Claim{ + RawBytePower: big.Add(a.RawBytePower, b.RawBytePower), + QualityAdjPower: big.Add(a.QualityAdjPower, b.QualityAdjPower), + } +} diff --git a/venus-shared/actors/builtin/power/actor.go.template b/venus-shared/actors/builtin/power/actor.go.template new file mode 100644 index 0000000000..6cbbe997ab --- /dev/null +++ b/venus-shared/actors/builtin/power/actor.go.template @@ -0,0 +1,113 @@ +// FETCHED FROM LOTUS: builtin/power/actor.go.template + +package power + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/venus-shared/actors" + cbg "github.com/whyrusleeping/cbor-gen" + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + types 
"github.com/filecoin-project/venus/venus-shared/internal" + +{{range .versions}} + {{if (le . 7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin" +) + +var ( + Address = builtin{{.latestVersion}}.StoragePowerActorAddr + Methods = builtin{{.latestVersion}}.MethodsPower +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.PowerKey { + return nil, fmt.Errorf("actor code is not power: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 7)}} + case builtin{{.}}.StoragePowerActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + TotalLocked() (abi.TokenAmount, error) + TotalPower() (Claim, error) + TotalCommitted() (Claim, error) + TotalPowerSmoothed() (builtin.FilterEstimate, error) + GetState() interface{} + + // MinerCounts returns the number of miners. Participating is the number + // with power above the minimum miner threshold. 
+ MinerCounts() (participating, total uint64, err error) + MinerPower(address.Address) (Claim, bool, error) + MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error) + ListAllMiners() ([]address.Address, error) + ForEachClaim(func(miner address.Address, claim Claim) error) error + ClaimsChanged(State) (bool, error) + + // Testing or genesis setup only + SetTotalQualityAdjPower(abi.StoragePower) error + SetTotalRawBytePower(abi.StoragePower) error + SetThisEpochQualityAdjPower(abi.StoragePower) error + SetThisEpochRawBytePower(abi.StoragePower) error + + // Diff helpers. Used by Diff* functions internally. + claims() (adt.Map, error) + decodeClaim(*cbg.Deferred) (Claim, error) +} + +type Claim struct { + // Sum of raw byte power for a miner's sectors. + RawBytePower abi.StoragePower + + // Sum of quality adjusted power for a miner's sectors. + QualityAdjPower abi.StoragePower +} + +func AddClaims(a Claim, b Claim) Claim { + return Claim{ + RawBytePower: big.Add(a.RawBytePower, b.RawBytePower), + QualityAdjPower: big.Add(a.QualityAdjPower, b.QualityAdjPower), + } +} diff --git a/venus-shared/actors/builtin/power/diff.go b/venus-shared/actors/builtin/power/diff.go new file mode 100644 index 0000000000..807bd81d64 --- /dev/null +++ b/venus-shared/actors/builtin/power/diff.go @@ -0,0 +1,120 @@ +// FETCHED FROM LOTUS: builtin/power/diff.go + +package power + +import ( + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" +) + +type ClaimChanges struct { + Added []ClaimInfo + Modified []ClaimModification + Removed []ClaimInfo +} + +type ClaimModification struct { + Miner address.Address + From Claim + To Claim +} + +type ClaimInfo struct { + Miner address.Address + Claim Claim +} + +func DiffClaims(pre, cur State) (*ClaimChanges, error) { + results := new(ClaimChanges) + + prec, err := pre.claims() + if err != 
nil { + return nil, err + } + + curc, err := cur.claims() + if err != nil { + return nil, err + } + + if err := adt.DiffAdtMap(prec, curc, &claimDiffer{results, pre, cur}); err != nil { + return nil, err + } + + return results, nil +} + +type claimDiffer struct { + Results *ClaimChanges + pre, after State +} + +func (c *claimDiffer) AsKey(key string) (abi.Keyer, error) { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return nil, err + } + return abi.AddrKey(addr), nil +} + +func (c *claimDiffer) Add(key string, val *cbg.Deferred) error { + ci, err := c.after.decodeClaim(val) + if err != nil { + return err + } + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + c.Results.Added = append(c.Results.Added, ClaimInfo{ + Miner: addr, + Claim: ci, + }) + return nil +} + +func (c *claimDiffer) Modify(key string, from, to *cbg.Deferred) error { + ciFrom, err := c.pre.decodeClaim(from) + if err != nil { + return err + } + + ciTo, err := c.after.decodeClaim(to) + if err != nil { + return err + } + + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + + if ciFrom != ciTo { + c.Results.Modified = append(c.Results.Modified, ClaimModification{ + Miner: addr, + From: ciFrom, + To: ciTo, + }) + } + return nil +} + +func (c *claimDiffer) Remove(key string, val *cbg.Deferred) error { + ci, err := c.after.decodeClaim(val) + if err != nil { + return err + } + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + c.Results.Removed = append(c.Results.Removed, ClaimInfo{ + Miner: addr, + Claim: ci, + }) + return nil +} diff --git a/venus-shared/actors/builtin/power/state.sep.go.template b/venus-shared/actors/builtin/power/state.sep.go.template new file mode 100644 index 0000000000..de95355ff1 --- /dev/null +++ b/venus-shared/actors/builtin/power/state.sep.go.template @@ -0,0 +1,209 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + 
"bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + +{{if (le .v 7)}} + {{if (ge .v 3)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" + {{end}} + power{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/power" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +{{else}} + builtin{{.v}} "github.com/filecoin-project/go-state-types/builtin" + power{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}power" + adt{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}util/adt" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + emm, err := adt{{.v}}.MakeEmptyMultimap(store).Root() + if err != nil { + return nil, err + } + + out.State = *power{{.v}}.ConstructState(em, emm) + {{else}} + s, err := power{{.v}}.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + + return &out, nil +} + +type state{{.v}} struct { + power{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state{{.v}}) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. 
Includes miners below the minimum threshold. +func (s *state{{.v}}) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state{{.v}}) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power{{.v}}.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state{{.v}}) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state{{.v}}) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate({{if (le .v 1)}}*{{end}}s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state{{.v}}) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state{{.v}}) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state{{.v}}) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power{{.v}}.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state{{.v}}) 
ClaimsChanged(other State) (bool, error) { + other{{.v}}, ok := other.(*state{{.v}}) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other{{.v}}.State.Claims), nil +} + +func (s *state{{.v}}) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state{{.v}}) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state{{.v}}) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state{{.v}}) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + +func (s *state{{.v}}) claims() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.Claims{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power{{.v}}.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV{{.v}}Claim(ci), nil +} + +func fromV{{.v}}Claim(v{{.v}} power{{.v}}.Claim) Claim { + return Claim{ + RawBytePower: v{{.v}}.RawBytePower, + QualityAdjPower: v{{.v}}.QualityAdjPower, + } +} diff --git a/venus-shared/actors/builtin/power/state.v0.go b/venus-shared/actors/builtin/power/state.v0.go new file mode 100644 index 0000000000..be41a98a4c --- /dev/null +++ b/venus-shared/actors/builtin/power/state.v0.go @@ -0,0 +1,192 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + 
"github.com/filecoin-project/venus/venus-shared/actors/builtin" + + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + emm, err := adt0.MakeEmptyMultimap(store).Root() + if err != nil { + return nil, err + } + + out.State = *power0.ConstructState(em, emm) + + return &out, nil +} + +type state0 struct { + power0.State + store adt.Store +} + +func (s *state0) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state0) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state0) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state0) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power0.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state0) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state0) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(*s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state0) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state0) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state0) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power0.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state0) ClaimsChanged(other State) (bool, error) { + other0, ok := other.(*state0) + if !ok { + // treat an upgrade as a change, 
always + return true, nil + } + return !s.State.Claims.Equals(other0.State.Claims), nil +} + +func (s *state0) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state0) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state0) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state0) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state0) GetState() interface{} { + return &s.State +} + +func (s *state0) claims() (adt.Map, error) { + return adt0.AsMap(s.store, s.Claims) +} + +func (s *state0) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power0.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV0Claim(ci), nil +} + +func fromV0Claim(v0 power0.Claim) Claim { + return Claim{ + RawBytePower: v0.RawBytePower, + QualityAdjPower: v0.QualityAdjPower, + } +} diff --git a/venus-shared/actors/builtin/power/state.v2.go b/venus-shared/actors/builtin/power/state.v2.go new file mode 100644 index 0000000000..0b463e11b7 --- /dev/null +++ b/venus-shared/actors/builtin/power/state.v2.go @@ -0,0 +1,192 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: 
store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + out := state2{store: store} + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + emm, err := adt2.MakeEmptyMultimap(store).Root() + if err != nil { + return nil, err + } + + out.State = *power2.ConstructState(em, emm) + + return &out, nil +} + +type state2 struct { + power2.State + store adt.Store +} + +func (s *state2) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state2) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state2) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state2) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power2.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state2) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state2) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state2) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state2) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + 
+ var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state2) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power2.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state2) ClaimsChanged(other State) (bool, error) { + other2, ok := other.(*state2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other2.State.Claims), nil +} + +func (s *state2) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state2) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state2) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state2) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state2) GetState() interface{} { + return &s.State +} + +func (s *state2) claims() (adt.Map, error) { + return adt2.AsMap(s.store, s.Claims) +} + +func (s *state2) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power2.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV2Claim(ci), nil +} + +func fromV2Claim(v2 power2.Claim) Claim { + return Claim{ + RawBytePower: v2.RawBytePower, + QualityAdjPower: v2.QualityAdjPower, + } +} diff --git 
a/venus-shared/actors/builtin/power/state.v3.go b/venus-shared/actors/builtin/power/state.v3.go new file mode 100644 index 0000000000..b89714be7c --- /dev/null +++ b/venus-shared/actors/builtin/power/state.v3.go @@ -0,0 +1,189 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + power3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/power" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + + s, err := power3.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state3 struct { + power3.State + store adt.Store +} + +func (s *state3) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state3) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state3) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state3) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power3.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state3) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state3) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state3) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state3) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state3) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power3.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state3) ClaimsChanged(other State) (bool, error) { + other3, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always 
+ return true, nil + } + return !s.State.Claims.Equals(other3.State.Claims), nil +} + +func (s *state3) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state3) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state3) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state3) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} + +func (s *state3) claims() (adt.Map, error) { + return adt3.AsMap(s.store, s.Claims, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power3.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV3Claim(ci), nil +} + +func fromV3Claim(v3 power3.Claim) Claim { + return Claim{ + RawBytePower: v3.RawBytePower, + QualityAdjPower: v3.QualityAdjPower, + } +} diff --git a/venus-shared/actors/builtin/power/state.v4.go b/venus-shared/actors/builtin/power/state.v4.go new file mode 100644 index 0000000000..f2a2c83d31 --- /dev/null +++ b/venus-shared/actors/builtin/power/state.v4.go @@ -0,0 +1,189 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = 
(*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + + s, err := power4.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state4 struct { + power4.State + store adt.Store +} + +func (s *state4) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state4) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state4) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state4) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power4.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state4) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state4) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state4) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state4) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners 
[]address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state4) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power4.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state4) ClaimsChanged(other State) (bool, error) { + other4, ok := other.(*state4) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other4.State.Claims), nil +} + +func (s *state4) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state4) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state4) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state4) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} + +func (s *state4) claims() (adt.Map, error) { + return adt4.AsMap(s.store, s.Claims, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power4.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV4Claim(ci), nil +} + +func fromV4Claim(v4 power4.Claim) Claim { + return Claim{ + RawBytePower: v4.RawBytePower, + QualityAdjPower: v4.QualityAdjPower, + } +} diff --git 
a/venus-shared/actors/builtin/power/state.v5.go b/venus-shared/actors/builtin/power/state.v5.go new file mode 100644 index 0000000000..45dd7321ce --- /dev/null +++ b/venus-shared/actors/builtin/power/state.v5.go @@ -0,0 +1,189 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + power5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/power" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + + s, err := power5.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + power5.State + store adt.Store +} + +func (s *state5) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state5) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state5) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state5) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power5.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state5) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state5) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state5) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state5) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state5) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power5.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state5) ClaimsChanged(other State) (bool, error) { + other5, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always 
+ return true, nil + } + return !s.State.Claims.Equals(other5.State.Claims), nil +} + +func (s *state5) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state5) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state5) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state5) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} + +func (s *state5) claims() (adt.Map, error) { + return adt5.AsMap(s.store, s.Claims, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power5.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV5Claim(ci), nil +} + +func fromV5Claim(v5 power5.Claim) Claim { + return Claim{ + RawBytePower: v5.RawBytePower, + QualityAdjPower: v5.QualityAdjPower, + } +} diff --git a/venus-shared/actors/builtin/power/state.v6.go b/venus-shared/actors/builtin/power/state.v6.go new file mode 100644 index 0000000000..73bd923766 --- /dev/null +++ b/venus-shared/actors/builtin/power/state.v6.go @@ -0,0 +1,189 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" + adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt" +) + +var _ State = 
(*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store) (State, error) { + out := state6{store: store} + + s, err := power6.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state6 struct { + power6.State + store adt.Store +} + +func (s *state6) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state6) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state6) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state6) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power6.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state6) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state6) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state6) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state6) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners 
[]address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state6) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power6.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state6) ClaimsChanged(other State) (bool, error) { + other6, ok := other.(*state6) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other6.State.Claims), nil +} + +func (s *state6) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state6) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state6) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state6) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state6) GetState() interface{} { + return &s.State +} + +func (s *state6) claims() (adt.Map, error) { + return adt6.AsMap(s.store, s.Claims, builtin6.DefaultHamtBitwidth) +} + +func (s *state6) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power6.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV6Claim(ci), nil +} + +func fromV6Claim(v6 power6.Claim) Claim { + return Claim{ + RawBytePower: v6.RawBytePower, + QualityAdjPower: v6.QualityAdjPower, + } +} diff --git 
a/venus-shared/actors/builtin/power/state.v7.go b/venus-shared/actors/builtin/power/state.v7.go new file mode 100644 index 0000000000..5047214760 --- /dev/null +++ b/venus-shared/actors/builtin/power/state.v7.go @@ -0,0 +1,189 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + power7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/power" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + + s, err := power7.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + power7.State + store adt.Store +} + +func (s *state7) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state7) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state7) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state7) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power7.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state7) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state7) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state7) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state7) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state7) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power7.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state7) ClaimsChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always 
+ return true, nil + } + return !s.State.Claims.Equals(other7.State.Claims), nil +} + +func (s *state7) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state7) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state7) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state7) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +func (s *state7) claims() (adt.Map, error) { + return adt7.AsMap(s.store, s.Claims, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power7.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV7Claim(ci), nil +} + +func fromV7Claim(v7 power7.Claim) Claim { + return Claim{ + RawBytePower: v7.RawBytePower, + QualityAdjPower: v7.QualityAdjPower, + } +} diff --git a/venus-shared/actors/builtin/power/state.v8.go b/venus-shared/actors/builtin/power/state.v8.go new file mode 100644 index 0000000000..9a26ecb8f6 --- /dev/null +++ b/venus-shared/actors/builtin/power/state.v8.go @@ -0,0 +1,188 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + builtin8 "github.com/filecoin-project/go-state-types/builtin" + power8 "github.com/filecoin-project/go-state-types/builtin/v8/power" + adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" +) + +var _ State = (*state8)(nil) 
+ +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store) (State, error) { + out := state8{store: store} + + s, err := power8.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state8 struct { + power8.State + store adt.Store +} + +func (s *state8) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state8) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state8) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state8) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power8.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state8) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state8) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state8) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state8) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = 
claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state8) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power8.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state8) ClaimsChanged(other State) (bool, error) { + other8, ok := other.(*state8) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other8.State.Claims), nil +} + +func (s *state8) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state8) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state8) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state8) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state8) GetState() interface{} { + return &s.State +} + +func (s *state8) claims() (adt.Map, error) { + return adt8.AsMap(s.store, s.Claims, builtin8.DefaultHamtBitwidth) +} + +func (s *state8) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power8.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV8Claim(ci), nil +} + +func fromV8Claim(v8 power8.Claim) Claim { + return Claim{ + RawBytePower: v8.RawBytePower, + QualityAdjPower: v8.QualityAdjPower, + } +} diff --git a/venus-shared/actors/builtin/power/state.v9.go 
b/venus-shared/actors/builtin/power/state.v9.go new file mode 100644 index 0000000000..0701baf0cb --- /dev/null +++ b/venus-shared/actors/builtin/power/state.v9.go @@ -0,0 +1,188 @@ +// FETCHED FROM LOTUS: builtin/power/state.go.template + +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" + power9 "github.com/filecoin-project/go-state-types/builtin/v9/power" + adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store) (State, error) { + out := state9{store: store} + + s, err := power9.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state9 struct { + power9.State + store adt.Store +} + +func (s *state9) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state9) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state9) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state9) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power9.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state9) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state9) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state9) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state9) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state9) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power9.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state9) ClaimsChanged(other State) (bool, error) { + other9, ok := other.(*state9) + if !ok { + // treat an upgrade as a change, always 
+ return true, nil + } + return !s.State.Claims.Equals(other9.State.Claims), nil +} + +func (s *state9) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state9) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state9) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state9) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state9) GetState() interface{} { + return &s.State +} + +func (s *state9) claims() (adt.Map, error) { + return adt9.AsMap(s.store, s.Claims, builtin9.DefaultHamtBitwidth) +} + +func (s *state9) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power9.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV9Claim(ci), nil +} + +func fromV9Claim(v9 power9.Claim) Claim { + return Claim{ + RawBytePower: v9.RawBytePower, + QualityAdjPower: v9.QualityAdjPower, + } +} diff --git a/venus-shared/actors/builtin/registry.go b/venus-shared/actors/builtin/registry.go new file mode 100644 index 0000000000..e80db3ef39 --- /dev/null +++ b/venus-shared/actors/builtin/registry.go @@ -0,0 +1,267 @@ +// FETCHED FROM LOTUS: builtin/registry.go.template + +package builtin + +import ( + "reflect" + "runtime" + "strings" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/builtin" + + account8 "github.com/filecoin-project/go-state-types/builtin/v8/account" + cron8 "github.com/filecoin-project/go-state-types/builtin/v8/cron" + _init8 "github.com/filecoin-project/go-state-types/builtin/v8/init" + market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" + miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner" + multisig8 
"github.com/filecoin-project/go-state-types/builtin/v8/multisig" + paych8 "github.com/filecoin-project/go-state-types/builtin/v8/paych" + power8 "github.com/filecoin-project/go-state-types/builtin/v8/power" + reward8 "github.com/filecoin-project/go-state-types/builtin/v8/reward" + system8 "github.com/filecoin-project/go-state-types/builtin/v8/system" + verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg" + + account9 "github.com/filecoin-project/go-state-types/builtin/v9/account" + cron9 "github.com/filecoin-project/go-state-types/builtin/v9/cron" + _init9 "github.com/filecoin-project/go-state-types/builtin/v9/init" + market9 "github.com/filecoin-project/go-state-types/builtin/v9/market" + miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner" + multisig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig" + paych9 "github.com/filecoin-project/go-state-types/builtin/v9/paych" + power9 "github.com/filecoin-project/go-state-types/builtin/v9/power" + reward9 "github.com/filecoin-project/go-state-types/builtin/v9/reward" + system9 "github.com/filecoin-project/go-state-types/builtin/v9/system" + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + + datacap9 "github.com/filecoin-project/go-state-types/builtin/v9/datacap" + + "github.com/filecoin-project/go-state-types/cbor" + rtt "github.com/filecoin-project/go-state-types/rt" + "github.com/filecoin-project/venus/venus-shared/actors" +) + +type RegistryEntry struct { + state cbor.Er + code cid.Cid + methods map[uint64]builtin.MethodMeta +} + +func (r RegistryEntry) State() cbor.Er { + return r.state +} + +func (r RegistryEntry) Exports() map[uint64]builtin.MethodMeta { + return r.methods +} + +func (r RegistryEntry) Code() cid.Cid { + return r.code +} + +func MakeRegistryLegacy(actors []rtt.VMActor) []RegistryEntry { + registry := make([]RegistryEntry, 0) + + for _, actor := range actors { + methodMap := make(map[uint64]builtin.MethodMeta) + for 
methodNum, method := range actor.Exports() { + if method != nil { + methodMap[uint64(methodNum)] = makeMethodMeta(method) + } + } + registry = append(registry, RegistryEntry{ + code: actor.Code(), + methods: methodMap, + state: actor.State(), + }) + } + + return registry +} + +func makeMethodMeta(method interface{}) builtin.MethodMeta { + ev := reflect.ValueOf(method) + // Extract the method names using reflection. These + // method names always match the field names in the + // `builtin.Method*` structs (tested in the specs-actors + // tests). + fnName := runtime.FuncForPC(ev.Pointer()).Name() + fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm") + return builtin.MethodMeta{ + Name: fnName, + Method: method, + } +} + +func MakeRegistry(av actorstypes.Version) []RegistryEntry { + if av < actorstypes.Version8 { + panic("expected version v8 and up only, use specs-actors for v0-7") + } + registry := make([]RegistryEntry, 0) + + codeIDs, err := actors.GetActorCodeIDs(av) + if err != nil { + panic(err) + } + + switch av { + + case actorstypes.Version8: + for key, codeID := range codeIDs { + switch key { + case actors.AccountKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: account8.Methods, + state: new(account8.State), + }) + case actors.CronKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: cron8.Methods, + state: new(cron8.State), + }) + case actors.InitKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: _init8.Methods, + state: new(_init8.State), + }) + case actors.MarketKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: market8.Methods, + state: new(market8.State), + }) + case actors.MinerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: miner8.Methods, + state: new(miner8.State), + }) + case actors.MultisigKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: multisig8.Methods, + 
state: new(multisig8.State), + }) + case actors.PaychKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: paych8.Methods, + state: new(paych8.State), + }) + case actors.PowerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: power8.Methods, + state: new(power8.State), + }) + case actors.RewardKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: reward8.Methods, + state: new(reward8.State), + }) + case actors.SystemKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: system8.Methods, + state: new(system8.State), + }) + case actors.VerifregKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: verifreg8.Methods, + state: new(verifreg8.State), + }) + + } + } + + case actorstypes.Version9: + for key, codeID := range codeIDs { + switch key { + case actors.AccountKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: account9.Methods, + state: new(account9.State), + }) + case actors.CronKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: cron9.Methods, + state: new(cron9.State), + }) + case actors.InitKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: _init9.Methods, + state: new(_init9.State), + }) + case actors.MarketKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: market9.Methods, + state: new(market9.State), + }) + case actors.MinerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: miner9.Methods, + state: new(miner9.State), + }) + case actors.MultisigKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: multisig9.Methods, + state: new(multisig9.State), + }) + case actors.PaychKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: paych9.Methods, + state: new(paych9.State), + }) + case actors.PowerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: 
power9.Methods, + state: new(power9.State), + }) + case actors.RewardKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: reward9.Methods, + state: new(reward9.State), + }) + case actors.SystemKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: system9.Methods, + state: new(system9.State), + }) + case actors.VerifregKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: verifreg9.Methods, + state: new(verifreg9.State), + }) + case actors.DatacapKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: datacap9.Methods, + state: new(datacap9.State), + }) + } + } + + default: + panic("expected version v8 and up only, use specs-actors for v0-7") + } + + return registry +} diff --git a/venus-shared/actors/builtin/registry.go.template b/venus-shared/actors/builtin/registry.go.template new file mode 100644 index 0000000000..d3ef0d2187 --- /dev/null +++ b/venus-shared/actors/builtin/registry.go.template @@ -0,0 +1,187 @@ +// FETCHED FROM LOTUS: builtin/registry.go.template + +package builtin + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/ipfs/go-cid" + "reflect" + "runtime" + "strings" + + "github.com/filecoin-project/go-state-types/builtin" + {{range .versions}} + {{if (ge . 
8)}} + account{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/account" + cron{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/cron" + _init{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/init" + multisig{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/multisig" + miner{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/miner" + market{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/market" + reward{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/reward" + paych{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/paych" + power{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/power" + system{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/system" + verifreg{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/verifreg" + {{end}} + {{if (ge . 9)}} + datacap{{.}} "github.com/filecoin-project/go-state-types/builtin/v{{.}}/datacap" + {{end}} + {{end}} + "github.com/filecoin-project/go-state-types/cbor" + rtt "github.com/filecoin-project/go-state-types/rt" + "github.com/filecoin-project/venus/venus-shared/actors" +) + +type RegistryEntry struct { + state cbor.Er + code cid.Cid + methods map[uint64]builtin.MethodMeta +} + +func (r RegistryEntry) State() cbor.Er { + return r.state +} + +func (r RegistryEntry) Exports() map[uint64]builtin.MethodMeta { + return r.methods +} + +func (r RegistryEntry) Code() cid.Cid { + return r.code +} + +func MakeRegistryLegacy(actors []rtt.VMActor) []RegistryEntry { + registry := make([]RegistryEntry, 0) + + for _, actor := range actors { + methodMap := make(map[uint64]builtin.MethodMeta) + for methodNum, method := range actor.Exports() { + if method != nil { + methodMap[uint64(methodNum)] = makeMethodMeta(method) + } + } + registry = append(registry, RegistryEntry{ + code: actor.Code(), + methods: methodMap, + state: actor.State(), + }) + } + + return registry +} + +func 
makeMethodMeta(method interface{}) builtin.MethodMeta { + ev := reflect.ValueOf(method) + // Extract the method names using reflection. These + // method names always match the field names in the + // `builtin.Method*` structs (tested in the specs-actors + // tests). + fnName := runtime.FuncForPC(ev.Pointer()).Name() + fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm") + return builtin.MethodMeta{ + Name: fnName, + Method: method, + } +} + +func MakeRegistry(av actorstypes.Version) []RegistryEntry { + if av < actorstypes.Version8 { + panic("expected version v8 and up only, use specs-actors for v0-7") + } + registry := make([]RegistryEntry, 0) + + codeIDs, err := actors.GetActorCodeIDs(av) + if err != nil { + panic(err) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + for key, codeID := range codeIDs { + switch key { + case actors.AccountKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: account{{.}}.Methods, + state: new(account{{.}}.State), + }) + case actors.CronKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: cron{{.}}.Methods, + state: new(cron{{.}}.State), + }) + case actors.InitKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: _init{{.}}.Methods, + state: new(_init{{.}}.State), + }) + case actors.MarketKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: market{{.}}.Methods, + state: new(market{{.}}.State), + }) + case actors.MinerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: miner{{.}}.Methods, + state: new(miner{{.}}.State), + }) + case actors.MultisigKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: multisig{{.}}.Methods, + state: new(multisig{{.}}.State), + }) + case actors.PaychKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: paych{{.}}.Methods, + state: new(paych{{.}}.State), + }) + case 
actors.PowerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: power{{.}}.Methods, + state: new(power{{.}}.State), + }) + case actors.RewardKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: reward{{.}}.Methods, + state: new(reward{{.}}.State), + }) + case actors.SystemKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: system{{.}}.Methods, + state: new(system{{.}}.State), + }) + case actors.VerifregKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: verifreg{{.}}.Methods, + state: new(verifreg{{.}}.State), + }) + {{if (ge . 9)}}case actors.DatacapKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: datacap{{.}}.Methods, + state: new(datacap{{.}}.State), + }){{end}} + } + } + {{end}} + {{end}} + + default: + panic("expected version v8 and up only, use specs-actors for v0-7") + } + + return registry +} diff --git a/venus-shared/actors/builtin/reward/actor.go b/venus-shared/actors/builtin/reward/actor.go new file mode 100644 index 0000000000..358c5dbe1a --- /dev/null +++ b/venus-shared/actors/builtin/reward/actor.go @@ -0,0 +1,140 @@ +// FETCHED FROM LOTUS: builtin/reward/actor.go.template + +package reward + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + "github.com/filecoin-project/venus/venus-shared/actors" + + "github.com/filecoin-project/go-state-types/cbor" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 
"github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +var ( + Address = builtin9.RewardActorAddr + Methods = builtin9.MethodsReward +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.RewardKey { + return nil, fmt.Errorf("actor code is not reward: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.RewardActorCodeID: + return load0(store, act.Head) + + case builtin2.RewardActorCodeID: + return load2(store, act.Head) + + case builtin3.RewardActorCodeID: + return load3(store, act.Head) + + case builtin4.RewardActorCodeID: + return load4(store, act.Head) + + case builtin5.RewardActorCodeID: + return load5(store, act.Head) + + case builtin6.RewardActorCodeID: + return load6(store, act.Head) + + case builtin7.RewardActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, currRealizedPower abi.StoragePower) (State, error) { + switch av { + + case actorstypes.Version0: + return make0(store, currRealizedPower) + + case actorstypes.Version2: + return make2(store, currRealizedPower) + + case actorstypes.Version3: + return make3(store, currRealizedPower) + + case actorstypes.Version4: + return make4(store, currRealizedPower) + + case actorstypes.Version5: + return make5(store, currRealizedPower) + + case actorstypes.Version6: + return make6(store, currRealizedPower) + 
+ case actorstypes.Version7: + return make7(store, currRealizedPower) + + case actorstypes.Version8: + return make8(store, currRealizedPower) + + case actorstypes.Version9: + return make9(store, currRealizedPower) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + ThisEpochBaselinePower() (abi.StoragePower, error) + ThisEpochReward() (abi.StoragePower, error) + ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) + + EffectiveBaselinePower() (abi.StoragePower, error) + EffectiveNetworkTime() (abi.ChainEpoch, error) + + TotalStoragePowerReward() (abi.TokenAmount, error) + + CumsumBaseline() (abi.StoragePower, error) + CumsumRealized() (abi.StoragePower, error) + + InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error) + PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error) + GetState() interface{} +} + +type AwardBlockRewardParams = reward0.AwardBlockRewardParams diff --git a/venus-shared/actors/builtin/reward/actor.go.template b/venus-shared/actors/builtin/reward/actor.go.template new file mode 100644 index 0000000000..62c639d51d --- /dev/null +++ b/venus-shared/actors/builtin/reward/actor.go.template @@ -0,0 +1,88 @@ +// FETCHED FROM LOTUS: builtin/reward/actor.go.template + +package reward + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/abi" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + "github.com/filecoin-project/venus/venus-shared/actors" + "fmt" + + "github.com/filecoin-project/go-state-types/cbor" +{{range .versions}} + {{if (le . 
7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +var ( + Address = builtin{{.latestVersion}}.RewardActorAddr + Methods = builtin{{.latestVersion}}.MethodsReward +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.RewardKey { + return nil, fmt.Errorf("actor code is not reward: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 7)}} + case builtin{{.}}.RewardActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, currRealizedPower abi.StoragePower) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store, currRealizedPower) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + ThisEpochBaselinePower() (abi.StoragePower, error) + ThisEpochReward() (abi.StoragePower, error) + ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) + + EffectiveBaselinePower() (abi.StoragePower, error) + EffectiveNetworkTime() (abi.ChainEpoch, error) + + TotalStoragePowerReward() (abi.TokenAmount, error) + + CumsumBaseline() (abi.StoragePower, error) + CumsumRealized() (abi.StoragePower, error) + + InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error) + 
PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error) + GetState() interface{} +} + +type AwardBlockRewardParams = reward0.AwardBlockRewardParams diff --git a/venus-shared/actors/builtin/reward/state.sep.go.template b/venus-shared/actors/builtin/reward/state.sep.go.template new file mode 100644 index 0000000000..e14f2145f6 --- /dev/null +++ b/venus-shared/actors/builtin/reward/state.sep.go.template @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + +{{if (le .v 7)}} + miner{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/miner" + reward{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/reward" + smoothing{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/smoothing" +{{else}} + smoothing{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}util/smoothing" + miner{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}miner" + reward{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}reward" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state{{.v}}{store: store} + out.State = *reward{{.v}}.ConstructState(currRealizedPower) + return &out, nil +} + +type state{{.v}} struct { + reward{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state{{.v}}) 
ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { +{{if (ge .v 2)}} + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil +{{else}} + return builtin.FilterEstimate(*s.State.ThisEpochRewardSmoothed), nil +{{end}} +} + +func (s *state{{.v}}) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state{{.v}}) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.{{if (ge .v 2)}}TotalStoragePowerReward{{else}}TotalMined{{end}}, nil +} + +func (s *state{{.v}}) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state{{.v}}) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state{{.v}}) CumsumBaseline() (reward{{.v}}.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state{{.v}}) CumsumRealized() (reward{{.v}}.Spacetime, error) { + return s.State.CumsumRealized, nil +} +{{if (ge .v 2)}} +func (s *state{{.v}}) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner{{.v}}.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing{{.v}}.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} +{{else}} +func (s *state0) InitialPledgeForPower(sectorWeight abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner0.InitialPledgeForPower( + sectorWeight, + s.State.ThisEpochBaselinePower, + networkTotalPledge, + s.State.ThisEpochRewardSmoothed, + 
&smoothing0.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply), nil +} +{{end}} +func (s *state{{.v}}) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner{{.v}}.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + {{if (le .v 0)}}&{{end}}smoothing{{.v}}.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/reward/state.v0.go b/venus-shared/actors/builtin/reward/state.v0.go new file mode 100644 index 0000000000..79b37d2d47 --- /dev/null +++ b/venus-shared/actors/builtin/reward/state.v0.go @@ -0,0 +1,97 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state0{store: store} + out.State = *reward0.ConstructState(currRealizedPower) + return &out, nil +} + +type state0 struct { + reward0.State + store adt.Store +} + +func (s *state0) ThisEpochReward() (abi.TokenAmount, error) { + return 
s.State.ThisEpochReward, nil +} + +func (s *state0) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate(*s.State.ThisEpochRewardSmoothed), nil + +} + +func (s *state0) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state0) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalMined, nil +} + +func (s *state0) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state0) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state0) CumsumBaseline() (reward0.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state0) CumsumRealized() (reward0.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state0) InitialPledgeForPower(sectorWeight abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner0.InitialPledgeForPower( + sectorWeight, + s.State.ThisEpochBaselinePower, + networkTotalPledge, + s.State.ThisEpochRewardSmoothed, + &smoothing0.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply), nil +} + +func (s *state0) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner0.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + &smoothing0.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/reward/state.v2.go b/venus-shared/actors/builtin/reward/state.v2.go new file mode 100644 index 0000000000..9445e2c0d9 --- 
/dev/null +++ b/venus-shared/actors/builtin/reward/state.v2.go @@ -0,0 +1,100 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + reward2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/reward" + smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state2{store: store} + out.State = *reward2.ConstructState(currRealizedPower) + return &out, nil +} + +type state2 struct { + reward2.State + store adt.Store +} + +func (s *state2) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state2) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state2) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state2) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state2) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state2) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state2) CumsumBaseline() 
(reward2.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state2) CumsumRealized() (reward2.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state2) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner2.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing2.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state2) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner2.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing2.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/reward/state.v3.go b/venus-shared/actors/builtin/reward/state.v3.go new file mode 100644 index 0000000000..95f3585b58 --- /dev/null +++ b/venus-shared/actors/builtin/reward/state.v3.go @@ -0,0 +1,100 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + reward3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/reward" + smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := 
state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state3{store: store} + out.State = *reward3.ConstructState(currRealizedPower) + return &out, nil +} + +type state3 struct { + reward3.State + store adt.Store +} + +func (s *state3) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state3) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state3) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state3) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state3) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state3) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state3) CumsumBaseline() (reward3.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state3) CumsumRealized() (reward3.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state3) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner3.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing3.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state3) PreCommitDepositForPower(networkQAPower 
builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner3.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing3.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/reward/state.v4.go b/venus-shared/actors/builtin/reward/state.v4.go new file mode 100644 index 0000000000..05755641f7 --- /dev/null +++ b/venus-shared/actors/builtin/reward/state.v4.go @@ -0,0 +1,100 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" + reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward" + smoothing4 "github.com/filecoin-project/specs-actors/v4/actors/util/smoothing" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state4{store: store} + out.State = *reward4.ConstructState(currRealizedPower) + return &out, nil +} + +type state4 struct { + reward4.State + store adt.Store +} + +func (s *state4) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state4) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: 
s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state4) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state4) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state4) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state4) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state4) CumsumBaseline() (reward4.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state4) CumsumRealized() (reward4.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state4) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner4.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing4.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state4) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner4.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing4.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/reward/state.v5.go b/venus-shared/actors/builtin/reward/state.v5.go new file mode 100644 index 0000000000..0360320aab --- /dev/null +++ b/venus-shared/actors/builtin/reward/state.v5.go @@ -0,0 +1,100 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package 
reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + reward5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/reward" + smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state5{store: store} + out.State = *reward5.ConstructState(currRealizedPower) + return &out, nil +} + +type state5 struct { + reward5.State + store adt.Store +} + +func (s *state5) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state5) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state5) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state5) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state5) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state5) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state5) CumsumBaseline() (reward5.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state5) CumsumRealized() (reward5.Spacetime, error) { + return 
s.State.CumsumRealized, nil +} + +func (s *state5) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner5.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing5.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state5) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner5.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing5.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/reward/state.v6.go b/venus-shared/actors/builtin/reward/state.v6.go new file mode 100644 index 0000000000..799bb9193b --- /dev/null +++ b/venus-shared/actors/builtin/reward/state.v6.go @@ -0,0 +1,100 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" + reward6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/reward" + smoothing6 "github.com/filecoin-project/specs-actors/v6/actors/util/smoothing" +) + +var _ State = (*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store 
adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state6{store: store} + out.State = *reward6.ConstructState(currRealizedPower) + return &out, nil +} + +type state6 struct { + reward6.State + store adt.Store +} + +func (s *state6) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state6) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state6) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state6) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state6) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state6) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state6) CumsumBaseline() (reward6.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state6) CumsumRealized() (reward6.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state6) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner6.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing6.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state6) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner6.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, 
+ smoothing6.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state6) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/reward/state.v7.go b/venus-shared/actors/builtin/reward/state.v7.go new file mode 100644 index 0000000000..72d3cc18ce --- /dev/null +++ b/venus-shared/actors/builtin/reward/state.v7.go @@ -0,0 +1,100 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + reward7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/reward" + smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state7{store: store} + out.State = *reward7.ConstructState(currRealizedPower) + return &out, nil +} + +type state7 struct { + reward7.State + store adt.Store +} + +func (s *state7) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state7) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state7) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + 
+func (s *state7) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state7) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state7) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state7) CumsumBaseline() (reward7.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state7) CumsumRealized() (reward7.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state7) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner7.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing7.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state7) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner7.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing7.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/reward/state.v8.go b/venus-shared/actors/builtin/reward/state.v8.go new file mode 100644 index 0000000000..38e5469d3d --- /dev/null +++ b/venus-shared/actors/builtin/reward/state.v8.go @@ -0,0 +1,100 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + 
"github.com/filecoin-project/venus/venus-shared/actors/builtin" + + miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner" + reward8 "github.com/filecoin-project/go-state-types/builtin/v8/reward" + smoothing8 "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing" +) + +var _ State = (*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state8{store: store} + out.State = *reward8.ConstructState(currRealizedPower) + return &out, nil +} + +type state8 struct { + reward8.State + store adt.Store +} + +func (s *state8) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state8) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state8) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state8) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state8) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state8) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state8) CumsumBaseline() (reward8.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state8) CumsumRealized() (reward8.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state8) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, 
circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner8.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing8.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state8) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner8.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing8.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state8) GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/reward/state.v9.go b/venus-shared/actors/builtin/reward/state.v9.go new file mode 100644 index 0000000000..31358125cb --- /dev/null +++ b/venus-shared/actors/builtin/reward/state.v9.go @@ -0,0 +1,100 @@ +// FETCHED FROM LOTUS: builtin/reward/state.go.template + +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + + miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner" + reward9 "github.com/filecoin-project/go-state-types/builtin/v9/reward" + smoothing9 "github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state9{store: store} + out.State = *reward9.ConstructState(currRealizedPower) + return &out, nil +} + +type state9 
struct { + reward9.State + store adt.Store +} + +func (s *state9) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state9) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state9) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state9) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state9) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state9) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state9) CumsumBaseline() (reward9.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state9) CumsumRealized() (reward9.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state9) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner9.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing9.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state9) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner9.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing9.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state9) 
GetState() interface{} { + return &s.State +} diff --git a/venus-shared/actors/builtin/system/actor.go b/venus-shared/actors/builtin/system/actor.go new file mode 100644 index 0000000000..9c2b8c7ccb --- /dev/null +++ b/venus-shared/actors/builtin/system/actor.go @@ -0,0 +1,118 @@ +// FETCHED FROM LOTUS: builtin/system/actor.go.template + +package system + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" + "github.com/ipfs/go-cid" + + "fmt" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" +) + +var ( + Address = builtin9.SystemActorAddr +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.SystemKey { + return nil, fmt.Errorf("actor code is not system: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.SystemActorCodeID: + return load0(store, act.Head) + + case builtin2.SystemActorCodeID: + return load2(store, act.Head) + + case builtin3.SystemActorCodeID: + return load3(store, act.Head) + + case builtin4.SystemActorCodeID: + return load4(store, act.Head) + + case 
builtin5.SystemActorCodeID: + return load5(store, act.Head) + + case builtin6.SystemActorCodeID: + return load6(store, act.Head) + + case builtin7.SystemActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, builtinActors cid.Cid) (State, error) { + switch av { + + case actorstypes.Version0: + return make0(store) + + case actorstypes.Version2: + return make2(store) + + case actorstypes.Version3: + return make3(store) + + case actorstypes.Version4: + return make4(store) + + case actorstypes.Version5: + return make5(store) + + case actorstypes.Version6: + return make6(store) + + case actorstypes.Version7: + return make7(store) + + case actorstypes.Version8: + return make8(store, builtinActors) + + case actorstypes.Version9: + return make9(store, builtinActors) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + GetState() interface{} + GetBuiltinActors() cid.Cid + SetBuiltinActors(cid.Cid) error +} diff --git a/venus-shared/actors/builtin/system/actor.go.template b/venus-shared/actors/builtin/system/actor.go.template new file mode 100644 index 0000000000..d9c0675058 --- /dev/null +++ b/venus-shared/actors/builtin/system/actor.go.template @@ -0,0 +1,68 @@ +// FETCHED FROM LOTUS: builtin/system/actor.go.template + +package system + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors" + types "github.com/filecoin-project/venus/venus-shared/internal" + "github.com/ipfs/go-cid" + + "fmt" + +{{range .versions}} + {{if (le . 
7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin" +) + +var ( + Address = builtin{{.latestVersion}}.SystemActorAddr +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.SystemKey { + return nil, fmt.Errorf("actor code is not system: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 7)}} + case builtin{{.}}.SystemActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, builtinActors cid.Cid) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store{{if (ge . 
8)}}, builtinActors{{end}}) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + GetState() interface{} + GetBuiltinActors() cid.Cid + SetBuiltinActors(cid.Cid) error +} diff --git a/venus-shared/actors/builtin/system/state.sep.go.template b/venus-shared/actors/builtin/system/state.sep.go.template new file mode 100644 index 0000000000..72e82666c0 --- /dev/null +++ b/venus-shared/actors/builtin/system/state.sep.go.template @@ -0,0 +1,61 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + +{{if (le .v 7)}} + system{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/system" + "fmt" +{{else}} + system{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}system" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store{{if (ge .v 8)}}, builtinActors cid.Cid{{end}}) (State, error) { + out := state{{.v}}{store: store} + out.State = system{{.v}}.State{ + {{if (ge .v 8)}}BuiltinActors: builtinActors,{{end}} + } + return &out, nil +} + +type state{{.v}} struct { + system{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + +func (s *state{{.v}}) GetBuiltinActors() cid.Cid { +{{if (le .v 7)}} + return cid.Undef +{{else}} + return s.State.BuiltinActors +{{end}} +} + +func (s *state{{.v}}) SetBuiltinActors(c cid.Cid) error { +{{if (le .v 7)}} + return xerrors.New("cannot set manifest cid before v8") +{{else}} + s.State.BuiltinActors = c + return nil +{{end}} +} diff --git a/venus-shared/actors/builtin/system/state.v0.go b/venus-shared/actors/builtin/system/state.v0.go new file mode 
100644 index 0000000000..ff6dc110bc --- /dev/null +++ b/venus-shared/actors/builtin/system/state.v0.go @@ -0,0 +1,50 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + system0 "github.com/filecoin-project/specs-actors/actors/builtin/system" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = system0.State{} + return &out, nil +} + +type state0 struct { + system0.State + store adt.Store +} + +func (s *state0) GetState() interface{} { + return &s.State +} + +func (s *state0) GetBuiltinActors() cid.Cid { + + return cid.Undef + +} + +func (s *state0) SetBuiltinActors(c cid.Cid) error { + + return xerrors.New("cannot set manifest cid before v8") + +} diff --git a/venus-shared/actors/builtin/system/state.v2.go b/venus-shared/actors/builtin/system/state.v2.go new file mode 100644 index 0000000000..376195b254 --- /dev/null +++ b/venus-shared/actors/builtin/system/state.v2.go @@ -0,0 +1,50 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + system2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/system" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = system2.State{} + return &out, nil +} + +type state2 
struct { + system2.State + store adt.Store +} + +func (s *state2) GetState() interface{} { + return &s.State +} + +func (s *state2) GetBuiltinActors() cid.Cid { + + return cid.Undef + +} + +func (s *state2) SetBuiltinActors(c cid.Cid) error { + + return xerrors.New("cannot set manifest cid before v8") + +} diff --git a/venus-shared/actors/builtin/system/state.v3.go b/venus-shared/actors/builtin/system/state.v3.go new file mode 100644 index 0000000000..eeb00d03cb --- /dev/null +++ b/venus-shared/actors/builtin/system/state.v3.go @@ -0,0 +1,50 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + system3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/system" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = system3.State{} + return &out, nil +} + +type state3 struct { + system3.State + store adt.Store +} + +func (s *state3) GetState() interface{} { + return &s.State +} + +func (s *state3) GetBuiltinActors() cid.Cid { + + return cid.Undef + +} + +func (s *state3) SetBuiltinActors(c cid.Cid) error { + + return xerrors.New("cannot set manifest cid before v8") + +} diff --git a/venus-shared/actors/builtin/system/state.v4.go b/venus-shared/actors/builtin/system/state.v4.go new file mode 100644 index 0000000000..7498d996a1 --- /dev/null +++ b/venus-shared/actors/builtin/system/state.v4.go @@ -0,0 +1,50 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + system4 
"github.com/filecoin-project/specs-actors/v4/actors/builtin/system" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = system4.State{} + return &out, nil +} + +type state4 struct { + system4.State + store adt.Store +} + +func (s *state4) GetState() interface{} { + return &s.State +} + +func (s *state4) GetBuiltinActors() cid.Cid { + + return cid.Undef + +} + +func (s *state4) SetBuiltinActors(c cid.Cid) error { + + return xerrors.New("cannot set manifest cid before v8") + +} diff --git a/venus-shared/actors/builtin/system/state.v5.go b/venus-shared/actors/builtin/system/state.v5.go new file mode 100644 index 0000000000..fcf197bf1a --- /dev/null +++ b/venus-shared/actors/builtin/system/state.v5.go @@ -0,0 +1,50 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + system5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/system" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = system5.State{} + return &out, nil +} + +type state5 struct { + system5.State + store adt.Store +} + +func (s *state5) GetState() interface{} { + return &s.State +} + +func (s *state5) GetBuiltinActors() cid.Cid { + + return cid.Undef + +} + +func (s *state5) SetBuiltinActors(c cid.Cid) error { + + return xerrors.New("cannot set manifest cid before v8") + +} diff --git 
a/venus-shared/actors/builtin/system/state.v6.go b/venus-shared/actors/builtin/system/state.v6.go new file mode 100644 index 0000000000..dc1e1ca1fc --- /dev/null +++ b/venus-shared/actors/builtin/system/state.v6.go @@ -0,0 +1,50 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + system6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/system" +) + +var _ State = (*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store) (State, error) { + out := state6{store: store} + out.State = system6.State{} + return &out, nil +} + +type state6 struct { + system6.State + store adt.Store +} + +func (s *state6) GetState() interface{} { + return &s.State +} + +func (s *state6) GetBuiltinActors() cid.Cid { + + return cid.Undef + +} + +func (s *state6) SetBuiltinActors(c cid.Cid) error { + + return xerrors.New("cannot set manifest cid before v8") + +} diff --git a/venus-shared/actors/builtin/system/state.v7.go b/venus-shared/actors/builtin/system/state.v7.go new file mode 100644 index 0000000000..c56156d1a7 --- /dev/null +++ b/venus-shared/actors/builtin/system/state.v7.go @@ -0,0 +1,50 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + system7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/system" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) 
(State, error) { + out := state7{store: store} + out.State = system7.State{} + return &out, nil +} + +type state7 struct { + system7.State + store adt.Store +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +func (s *state7) GetBuiltinActors() cid.Cid { + + return cid.Undef + +} + +func (s *state7) SetBuiltinActors(c cid.Cid) error { + + return xerrors.New("cannot set manifest cid before v8") + +} diff --git a/venus-shared/actors/builtin/system/state.v8.go b/venus-shared/actors/builtin/system/state.v8.go new file mode 100644 index 0000000000..c1e49f2d8e --- /dev/null +++ b/venus-shared/actors/builtin/system/state.v8.go @@ -0,0 +1,52 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + system8 "github.com/filecoin-project/go-state-types/builtin/v8/system" +) + +var _ State = (*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store, builtinActors cid.Cid) (State, error) { + out := state8{store: store} + out.State = system8.State{ + BuiltinActors: builtinActors, + } + return &out, nil +} + +type state8 struct { + system8.State + store adt.Store +} + +func (s *state8) GetState() interface{} { + return &s.State +} + +func (s *state8) GetBuiltinActors() cid.Cid { + + return s.State.BuiltinActors + +} + +func (s *state8) SetBuiltinActors(c cid.Cid) error { + + s.State.BuiltinActors = c + return nil + +} diff --git a/venus-shared/actors/builtin/system/state.v9.go b/venus-shared/actors/builtin/system/state.v9.go new file mode 100644 index 0000000000..cd38110f1f --- /dev/null +++ b/venus-shared/actors/builtin/system/state.v9.go @@ -0,0 +1,52 @@ +// FETCHED FROM LOTUS: builtin/system/state.go.template + +package system + +import ( + 
"github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + system9 "github.com/filecoin-project/go-state-types/builtin/v9/system" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store, builtinActors cid.Cid) (State, error) { + out := state9{store: store} + out.State = system9.State{ + BuiltinActors: builtinActors, + } + return &out, nil +} + +type state9 struct { + system9.State + store adt.Store +} + +func (s *state9) GetState() interface{} { + return &s.State +} + +func (s *state9) GetBuiltinActors() cid.Cid { + + return s.State.BuiltinActors + +} + +func (s *state9) SetBuiltinActors(c cid.Cid) error { + + s.State.BuiltinActors = c + return nil + +} diff --git a/venus-shared/actors/builtin/verifreg/actor.go b/venus-shared/actors/builtin/verifreg/actor.go new file mode 100644 index 0000000000..e440763d99 --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/actor.go @@ -0,0 +1,135 @@ +// FETCHED FROM LOTUS: builtin/verifreg/actor.go.template + +package verifreg + +import ( + "fmt" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-state-types/cbor" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + builtin9 
"github.com/filecoin-project/go-state-types/builtin" + + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + types "github.com/filecoin-project/venus/venus-shared/internal" +) + +var ( + Address = builtin9.VerifiedRegistryActorAddr + Methods = builtin9.MethodsVerifiedRegistry +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.VerifregKey { + return nil, fmt.Errorf("actor code is not verifreg: %s", name) + } + + switch av { + + case actorstypes.Version8: + return load8(store, act.Head) + + case actorstypes.Version9: + return load9(store, act.Head) + + } + } + + switch act.Code { + + case builtin0.VerifiedRegistryActorCodeID: + return load0(store, act.Head) + + case builtin2.VerifiedRegistryActorCodeID: + return load2(store, act.Head) + + case builtin3.VerifiedRegistryActorCodeID: + return load3(store, act.Head) + + case builtin4.VerifiedRegistryActorCodeID: + return load4(store, act.Head) + + case builtin5.VerifiedRegistryActorCodeID: + return load5(store, act.Head) + + case builtin6.VerifiedRegistryActorCodeID: + return load6(store, act.Head) + + case builtin7.VerifiedRegistryActorCodeID: + return load7(store, act.Head) + + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, rootKeyAddress address.Address) (State, error) { + switch av { + + case actorstypes.Version0: + return make0(store, rootKeyAddress) + + case actorstypes.Version2: + return make2(store, rootKeyAddress) + + case actorstypes.Version3: + return make3(store, rootKeyAddress) + + case actorstypes.Version4: + return make4(store, rootKeyAddress) + + case actorstypes.Version5: + return make5(store, rootKeyAddress) + + case actorstypes.Version6: + return make6(store, rootKeyAddress) + + 
case actorstypes.Version7: + return make7(store, rootKeyAddress) + + case actorstypes.Version8: + return make8(store, rootKeyAddress) + + case actorstypes.Version9: + return make9(store, rootKeyAddress) + + } + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + RootKey() (address.Address, error) + VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) + VerifierDataCap(address.Address) (bool, abi.StoragePower, error) + RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) + ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error + ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error + GetAllocation(clientIdAddr address.Address, allocationId verifregtypes.AllocationId) (*verifregtypes.Allocation, bool, error) + GetAllocations(clientIdAddr address.Address) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) + GetClaim(providerIdAddr address.Address, claimId verifregtypes.ClaimId) (*verifregtypes.Claim, bool, error) + GetClaims(providerIdAddr address.Address) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/verifreg/actor.go.template b/venus-shared/actors/builtin/verifreg/actor.go.template new file mode 100644 index 0000000000..07f3d4f91f --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/actor.go.template @@ -0,0 +1,83 @@ +// FETCHED FROM LOTUS: builtin/verifreg/actor.go.template + +package verifreg + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-state-types/cbor" +{{range .versions}} + {{if (le . 
7)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} +{{end}} + builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/filecoin-project/venus/venus-shared/actors" + types "github.com/filecoin-project/venus/venus-shared/internal" + verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +var ( + Address = builtin{{.latestVersion}}.VerifiedRegistryActorAddr + Methods = builtin{{.latestVersion}}.MethodsVerifiedRegistry +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { + if name != actors.VerifregKey { + return nil, fmt.Errorf("actor code is not verifreg: %s", name) + } + + switch av { + {{range .versions}} + {{if (ge . 8)}} + case actorstypes.Version{{.}}: + return load{{.}}(store, act.Head) + {{end}} + {{end}} + } + } + + switch act.Code { +{{range .versions}} + {{if (le . 
7)}} + case builtin{{.}}.VerifiedRegistryActorCodeID: + return load{{.}}(store, act.Head) + {{end}} +{{end}} + } + + return nil, fmt.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actorstypes.Version, rootKeyAddress address.Address) (State, error) { + switch av { +{{range .versions}} + case actorstypes.Version{{.}}: + return make{{.}}(store, rootKeyAddress) +{{end}} +} + return nil, fmt.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + RootKey() (address.Address, error) + VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) + VerifierDataCap(address.Address) (bool, abi.StoragePower, error) + RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) + ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error + ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error + GetAllocation(clientIdAddr address.Address, allocationId verifregtypes.AllocationId) (*verifregtypes.Allocation, bool, error) + GetAllocations(clientIdAddr address.Address) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) + GetClaim(providerIdAddr address.Address, claimId verifregtypes.ClaimId) (*verifregtypes.Claim, bool, error) + GetClaims(providerIdAddr address.Address) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) + GetState() interface{} +} diff --git a/venus-shared/actors/builtin/verifreg/state.sep.go.template b/venus-shared/actors/builtin/verifreg/state.sep.go.template new file mode 100644 index 0000000000..be693ffe75 --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/state.sep.go.template @@ -0,0 +1,151 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors" + 
"github.com/filecoin-project/venus/venus-shared/actors/adt" + "fmt" + +{{if (le .v 7)}} + {{if (ge .v 3)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" + {{end}} + verifreg{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/verifreg" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +{{else}} + verifreg{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}verifreg" + adt{{.v}} "github.com/filecoin-project/go-state-types/builtin{{.import}}util/adt" + builtin{{.v}} "github.com/filecoin-project/go-state-types/builtin" +{{end}} +{{if (ge .v 9)}} + "github.com/filecoin-project/go-state-types/big" +{{else}} + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +{{end}} +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *verifreg{{.v}}.ConstructState(em, rootKeyAddress) + {{else}} + s, err := verifreg{{.v}}.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + return &out, nil +} + +type state{{.v}} struct { + verifreg{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state{{.v}}) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { +{{if (le .v 8)}} + return getDataCap(s.store, actors.Version{{.v}}, s.verifiedClients, addr) +{{else}} + return false, big.Zero(), fmt.Errorf("unsupported in actors v{{.v}}") +{{end}} +} + +func (s 
*state{{.v}}) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version{{.v}}, s.verifiers, addr) +} + +func (s *state{{.v}}) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version{{.v}}, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state{{.v}}) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version{{.v}}, s.verifiers, cb) +} + +func (s *state{{.v}}) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { +{{if (le .v 8)}} + return forEachCap(s.store, actors.Version{{.v}}, s.verifiedClients, cb) +{{else}} + return fmt.Errorf("unsupported in actors v{{.v}}") +{{end}} +} + +func (s *state{{.v}}) verifiedClients() (adt.Map, error) { +{{if (le .v 8)}} + return adt{{.v}}.AsMap(s.store, s.VerifiedClients{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +{{else}} + return nil, fmt.Errorf("unsupported in actors v{{.v}}") +{{end}} +} + +func (s *state{{.v}}) verifiers() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) removeDataCapProposalIDs() (adt.Map, error) { + {{if le .v 6}}return nil, nil + {{else}}return adt{{.v}}.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin{{.v}}.DefaultHamtBitwidth){{end}} +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + +func (s *state{{.v}}) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { +{{if (le .v 8)}} + return nil, false, fmt.Errorf("unsupported in actors v{{.v}}") +{{else}} + return s.FindAllocation(s.store, clientIdAddr, allocationId) +{{end}} +} + +func (s *state{{.v}}) GetAllocations(clientIdAddr address.Address) 
(map[verifreg9.AllocationId]verifreg9.Allocation, error) { +{{if (le .v 8)}} + return nil, fmt.Errorf("unsupported in actors v{{.v}}") +{{else}} + return s.LoadAllocationsToMap(s.store, clientIdAddr) +{{end}} +} + +func (s *state{{.v}}) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { +{{if (le .v 8)}} + return nil, false, fmt.Errorf("unsupported in actors v{{.v}}") +{{else}} + return s.FindClaim(s.store, providerIdAddr, claimId) +{{end}} +} + +func (s *state{{.v}}) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { +{{if (le .v 8)}} + return nil, fmt.Errorf("unsupported in actors v{{.v}}") +{{else}} + return s.LoadClaimsToMap(s.store, providerIdAddr) +{{end}} +} diff --git a/venus-shared/actors/builtin/verifreg/state.v0.go b/venus-shared/actors/builtin/verifreg/state.v0.go new file mode 100644 index 0000000000..60e70f6d18 --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/state.v0.go @@ -0,0 +1,119 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state0{store: store} + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + 
return nil, err + } + + out.State = *verifreg0.ConstructState(em, rootKeyAddress) + + return &out, nil +} + +type state0 struct { + verifreg0.State + store adt.Store +} + +func (s *state0) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return getDataCap(s.store, actors.Version0, s.verifiedClients, addr) + +} + +func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version0, s.verifiers, addr) +} + +func (s *state0) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version0, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version0, s.verifiers, cb) +} + +func (s *state0) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return forEachCap(s.store, actors.Version0, s.verifiedClients, cb) + +} + +func (s *state0) verifiedClients() (adt.Map, error) { + + return adt0.AsMap(s.store, s.VerifiedClients) + +} + +func (s *state0) verifiers() (adt.Map, error) { + return adt0.AsMap(s.store, s.Verifiers) +} + +func (s *state0) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + +func (s *state0) GetState() interface{} { + return &s.State +} + +func (s *state0) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v0") + +} + +func (s *state0) GetAllocations(clientIdAddr address.Address) (map[verifreg9.AllocationId]verifreg9.Allocation, error) { + + return nil, fmt.Errorf("unsupported in actors v0") + +} + +func (s *state0) GetClaim(providerIdAddr address.Address, claimId 
verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v0") + +} + +func (s *state0) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { + + return nil, fmt.Errorf("unsupported in actors v0") + +} diff --git a/venus-shared/actors/builtin/verifreg/state.v2.go b/venus-shared/actors/builtin/verifreg/state.v2.go new file mode 100644 index 0000000000..7dfb847424 --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/state.v2.go @@ -0,0 +1,119 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" + + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state2{store: store} + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *verifreg2.ConstructState(em, rootKeyAddress) + + return &out, nil +} + +type state2 struct { + verifreg2.State + store adt.Store +} + +func (s *state2) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state2) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return getDataCap(s.store, actors.Version2, s.verifiedClients, addr) + +} + +func (s *state2) 
VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version2, s.verifiers, addr) +} + +func (s *state2) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version2, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version2, s.verifiers, cb) +} + +func (s *state2) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return forEachCap(s.store, actors.Version2, s.verifiedClients, cb) + +} + +func (s *state2) verifiedClients() (adt.Map, error) { + + return adt2.AsMap(s.store, s.VerifiedClients) + +} + +func (s *state2) verifiers() (adt.Map, error) { + return adt2.AsMap(s.store, s.Verifiers) +} + +func (s *state2) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + +func (s *state2) GetState() interface{} { + return &s.State +} + +func (s *state2) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v2") + +} + +func (s *state2) GetAllocations(clientIdAddr address.Address) (map[verifreg9.AllocationId]verifreg9.Allocation, error) { + + return nil, fmt.Errorf("unsupported in actors v2") + +} + +func (s *state2) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v2") + +} + +func (s *state2) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { + + return nil, fmt.Errorf("unsupported in actors v2") + +} diff --git a/venus-shared/actors/builtin/verifreg/state.v3.go b/venus-shared/actors/builtin/verifreg/state.v3.go new file mode 100644 index 0000000000..9d370428a4 --- 
/dev/null +++ b/venus-shared/actors/builtin/verifreg/state.v3.go @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" + + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state3{store: store} + + s, err := verifreg3.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state3 struct { + verifreg3.State + store adt.Store +} + +func (s *state3) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state3) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return getDataCap(s.store, actors.Version3, s.verifiedClients, addr) + +} + +func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version3, s.verifiers, addr) +} + +func (s *state3) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version3, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state3) ForEachVerifier(cb func(addr 
address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version3, s.verifiers, cb) +} + +func (s *state3) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return forEachCap(s.store, actors.Version3, s.verifiedClients, cb) + +} + +func (s *state3) verifiedClients() (adt.Map, error) { + + return adt3.AsMap(s.store, s.VerifiedClients, builtin3.DefaultHamtBitwidth) + +} + +func (s *state3) verifiers() (adt.Map, error) { + return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + +func (s *state3) GetState() interface{} { + return &s.State +} + +func (s *state3) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v3") + +} + +func (s *state3) GetAllocations(clientIdAddr address.Address) (map[verifreg9.AllocationId]verifreg9.Allocation, error) { + + return nil, fmt.Errorf("unsupported in actors v3") + +} + +func (s *state3) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v3") + +} + +func (s *state3) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { + + return nil, fmt.Errorf("unsupported in actors v3") + +} diff --git a/venus-shared/actors/builtin/verifreg/state.v4.go b/venus-shared/actors/builtin/verifreg/state.v4.go new file mode 100644 index 0000000000..04603300a6 --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/state.v4.go @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "fmt" + + 
"github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" + + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state4{store: store} + + s, err := verifreg4.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state4 struct { + verifreg4.State + store adt.Store +} + +func (s *state4) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state4) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return getDataCap(s.store, actors.Version4, s.verifiedClients, addr) + +} + +func (s *state4) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version4, s.verifiers, addr) +} + +func (s *state4) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version4, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state4) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version4, s.verifiers, cb) +} + +func (s *state4) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return forEachCap(s.store, actors.Version4, s.verifiedClients, cb) + +} + +func (s 
*state4) verifiedClients() (adt.Map, error) { + + return adt4.AsMap(s.store, s.VerifiedClients, builtin4.DefaultHamtBitwidth) + +} + +func (s *state4) verifiers() (adt.Map, error) { + return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + +func (s *state4) GetState() interface{} { + return &s.State +} + +func (s *state4) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v4") + +} + +func (s *state4) GetAllocations(clientIdAddr address.Address) (map[verifreg9.AllocationId]verifreg9.Allocation, error) { + + return nil, fmt.Errorf("unsupported in actors v4") + +} + +func (s *state4) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v4") + +} + +func (s *state4) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { + + return nil, fmt.Errorf("unsupported in actors v4") + +} diff --git a/venus-shared/actors/builtin/verifreg/state.v5.go b/venus-shared/actors/builtin/verifreg/state.v5.go new file mode 100644 index 0000000000..4dfc1edbc3 --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/state.v5.go @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" + + verifreg9 
"github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state5{store: store} + + s, err := verifreg5.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + verifreg5.State + store adt.Store +} + +func (s *state5) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state5) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return getDataCap(s.store, actors.Version5, s.verifiedClients, addr) + +} + +func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version5, s.verifiers, addr) +} + +func (s *state5) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version5, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version5, s.verifiers, cb) +} + +func (s *state5) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return forEachCap(s.store, actors.Version5, s.verifiedClients, cb) + +} + +func (s *state5) verifiedClients() (adt.Map, error) { + + return adt5.AsMap(s.store, s.VerifiedClients, builtin5.DefaultHamtBitwidth) + +} + +func (s *state5) verifiers() (adt.Map, error) { + return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + +func (s *state5) 
GetState() interface{} { + return &s.State +} + +func (s *state5) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v5") + +} + +func (s *state5) GetAllocations(clientIdAddr address.Address) (map[verifreg9.AllocationId]verifreg9.Allocation, error) { + + return nil, fmt.Errorf("unsupported in actors v5") + +} + +func (s *state5) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v5") + +} + +func (s *state5) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { + + return nil, fmt.Errorf("unsupported in actors v5") + +} diff --git a/venus-shared/actors/builtin/verifreg/state.v6.go b/venus-shared/actors/builtin/verifreg/state.v6.go new file mode 100644 index 0000000000..9fed61b98b --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/state.v6.go @@ -0,0 +1,121 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg" + adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt" + + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +var _ State = (*state6)(nil) + +func load6(store adt.Store, root cid.Cid) (State, error) { + out := state6{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make6(store adt.Store, rootKeyAddress address.Address) 
(State, error) { + out := state6{store: store} + + s, err := verifreg6.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state6 struct { + verifreg6.State + store adt.Store +} + +func (s *state6) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state6) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return getDataCap(s.store, actors.Version6, s.verifiedClients, addr) + +} + +func (s *state6) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version6, s.verifiers, addr) +} + +func (s *state6) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version6, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state6) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version6, s.verifiers, cb) +} + +func (s *state6) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return forEachCap(s.store, actors.Version6, s.verifiedClients, cb) + +} + +func (s *state6) verifiedClients() (adt.Map, error) { + + return adt6.AsMap(s.store, s.VerifiedClients, builtin6.DefaultHamtBitwidth) + +} + +func (s *state6) verifiers() (adt.Map, error) { + return adt6.AsMap(s.store, s.Verifiers, builtin6.DefaultHamtBitwidth) +} + +func (s *state6) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + +func (s *state6) GetState() interface{} { + return &s.State +} + +func (s *state6) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v6") + +} + +func (s *state6) GetAllocations(clientIdAddr address.Address) 
(map[verifreg9.AllocationId]verifreg9.Allocation, error) { + + return nil, fmt.Errorf("unsupported in actors v6") + +} + +func (s *state6) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v6") + +} + +func (s *state6) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { + + return nil, fmt.Errorf("unsupported in actors v6") + +} diff --git a/venus-shared/actors/builtin/verifreg/state.v7.go b/venus-shared/actors/builtin/verifreg/state.v7.go new file mode 100644 index 0000000000..7754796872 --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/state.v7.go @@ -0,0 +1,120 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state7{store: store} + + s, err := verifreg7.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + verifreg7.State + store adt.Store +} + +func (s *state7) RootKey() (address.Address, error) { + return 
s.State.RootKey, nil +} + +func (s *state7) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return getDataCap(s.store, actors.Version7, s.verifiedClients, addr) + +} + +func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version7, s.verifiers, addr) +} + +func (s *state7) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version7, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version7, s.verifiers, cb) +} + +func (s *state7) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return forEachCap(s.store, actors.Version7, s.verifiedClients, cb) + +} + +func (s *state7) verifiedClients() (adt.Map, error) { + + return adt7.AsMap(s.store, s.VerifiedClients, builtin7.DefaultHamtBitwidth) + +} + +func (s *state7) verifiers() (adt.Map, error) { + return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) removeDataCapProposalIDs() (adt.Map, error) { + return adt7.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +func (s *state7) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v7") + +} + +func (s *state7) GetAllocations(clientIdAddr address.Address) (map[verifreg9.AllocationId]verifreg9.Allocation, error) { + + return nil, fmt.Errorf("unsupported in actors v7") + +} + +func (s *state7) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors 
v7") + +} + +func (s *state7) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { + + return nil, fmt.Errorf("unsupported in actors v7") + +} diff --git a/venus-shared/actors/builtin/verifreg/state.v8.go b/venus-shared/actors/builtin/verifreg/state.v8.go new file mode 100644 index 0000000000..681acfbcc8 --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/state.v8.go @@ -0,0 +1,119 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin8 "github.com/filecoin-project/go-state-types/builtin" + adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" + verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg" + + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +var _ State = (*state8)(nil) + +func load8(store adt.Store, root cid.Cid) (State, error) { + out := state8{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make8(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state8{store: store} + + s, err := verifreg8.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state8 struct { + verifreg8.State + store adt.Store +} + +func (s *state8) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state8) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return getDataCap(s.store, actors.Version8, s.verifiedClients, addr) + +} + +func (s *state8) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return 
getDataCap(s.store, actors.Version8, s.verifiers, addr) +} + +func (s *state8) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version8, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state8) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version8, s.verifiers, cb) +} + +func (s *state8) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return forEachCap(s.store, actors.Version8, s.verifiedClients, cb) + +} + +func (s *state8) verifiedClients() (adt.Map, error) { + + return adt8.AsMap(s.store, s.VerifiedClients, builtin8.DefaultHamtBitwidth) + +} + +func (s *state8) verifiers() (adt.Map, error) { + return adt8.AsMap(s.store, s.Verifiers, builtin8.DefaultHamtBitwidth) +} + +func (s *state8) removeDataCapProposalIDs() (adt.Map, error) { + return adt8.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin8.DefaultHamtBitwidth) +} + +func (s *state8) GetState() interface{} { + return &s.State +} + +func (s *state8) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v8") + +} + +func (s *state8) GetAllocations(clientIdAddr address.Address) (map[verifreg9.AllocationId]verifreg9.Allocation, error) { + + return nil, fmt.Errorf("unsupported in actors v8") + +} + +func (s *state8) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { + + return nil, false, fmt.Errorf("unsupported in actors v8") + +} + +func (s *state8) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { + + return nil, fmt.Errorf("unsupported in actors v8") + +} diff --git a/venus-shared/actors/builtin/verifreg/state.v9.go b/venus-shared/actors/builtin/verifreg/state.v9.go new file 
mode 100644 index 0000000000..7d6061f5db --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/state.v9.go @@ -0,0 +1,119 @@ +// FETCHED FROM LOTUS: builtin/verifreg/state.go.template + +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" + adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + + "github.com/filecoin-project/go-state-types/big" +) + +var _ State = (*state9)(nil) + +func load9(store adt.Store, root cid.Cid) (State, error) { + out := state9{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make9(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state9{store: store} + + s, err := verifreg9.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state9 struct { + verifreg9.State + store adt.Store +} + +func (s *state9) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state9) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return false, big.Zero(), fmt.Errorf("unsupported in actors v9") + +} + +func (s *state9) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version9, s.verifiers, addr) +} + +func (s *state9) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version9, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state9) ForEachVerifier(cb func(addr 
address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version9, s.verifiers, cb) +} + +func (s *state9) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return fmt.Errorf("unsupported in actors v9") + +} + +func (s *state9) verifiedClients() (adt.Map, error) { + + return nil, fmt.Errorf("unsupported in actors v9") + +} + +func (s *state9) verifiers() (adt.Map, error) { + return adt9.AsMap(s.store, s.Verifiers, builtin9.DefaultHamtBitwidth) +} + +func (s *state9) removeDataCapProposalIDs() (adt.Map, error) { + return adt9.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin9.DefaultHamtBitwidth) +} + +func (s *state9) GetState() interface{} { + return &s.State +} + +func (s *state9) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*verifreg9.Allocation, bool, error) { + + return s.FindAllocation(s.store, clientIdAddr, allocationId) + +} + +func (s *state9) GetAllocations(clientIdAddr address.Address) (map[verifreg9.AllocationId]verifreg9.Allocation, error) { + + return s.LoadAllocationsToMap(s.store, clientIdAddr) + +} + +func (s *state9) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*verifreg9.Claim, bool, error) { + + return s.FindClaim(s.store, providerIdAddr, claimId) + +} + +func (s *state9) GetClaims(providerIdAddr address.Address) (map[verifreg9.ClaimId]verifreg9.Claim, error) { + + return s.LoadClaimsToMap(s.store, providerIdAddr) + +} diff --git a/venus-shared/actors/builtin/verifreg/util.go b/venus-shared/actors/builtin/verifreg/util.go new file mode 100644 index 0000000000..0bb8ad770f --- /dev/null +++ b/venus-shared/actors/builtin/verifreg/util.go @@ -0,0 +1,88 @@ +// FETCHED FROM LOTUS: builtin/verifreg/util.go + +package verifreg + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + 
"github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/adt" +) + +// taking this as a function instead of asking the caller to call it helps reduce some of the error +// checking boilerplate. +// +// "go made me do it" +type rootFunc func() (adt.Map, error) + +// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth +func getDataCap(store adt.Store, ver actors.Version, root rootFunc, addr address.Address) (bool, abi.StoragePower, error) { + if addr.Protocol() != address.ID { + return false, big.Zero(), fmt.Errorf("can only look up ID addresses") + } + vh, err := root() + if err != nil { + return false, big.Zero(), fmt.Errorf("loading verifreg: %w", err) + } + + var keyedAddr abi.Keyer + if ver <= 8 { + keyedAddr = abi.AddrKey(addr) + } else { + keyedAddr = abi.IdAddrKey(addr) + } + + var dcap abi.StoragePower + if found, err := vh.Get(keyedAddr, &dcap); err != nil { + return false, big.Zero(), fmt.Errorf("looking up addr: %w", err) + } else if !found { + return false, big.Zero(), nil + } + + return true, dcap, nil +} + +// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth +func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr address.Address, dcap abi.StoragePower) error) error { + vh, err := root() + if err != nil { + return fmt.Errorf("loading verified clients: %w", err) + } + var dcap abi.StoragePower + return vh.ForEach(&dcap, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, dcap) + }) +} + +func getRemoveDataCapProposalID(store adt.Store, ver actors.Version, root rootFunc, verifier address.Address, client address.Address) (bool, uint64, error) { + if verifier.Protocol() != address.ID { + return false, 0, fmt.Errorf("can only look up ID addresses") + } + if client.Protocol() != address.ID { + return false, 0, 
fmt.Errorf("can only look up ID addresses") + } + vh, err := root() + if err != nil { + return false, 0, fmt.Errorf("loading verifreg: %w", err) + } + if vh == nil { + return false, 0, fmt.Errorf("remove data cap proposal hamt not found. you are probably using an incompatible version of actors") + } + + var id verifreg.RmDcProposalID + if found, err := vh.Get(abi.NewAddrPairKey(verifier, client), &id); err != nil { + return false, 0, fmt.Errorf("looking up addr pair: %w", err) + } else if !found { + return false, 0, nil + } + + return true, id.ProposalID, nil +} diff --git a/venus-shared/actors/builtin_actors.go b/venus-shared/actors/builtin_actors.go new file mode 100644 index 0000000000..6d3fad3d0c --- /dev/null +++ b/venus-shared/actors/builtin_actors.go @@ -0,0 +1,300 @@ +package actors + +import ( + "archive/tar" + "context" + "embed" + "fmt" + "io" + "os" + "path" + "sort" + "strconv" + "strings" + + "github.com/DataDog/zstd" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-car" + + "github.com/filecoin-project/venus/venus-shared/actors/adt" + blockstoreutil "github.com/filecoin-project/venus/venus-shared/blockstore" +) + +//go:embed builtin-actors-code/*.tar.zst +var embeddedBuiltinActorReleases embed.FS + +// NOTE: DO NOT change this unless you REALLY know what you're doing. This is consensus critical. 
+var BundleOverrides map[actorstypes.Version]string + +var NetworkBundle = "mainnet" + +func init() { + if BundleOverrides == nil { + BundleOverrides = make(map[actorstypes.Version]string) + } + + for _, av := range Versions { + path := os.Getenv(fmt.Sprintf("VENUS_BUILTIN_ACTORS_V%d_BUNDLE", av)) + if path == "" { + continue + } + BundleOverrides[actorstypes.Version(av)] = path + } + if err := loadManifests(NetworkBundle); err != nil { + panic(err) + } +} + +// NetworkMainnet NetworkType = 0x1 +// Network2k NetworkType = 0x2 +// NetworkCalibnet NetworkType = 0x4 +// NetworkInterop NetworkType = 0x6 +// NetworkForce NetworkType = 0x7 +// NetworkButterfly NetworkType = 0x8 +// Avoid import cycle, we use concrete values +func SetNetworkBundle(networkType int) error { + networkBundle := "" + switch networkType { + // case types.Network2k: + case 0x2: + networkBundle = "devnet" + // types.NetworkForce + case 0x7: + networkBundle = "testing" + // case types.NetworkButterfly: + case 0x8: + networkBundle = "butterflynet" + // case types.NetworkInterop: + case 0x6: + networkBundle = "caterpillarnet" + // case types.NetworkCalibnet: + case 0x4: + networkBundle = "calibrationnet" + default: + networkBundle = "mainnet" + } + + return UseNetworkBundle(networkBundle) +} + +// UseNetworkBundle switches to a different network bundle, by name. +func UseNetworkBundle(netw string) error { + if NetworkBundle == netw { + return nil + } + if err := loadManifests(netw); err != nil { + return err + } + NetworkBundle = netw + return nil +} + +func loadManifests(netw string) error { + overridden := make(map[actorstypes.Version]struct{}) + var newMetadata []*BuiltinActorsMetadata + // First, prefer overrides. 
+ for av, path := range BundleOverrides { + root, actorCids, err := readBundleManifestFromFile(path) + if err != nil { + return err + } + newMetadata = append(newMetadata, &BuiltinActorsMetadata{ + Network: netw, + Version: av, + ManifestCid: root, + Actors: actorCids, + }) + overridden[av] = struct{}{} + } + + // Then load embedded bundle metadata. + for _, meta := range EmbeddedBuiltinActorsMetadata { + if meta.Network != netw { + continue + } + if _, ok := overridden[meta.Version]; ok { + continue + } + newMetadata = append(newMetadata, meta) + } + + ClearManifests() + + for _, meta := range newMetadata { + RegisterManifest(meta.Version, meta.ManifestCid, meta.Actors) + } + + return nil +} + +type BuiltinActorsMetadata struct { // nolint + Network string + Version actorstypes.Version + ManifestCid cid.Cid + Actors map[string]cid.Cid +} + +// ReadEmbeddedBuiltinActorsMetadata reads the metadata from the embedded built-in actor bundles. +// There should be no need to call this method as the result is cached in the +// `EmbeddedBuiltinActorsMetadata` variable on `make gen`. +func ReadEmbeddedBuiltinActorsMetadata() ([]*BuiltinActorsMetadata, error) { + files, err := embeddedBuiltinActorReleases.ReadDir("builtin-actors-code") + if err != nil { + return nil, fmt.Errorf("failed to read embedded bundle directory: %s", err) + } + var bundles []*BuiltinActorsMetadata + for _, dirent := range files { + name := dirent.Name() + b, err := readEmbeddedBuiltinActorsMetadata(name) + if err != nil { + return nil, err + } + bundles = append(bundles, b...) + } + // Sort by network, then by bundle. 
+ sort.Slice(bundles, func(i, j int) bool { + if bundles[i].Network == bundles[j].Network { + return bundles[i].Version < bundles[j].Version + } + return bundles[i].Network < bundles[j].Network + }) + return bundles, nil +} + +func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata, error) { + const ( + archiveExt = ".tar.zst" + bundleExt = ".car" + bundlePrefix = "builtin-actors-" + ) + + if !strings.HasPrefix(bundle, "v") { + return nil, fmt.Errorf("bundle bundle '%q' doesn't start with a 'v'", bundle) + } + if !strings.HasSuffix(bundle, archiveExt) { + return nil, fmt.Errorf("bundle bundle '%q' doesn't end with '%s'", bundle, archiveExt) + } + version, err := strconv.ParseInt(bundle[1:len(bundle)-len(archiveExt)], 10, 0) + if err != nil { + return nil, fmt.Errorf("failed to parse actors version from bundle '%q': %s", bundle, err) + } + fi, err := embeddedBuiltinActorReleases.Open(fmt.Sprintf("builtin-actors-code/%s", bundle)) + if err != nil { + return nil, err + } + defer fi.Close() //nolint + + uncompressed := zstd.NewReader(fi) + defer uncompressed.Close() //nolint + + var bundles []*BuiltinActorsMetadata + + tarReader := tar.NewReader(uncompressed) + for { + header, err := tarReader.Next() + switch err { + case io.EOF: + return bundles, nil + case nil: + default: + return nil, err + } + + // Read the network name from the bundle name. + name := path.Base(header.Name) + if !strings.HasSuffix(name, bundleExt) { + return nil, fmt.Errorf("expected bundle to end with .car: %s", name) + } + if !strings.HasPrefix(name, bundlePrefix) { + return nil, fmt.Errorf("expected bundle to end with .car: %s", name) + } + name = name[len(bundlePrefix) : len(name)-len(bundleExt)] + + // Load the bundle. 
+ root, actorCids, err := readBundleManifest(tarReader) + if err != nil { + return nil, fmt.Errorf("error loading builtin actors bundle: %w", err) + } + bundles = append(bundles, &BuiltinActorsMetadata{ + Network: name, + Version: actorstypes.Version(version), + ManifestCid: root, + Actors: actorCids, + }) + } +} + +func readBundleManifestFromFile(path string) (cid.Cid, map[string]cid.Cid, error) { + fi, err := os.Open(path) + if err != nil { + return cid.Undef, nil, err + } + defer fi.Close() //nolint + + return readBundleManifest(fi) +} + +func readBundleManifest(r io.Reader) (cid.Cid, map[string]cid.Cid, error) { + // Load the bundle. + bs := blockstoreutil.NewMemory() + hdr, err := car.LoadCar(context.Background(), bs, r) + if err != nil { + return cid.Undef, nil, fmt.Errorf("error loading builtin actors bundle: %w", err) + } + + if len(hdr.Roots) != 1 { + return cid.Undef, nil, fmt.Errorf("expected one root when loading actors bundle, got %d", len(hdr.Roots)) + } + root := hdr.Roots[0] + actorCids, err := ReadManifest(context.Background(), adt.WrapStore(context.Background(), cbor.NewCborStore(bs)), root) + if err != nil { + return cid.Undef, nil, err + } + + // Make sure we have all the + for name, c := range actorCids { + if has, err := bs.Has(context.Background(), c); err != nil { + return cid.Undef, nil, fmt.Errorf("got an error when checking that the bundle has the actor %q: %w", name, err) + } else if !has { + return cid.Undef, nil, fmt.Errorf("actor %q missing from bundle", name) + } + } + + return root, actorCids, nil +} + +// GetEmbeddedBuiltinActorsBundle returns the builtin-actors bundle for the given actors version. 
+func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version) ([]byte, bool) { + fi, err := embeddedBuiltinActorReleases.Open(fmt.Sprintf("builtin-actors-code/v%d.tar.zst", version)) + if err != nil { + return nil, false + } + defer fi.Close() //nolint + + uncompressed := zstd.NewReader(fi) + defer uncompressed.Close() //nolint + + tarReader := tar.NewReader(uncompressed) + targetFileName := fmt.Sprintf("builtin-actors-%s.car", NetworkBundle) + for { + header, err := tarReader.Next() + switch err { + case io.EOF: + return nil, false + case nil: + default: + panic(err) + } + if header.Name != targetFileName { + continue + } + + car, err := io.ReadAll(tarReader) + if err != nil { + panic(err) + } + return car, true + } +} diff --git a/venus-shared/actors/builtin_actors_bundle.go b/venus-shared/actors/builtin_actors_bundle.go new file mode 100644 index 0000000000..8fae79273b --- /dev/null +++ b/venus-shared/actors/builtin_actors_bundle.go @@ -0,0 +1,82 @@ +package actors + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/venus/venus-shared/blockstore" + cid "github.com/ipfs/go-cid" + "github.com/ipld/go-car" +) + +func LoadBundleFromFile(ctx context.Context, bs blockstore.Blockstore, path string) (cid.Cid, error) { + f, err := os.Open(path) + if err != nil { + return cid.Undef, fmt.Errorf("error opening bundle %q for builtin-actors: %w", path, err) + } + defer f.Close() //nolint + + return LoadBundle(ctx, bs, f) +} + +func LoadBundle(ctx context.Context, bs blockstore.Blockstore, r io.Reader) (cid.Cid, error) { + hdr, err := car.LoadCar(ctx, bs, r) + if err != nil { + return cid.Undef, fmt.Errorf("error loading builtin actors bundle: %w", err) + } + + if len(hdr.Roots) != 1 { + return cid.Undef, fmt.Errorf("expected one root when loading actors bundle, got %d", len(hdr.Roots)) + } + return hdr.Roots[0], nil +} + +// LoadBundles loads the bundles for the 
specified actor versions into the passed blockstore, if and +// only if the bundle's manifest is not already present in the blockstore. +func LoadBundles(ctx context.Context, bs blockstore.Blockstore, versions ...actorstypes.Version) error { + for _, av := range versions { + // No bundles before version 8. + if av < actorstypes.Version8 { + continue + } + + manifestCid, ok := GetManifest(av) + if !ok { + // All manifests are registered on start, so this must succeed. + return fmt.Errorf("unknown actor version v%d", av) + } + + if haveManifest, err := bs.Has(ctx, manifestCid); err != nil { + return fmt.Errorf("blockstore error when loading manifest %s: %w", manifestCid, err) + } else if haveManifest { + // We already have the manifest, and therefore everything under it. + continue + } + + var ( + root cid.Cid + err error + ) + if path, ok := BundleOverrides[av]; ok { + root, err = LoadBundleFromFile(ctx, bs, path) + } else if embedded, ok := GetEmbeddedBuiltinActorsBundle(av); ok { + root, err = LoadBundle(ctx, bs, bytes.NewReader(embedded)) + } else { + err = fmt.Errorf("bundle for actors version v%d not found", av) + } + + if err != nil { + return err + } + + if root != manifestCid { + return fmt.Errorf("expected manifest for actors version %d does not match actual: %s != %s", av, manifestCid, root) + } + } + + return nil +} diff --git a/venus-shared/actors/builtin_actors_gen.go b/venus-shared/actors/builtin_actors_gen.go new file mode 100644 index 0000000000..f003cc616e --- /dev/null +++ b/venus-shared/actors/builtin_actors_gen.go @@ -0,0 +1,262 @@ +// WARNING: This file has automatically been generated +package actors + +import ( + "github.com/ipfs/go-cid" +) + +var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMetadata{{ + Network: "butterflynet", + Version: 8, + ManifestCid: mustParseCid("bafy2bzacedvaarfyh6q3bk4dyzux46ednlace2ckxp5nbyn6mb3da2apqn6sk"), + Actors: map[string]cid.Cid{ + "account": 
mustParseCid("bafk2bzaceavzeu4gqte7o33vr4htiaapiwpfq6p26tgdkqla2baqhmiqswfso"), + "cron": mustParseCid("bafk2bzacech35onpqxep4yox36k7sr4mj4bch54s3i4b3yyaustrbo5xwfbfs"), + "init": mustParseCid("bafk2bzaceahxin3sf5f6ude5j6we4yeqlg66s5qe4tu7lwp26jcg7yp2ns6hi"), + "multisig": mustParseCid("bafk2bzacectfmzjtniypgl4whm42sws5aupihqgfikwsr7p5yoq3bmqaogldi"), + "paymentchannel": mustParseCid("bafk2bzacecbwu54ce5mjgp2pqxyj6kpn2vlgiu5wv2lj2byjiegxnn3infd5i"), + "reward": mustParseCid("bafk2bzacecskkbhe6c4ud5jt62wg4w7j7shj6xdwoyic74s5y6pgywxxvnw72"), + "storagemarket": mustParseCid("bafk2bzacebycxcwwm7hwhuhpasaskil2kxaqb7tins7azdvvm72rorlciuysi"), + "storageminer": mustParseCid("bafk2bzacecgx3etor5m6lahpmjdwqnryutqe6naiurfhgsju72rd4nqssutbg"), + "storagepower": mustParseCid("bafk2bzaceayvy6xyp5cwtngm457c5hssvihidppgq3o7gy3dlmhgor3yzujoc"), + "system": mustParseCid("bafk2bzacec6xctjxybp7r3kkhase56o6jsaiua7ure5ttu2xfuojt4jhlsoa6"), + "verifiedregistry": mustParseCid("bafk2bzacec2hcqlqcfacylfcrhhliwkisvh4y3adwt47xkf2gdvodwu6ccepc"), + }, +}, { + Network: "butterflynet", + Version: 9, + ManifestCid: mustParseCid("bafy2bzacec35by4erhcdgcsgzp7yb3j57utydlxxfc73m3k5pep67ehvvyv6i"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzaceajsdln7v4chxqoukiw7lxw6aexg5qdsaex2hgelz2sbu24iblhzg"), + "cron": mustParseCid("bafk2bzacecgrwmgnqhybn3l23uvwf2n2vrcfjrprfzgd44uxers2pgr5mhsue"), + "datacap": mustParseCid("bafk2bzacebyier2ceh27acbrq2ccv4efvzotl6qntnlrxdsrik6i4tembz6qw"), + "init": mustParseCid("bafk2bzaceberhto43wnf4pklkd4c7d36kzslngyzyms4op7shxuswv3dtvfxu"), + "multisig": mustParseCid("bafk2bzaceaclpbrhoqdruvsuqqgknvy2k5dywzmjoehk4uarce3uvt3w2rewu"), + "paymentchannel": mustParseCid("bafk2bzacedzp56g5cg73oilloak3kf7u667rdkd5pgnhe2cljmr3o7ykcrzuk"), + "reward": mustParseCid("bafk2bzacebczbwfbbi6mvppbjcozatasjiaohvjjiqcy65ccuuyyw3xiixhk2"), + "storagemarket": mustParseCid("bafk2bzaceawqexy6t2ybzh3jjwhbs7icbg5vqnedbbge4e4r4pfp7spkcadsu"), + "storageminer": 
mustParseCid("bafk2bzacearemd7pn2jj26fdtqd4di27lfhpng3vp5chepm7qnmdzgiqr6wfi"), + "storagepower": mustParseCid("bafk2bzaceddc7fiaxfobfegqaobf5xinjgmhsa5iu4yi6klvc3jmjimcdvgyg"), + "system": mustParseCid("bafk2bzacedylltr57b2n6zpadh4i2c2kis4fzzvhao3kgvfaggrrbqyacew7q"), + "verifiedregistry": mustParseCid("bafk2bzacecjkesz766626ab4svnzpq3jfs26a75vfktlfaku5fjdao2eyiqyq"), + }, +}, { + Network: "calibrationnet", + Version: 8, + ManifestCid: mustParseCid("bafy2bzacedrdn6z3z7xz7lx4wll3tlgktirhllzqxb766dxpaqp3ukxsjfsba"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzacecruossn66xqbeutqx5r4k2kjzgd43frmwd4qkw6haez44ubvvpxo"), + "cron": mustParseCid("bafk2bzaceaxlezmclw5ugldhhtfgvn7yztux45scqik3ez4yhwiqhg5ssib44"), + "init": mustParseCid("bafk2bzaceadyfilb22bcvzvnpzbg2lyg6npmperyq6es2brvzjdh5rmywc4ry"), + "multisig": mustParseCid("bafk2bzacec66wmb4kohuzvuxsulhcgiwju7sqkldwfpmmgw7dbbwgm5l2574q"), + "paymentchannel": mustParseCid("bafk2bzaceblot4pemhfgwb3lceellwrpgxaqkpselzbpqu32maffpopdunlha"), + "reward": mustParseCid("bafk2bzaceayah37uvj7brl5no4gmvmqbmtndh5raywuts7h6tqbgbq2ge7dhu"), + "storagemarket": mustParseCid("bafk2bzacebotg5coqnglzsdrqxtkqk2eq4krxt6zvds3i3vb2yejgxhexl2n6"), + "storageminer": mustParseCid("bafk2bzacea6rabflc7kpwr6y4lzcqsnuahr4zblyq3rhzrrsfceeiw2lufrb4"), + "storagepower": mustParseCid("bafk2bzacecpwr4mynn55bg5hrlns3osvg7sty3rca6zlai3vl52vbbjk7ulfa"), + "system": mustParseCid("bafk2bzaceaqrkllksxv2jsfgjvmuewx5vbzrammw5mdscod6gkdr3ijih2q64"), + "verifiedregistry": mustParseCid("bafk2bzaceaihibfu625lbtzdp3tcftscshrmbgghgrc7kzqhxn4455pycpdkm"), + }, +}, { + Network: "calibrationnet", + Version: 9, + ManifestCid: mustParseCid("bafy2bzacedbedgynklc4dgpyxippkxmba2mgtw7ecntoneclsvvl4klqwuyyy"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzaceavfgpiw6whqigmskk74z4blm22nwjfnzxb4unlqz2e4wg3c5ujpw"), + "cron": mustParseCid("bafk2bzaceb7hxmudhvkizszbmmf2ur2qfnfxfkok3xmbrlifylx6huw4bb3s4"), + "datacap": 
mustParseCid("bafk2bzaceanmwcfjfj65xy275rrfqqgoblnuqirdg6zwhc6qhbfhpphomvceu"), + "init": mustParseCid("bafk2bzaceczqxpivlxifdo5ohr2rx5ny4uyvssm6tkf7am357xm47x472yxu2"), + "multisig": mustParseCid("bafk2bzacec6gmi7ucukr3bk67akaxwngohw3lsg3obvdazhmfhdzflkszk3tg"), + "paymentchannel": mustParseCid("bafk2bzacec4kg3bfjtssvv2b4wizlbdk3pdtrg5aknzgeb3a6rmksgurpynca"), + "reward": mustParseCid("bafk2bzacebpptqhcw6mcwdj576dgpryapdd2zfexxvqzlh3aoc24mabwgmcss"), + "storagemarket": mustParseCid("bafk2bzacebkfcnc27d3agm2bhzzbvvtbqahmvy2b2nf5xyj4aoxehow3bules"), + "storageminer": mustParseCid("bafk2bzacebz4na3nq4gmumghegtkaofrv4nffiihd7sxntrryfneusqkuqodm"), + "storagepower": mustParseCid("bafk2bzaceburxajojmywawjudovqvigmos4dlu4ifdikogumhso2ca2ccaleo"), + "system": mustParseCid("bafk2bzaceaue3nzucbom3tcclgyaahy3iwvbqejsxrohiquakvvsjgbw3shac"), + "verifiedregistry": mustParseCid("bafk2bzacebh7dj6j7yi5vadh7lgqjtq42qi2uq4n6zy2g5vjeathacwn2tscu"), + }, +}, { + Network: "caterpillarnet", + Version: 8, + ManifestCid: mustParseCid("bafy2bzacecsmunz6fzhg53276cixadn6ybhcnzkgbw3la5hf342tfxsdoet26"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzaced6yatl4y2nmqmx2h3btk3np6oelyw2yt57elsb2nfmm33fadzt2g"), + "cron": mustParseCid("bafk2bzacebrujytq4u7g62jbz52gio5k2s6rhruty7nt4eqq7ygitzxuee5zi"), + "init": mustParseCid("bafk2bzacedajw5ptnwfdidv6m4rvd4c2m7dve4lhfbawygl5idkalcxbiiudu"), + "multisig": mustParseCid("bafk2bzaceb3kh5hjh6eebb5236xp7crn2owyyo7irap6sy4ns76uc7om6pxuy"), + "paymentchannel": mustParseCid("bafk2bzacedl5am53e4mtxpzligcycxvmkolfkhfiuavww2dq3ukgaqwowj7vw"), + "reward": mustParseCid("bafk2bzacecbswf242j43cymj3wh7nszawwlofv6z6z4qipb5d32hpxdhxywng"), + "storagemarket": mustParseCid("bafk2bzaceca5ersmg3zxf2cztgktq33bmfjuiqjcjlktwj52xyrpujbdsqvek"), + "storageminer": mustParseCid("bafk2bzacedg2fqaq5udfp3h6cxhywm27dgagxtselfgkyyyunqq362eaxpdm4"), + "storagepower": mustParseCid("bafk2bzaceb3dm2i2q323e6iozo3r6pyded645vvlpf537kga2a3hu5x7abgl4"), + "system": 
mustParseCid("bafk2bzacebu47th3xerlngqavlipb6cfu2utljkxxzgadc3totogto2tmx2jc"), + "verifiedregistry": mustParseCid("bafk2bzaceci3niq3rmbcmepgn27zvlgci6d5t4dvthx3pbmmx3wcu5elova6i"), + }, +}, { + Network: "caterpillarnet", + Version: 9, + ManifestCid: mustParseCid("bafy2bzacedo6tmei6rzjaaddh2yffe5xgr6w4smnadofjhomc3saiv3ubplqe"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzacebb32htqlwcwiotyvtbeehfmluu2ubjnepo57gelelwitudrstwba"), + "cron": mustParseCid("bafk2bzaceatvkww7soy4a6onu6xhe7pzkdzkqw46ywuu56yv3ncl76xpotzqu"), + "datacap": mustParseCid("bafk2bzaced57nk7i7w6qmbosy4gd6atme6yppesdgjllou6nppbti5yw6glcg"), + "init": mustParseCid("bafk2bzacedtoputbtz573ytg4yo5wbbg7fbhrzplux4uknxrb2jarifcuxxou"), + "multisig": mustParseCid("bafk2bzacec22z3xz45mbwgtliwkj7ngc43bervnt557c6dqsg6aesatpd5isy"), + "paymentchannel": mustParseCid("bafk2bzacedym7xnaxr2igfq72rttj2adqyqqfxk3j4qovp2bcwqk5paoe4t7e"), + "reward": mustParseCid("bafk2bzacedemsmbmbtk5toprmm6jivjq3wkxumavc65vpvm6ngspgjfkth7z6"), + "storagemarket": mustParseCid("bafk2bzacecb53mmklf4rbv263dvufqj3nsf7mi6zk2tjlgwmzbr633kw3ds3w"), + "storageminer": mustParseCid("bafk2bzacea3wljpn2ixgnd4lovr6yckiwd652ytcrz5amgj47lg6drjhgggqa"), + "storagepower": mustParseCid("bafk2bzaceakvohgvovpeldb6hjfg7readxo37a5h4qauis4nz6pte7mcll6c2"), + "system": mustParseCid("bafk2bzacecisuqj2ln7ep72xaejvs2lrgh2logc7retxxpd3qvobymwyz7bxo"), + "verifiedregistry": mustParseCid("bafk2bzacebyjosiripwqyf56yhjfs5hg26mch7totsqth4rgpt5j32hqg6ric"), + }, +}, { + Network: "devnet", + Version: 8, + ManifestCid: mustParseCid("bafy2bzacedq7tuibavyqxzkq4uybjj7ly22eu42mjkoehwn5d47xfunmtjm4k"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzacea4tlgnp7m6tlldpz3termlwxlnyq24nwd4zdzv4r6nsjuaktuuzc"), + "cron": mustParseCid("bafk2bzacecgrlf3vg3mufwovddlbgclhpnpp3jftr46stssh3crd3pyljc37w"), + "init": mustParseCid("bafk2bzacedarbnovmucppbjkcwsxopludrj5ttmtm7mzfqsugmxdnqevqso7o"), + "multisig": 
mustParseCid("bafk2bzaced4gcxjwy6garxwfw6y5a2k4jewj4t5nzopjy4qwnimhjtnsgo3ss"), + "paymentchannel": mustParseCid("bafk2bzaceb3isfguytt6cs4xecyoonbhhekmngfbap2msggbwyde7zch3a6w4"), + "reward": mustParseCid("bafk2bzacedn3fkp27ys5dxn4pwqdq2atj2x6cyezxuekdorvjwi7zazirgvgy"), + "storagemarket": mustParseCid("bafk2bzacecw57fpkqesfhi5g3nr4csy4oy7oc42wmwjuis6l7ijniolo4rt2k"), + "storageminer": mustParseCid("bafk2bzacebze3elvppssc6v5457ukszzy6ndrg6xgaojfsqfbbtg3xfwo4rbs"), + "storagepower": mustParseCid("bafk2bzaceb45l6zhgc34n6clz7xnvd7ek55bhw46q25umuje34t6kroix6hh6"), + "system": mustParseCid("bafk2bzacecf7eta2stfd3cnuxzervd33imbvlaqq6b5tsho7pxmhifrybreru"), + "verifiedregistry": mustParseCid("bafk2bzaceaajgtglewgitshgdi2nzrvq7eihjtyqj5yiamesqun2hujl3xev2"), + }, +}, { + Network: "devnet", + Version: 9, + ManifestCid: mustParseCid("bafy2bzacedozk3jh2j4nobqotkbofodq4chbrabioxbfrygpldgoxs3zwgggk"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzaced5llqnqqhypolyuogz3h2wjomugqkrhyhocvly3aoib4c5xiush6"), + "cron": mustParseCid("bafk2bzaceahwdt32ji53mo5yz6imvztz3s3g2ra5uz3jdfa77j7hqcnq6r4l2"), + "datacap": mustParseCid("bafk2bzaceabcxoy5iscdierasorjoj6xzqgnnb5pmrr7prkuibw4yggx3v2d2"), + "init": mustParseCid("bafk2bzaceastwn42kqyztz7uzej7l4lemp5nakqqsfvksry7k75q5ombhprme"), + "multisig": mustParseCid("bafk2bzacebeiygkjupkpfxcrsidci4bvn6afkvx4lsj3ut3ywhsj654pzfgk4"), + "paymentchannel": mustParseCid("bafk2bzacedhsdoo4ww47rm44pizu5qqpho753cizzbbvnd5yz3nm3347su5cy"), + "reward": mustParseCid("bafk2bzacebzqvisqe3iaodtxq7l2lgzwfkxznrnp676ddpllqcpvuae5i33le"), + "storagemarket": mustParseCid("bafk2bzaceduauegz4nniegh667btjhg2anipwpxeb664s4ossq2ifvuqwqlso"), + "storageminer": mustParseCid("bafk2bzacec23wjdmbm5pt6pqsbjb3w6j7vyrolijz2mysvp6clllfgpmhb6ge"), + "storagepower": mustParseCid("bafk2bzacebnyywv46n2ghg62inllwpmnyuwtoz57fn5lpgpf436mahajg4qrg"), + "system": mustParseCid("bafk2bzacebgafb6h2o2g5whrujc2uvsttrussyc5t56rvhrjqkqhzdu4jopwa"), + "verifiedregistry": 
mustParseCid("bafk2bzacednorhcy446agy7ecpmfms2u4aoa3mj2eqomffuoerbik5yavrxyi"), + }, +}, { + Network: "mainnet", + Version: 8, + ManifestCid: mustParseCid("bafy2bzacebogjbpiemi7npzxchgcjjki3tfxon4ims55obfyfleqntteljsea"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzacedudbf7fc5va57t3tmo63snmt3en4iaidv4vo3qlyacbxaa6hlx6y"), + "cron": mustParseCid("bafk2bzacecqb3eolfurehny6yp7tgmapib4ocazo5ilkopjce2c7wc2bcec62"), + "init": mustParseCid("bafk2bzaceaipvjhoxmtofsnv3aj6gj5ida4afdrxa4ewku2hfipdlxpaektlw"), + "multisig": mustParseCid("bafk2bzacebhldfjuy4o5v7amrhp5p2gzv2qo5275jut4adnbyp56fxkwy5fag"), + "paymentchannel": mustParseCid("bafk2bzacebalad3f72wyk7qyilvfjijcwubdspytnyzlrhvn73254gqis44rq"), + "reward": mustParseCid("bafk2bzacecwzzxlgjiavnc3545cqqil3cmq4hgpvfp2crguxy2pl5ybusfsbe"), + "storagemarket": mustParseCid("bafk2bzacediohrxkp2fbsl4yj4jlupjdkgsiwqb4zuezvinhdo2j5hrxco62q"), + "storageminer": mustParseCid("bafk2bzacecgnynvd3tene3bvqoknuspit56canij5bpra6wl4mrq2mxxwriyu"), + "storagepower": mustParseCid("bafk2bzacebjvqva6ppvysn5xpmiqcdfelwbbcxmghx5ww6hr37cgred6dyrpm"), + "system": mustParseCid("bafk2bzacedwq5uppsw7vp55zpj7jdieizirmldceehu6wvombw3ixq2tcq57w"), + "verifiedregistry": mustParseCid("bafk2bzaceb3zbkjz3auizmoln2unmxep7dyfcmsre64vnqfhdyh7rkqfoxlw4"), + }, +}, { + Network: "mainnet", + Version: 9, + ManifestCid: mustParseCid("bafy2bzaceb6j6666h36xnhksu3ww4kxb6e25niayfgkdnifaqi6m6ooc66i6i"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzacect2p7urje3pylrrrjy3tngn6yaih4gtzauuatf2jllk3ksgfiw2y"), + "cron": mustParseCid("bafk2bzacebcec3lffmos3nawm5cvwehssxeqwxixoyyfvejy7viszzsxzyu26"), + "datacap": mustParseCid("bafk2bzacebb6uy2ys7tapekmtj7apnjg7oyj4ia5t7tlkvbmwtxwv74lb2pug"), + "init": mustParseCid("bafk2bzacebtdq4zyuxk2fzbdkva6kc4mx75mkbfmldplfntayhbl5wkqou33i"), + "multisig": mustParseCid("bafk2bzacec4va3nmugyqjqrs3lqyr2ij67jhjia5frvx7omnh2isha6abxzya"), + "paymentchannel": 
mustParseCid("bafk2bzacebhdvjbjcgupklddfavzef4e4gnkt3xk3rbmgfmk7xhecszhfxeds"), + "reward": mustParseCid("bafk2bzacebezgbbmcm2gbcqwisus5fjvpj7hhmu5ubd37phuku3hmkfulxm2o"), + "storagemarket": mustParseCid("bafk2bzacec3j7p6gklk64stax5px3xxd7hdtejaepnd4nw7s2adihde6emkcu"), + "storageminer": mustParseCid("bafk2bzacedyux5hlrildwutvvjdcsvjtwsoc5xnqdjl73ouiukgklekeuyfl4"), + "storagepower": mustParseCid("bafk2bzacedsetphfajgne4qy3vdrpyd6ekcmtfs2zkjut4r34cvnuoqemdrtw"), + "system": mustParseCid("bafk2bzaceagvlo2jtahj7dloshrmwfulrd6e2izqev32qm46eumf754weec6c"), + "verifiedregistry": mustParseCid("bafk2bzacecf3yodlyudzukumehbuabgqljyhjt5ifiv4vetcfohnvsxzynwga"), + }, +}, { + Network: "testing", + Version: 8, + ManifestCid: mustParseCid("bafy2bzacedkjpqx27wgsvfxzuxfvixuxtbpt2y6yo6igcasez6gqiowron776"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzacebmfbtdj5vruje5auacrhhprcjdd6uclhukb7je7t2f6ozfcgqlu2"), + "cron": mustParseCid("bafk2bzacea4gwsbeux7z4yxvpkxpco77iyxijoyqaoikofrxdewunwh3unjem"), + "init": mustParseCid("bafk2bzacecqk6zlwein7tzy7yrrhtj4pzavrkofgpyxvvw5ktr3w4x4ml4lis"), + "multisig": mustParseCid("bafk2bzacea5zp2g6ag5qfuro7zw6kyku2swxs57wjxncaaxbih5iqflqy4ghm"), + "paymentchannel": mustParseCid("bafk2bzaced47dbtbygmfwnyfsp5iihzhhdmnkpuyc5nlnfgc4mkkvlsgvj2do"), + "reward": mustParseCid("bafk2bzacecmcagk32pzdzfg7piobzqhlgla37x3g7jjzyndlz7mqdno2zulfi"), + "storagemarket": mustParseCid("bafk2bzaceballmgd7puoixfwm65f5shi3kzreqdisowtsoufbvduwytydqotw"), + "storageminer": mustParseCid("bafk2bzacebucngwdhxtod2gvv52adtdssafyg43znsoy4omtfkkqe2hbhvxeu"), + "storagepower": mustParseCid("bafk2bzaceakxw5wx3rtqoarrdbzhmxkufg2kx7n34xotzxzacvvbe5iqggmsa"), + "system": mustParseCid("bafk2bzaced6kjkbv7lrb2qwq5we2hqaxc6ztch5p52g27qtjy45zdemsk4b7m"), + "verifiedregistry": mustParseCid("bafk2bzacectzxvtoselhnzsair5nv6k5vokvegnht6z2lfee4p3xexo4kg4m6"), + }, +}, { + Network: "testing", + Version: 9, + ManifestCid: 
mustParseCid("bafy2bzacecnnrmekqw2xvud46g3vo6x26cogh3ydgljqajlxqxzzbuxsjlwjm"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzaceaiebfiuu76zoywzltelio2zuvsavirka27ur6kspn7scvcl5cuiy"), + "cron": mustParseCid("bafk2bzacecla36w3tbwap5jgdtooxsud25mdpc75kgtjs34mi4xhwygph2gki"), + "datacap": mustParseCid("bafk2bzaced5h3ct6i7oqpyimkj3hwdywmux5tslu5vs2ywbzruqmxjtqczygs"), + "init": mustParseCid("bafk2bzaceauxqpspnvui7dryuvfgzoogatbkbahp4ovaih734blwi4bassnlm"), + "multisig": mustParseCid("bafk2bzaceddfagxfpsihjxq7yt4ditv2tcoou5w4hzbsapadlw3v44cxfcqpi"), + "paymentchannel": mustParseCid("bafk2bzaced4nc4ofrbqevpwrt7fnf3beshi5ccrecq3zojt2sxgrkz7ebnbh4"), + "reward": mustParseCid("bafk2bzacedxleepeg4ei3jnayzcfz6shi25rrvoyhr6fxmkdezq4owrazi7rq"), + "storagemarket": mustParseCid("bafk2bzaceakqcjpppg3exrr7dru7jglvno2xyw4hsuebxay4lvrzvmwmv5kvu"), + "storageminer": mustParseCid("bafk2bzacealfvphicwnysmmyyerseppyvydy2reisvbft46vdprp2lnfvlgqc"), + "storagepower": mustParseCid("bafk2bzaceageil5b5mr5uwo6vqs4nnnmpiwe3fkjffzyngcicuu7gruuwapjm"), + "system": mustParseCid("bafk2bzacedo4pu3iwx2gu72hinsstpiokhl5iicnb3rumzffsnhy7zhmnxhyy"), + "verifiedregistry": mustParseCid("bafk2bzaceatmqip2o3ausbntvdhj7yemu6hb3b5yqv6hm42gylbbmz7geocpm"), + }, +}, { + Network: "testing-fake-proofs", + Version: 8, + ManifestCid: mustParseCid("bafy2bzacecd3lb5v6tzjylnhnrhexslssyaozy6hogzgpkhztoe76exbrgrug"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzacebmfbtdj5vruje5auacrhhprcjdd6uclhukb7je7t2f6ozfcgqlu2"), + "cron": mustParseCid("bafk2bzacea4gwsbeux7z4yxvpkxpco77iyxijoyqaoikofrxdewunwh3unjem"), + "init": mustParseCid("bafk2bzacebwkqd6e7gdphfzw2kdmbokdh2bly6fvzgfopxzy7quq4l67gmkks"), + "multisig": mustParseCid("bafk2bzacea5zp2g6ag5qfuro7zw6kyku2swxs57wjxncaaxbih5iqflqy4ghm"), + "paymentchannel": mustParseCid("bafk2bzaced47dbtbygmfwnyfsp5iihzhhdmnkpuyc5nlnfgc4mkkvlsgvj2do"), + "reward": mustParseCid("bafk2bzacecmcagk32pzdzfg7piobzqhlgla37x3g7jjzyndlz7mqdno2zulfi"), + 
"storagemarket": mustParseCid("bafk2bzacecxqgajcaednamgolc6wc3lzbjc6tz5alfrbwqez2y3c372vts6dg"), + "storageminer": mustParseCid("bafk2bzaceaqwxllfycpq6decpsnkqjdeycpysh5acubonjae7u3wciydlkvki"), + "storagepower": mustParseCid("bafk2bzaceddmeolsokbxgcr25cuf2skrobtmmoof3dmqfpcfp33lmw63oikvm"), + "system": mustParseCid("bafk2bzaced6kjkbv7lrb2qwq5we2hqaxc6ztch5p52g27qtjy45zdemsk4b7m"), + "verifiedregistry": mustParseCid("bafk2bzacectzxvtoselhnzsair5nv6k5vokvegnht6z2lfee4p3xexo4kg4m6"), + }, +}, { + Network: "testing-fake-proofs", + Version: 9, + ManifestCid: mustParseCid("bafy2bzacecql2gj2tri4fnbznmldue73qzt6zszvugw4exd64mwb52zrhv7k2"), + Actors: map[string]cid.Cid{ + "account": mustParseCid("bafk2bzaceaiebfiuu76zoywzltelio2zuvsavirka27ur6kspn7scvcl5cuiy"), + "cron": mustParseCid("bafk2bzacecla36w3tbwap5jgdtooxsud25mdpc75kgtjs34mi4xhwygph2gki"), + "datacap": mustParseCid("bafk2bzaced5h3ct6i7oqpyimkj3hwdywmux5tslu5vs2ywbzruqmxjtqczygs"), + "init": mustParseCid("bafk2bzaceauxqpspnvui7dryuvfgzoogatbkbahp4ovaih734blwi4bassnlm"), + "multisig": mustParseCid("bafk2bzaceddfagxfpsihjxq7yt4ditv2tcoou5w4hzbsapadlw3v44cxfcqpi"), + "paymentchannel": mustParseCid("bafk2bzaced4nc4ofrbqevpwrt7fnf3beshi5ccrecq3zojt2sxgrkz7ebnbh4"), + "reward": mustParseCid("bafk2bzacedxleepeg4ei3jnayzcfz6shi25rrvoyhr6fxmkdezq4owrazi7rq"), + "storagemarket": mustParseCid("bafk2bzaceakqcjpppg3exrr7dru7jglvno2xyw4hsuebxay4lvrzvmwmv5kvu"), + "storageminer": mustParseCid("bafk2bzaceab3cjrwwwfemyc5lw73w6tibpgxtx3wuzjhami6tvhcvetygdm7m"), + "storagepower": mustParseCid("bafk2bzaceafemwhsy3e7ueqsrn3f7n53vdqkvfbig3hgbw7eohsefnfvgq7yc"), + "system": mustParseCid("bafk2bzacedo4pu3iwx2gu72hinsstpiokhl5iicnb3rumzffsnhy7zhmnxhyy"), + "verifiedregistry": mustParseCid("bafk2bzaceatmqip2o3ausbntvdhj7yemu6hb3b5yqv6hm42gylbbmz7geocpm"), + }, +}} + +func mustParseCid(c string) cid.Cid { + ret, err := cid.Decode(c) + if err != nil { + panic(err) + } + + return ret +} diff --git a/venus-shared/actors/builtin_actors_test.go 
b/venus-shared/actors/builtin_actors_test.go new file mode 100644 index 0000000000..1e4af87454 --- /dev/null +++ b/venus-shared/actors/builtin_actors_test.go @@ -0,0 +1,32 @@ +package actors + +import ( + "testing" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/stretchr/testify/require" +) + +// Test that the embedded metadata is correct. +func TestEmbeddedMetadata(t *testing.T) { + metadata, err := ReadEmbeddedBuiltinActorsMetadata() + require.NoError(t, err) + + require.Equal(t, metadata, EmbeddedBuiltinActorsMetadata) +} + +// Test that we're registering the manifest correctly. +func TestRegistration(t *testing.T) { + manifestCid, found := GetManifest(actorstypes.Version8) + require.True(t, found) + require.True(t, manifestCid.Defined()) + + for _, key := range GetBuiltinActorsKeys(actorstypes.Version8) { + actorCid, found := GetActorCodeID(actorstypes.Version8, key) + require.True(t, found) + name, version, found := GetActorMetaByCode(actorCid) + require.True(t, found) + require.Equal(t, actorstypes.Version8, version) + require.Equal(t, key, name) + } +} diff --git a/venus-shared/actors/manifest.go b/venus-shared/actors/manifest.go new file mode 100644 index 0000000000..7b44227f72 --- /dev/null +++ b/venus-shared/actors/manifest.go @@ -0,0 +1,165 @@ +package actors + +import ( + "context" + "fmt" + "strings" + "sync" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/manifest" + "github.com/filecoin-project/venus/venus-shared/actors/adt" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" +) + +var manifestCids map[actorstypes.Version]cid.Cid = make(map[actorstypes.Version]cid.Cid) +var manifests map[actorstypes.Version]map[string]cid.Cid = make(map[actorstypes.Version]map[string]cid.Cid) +var actorMeta map[cid.Cid]actorEntry = make(map[cid.Cid]actorEntry) + +const ( + AccountKey = "account" + CronKey = "cron" + InitKey = "init" + MarketKey = 
"storagemarket" + MinerKey = "storageminer" + MultisigKey = "multisig" + PaychKey = "paymentchannel" + PowerKey = "storagepower" + RewardKey = "reward" + SystemKey = "system" + VerifregKey = "verifiedregistry" + DatacapKey = "datacap" +) + +func GetBuiltinActorsKeys(av actorstypes.Version) []string { + keys := []string{ + AccountKey, + CronKey, + InitKey, + MarketKey, + MinerKey, + MultisigKey, + PaychKey, + PowerKey, + RewardKey, + SystemKey, + VerifregKey, + } + if av >= 9 { + keys = append(keys, DatacapKey) + } + return keys +} + +var ( + manifestMx sync.RWMutex +) + +type actorEntry struct { + name string + version actorstypes.Version +} + +// ClearManifests clears all known manifests. This is usually used in tests that need to switch networks. +func ClearManifests() { + manifestMx.Lock() + defer manifestMx.Unlock() + + manifestCids = make(map[actorstypes.Version]cid.Cid) + manifests = make(map[actorstypes.Version]map[string]cid.Cid) + actorMeta = make(map[cid.Cid]actorEntry) +} + +// RegisterManifest registers an actors manifest with lotus. +func RegisterManifest(av actorstypes.Version, manifestCid cid.Cid, entries map[string]cid.Cid) { + manifestMx.Lock() + defer manifestMx.Unlock() + + manifestCids[av] = manifestCid + manifests[av] = entries + + for name, c := range entries { + actorMeta[c] = actorEntry{name: name, version: av} + } +} + +// GetManifest gets a loaded manifest. +func GetManifest(av actorstypes.Version) (cid.Cid, bool) { + manifestMx.RLock() + defer manifestMx.RUnlock() + + c, ok := manifestCids[av] + return c, ok +} + +// ReadManifest reads a manifest from a blockstore. It does not "add" it. 
+func ReadManifest(ctx context.Context, store cbor.IpldStore, mfCid cid.Cid) (map[string]cid.Cid, error) { + adtStore := adt.WrapStore(ctx, store) + + var mf manifest.Manifest + if err := adtStore.Get(ctx, mfCid, &mf); err != nil { + return nil, fmt.Errorf("error reading manifest (cid: %s): %w", mfCid, err) + } + + if err := mf.Load(ctx, adtStore); err != nil { + return nil, fmt.Errorf("error loading manifest (cid: %s): %w", mfCid, err) + } + + var manifestData manifest.ManifestData + if err := store.Get(ctx, mf.Data, &manifestData); err != nil { + return nil, fmt.Errorf("error loading manifest data: %w", err) + } + + metadata := make(map[string]cid.Cid) + for _, entry := range manifestData.Entries { + metadata[entry.Name] = entry.Code + } + + return metadata, nil +} + +// GetActorCodeIDsFromManifest looks up all builtin actor's code CIDs by actor version for versions that have a manifest. +func GetActorCodeIDsFromManifest(av actorstypes.Version) (map[string]cid.Cid, bool) { + manifestMx.RLock() + defer manifestMx.RUnlock() + + cids, ok := manifests[av] + return cids, ok +} + +// Given a Manifest CID, get the manifest from the store and Load data into its entries +func LoadManifest(ctx context.Context, mfCid cid.Cid, adtStore adt.Store) (*manifest.Manifest, error) { + var mf manifest.Manifest + + if err := adtStore.Get(ctx, mfCid, &mf); err != nil { + return nil, fmt.Errorf("error reading manifest: %w", err) + } + + if err := mf.Load(ctx, adtStore); err != nil { + return nil, fmt.Errorf("error loading manifest entries data: %w", err) + } + + return &mf, nil +} + +func GetActorMetaByCode(c cid.Cid) (string, actorstypes.Version, bool) { + manifestMx.RLock() + defer manifestMx.RUnlock() + + entry, ok := actorMeta[c] + if !ok { + return "", -1, false + } + + return entry.name, entry.version, true +} + +func CanonicalName(name string) string { + idx := strings.LastIndex(name, "/") + if idx >= 0 { + return name[idx+1:] + } + + return name +} diff --git 
a/venus-shared/actors/params.go b/venus-shared/actors/params.go new file mode 100644 index 0000000000..c319233a7a --- /dev/null +++ b/venus-shared/actors/params.go @@ -0,0 +1,22 @@ +// FETCHED FROM LOTUS: params.go + +package actors + +import ( + "bytes" + + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/venus/venus-shared/actors/aerrors" +) + +func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) { + buf := new(bytes.Buffer) + if err := i.MarshalCBOR(buf); err != nil { + // TODO: shouldnt this be a fatal error? + return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter") + } + return buf.Bytes(), nil +} diff --git a/venus-shared/actors/policy/policy.go b/venus-shared/actors/policy/policy.go new file mode 100644 index 0000000000..940dd4069a --- /dev/null +++ b/venus-shared/actors/policy/policy.go @@ -0,0 +1,695 @@ +// FETCHED FROM LOTUS: policy/policy.go.template + +package policy + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + + "fmt" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + market3 
"github.com/filecoin-project/specs-actors/v3/actors/builtin/market" + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" + miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" + verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg" + + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + market6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/market" + miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" + verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" + + builtin8 "github.com/filecoin-project/go-state-types/builtin" + market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" + miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner" + verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg" + + builtin9 "github.com/filecoin-project/go-state-types/builtin" + market9 "github.com/filecoin-project/go-state-types/builtin/v9/market" + miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner" + verifreg9 
"github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + + paych9 "github.com/filecoin-project/go-state-types/builtin/v9/paych" +) + +const ( + ChainFinality = miner9.ChainFinality + SealRandomnessLookback = ChainFinality + PaychSettleDelay = paych9.SettleDelay + MaxPreCommitRandomnessLookback = builtin9.EpochsInDay + SealRandomnessLookback +) + +// SetSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. +func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { + + miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner3.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner3.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner3.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner4.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner4.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner4.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner5.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner6.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner7.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + AddSupportedProofTypes(types...) +} + +// AddSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. 
+func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { + for _, t := range types { + if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 { + panic("must specify v1 proof types only") + } + // Set for all miner versions. + + miner0.SupportedProofTypes[t] = struct{}{} + + miner2.PreCommitSealProofTypesV0[t] = struct{}{} + miner2.PreCommitSealProofTypesV7[t] = struct{}{} + miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + + miner3.PreCommitSealProofTypesV0[t] = struct{}{} + miner3.PreCommitSealProofTypesV7[t] = struct{}{} + miner3.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner3.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + + miner4.PreCommitSealProofTypesV0[t] = struct{}{} + miner4.PreCommitSealProofTypesV7[t] = struct{}{} + miner4.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner4.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + + miner5.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err := t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner5.WindowPoStProofTypes[wpp] = struct{}{} + + miner6.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err = t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner6.WindowPoStProofTypes[wpp] = struct{}{} + + miner7.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err = t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner7.WindowPoStProofTypes[wpp] = struct{}{} + + } +} + +// 
SetPreCommitChallengeDelay sets the pre-commit challenge delay across all +// actors versions. Use for testing. +func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { + // Set for all miner versions. + + miner0.PreCommitChallengeDelay = delay + + miner2.PreCommitChallengeDelay = delay + + miner3.PreCommitChallengeDelay = delay + + miner4.PreCommitChallengeDelay = delay + + miner5.PreCommitChallengeDelay = delay + + miner6.PreCommitChallengeDelay = delay + + miner7.PreCommitChallengeDelay = delay + + miner8.PreCommitChallengeDelay = delay + + miner9.PreCommitChallengeDelay = delay + +} + +// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. +func GetPreCommitChallengeDelay() abi.ChainEpoch { + return miner9.PreCommitChallengeDelay +} + +// SetConsensusMinerMinPower sets the minimum power of an individual miner must +// meet for leader election, across all actor versions. This should only be used +// for testing. +func SetConsensusMinerMinPower(p abi.StoragePower) { + + power0.ConsensusMinerMinPower = p + + for _, policy := range builtin2.SealProofPolicies { + policy.ConsensusMinerMinPower = p + } + + for _, policy := range builtin3.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + + for _, policy := range builtin4.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + + for _, policy := range builtin5.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + + for _, policy := range builtin6.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + + for _, policy := range builtin7.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + + for _, policy := range builtin8.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + + for _, policy := range builtin9.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + +} + +// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should +// only be used for testing. 
+func SetMinVerifiedDealSize(size abi.StoragePower) { + + verifreg0.MinVerifiedDealSize = size + + verifreg2.MinVerifiedDealSize = size + + verifreg3.MinVerifiedDealSize = size + + verifreg4.MinVerifiedDealSize = size + + verifreg5.MinVerifiedDealSize = size + + verifreg6.MinVerifiedDealSize = size + + verifreg7.MinVerifiedDealSize = size + + verifreg8.MinVerifiedDealSize = size + + verifreg9.MinVerifiedDealSize = size + +} + +func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { + switch ver { + + case actorstypes.Version0: + + return miner0.MaxSealDuration[t], nil + + case actorstypes.Version2: + + return miner2.MaxProveCommitDuration[t], nil + + case actorstypes.Version3: + + return miner3.MaxProveCommitDuration[t], nil + + case actorstypes.Version4: + + return miner4.MaxProveCommitDuration[t], nil + + case actorstypes.Version5: + + return miner5.MaxProveCommitDuration[t], nil + + case actorstypes.Version6: + + return miner6.MaxProveCommitDuration[t], nil + + case actorstypes.Version7: + + return miner7.MaxProveCommitDuration[t], nil + + case actorstypes.Version8: + + return miner8.MaxProveCommitDuration[t], nil + + case actorstypes.Version9: + + return miner9.MaxProveCommitDuration[t], nil + + default: + return 0, fmt.Errorf("unsupported actors version") + } +} + +// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating +// supply that must be covered by provider collateral in a deal. This should +// only be used for testing. 
+func SetProviderCollateralSupplyTarget(num, denom big.Int) { + + market2.ProviderCollateralSupplyTarget = builtin2.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market3.ProviderCollateralSupplyTarget = builtin3.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market4.ProviderCollateralSupplyTarget = builtin4.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market5.ProviderCollateralSupplyTarget = builtin5.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market6.ProviderCollateralSupplyTarget = builtin6.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market7.ProviderCollateralSupplyTarget = builtin7.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market8.ProviderCollateralSupplyTarget = builtin8.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market9.ProviderCollateralSupplyTarget = builtin9.BigFrac{ + Numerator: num, + Denominator: denom, + } + +} + +func DealProviderCollateralBounds( + size abi.PaddedPieceSize, verified bool, + rawBytePower, qaPower, baselinePower abi.StoragePower, + circulatingFil abi.TokenAmount, nwVer network.Version, +) (min, max abi.TokenAmount, err error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), big.Zero(), err + } + switch v { + + case actorstypes.Version0: + + min, max := market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + return min, max, nil + + case actorstypes.Version2: + + min, max := market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + + case actorstypes.Version3: + + min, max := market3.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + + case actorstypes.Version4: + + min, max := market4.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + 
+ case actorstypes.Version5: + + min, max := market5.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + + case actorstypes.Version6: + + min, max := market6.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + + case actorstypes.Version7: + + min, max := market7.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + + case actorstypes.Version8: + + min, max := market8.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + + case actorstypes.Version9: + + min, max := market9.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + + default: + return big.Zero(), big.Zero(), fmt.Errorf("unsupported actors version") + } +} + +func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { + return market9.DealDurationBounds(pieceSize) +} + +// Sets the challenge window and scales the proving period to match (such that +// there are always 48 challenge windows in a proving period). +func SetWPoStChallengeWindow(period abi.ChainEpoch) { + + miner0.WPoStChallengeWindow = period + miner0.WPoStProvingPeriod = period * abi.ChainEpoch(miner0.WPoStPeriodDeadlines) + + miner2.WPoStChallengeWindow = period + miner2.WPoStProvingPeriod = period * abi.ChainEpoch(miner2.WPoStPeriodDeadlines) + + miner3.WPoStChallengeWindow = period + miner3.WPoStProvingPeriod = period * abi.ChainEpoch(miner3.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. 
+ miner3.WPoStDisputeWindow = period * 30 + + miner4.WPoStChallengeWindow = period + miner4.WPoStProvingPeriod = period * abi.ChainEpoch(miner4.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner4.WPoStDisputeWindow = period * 30 + + miner5.WPoStChallengeWindow = period + miner5.WPoStProvingPeriod = period * abi.ChainEpoch(miner5.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner5.WPoStDisputeWindow = period * 30 + + miner6.WPoStChallengeWindow = period + miner6.WPoStProvingPeriod = period * abi.ChainEpoch(miner6.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner6.WPoStDisputeWindow = period * 30 + + miner7.WPoStChallengeWindow = period + miner7.WPoStProvingPeriod = period * abi.ChainEpoch(miner7.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner7.WPoStDisputeWindow = period * 30 + + miner8.WPoStChallengeWindow = period + miner8.WPoStProvingPeriod = period * abi.ChainEpoch(miner8.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner8.WPoStDisputeWindow = period * 30 + + miner9.WPoStChallengeWindow = period + miner9.WPoStProvingPeriod = period * abi.ChainEpoch(miner9.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. 
+ miner9.WPoStDisputeWindow = period * 30 + +} + +func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { + if nwVer <= network.Version3 { + return 10 + } + + // NOTE: if this ever changes, adjust it in a (*Miner).mineOne() logline as well + return ChainFinality +} + +func GetMaxSectorExpirationExtension() abi.ChainEpoch { + return miner9.MaxSectorExpirationExtension +} + +func GetMinSectorExpiration() abi.ChainEpoch { + return miner9.MinSectorExpiration +} + +func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { + sectorsPerPart, err := builtin9.PoStProofWindowPoStPartitionSectors(p) + if err != nil { + return 0, err + } + maxSectors, err := GetAddressedSectorsMax(nv) + if err != nil { + return 0, err + } + return int(uint64(maxSectors) / sectorsPerPart), nil +} + +func GetDefaultAggregationProof() abi.RegisteredAggregationProof { + return abi.RegisteredAggregationProof_SnarkPackV1 +} + +func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { + if nwVer <= network.Version10 { + return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime + } + + return builtin9.SealProofPoliciesV11[proof].SectorMaxLifetime +} + +func GetAddressedSectorsMax(nwVer network.Version) (int, error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return 0, err + } + switch v { + + case actorstypes.Version0: + return miner0.AddressedSectorsMax, nil + + case actorstypes.Version2: + return miner2.AddressedSectorsMax, nil + + case actorstypes.Version3: + return miner3.AddressedSectorsMax, nil + + case actorstypes.Version4: + return miner4.AddressedSectorsMax, nil + + case actorstypes.Version5: + return miner5.AddressedSectorsMax, nil + + case actorstypes.Version6: + return miner6.AddressedSectorsMax, nil + + case actorstypes.Version7: + return miner7.AddressedSectorsMax, nil + + case actorstypes.Version8: + return miner8.AddressedSectorsMax, nil + + case actorstypes.Version9: 
+ return miner9.AddressedSectorsMax, nil + + default: + return 0, fmt.Errorf("unsupported network version") + } +} + +func GetDeclarationsMax(nwVer network.Version) (int, error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return 0, err + } + switch v { + + case actorstypes.Version0: + + // TODO: Should we instead error here since the concept doesn't exist yet? + return miner0.AddressedPartitionsMax, nil + + case actorstypes.Version2: + + return miner2.DeclarationsMax, nil + + case actorstypes.Version3: + + return miner3.DeclarationsMax, nil + + case actorstypes.Version4: + + return miner4.DeclarationsMax, nil + + case actorstypes.Version5: + + return miner5.DeclarationsMax, nil + + case actorstypes.Version6: + + return miner6.DeclarationsMax, nil + + case actorstypes.Version7: + + return miner7.DeclarationsMax, nil + + case actorstypes.Version8: + + return miner8.DeclarationsMax, nil + + case actorstypes.Version9: + + return miner9.DeclarationsMax, nil + + default: + return 0, fmt.Errorf("unsupported network version") + } +} + +func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), err + } + switch v { + + case actorstypes.Version0: + + return big.Zero(), nil + + case actorstypes.Version2: + + return big.Zero(), nil + + case actorstypes.Version3: + + return big.Zero(), nil + + case actorstypes.Version4: + + return big.Zero(), nil + + case actorstypes.Version5: + + return miner5.AggregateNetworkFee(aggregateSize, baseFee), nil + + case actorstypes.Version6: + + return miner6.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + + case actorstypes.Version7: + + return miner7.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + + case actorstypes.Version8: + + return miner8.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + + case actorstypes.Version9: + 
+ return miner9.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + + default: + return big.Zero(), fmt.Errorf("unsupported network version") + } +} + +func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), err + } + switch v { + + case actorstypes.Version0: + + return big.Zero(), nil + + case actorstypes.Version2: + + return big.Zero(), nil + + case actorstypes.Version3: + + return big.Zero(), nil + + case actorstypes.Version4: + + return big.Zero(), nil + + case actorstypes.Version5: + + return big.Zero(), nil + + case actorstypes.Version6: + + return miner6.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + + case actorstypes.Version7: + + return miner7.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + + case actorstypes.Version8: + + return miner8.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + + case actorstypes.Version9: + + return miner9.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + + default: + return big.Zero(), fmt.Errorf("unsupported network version") + } +} diff --git a/venus-shared/actors/policy/policy.go.template b/venus-shared/actors/policy/policy.go.template new file mode 100644 index 0000000000..47d5498d74 --- /dev/null +++ b/venus-shared/actors/policy/policy.go.template @@ -0,0 +1,329 @@ +// FETCHED FROM LOTUS: policy/policy.go.template + +package policy + +import ( + actorstypes "github.com/filecoin-project/go-state-types/actors" + + "github.com/filecoin-project/go-state-types/big" + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + {{range .versions}} + {{if (ge . 
8)}} + builtin{{.}} "github.com/filecoin-project/go-state-types/builtin" + miner{{.}} "github.com/filecoin-project/go-state-types/builtin{{import .}}miner" + market{{.}} "github.com/filecoin-project/go-state-types/builtin{{import .}}market" + verifreg{{.}} "github.com/filecoin-project/go-state-types/builtin{{import .}}verifreg" + {{else}} + {{if (ge . 2)}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + {{end}} + market{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/market" + miner{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/miner" + verifreg{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/verifreg" + {{if (eq . 0)}} + power{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/power" + {{end}} + {{end}} + {{end}} + + paych{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin{{import .latestVersion}}paych" + +) + +const ( + ChainFinality = miner{{.latestVersion}}.ChainFinality + SealRandomnessLookback = ChainFinality + PaychSettleDelay = paych{{.latestVersion}}.SettleDelay + MaxPreCommitRandomnessLookback = builtin{{.latestVersion}}.EpochsInDay + SealRandomnessLookback +) + +// SetSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. +func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { + {{range .versions}} + {{if (eq . 0)}} + miner{{.}}.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{else if (le . 4)}} + miner{{.}}.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner{{.}}.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{else if (le . 
7)}} + miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{end}} + {{end}} + + AddSupportedProofTypes(types...) +} + +// AddSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. +func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { + for _, t := range types { + if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 { + panic("must specify v1 proof types only") + } + // Set for all miner versions. + + {{range .versions}} + {{if (eq . 0)}} + miner{{.}}.SupportedProofTypes[t] = struct{}{} + {{else if (le . 4)}} + miner{{.}}.PreCommitSealProofTypesV0[t] = struct{}{} + miner{{.}}.PreCommitSealProofTypesV7[t] = struct{}{} + miner{{.}}.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + {{else if (eq . 5)}} + miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err := t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner{{.}}.WindowPoStProofTypes[wpp] = struct{}{} + {{else if (le . 7)}} + miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err = t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner{{.}}.WindowPoStProofTypes[wpp] = struct{}{} + {{end}} + {{end}} + } +} + +// SetPreCommitChallengeDelay sets the pre-commit challenge delay across all +// actors versions. Use for testing. +func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { + // Set for all miner versions. + {{range .versions}} + miner{{.}}.PreCommitChallengeDelay = delay + {{end}} +} + +// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. 
+func GetPreCommitChallengeDelay() abi.ChainEpoch { + return miner{{.latestVersion}}.PreCommitChallengeDelay +} + +// SetConsensusMinerMinPower sets the minimum power of an individual miner must +// meet for leader election, across all actor versions. This should only be used +// for testing. +func SetConsensusMinerMinPower(p abi.StoragePower) { + {{range .versions}} + {{if (eq . 0)}} + power{{.}}.ConsensusMinerMinPower = p + {{else if (eq . 2)}} + for _, policy := range builtin{{.}}.SealProofPolicies { + policy.ConsensusMinerMinPower = p + } + {{else}} + for _, policy := range builtin{{.}}.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + {{end}} + {{end}} +} + +// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should +// only be used for testing. +func SetMinVerifiedDealSize(size abi.StoragePower) { + {{range .versions}} + verifreg{{.}}.MinVerifiedDealSize = size + {{end}} +} + +func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { + switch ver { + {{range .versions}} + case actorstypes.Version{{.}}: + {{if (eq . 0)}} + return miner{{.}}.MaxSealDuration[t], nil + {{else}} + return miner{{.}}.MaxProveCommitDuration[t], nil + {{end}} + {{end}} + default: + return 0, fmt.Errorf("unsupported actors version") + } +} + +// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating +// supply that must be covered by provider collateral in a deal. This should +// only be used for testing. +func SetProviderCollateralSupplyTarget(num, denom big.Int) { +{{range .versions}} + {{if (ge . 
2)}} + market{{.}}.ProviderCollateralSupplyTarget = builtin{{.}}.BigFrac{ + Numerator: num, + Denominator: denom, + } + {{end}} +{{end}} +} + +func DealProviderCollateralBounds( + size abi.PaddedPieceSize, verified bool, + rawBytePower, qaPower, baselinePower abi.StoragePower, + circulatingFil abi.TokenAmount, nwVer network.Version, +) (min, max abi.TokenAmount, err error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), big.Zero(), err + } + switch v { + {{range .versions}} + case actorstypes.Version{{.}}: + {{if (eq . 0)}} + min, max := market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + return min, max, nil + {{else}} + min, max := market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + {{end}} + {{end}} + default: + return big.Zero(), big.Zero(), fmt.Errorf("unsupported actors version") + } +} + +func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { + return market{{.latestVersion}}.DealDurationBounds(pieceSize) +} + +// Sets the challenge window and scales the proving period to match (such that +// there are always 48 challenge windows in a proving period). +func SetWPoStChallengeWindow(period abi.ChainEpoch) { + {{range .versions}} + miner{{.}}.WPoStChallengeWindow = period + miner{{.}}.WPoStProvingPeriod = period * abi.ChainEpoch(miner{{.}}.WPoStPeriodDeadlines) + {{if (ge . 3)}} + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. 
+ miner{{.}}.WPoStDisputeWindow = period * 30 + {{end}} + {{end}} +} + +func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { + if nwVer <= network.Version3 { + return 10 + } + + // NOTE: if this ever changes, adjust it in a (*Miner).mineOne() logline as well + return ChainFinality +} + +func GetMaxSectorExpirationExtension() abi.ChainEpoch { + return miner{{.latestVersion}}.MaxSectorExpirationExtension +} + +func GetMinSectorExpiration() abi.ChainEpoch { + return miner{{.latestVersion}}.MinSectorExpiration +} + +func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { + sectorsPerPart, err := builtin{{.latestVersion}}.PoStProofWindowPoStPartitionSectors(p) + if err != nil { + return 0, err + } + maxSectors, err := GetAddressedSectorsMax(nv) + if err != nil { + return 0, err + } + return int(uint64(maxSectors) / sectorsPerPart), nil +} + +func GetDefaultAggregationProof() abi.RegisteredAggregationProof { + return abi.RegisteredAggregationProof_SnarkPackV1 +} + +func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { + if nwVer <= network.Version10 { + return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime + } + + return builtin{{.latestVersion}}.SealProofPoliciesV11[proof].SectorMaxLifetime +} + +func GetAddressedSectorsMax(nwVer network.Version) (int, error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return 0, err + } + switch v { + {{range .versions}} + case actorstypes.Version{{.}}: + return miner{{.}}.AddressedSectorsMax, nil + {{end}} + default: + return 0, fmt.Errorf("unsupported network version") + } +} + +func GetDeclarationsMax(nwVer network.Version) (int, error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return 0, err + } + switch v { + {{range .versions}} + case actorstypes.Version{{.}}: + {{if (eq . 0)}} + // TODO: Should we instead error here since the concept doesn't exist yet? 
+ return miner{{.}}.AddressedPartitionsMax, nil + {{else}} + return miner{{.}}.DeclarationsMax, nil + {{end}} + {{end}} + default: + return 0, fmt.Errorf("unsupported network version") + } +} + +func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), err + } + switch v { + {{range .versions}} + case actorstypes.Version{{.}}: + {{if (ge . 6)}} + return miner{{.}}.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + {{else if (eq . 5)}} + return miner{{.}}.AggregateNetworkFee(aggregateSize, baseFee), nil + {{else}} + return big.Zero(), nil + {{end}} + {{end}} + default: + return big.Zero(), fmt.Errorf("unsupported network version") + } +} + +func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) { + v, err := actorstypes.VersionForNetwork(nwVer) + if err != nil { + return big.Zero(), err + } + switch v { + {{range .versions}} + case actorstypes.Version{{.}}: + {{if (ge . 
6)}} + return miner{{.}}.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + {{else}} + return big.Zero(), nil + {{end}} + {{end}} + default: + return big.Zero(), fmt.Errorf("unsupported network version") + } +} diff --git a/venus-shared/actors/version.go b/venus-shared/actors/version.go new file mode 100644 index 0000000000..414c962102 --- /dev/null +++ b/venus-shared/actors/version.go @@ -0,0 +1,35 @@ +// FETCHED FROM LOTUS: version.go + +package actors + +type Version int + +/* inline-gen template + +var LatestVersion = {{.latestActorsVersion}} + +var Versions = []int{ {{range .actorVersions}} {{.}}, {{end}} } + +const ({{range .actorVersions}} + Version{{.}} Version = {{.}}{{end}} +) + +/* inline-gen start */ + +var LatestVersion = 9 + +var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9} + +const ( + Version0 Version = 0 + Version2 Version = 2 + Version3 Version = 3 + Version4 Version = 4 + Version5 Version = 5 + Version6 Version = 6 + Version7 Version = 7 + Version8 Version = 8 + Version9 Version = 9 +) + +/* inline-gen end */ diff --git a/venus-shared/api/api_common.go b/venus-shared/api/api_common.go new file mode 100644 index 0000000000..6f57f86bd8 --- /dev/null +++ b/venus-shared/api/api_common.go @@ -0,0 +1,12 @@ +package api + +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type Version interface { + // Version provides information about API provider + Version(ctx context.Context) (types.Version, error) //perm:read +} diff --git a/venus-shared/api/api_info.go b/venus-shared/api/api_info.go new file mode 100644 index 0000000000..7fef41d5ef --- /dev/null +++ b/venus-shared/api/api_info.go @@ -0,0 +1,138 @@ +package api + +import ( + "fmt" + "net/http" + "net/url" + "regexp" + "strings" + + multiaddr "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" +) + +var infoWithToken = regexp.MustCompile("^[a-zA-Z0-9\\-_]+?\\.[a-zA-Z0-9\\-_]+?\\.([a-zA-Z0-9\\-_]+)?:.+$") // 
nolint:gosimple + +func VerString(ver uint32) string { + return fmt.Sprintf("v%d", ver) +} + +type APIInfo struct { // nolint + Addr string + Token []byte +} + +func NewAPIInfo(addr, token string) APIInfo { + return APIInfo{ + Addr: addr, + Token: []byte(token), + } +} + +func ParseApiInfo(s string) APIInfo { + var tok []byte + if infoWithToken.Match([]byte(s)) { + sp := strings.SplitN(s, ":", 2) + tok = []byte(sp[0]) + s = sp[1] + } + + return APIInfo{ + Addr: s, + Token: tok, + } +} + +// DialArgs parser libp2p address to http/ws protocol, the version argument can be override by address in version +func (a APIInfo) DialArgs(version string) (string, error) { + return DialArgs(a.Addr, version) +} + +func (a APIInfo) Host() (string, error) { + ma, err := multiaddr.NewMultiaddr(a.Addr) + if err == nil { + _, addr, err := manet.DialArgs(ma) + if err != nil { + return "", err + } + + return addr, nil + } + + spec, err := url.Parse(a.Addr) + if err != nil { + return "", err + } + return spec.Host, nil +} + +func (a APIInfo) AuthHeader() http.Header { + if len(a.Token) != 0 { + headers := http.Header{} + a.SetAuthHeader(headers) + return headers + } + + return nil +} + +func (a APIInfo) SetAuthHeader(h http.Header) { + if len(a.Token) != 0 { + h.Add(AuthorizationHeader, "Bearer "+string(a.Token)) + } +} + +func DialArgs(addr, version string) (string, error) { + ma, err := multiaddr.NewMultiaddr(addr) + if err == nil { + _, addr, err := manet.DialArgs(ma) + if err != nil { + return "", fmt.Errorf("parser libp2p url fail %w", err) + } + + // override version + val, err := ma.ValueForProtocol(ProtoVersion) + if err == nil { + version = val + } else if err != multiaddr.ErrProtocolNotFound { + return "", err + } + + _, err = ma.ValueForProtocol(multiaddr.P_WSS) + if err == nil { + return "wss://" + addr + "/rpc/" + version, nil + } else if err != multiaddr.ErrProtocolNotFound { + return "", err + } + + _, err = ma.ValueForProtocol(multiaddr.P_HTTPS) + if err == nil { + return 
"https://" + addr + "/rpc/" + version, nil + } else if err != multiaddr.ErrProtocolNotFound { + return "", err + } + + _, err = ma.ValueForProtocol(multiaddr.P_WS) + if err == nil { + return "ws://" + addr + "/rpc/" + version, nil + } else if err != multiaddr.ErrProtocolNotFound { + return "", err + } + + _, err = ma.ValueForProtocol(multiaddr.P_HTTP) + if err == nil { + return "http://" + addr + "/rpc/" + version, nil + } else if err != multiaddr.ErrProtocolNotFound { + return "", err + } + + return "ws://" + addr + "/rpc/" + version, nil + } + + _, err = url.Parse(addr) + if err != nil { + return "", fmt.Errorf("parser address fail %w", err) + } + + return strings.TrimRight(addr, "/") + "/rpc/" + version, nil +} diff --git a/venus-shared/api/api_info_protocol.go b/venus-shared/api/api_info_protocol.go new file mode 100644 index 0000000000..d92bbbe408 --- /dev/null +++ b/venus-shared/api/api_info_protocol.go @@ -0,0 +1,49 @@ +package api + +import ( + "fmt" + "strconv" + "strings" + + "github.com/multiformats/go-multiaddr" +) + +const ProtoVersion = multiaddr.P_WSS + 1 + +func init() { + err := multiaddr.AddProtocol(multiaddr.Protocol{ + Name: "version", + Code: ProtoVersion, + VCode: multiaddr.CodeToVarint(ProtoVersion), + Size: multiaddr.LengthPrefixedVarSize, + Transcoder: multiaddr.NewTranscoderFromFunctions(func(s string) ([]byte, error) { + if !strings.HasPrefix(s, "v") { + return nil, fmt.Errorf("version must start with version prefix v") + } + if len(s) < 2 { + return nil, fmt.Errorf("must give a specify version such as v0") + } + _, err := strconv.Atoi(s[1:]) + if err != nil { + return nil, fmt.Errorf("version part must be number") + } + return []byte(s), nil + }, func(bytes []byte) (string, error) { + vStr := string(bytes) + if !strings.HasPrefix(vStr, "v") { + return "", fmt.Errorf("version must start with version prefix v") + } + if len(vStr) < 2 { + return "", fmt.Errorf("must give a specify version such as v0") + } + _, err := strconv.Atoi(vStr[1:]) 
+ if err != nil { + return "", fmt.Errorf("version part must be number") + } + return vStr, nil + }, nil), + }) + if err != nil { + panic(fmt.Errorf("add `version` protocol into multiaddr: %w", err)) + } +} diff --git a/venus-shared/api/api_info_test.go b/venus-shared/api/api_info_test.go new file mode 100644 index 0000000000..42e6454405 --- /dev/null +++ b/venus-shared/api/api_info_test.go @@ -0,0 +1,90 @@ +package api + +import ( + "fmt" + "testing" +) + +func TestAPIInfo_DialArgs(t *testing.T) { + tests := []struct { + name string + addr string + want string + wantErr bool + }{ + { + "common", + "http://192.168.5.61:3453", + "http://192.168.5.61:3453/rpc/v0", + false, + }, + { + "wss", + "/ip4/192.168.5.61/tcp/3453/wss", + "wss://192.168.5.61:3453/rpc/v0", + false, + }, + { + "ws", + "/ip4/192.168.5.61/tcp/3453/ws", + "ws://192.168.5.61:3453/rpc/v0", + false, + }, + { + "http", + "/ip4/192.168.5.61/tcp/34531/http", + "http://192.168.5.61:34531/rpc/v0", + false, + }, + { + "https", + "/ip4/192.168.5.61/tcp/34531/https", + "https://192.168.5.61:34531/rpc/v0", + false, + }, + { + "default to ws ", + "/ip4/192.168.5.61/tcp/34532", + "ws://192.168.5.61:34532/rpc/v0", + false, + }, + + { + "version", + "/ip4/192.168.5.61/tcp/34532/version/v1", + "ws://192.168.5.61:34532/rpc/v1", + false, + }, + { + "version", + "/ip4/192.168.5.61/tcp/34532/version/v0", + "ws://192.168.5.61:34532/rpc/v0", + false, + }, + { + "error version", + "/ip4/192.168.5.61/tcp/34532/version/1v", + "/ip4/192.168.5.61/tcp/34532/version/1v/rpc/v0", + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.name == "error version" { + fmt.Println() + } + a := APIInfo{ + Addr: tt.addr, + } + + got, err := a.DialArgs("v0") + if (err != nil) != tt.wantErr { + t.Errorf("DialArgs() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("DialArgs() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git 
a/venus-shared/api/chain/v0/blockstore.go b/venus-shared/api/chain/v0/blockstore.go new file mode 100644 index 0000000000..2590ba52ad --- /dev/null +++ b/venus-shared/api/chain/v0/blockstore.go @@ -0,0 +1,18 @@ +package v0 + +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/types" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +type IBlockStore interface { + ChainReadObj(ctx context.Context, cid cid.Cid) ([]byte, error) //perm:read + ChainDeleteObj(ctx context.Context, obj cid.Cid) error //perm:admin + ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) //perm:read + ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (types.ObjStat, error) //perm:read + // ChainPutObj puts a given object into the block store + ChainPutObj(context.Context, blocks.Block) error //perm:admin +} diff --git a/venus-shared/api/chain/v0/chain.go b/venus-shared/api/chain/v0/chain.go new file mode 100644 index 0000000000..5aea862822 --- /dev/null +++ b/venus-shared/api/chain/v0/chain.go @@ -0,0 +1,130 @@ +package v0 + +import ( + "context" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + + lminer "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IChain interface { + IAccount + IActor + IBeacon + IMinerState + IChainInfo +} + +type IAccount interface { + StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) //perm:read +} + +type IActor interface { + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) 
//perm:read + ListActor(ctx context.Context) (map[address.Address]*types.Actor, error) //perm:read +} + +type IBeacon interface { + BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read +} + +type IChainInfo interface { + BlockTime(ctx context.Context) time.Duration //perm:read + ChainList(ctx context.Context, tsKey types.TipSetKey, count int) ([]types.TipSetKey, error) //perm:read + ChainHead(ctx context.Context) (*types.TipSet, error) //perm:read + ChainSetHead(ctx context.Context, key types.TipSetKey) error //perm:admin + ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) //perm:read + ChainGetTipSetByHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) //perm:read + ChainGetRandomnessFromBeacon(ctx context.Context, key types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read + ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read + ChainGetBlock(ctx context.Context, id cid.Cid) (*types.BlockHeader, error) //perm:read + ChainGetMessage(ctx context.Context, msgID cid.Cid) (*types.Message, error) //perm:read + ChainGetBlockMessages(ctx context.Context, bid cid.Cid) (*types.BlockMessages, error) //perm:read + ChainGetMessagesInTipset(ctx context.Context, key types.TipSetKey) ([]types.MessageCID, error) //perm:read + ChainGetReceipts(ctx context.Context, id cid.Cid) ([]types.MessageReceipt, error) //perm:read + ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([]types.MessageCID, error) //perm:read + ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([]*types.MessageReceipt, error) //perm:read + StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read + 
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read + ChainNotify(ctx context.Context) (<-chan []*types.HeadChange, error) //perm:read + GetFullBlock(ctx context.Context, id cid.Cid) (*types.FullBlock, error) //perm:read + GetActor(ctx context.Context, addr address.Address) (*types.Actor, error) //perm:read + GetParentStateRootActor(ctx context.Context, ts *types.TipSet, addr address.Address) (*types.Actor, error) //perm:read + GetEntry(ctx context.Context, height abi.ChainEpoch, round uint64) (*types.BeaconEntry, error) //perm:read + MessageWait(ctx context.Context, msgCid cid.Cid, confidence, lookback abi.ChainEpoch) (*types.ChainMessage, error) //perm:read + ProtocolParameters(ctx context.Context) (*types.ProtocolParams, error) //perm:read + ResolveToKeyAddr(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) //perm:read + StateNetworkName(ctx context.Context) (types.NetworkName, error) //perm:read + StateGetReceipt(ctx context.Context, msg cid.Cid, from types.TipSetKey) (*types.MessageReceipt, error) //perm:read + StateSearchMsg(ctx context.Context, msg cid.Cid) (*types.MsgLookup, error) //perm:read + StateSearchMsgLimited(ctx context.Context, cid cid.Cid, limit abi.ChainEpoch) (*types.MsgLookup, error) //perm:read + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*types.MsgLookup, error) //perm:read + StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*types.MsgLookup, error) //perm:read + StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) //perm:read + VerifyEntry(parent, child *types.BeaconEntry, height abi.ChainEpoch) bool //perm:read + ChainExport(context.Context, abi.ChainEpoch, bool, types.TipSetKey) (<-chan []byte, error) //perm:read + ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*types.HeadChange, error) //perm:read + 
// StateGetNetworkParams return current network params + StateGetNetworkParams(ctx context.Context) (*types.NetworkParams, error) //perm:read + // StateActorCodeCIDs returns the CIDs of all the builtin actors for the given network version + StateActorCodeCIDs(context.Context, network.Version) (map[string]cid.Cid, error) //perm:read + // ChainGetGenesis returns the genesis tipset. + ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read + // StateActorManifestCID returns the CID of the builtin actors manifest for the given network version + StateActorManifestCID(context.Context, network.Version) (cid.Cid, error) //perm:read + StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*types.InvocResult, error) //perm:read +} + +type IMinerState interface { + StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) //perm:read + StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (types.SectorPreCommitOnChainInfo, error) //perm:read + StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*types.SectorOnChainInfo, error) //perm:read + StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorLocation, error) //perm:read + StateMinerSectorSize(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (abi.SectorSize, error) //perm:read + StateMinerInfo(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.MinerInfo, error) //perm:read + StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (address.Address, error) //perm:read + StateMinerRecoveries(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) //perm:read + StateMinerFaults(ctx context.Context, maddr address.Address, tsk types.TipSetKey) 
(bitfield.BitField, error) //perm:read + StateMinerProvingDeadline(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (*dline.Info, error) //perm:read + StateMinerPartitions(ctx context.Context, maddr address.Address, dlIdx uint64, tsk types.TipSetKey) ([]types.Partition, error) //perm:read + StateMinerDeadlines(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]types.Deadline, error) //perm:read + StateMinerSectors(ctx context.Context, maddr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) //perm:read + StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*types.MarketDeal, error) //perm:read + // StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if + // pending allocation is not found. + StateGetAllocationForPendingDeal(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*types.Allocation, error) //perm:read + // StateGetAllocation returns the allocation for a given address and allocation ID. + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationID types.AllocationId, tsk types.TipSetKey) (*types.Allocation, error) //perm:read + // StateGetAllocations returns the all the allocations for a given client. + StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[types.AllocationId]types.Allocation, error) //perm:read + // StateGetClaim returns the claim for a given address and claim ID. + StateGetClaim(ctx context.Context, providerAddr address.Address, claimID types.ClaimId, tsk types.TipSetKey) (*types.Claim, error) //perm:read + // StateGetClaims returns the all the claims for a given provider. 
+ StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[types.ClaimId]types.Claim, error) //perm:read + StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) //perm:read + StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) //perm:read + StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (types.CirculatingSupply, error) //perm:read + StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) //perm:read + StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]*types.MarketDeal, error) //perm:read + StateMinerActiveSectors(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) //perm:read + StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) //perm:read + StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) //perm:read + StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) //perm:read + StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.MinerPower, error) //perm:read + StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (big.Int, error) //perm:read + StateSectorExpiration(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorExpiration, error) //perm:read + StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MinerSectors, error) //perm:read + StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MarketBalance, error) //perm:read + StateDealProviderCollateralBounds(ctx context.Context, size 
abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (types.DealCollateralBounds, error) //perm:read + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read +} diff --git a/venus-shared/api/chain/v0/client_gen.go b/venus-shared/api/chain/v0/client_gen.go new file mode 100644 index 0000000000..f70af75488 --- /dev/null +++ b/venus-shared/api/chain/v0/client_gen.go @@ -0,0 +1,54 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package v0 + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = 0 +const APINamespace = "v0.FullNode" +const MethodNamespace = "Filecoin" + +// NewFullNodeRPC creates a new httpparse jsonrpc remotecli. +func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (FullNode, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res FullNodeStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} + +// DialFullNodeRPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. 
+func DialFullNodeRPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) (FullNode, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res FullNodeStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} diff --git a/venus-shared/api/chain/v0/common.go b/venus-shared/api/chain/v0/common.go new file mode 100644 index 0000000000..76f49f70ec --- /dev/null +++ b/venus-shared/api/chain/v0/common.go @@ -0,0 +1,14 @@ +package v0 + +import ( + "context" + "time" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +type ICommon interface { + api.Version + // StartTime returns node start time + StartTime(context.Context) (time.Time, error) //perm:read +} diff --git a/venus-shared/api/chain/v0/fullnode.go b/venus-shared/api/chain/v0/fullnode.go new file mode 100644 index 0000000000..bc6e1867fb --- /dev/null +++ b/venus-shared/api/chain/v0/fullnode.go @@ -0,0 +1,15 @@ +package v0 + +type FullNode interface { + IBlockStore + IChain + IMarket + IMining + IMessagePool + IMultiSig + INetwork + IPaychan + ISyncer + IWallet + ICommon +} diff --git a/venus-shared/api/chain/v0/market.go b/venus-shared/api/chain/v0/market.go new file mode 100644 index 0000000000..cc83965a0f --- /dev/null +++ b/venus-shared/api/chain/v0/market.go @@ -0,0 +1,11 @@ +package v0 + +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IMarket interface { + StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]types.MarketBalance, error) //perm:read +} diff 
--git a/venus-shared/api/chain/v0/method.md b/venus-shared/api/chain/v0/method.md new file mode 100644 index 0000000000..6a7410562c --- /dev/null +++ b/venus-shared/api/chain/v0/method.md @@ -0,0 +1,5863 @@ +# Groups + +* [Account](#account) + * [StateAccountKey](#stateaccountkey) +* [Actor](#actor) + * [ListActor](#listactor) + * [StateGetActor](#stategetactor) +* [Beacon](#beacon) + * [BeaconGetEntry](#beacongetentry) +* [BlockStore](#blockstore) + * [ChainDeleteObj](#chaindeleteobj) + * [ChainHasObj](#chainhasobj) + * [ChainPutObj](#chainputobj) + * [ChainReadObj](#chainreadobj) + * [ChainStatObj](#chainstatobj) +* [ChainInfo](#chaininfo) + * [BlockTime](#blocktime) + * [ChainExport](#chainexport) + * [ChainGetBlock](#chaingetblock) + * [ChainGetBlockMessages](#chaingetblockmessages) + * [ChainGetGenesis](#chaingetgenesis) + * [ChainGetMessage](#chaingetmessage) + * [ChainGetMessagesInTipset](#chaingetmessagesintipset) + * [ChainGetParentMessages](#chaingetparentmessages) + * [ChainGetParentReceipts](#chaingetparentreceipts) + * [ChainGetPath](#chaingetpath) + * [ChainGetRandomnessFromBeacon](#chaingetrandomnessfrombeacon) + * [ChainGetRandomnessFromTickets](#chaingetrandomnessfromtickets) + * [ChainGetReceipts](#chaingetreceipts) + * [ChainGetTipSet](#chaingettipset) + * [ChainGetTipSetByHeight](#chaingettipsetbyheight) + * [ChainHead](#chainhead) + * [ChainList](#chainlist) + * [ChainNotify](#chainnotify) + * [ChainSetHead](#chainsethead) + * [GetActor](#getactor) + * [GetEntry](#getentry) + * [GetFullBlock](#getfullblock) + * [GetParentStateRootActor](#getparentstaterootactor) + * [MessageWait](#messagewait) + * [ProtocolParameters](#protocolparameters) + * [ResolveToKeyAddr](#resolvetokeyaddr) + * [StateActorCodeCIDs](#stateactorcodecids) + * [StateActorManifestCID](#stateactormanifestcid) + * [StateCall](#statecall) + * [StateGetNetworkParams](#stategetnetworkparams) + * [StateGetReceipt](#stategetreceipt) + * [StateNetworkName](#statenetworkname) + * 
[StateNetworkVersion](#statenetworkversion) + * [StateSearchMsg](#statesearchmsg) + * [StateSearchMsgLimited](#statesearchmsglimited) + * [StateVerifiedRegistryRootKey](#stateverifiedregistryrootkey) + * [StateVerifierStatus](#stateverifierstatus) + * [StateWaitMsg](#statewaitmsg) + * [StateWaitMsgLimited](#statewaitmsglimited) + * [VerifyEntry](#verifyentry) +* [Common](#common) + * [StartTime](#starttime) + * [Version](#version) +* [Market](#market) + * [StateMarketParticipants](#statemarketparticipants) +* [MessagePool](#messagepool) + * [GasBatchEstimateMessageGas](#gasbatchestimatemessagegas) + * [GasEstimateFeeCap](#gasestimatefeecap) + * [GasEstimateGasLimit](#gasestimategaslimit) + * [GasEstimateGasPremium](#gasestimategaspremium) + * [GasEstimateMessageGas](#gasestimatemessagegas) + * [MpoolBatchPush](#mpoolbatchpush) + * [MpoolBatchPushMessage](#mpoolbatchpushmessage) + * [MpoolBatchPushUntrusted](#mpoolbatchpushuntrusted) + * [MpoolClear](#mpoolclear) + * [MpoolDeleteByAdress](#mpooldeletebyadress) + * [MpoolGetConfig](#mpoolgetconfig) + * [MpoolGetNonce](#mpoolgetnonce) + * [MpoolPending](#mpoolpending) + * [MpoolPublishByAddr](#mpoolpublishbyaddr) + * [MpoolPublishMessage](#mpoolpublishmessage) + * [MpoolPush](#mpoolpush) + * [MpoolPushMessage](#mpoolpushmessage) + * [MpoolPushUntrusted](#mpoolpushuntrusted) + * [MpoolSelect](#mpoolselect) + * [MpoolSelects](#mpoolselects) + * [MpoolSetConfig](#mpoolsetconfig) + * [MpoolSub](#mpoolsub) +* [MinerState](#minerstate) + * [StateCirculatingSupply](#statecirculatingsupply) + * [StateDealProviderCollateralBounds](#statedealprovidercollateralbounds) + * [StateGetAllocation](#stategetallocation) + * [StateGetAllocationForPendingDeal](#stategetallocationforpendingdeal) + * [StateGetAllocations](#stategetallocations) + * [StateGetClaim](#stategetclaim) + * [StateGetClaims](#stategetclaims) + * [StateListActors](#statelistactors) + * [StateListMiners](#statelistminers) + * [StateLookupID](#statelookupid) + * 
[StateMarketBalance](#statemarketbalance) + * [StateMarketDeals](#statemarketdeals) + * [StateMarketStorageDeal](#statemarketstoragedeal) + * [StateMinerActiveSectors](#statemineractivesectors) + * [StateMinerAvailableBalance](#statemineravailablebalance) + * [StateMinerDeadlines](#stateminerdeadlines) + * [StateMinerFaults](#stateminerfaults) + * [StateMinerInfo](#stateminerinfo) + * [StateMinerInitialPledgeCollateral](#stateminerinitialpledgecollateral) + * [StateMinerPartitions](#stateminerpartitions) + * [StateMinerPower](#stateminerpower) + * [StateMinerPreCommitDepositForPower](#stateminerprecommitdepositforpower) + * [StateMinerProvingDeadline](#stateminerprovingdeadline) + * [StateMinerRecoveries](#stateminerrecoveries) + * [StateMinerSectorAllocated](#stateminersectorallocated) + * [StateMinerSectorCount](#stateminersectorcount) + * [StateMinerSectorSize](#stateminersectorsize) + * [StateMinerSectors](#stateminersectors) + * [StateMinerWorkerAddress](#stateminerworkeraddress) + * [StateSectorExpiration](#statesectorexpiration) + * [StateSectorGetInfo](#statesectorgetinfo) + * [StateSectorPartition](#statesectorpartition) + * [StateSectorPreCommitInfo](#statesectorprecommitinfo) + * [StateVMCirculatingSupplyInternal](#statevmcirculatingsupplyinternal) + * [StateVerifiedClientStatus](#stateverifiedclientstatus) +* [Mining](#mining) + * [MinerCreateBlock](#minercreateblock) + * [MinerGetBaseInfo](#minergetbaseinfo) +* [MultiSig](#multisig) + * [MsigAddApprove](#msigaddapprove) + * [MsigAddCancel](#msigaddcancel) + * [MsigAddPropose](#msigaddpropose) + * [MsigApprove](#msigapprove) + * [MsigApproveTxnHash](#msigapprovetxnhash) + * [MsigCancel](#msigcancel) + * [MsigCancelTxnHash](#msigcanceltxnhash) + * [MsigCreate](#msigcreate) + * [MsigGetVested](#msiggetvested) + * [MsigPropose](#msigpropose) + * [MsigRemoveSigner](#msigremovesigner) + * [MsigSwapApprove](#msigswapapprove) + * [MsigSwapCancel](#msigswapcancel) + * [MsigSwapPropose](#msigswappropose) +* 
[Network](#network) + * [ID](#id) + * [NetAddrsListen](#netaddrslisten) + * [NetAgentVersion](#netagentversion) + * [NetAutoNatStatus](#netautonatstatus) + * [NetBandwidthStats](#netbandwidthstats) + * [NetBandwidthStatsByPeer](#netbandwidthstatsbypeer) + * [NetBandwidthStatsByProtocol](#netbandwidthstatsbyprotocol) + * [NetConnect](#netconnect) + * [NetConnectedness](#netconnectedness) + * [NetDisconnect](#netdisconnect) + * [NetFindPeer](#netfindpeer) + * [NetFindProvidersAsync](#netfindprovidersasync) + * [NetGetClosestPeers](#netgetclosestpeers) + * [NetPeerInfo](#netpeerinfo) + * [NetPeers](#netpeers) + * [NetPing](#netping) + * [NetProtectAdd](#netprotectadd) + * [NetProtectList](#netprotectlist) + * [NetProtectRemove](#netprotectremove) + * [NetPubsubScores](#netpubsubscores) +* [Paychan](#paychan) + * [PaychAllocateLane](#paychallocatelane) + * [PaychAvailableFunds](#paychavailablefunds) + * [PaychAvailableFundsByFromTo](#paychavailablefundsbyfromto) + * [PaychCollect](#paychcollect) + * [PaychGet](#paychget) + * [PaychGetWaitReady](#paychgetwaitready) + * [PaychList](#paychlist) + * [PaychNewPayment](#paychnewpayment) + * [PaychSettle](#paychsettle) + * [PaychStatus](#paychstatus) + * [PaychVoucherAdd](#paychvoucheradd) + * [PaychVoucherCheckSpendable](#paychvouchercheckspendable) + * [PaychVoucherCheckValid](#paychvouchercheckvalid) + * [PaychVoucherCreate](#paychvouchercreate) + * [PaychVoucherList](#paychvoucherlist) + * [PaychVoucherSubmit](#paychvouchersubmit) +* [Syncer](#syncer) + * [ChainSyncHandleNewTipSet](#chainsynchandlenewtipset) + * [ChainTipSetWeight](#chaintipsetweight) + * [Concurrent](#concurrent) + * [SetConcurrent](#setconcurrent) + * [SyncState](#syncstate) + * [SyncSubmitBlock](#syncsubmitblock) + * [SyncerTracker](#syncertracker) +* [Wallet](#wallet) + * [HasPassword](#haspassword) + * [LockWallet](#lockwallet) + * [SetPassword](#setpassword) + * [UnLockWallet](#unlockwallet) + * [WalletAddresses](#walletaddresses) + * 
[WalletBalance](#walletbalance) + * [WalletDefaultAddress](#walletdefaultaddress) + * [WalletDelete](#walletdelete) + * [WalletExport](#walletexport) + * [WalletHas](#wallethas) + * [WalletImport](#walletimport) + * [WalletNewAddress](#walletnewaddress) + * [WalletSetDefault](#walletsetdefault) + * [WalletSign](#walletsign) + * [WalletSignMessage](#walletsignmessage) + * [WalletState](#walletstate) + +## Account + +### StateAccountKey + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +## Actor + +### ListActor + + +Perms: read + +Inputs: `[]` + +Response: `{}` + +### StateGetActor + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" +} +``` + +## Beacon + +### BeaconGetEntry + + +Perms: read + +Inputs: +```json +[ + 10101 +] +``` + +Response: +```json +{ + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +## BlockStore + +### ChainDeleteObj + + +Perms: admin + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `{}` + +### ChainHasObj + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `true` + +### ChainPutObj +ChainPutObj puts a given object into the block store + + +Perms: admin + +Inputs: +```json +[ + {} +] +``` + +Response: `{}` + +### ChainReadObj + + +Perms: read + +Inputs: +```json +[ + { + 
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### ChainStatObj + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Size": 42, + "Links": 42 +} +``` + +## ChainInfo + +### BlockTime + + +Perms: read + +Inputs: `[]` + +Response: `60000000000` + +### ChainExport + + +Perms: read + +Inputs: +```json +[ + 10101, + true, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### ChainGetBlock + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" +} +``` + +### ChainGetBlockMessages + + +Perms: read + +Inputs: +```json +[ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "BlsMessages": [ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + ], + "SecpkMessages": [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "Cids": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] +} +``` + +### ChainGetGenesis +ChainGetGenesis returns the genesis tipset. 
+ + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainGetMessage + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" +} +``` + +### ChainGetMessagesInTipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +### ChainGetParentMessages + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +### ChainGetParentReceipts + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + 
+Response: +```json +[ + { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +] +``` + +### ChainGetPath + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "Type": "apply", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` + +### ChainGetRandomnessFromBeacon + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 2, + 10101, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `"Bw=="` + +### ChainGetRandomnessFromTickets + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 2, + 10101, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `"Bw=="` + +### ChainGetReceipts + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +[ + { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +] +``` + +### ChainGetTipSet + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainGetTipSetByHeight + + +Perms: read + +Inputs: +```json +[ + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 
{ + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainHead + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainList + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 123 +] +``` + +Response: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +### ChainNotify + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "Type": "apply", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` + +### ChainSetHead + + +Perms: admin + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### GetActor + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" +} +``` + +### GetEntry + + +Perms: read + +Inputs: +```json +[ + 10101, + 42 +] +``` + +Response: +```json +{ + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +### GetFullBlock + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Header": { + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 
42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "BLSMessages": [ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + ], + "SECPMessages": [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] +} +``` + +### GetParentStateRootActor + + +Perms: read + +Inputs: +```json +[ + { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "f01234" +] +``` + +Response: +```json +{ + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" +} +``` + +### MessageWait + + +Perms: read + 
+Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 10101, + 10101 +] +``` + +Response: +```json +{ + "TS": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Block": { + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +} +``` + +### ProtocolParameters + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Network": "string value", + "BlockTime": 60000000000, + "SupportedSectors": [ + { + "Size": 34359738368, + "MaxPieceSize": 1024 + } + ] +} +``` + +### ResolveToKeyAddr + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "Cids": null, + "Blocks": null, + "Height": 0 + } +] +``` + +Response: `"f01234"` + +### StateActorCodeCIDs +StateActorCodeCIDs returns the 
CIDs of all the builtin actors for the given network version + + +Perms: read + +Inputs: +```json +[ + 17 +] +``` + +Response: `{}` + +### StateActorManifestCID +StateActorManifestCID returns the CID of the builtin actors manifest for the given network version + + +Perms: read + +Inputs: +```json +[ + 17 +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### StateCall + + +Perms: read + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, + "ExecutionTrace": { + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": 
"Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] + }, + "Error": "string value", + "Duration": 60000000000 +} +``` + +### StateGetNetworkParams +StateGetNetworkParams return current network params + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "NetworkName": "mainnet", + "BlockDelaySecs": 42, + "ConsensusMinerMinPower": "0", + "SupportedProofTypes": [ + 8 + ], + "PreCommitChallengeDelay": 10101, + "ForkUpgradeParams": { + "UpgradeSmokeHeight": 10101, + "UpgradeBreezeHeight": 10101, + "UpgradeIgnitionHeight": 10101, + "UpgradeLiftoffHeight": 10101, + "UpgradeAssemblyHeight": 10101, + "UpgradeRefuelHeight": 10101, + "UpgradeTapeHeight": 10101, + "UpgradeKumquatHeight": 10101, + "BreezeGasTampingDuration": 10101, + "UpgradeCalicoHeight": 10101, + "UpgradePersianHeight": 10101, + "UpgradeOrangeHeight": 10101, + "UpgradeClausHeight": 10101, + "UpgradeTrustHeight": 10101, + 
"UpgradeNorwegianHeight": 10101, + "UpgradeTurboHeight": 10101, + "UpgradeHyperdriveHeight": 10101, + "UpgradeChocolateHeight": 10101, + "UpgradeOhSnapHeight": 10101, + "UpgradeSkyrHeight": 10101, + "UpgradeSharkHeight": 10101 + } +} +``` + +### StateGetReceipt + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 +} +``` + +### StateNetworkName + + +Perms: read + +Inputs: `[]` + +Response: `"mainnet"` + +### StateNetworkVersion + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `17` + +### StateSearchMsg + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### StateSearchMsgLimited + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 10101 +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### StateVerifiedRegistryRootKey + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +### StateVerifierStatus + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateWaitMsg + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 42 +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### StateWaitMsgLimited + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 42, + 10101 +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### VerifyEntry + + +Perms: read + +Inputs: +```json +[ + { + "Round": 42, + "Data": 
"Ynl0ZSBhcnJheQ==" + }, + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + }, + 10101 +] +``` + +Response: `true` + +## Common + +### StartTime +StartTime returns node start time + + +Perms: read + +Inputs: `[]` + +Response: `"0001-01-01T00:00:00Z"` + +### Version +Version provides information about API provider + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131840 +} +``` + +## Market + +### StateMarketParticipants + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "t026363": { + "Escrow": "0", + "Locked": "0" + } +} +``` + +## MessagePool + +### GasBatchEstimateMessageGas + + +Perms: read + +Inputs: +```json +[ + [ + { + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Spec": { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + } + } + ], + 42, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Err": "string value" + } +] +``` + +### GasEstimateFeeCap + + +Perms: read + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": 
"f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### GasEstimateGasLimit + + +Perms: read + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `9` + +### GasEstimateGasPremium + + +Perms: read + +Inputs: +```json +[ + 42, + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### GasEstimateMessageGas + + +Perms: read + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 
42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" +} +``` + +### MpoolBatchPush + + +Perms: write + +Inputs: +```json +[ + [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] +] +``` + +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +### MpoolBatchPushMessage + + +Perms: sign + +Inputs: +```json +[ + [ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + ], + { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + } +] +``` + +Response: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +### MpoolBatchPushUntrusted + + +Perms: write + +Inputs: +```json +[ + [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + 
"GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] +] +``` + +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +### MpoolClear + + +Perms: write + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### MpoolDeleteByAdress + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### MpoolGetConfig + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "PriorityAddrs": [ + "f01234" + ], + "SizeLimitHigh": 123, + "SizeLimitLow": 123, + "ReplaceByFeeRatio": 12.3, + "PruneCooldown": 60000000000, + "GasLimitOverestimation": 12.3 +} +``` + +### MpoolGetNonce + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `42` + +### MpoolPending + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +### MpoolPublishByAddr + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### MpoolPublishMessage + + +Perms: admin + +Inputs: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": 
"f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +Response: `{}` + +### MpoolPush + + +Perms: write + +Inputs: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MpoolPushMessage + + +Perms: sign + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + } +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } +} +``` + +### MpoolPushUntrusted + + +Perms: write + +Inputs: +```json +[ + { + "Message": { + "CID": { + "/": 
"bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MpoolSelect + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 12.3 +] +``` + +Response: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +### MpoolSelects + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + 12.3 + ] +] +``` + +Response: +```json +[ + [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + 
} + } + ] +] +``` + +### MpoolSetConfig + + +Perms: admin + +Inputs: +```json +[ + { + "PriorityAddrs": [ + "f01234" + ], + "SizeLimitHigh": 123, + "SizeLimitLow": 123, + "ReplaceByFeeRatio": 12.3, + "PruneCooldown": 60000000000, + "GasLimitOverestimation": 12.3 + } +] +``` + +Response: `{}` + +### MpoolSub + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Type": 0, + "Message": { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +} +``` + +## MinerState + +### StateCirculatingSupply + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateDealProviderCollateralBounds + + +Perms: read + +Inputs: +```json +[ + 1032, + true, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Min": "0", + "Max": "0" +} +``` + +### StateGetAllocation +StateGetAllocation returns the allocation for a given address and allocation ID. 
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  "f01234",
+  0,
+  [
+    {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    {
+      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+    }
+  ]
+]
+```
+
+Response:
+```json
+{
+  "Client": 1000,
+  "Provider": 1000,
+  "Data": {
+    "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+  },
+  "Size": 1032,
+  "TermMin": 10101,
+  "TermMax": 10101,
+  "Expiration": 10101
+}
+```
+
+### StateGetAllocationForPendingDeal
+StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if
+pending allocation is not found.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  5432,
+  [
+    {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    {
+      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+    }
+  ]
+]
+```
+
+Response:
+```json
+{
+  "Client": 1000,
+  "Provider": 1000,
+  "Data": {
+    "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+  },
+  "Size": 1032,
+  "TermMin": 10101,
+  "TermMax": 10101,
+  "Expiration": 10101
+}
+```
+
+### StateGetAllocations
+StateGetAllocations returns all the allocations for a given client.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  "f01234",
+  [
+    {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    {
+      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+    }
+  ]
+]
+```
+
+Response: `{}`
+
+### StateGetClaim
+StateGetClaim returns the claim for a given address and claim ID.
+ + +Perms: read + +Inputs: +```json +[ + "f01234", + 0, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Provider": 1000, + "Client": 1000, + "Data": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 1032, + "TermMin": 10101, + "TermMax": 10101, + "TermStart": 10101, + "Sector": 9 +} +``` + +### StateGetClaims +StateGetClaims returns the all the claims for a given provider. + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### StateListActors + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + "f01234" +] +``` + +### StateListMiners + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + "f01234" +] +``` + +### StateLookupID + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +### StateMarketBalance + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Escrow": "0", + "Locked": "0" +} +``` + +### 
StateMarketDeals + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "t026363": { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101, + "VerifiedClaim": 0 + } + } +} +``` + +### StateMarketStorageDeal + + +Perms: read + +Inputs: +```json +[ + 5432, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101, + "VerifiedClaim": 0 + } +} +``` + +### StateMinerActiveSectors + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 
10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "ReplacedSectorAge": 10101, + "ReplacedDayReward": "0", + "SectorKeyCID": null, + "SimpleQAPower": true + } +] +``` + +### StateMinerAvailableBalance + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerDeadlines + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "PostSubmissions": [ + 5, + 1 + ], + "DisputableProofCount": 42 + } +] +``` + +### StateMinerFaults + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + 5, + 1 +] +``` + +### StateMinerInfo + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Owner": "f01234", + "Worker": "f01234", + "NewWorker": "f01234", + "ControlAddresses": [ + "f01234" + ], + "WorkerChangeEpoch": 10101, + "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Multiaddrs": [ + "Ynl0ZSBhcnJheQ==" + ], + "WindowPoStProofType": 8, + "SectorSize": 34359738368, + "WindowPoStPartitionSectors": 42, + "ConsensusFaultElapsed": 10101, + "Beneficiary": "f01234", + "BeneficiaryTerm": { + "Quota": "0", + "UsedQuota": "0", + "Expiration": 10101 + }, + "PendingBeneficiaryTerm": { + 
"NewBeneficiary": "f01234", + "NewQuota": "0", + "NewExpiration": 10101, + "ApprovedByBeneficiary": true, + "ApprovedByNominee": true + } +} +``` + +### StateMinerInitialPledgeCollateral + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": [ + 5432 + ], + "Expiration": 10101, + "UnsealedCid": null + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerPartitions + + +Perms: read + +Inputs: +```json +[ + "f01234", + 42, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "AllSectors": [ + 5, + 1 + ], + "FaultySectors": [ + 5, + 1 + ], + "RecoveringSectors": [ + 5, + 1 + ], + "LiveSectors": [ + 5, + 1 + ], + "ActiveSectors": [ + 5, + 1 + ] + } +] +``` + +### StateMinerPower + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MinerPower": { + "RawBytePower": "0", + "QualityAdjPower": "0" + }, + "TotalPower": { + "RawBytePower": "0", + "QualityAdjPower": "0" + }, + "HasMinPower": true +} +``` + +### StateMinerPreCommitDepositForPower + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": [ + 5432 + ], + "Expiration": 10101, + "UnsealedCid": null + }, + [ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerProvingDeadline + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "CurrentEpoch": 10101, + "PeriodStart": 10101, + "Index": 42, + "Open": 10101, + "Close": 10101, + "Challenge": 10101, + "FaultCutoff": 10101, + "WPoStPeriodDeadlines": 42, + "WPoStProvingPeriod": 10101, + "WPoStChallengeWindow": 10101, + "WPoStChallengeLookback": 10101, + "FaultDeclarationCutoff": 10101 +} +``` + +### StateMinerRecoveries + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + 5, + 1 +] +``` + +### StateMinerSectorAllocated + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `true` + +### StateMinerSectorCount + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Live": 42, + "Active": 42, + "Faulty": 42 +} +``` + +### StateMinerSectorSize + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `34359738368` + +### 
StateMinerSectors + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + 0 + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "ReplacedSectorAge": 10101, + "ReplacedDayReward": "0", + "SectorKeyCID": null, + "SimpleQAPower": true + } +] +``` + +### StateMinerWorkerAddress + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +### StateSectorExpiration + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "OnTime": 10101, + "Early": 10101 +} +``` + +### StateSectorGetInfo + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + 
"ReplacedSectorAge": 10101, + "ReplacedDayReward": "0", + "SectorKeyCID": null, + "SimpleQAPower": true +} +``` + +### StateSectorPartition + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Deadline": 42, + "Partition": 42 +} +``` + +### StateSectorPreCommitInfo + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Info": { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": [ + 5432 + ], + "Expiration": 10101, + "UnsealedCid": null + }, + "PreCommitDeposit": "0", + "PreCommitEpoch": 10101 +} +``` + +### StateVMCirculatingSupplyInternal + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "FilVested": "0", + "FilMined": "0", + "FilBurnt": "0", + "FilLocked": "0", + "FilCirculating": "0", + "FilReserveDisbursed": "0" +} +``` + +### StateVerifiedClientStatus + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +## Mining + +### MinerCreateBlock + + +Perms: write + +Inputs: +```json +[ + { + "Miner": "f01234", + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": 
"bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Ticket": { + "VRFProof": "Bw==" + }, + "Eproof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconValues": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "Messages": [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "Epoch": 10101, + "Timestamp": 42, + "WinningPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ] + } +] +``` + +Response: +```json +{ + "Header": { + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] +} +``` + +### MinerGetBaseInfo + + +Perms: read + +Inputs: +```json +[ + "f01234", + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MinerPower": "0", + "NetworkPower": "0", + "Sectors": [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], + "WorkerKey": "f01234", + "SectorSize": 34359738368, + "PrevBeaconEntry": { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "EligibleForMining": true +} +``` + +## MultiSig + +### MsigAddApprove + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + "f01234", + true +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigAddCancel + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + true +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigAddPropose + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "f01234", + true +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigApprove + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigApproveTxnHash + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234", + "f01234", + "0", + "f01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### 
MsigCancel + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigCancelTxnHash +MsigCancel cancels a previously-proposed multisig message +It takes the following params: \, \, \, \, +\, \, \ + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234", + "0", + "f01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigCreate +MsigCreate creates a multisig wallet +It takes the following params: \, \, \ +\, \, \ + + +Perms: sign + +Inputs: +```json +[ + 42, + [ + "f01234" + ], + 10101, + "0", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigGetVested + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### MsigPropose + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0", + "f01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigRemoveSigner + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "f01234", + true +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigSwapApprove + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigSwapCancel + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigSwapPropose + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +## Network + +### ID + + +Perms: read + +Inputs: `[]` + +Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` + +### NetAddrsListen + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetAgentVersion + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `"string value"` + +### NetAutoNatStatus + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Reachability": 1, + "PublicAddr": "string value" +} +``` + +### NetBandwidthStats +NetBandwidthStats returns statistics about the nodes total bandwidth +usage and current rate across all peers and protocols. 
+ + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "TotalIn": 9, + "TotalOut": 9, + "RateIn": 12.3, + "RateOut": 12.3 +} +``` + +### NetBandwidthStatsByPeer +NetBandwidthStatsByPeer returns statistics about the nodes bandwidth +usage and current rate per peer + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetBandwidthStatsByProtocol +NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth +usage and current rate per protocol + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "/fil/hello/1.0.0": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetConnect + + +Perms: admin + +Inputs: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` + +Response: `{}` + +### NetConnectedness + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `1` + +### NetDisconnect + + +Perms: admin + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `{}` + +### NetFindPeer + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetFindProvidersAsync + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 123 +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + 
"/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetGetClosestPeers + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +### NetPeerInfo + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Agent": "string value", + "Addrs": [ + "string value" + ], + "Protocols": [ + "string value" + ], + "ConnMgrMeta": { + "FirstSeen": "0001-01-01T00:00:00Z", + "Value": 123, + "Tags": { + "name": 42 + }, + "Conns": { + "name": "2021-03-08T22:52:18Z" + } + } +} +``` + +### NetPeers + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` + +### NetPing + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `60000000000` + +### NetProtectAdd + + +Perms: admin + +Inputs: +```json +[ + [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ] +] +``` + +Response: `{}` + +### NetProtectList + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +### NetProtectRemove + + +Perms: admin + +Inputs: +```json +[ + [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ] +] +``` + +Response: `{}` + +### NetPubsubScores + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Score": { + "Score": 12.3, + "Topics": { + "/blocks": { + "TimeInMesh": 60000000000, + "FirstMessageDeliveries": 122, + "MeshMessageDeliveries": 1234, + "InvalidMessageDeliveries": 3 + } + }, + "AppSpecificScore": 12.3, + 
"IPColocationFactor": 12.3, + "BehaviourPenalty": 12.3 + } + } +] +``` + +## Paychan + +### PaychAllocateLane +PaychAllocateLane Allocate late creates a lane within a payment channel so that calls to +CreatePaymentVoucher will automatically make vouchers only for the difference in total + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `42` + +### PaychAvailableFunds +PaychAvailableFunds get the status of an outbound payment channel +@pch: payment channel address + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Channel": "\u003cempty\u003e", + "From": "f01234", + "To": "f01234", + "ConfirmedAmt": "0", + "PendingAmt": "0", + "NonReservedAmt": "0", + "PendingAvailableAmt": "0", + "PendingWaitSentinel": null, + "QueuedAmt": "0", + "VoucherReedeemedAmt": "0" +} +``` + +### PaychAvailableFundsByFromTo +PaychAvailableFundsByFromTo get the status of an outbound payment channel +@from: the payment channel sender +@to: he payment channel recipient + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "Channel": "\u003cempty\u003e", + "From": "f01234", + "To": "f01234", + "ConfirmedAmt": "0", + "PendingAmt": "0", + "NonReservedAmt": "0", + "PendingAvailableAmt": "0", + "PendingWaitSentinel": null, + "QueuedAmt": "0", + "VoucherReedeemedAmt": "0" +} +``` + +### PaychCollect +PaychCollect update payment channel status to collect +Collect sends the value of submitted vouchers to the channel recipient (the provider), +and refunds the remaining channel balance to the channel creator (the client). 
+@pch: payment channel address + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### PaychGet +PaychGet creates a payment channel to a provider with a amount of FIL +@from: the payment channel sender +@to: the payment channel recipient +@amt: the deposits funds in the payment channel + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "Channel": "f01234", + "WaitSentinel": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + +### PaychGetWaitReady +PaychGetWaitReady waits until the create channel / add funds message with the sentinel +@sentinel: given message CID arrives. +@ch: the returned channel address can safely be used against the Manager methods. + + +Perms: sign + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `"f01234"` + +### PaychList +PaychList list the addresses of all channels that have been created + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + "f01234" +] +``` + +### PaychNewPayment +PaychNewPayment aggregate vouchers into a new lane +@from: the payment channel sender +@to: the payment channel recipient +@vouchers: the outstanding (non-redeemed) vouchers + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + [ + { + "Amount": "0", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "MinSettle": 10101, + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] +] +``` + +Response: +```json +{ + "Channel": "f01234", + "WaitSentinel": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Vouchers": [ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + 
"Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] +} +``` + +### PaychSettle +PaychSettle update payment channel status to settle +After a settlement period (currently 12 hours) either party to the payment channel can call collect on chain +@pch: payment channel address + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### PaychStatus +PaychStatus get the payment channel status +@pch: payment channel address + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "ControlAddr": "f01234", + "Direction": 1 +} +``` + +### PaychVoucherAdd +PaychVoucherAdd adds a voucher for an inbound channel. +If the channel is not in the store, fetches the channel from state (and checks that +the channel To address is owned by the wallet). 
+ + +Perms: write + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "0" +] +``` + +Response: `"0"` + +### PaychVoucherCheckSpendable +PaychVoucherCheckSpendable checks if the given voucher is currently spendable +@pch: payment channel address +@sv: voucher + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `true` + +### PaychVoucherCheckValid +PaychVoucherCheckValid checks if the given voucher is valid (is or could become spendable at some point). +If the channel is not in the store, fetches the channel from state (and checks that +the channel To address is owned by the wallet). 
+@pch: payment channel address +@sv: voucher + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +Response: `{}` + +### PaychVoucherCreate +PaychVoucherCreate creates a new signed voucher on the given payment channel +with the given lane and amount. The value passed in is exactly the value +that will be used to create the voucher, so if previous vouchers exist, the +actual additional value of this voucher will only be the difference between +the two. +If there are insufficient funds in the channel to create the voucher, +returns a nil voucher and the shortfall. + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "0", + 42 +] +``` + +Response: +```json +{ + "Voucher": { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Shortfall": "0" +} +``` + +### PaychVoucherList +PaychVoucherList list vouchers in payment channel +@pch: payment channel address + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +[ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + 
"Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +### PaychVoucherSubmit +PaychVoucherSubmit Submit voucher to chain to update payment channel state +@pch: payment channel address +@sv: voucher in payment channel + + +Perms: sign + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +## Syncer + +### ChainSyncHandleNewTipSet + + +Perms: write + +Inputs: +```json +[ + { + "Source": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Sender": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Head": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` + +Response: `{}` + +### ChainTipSetWeight + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### Concurrent + + +Perms: read + +Inputs: `[]` + +Response: `9` + +### SetConcurrent + + +Perms: admin + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +### SyncState + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "ActiveSyncs": [ + { + "WorkerID": 42, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Target": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Stage": 1, + "Height": 10101, + "Start": "0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Message": "string value" + } + ], + 
"VMApplied": 42 +} +``` + +### SyncSubmitBlock + + +Perms: write + +Inputs: +```json +[ + { + "Header": { + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] + } +] +``` + +Response: `{}` + +### SyncerTracker + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "History": [ + { + "State": 1, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Current": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Start": "0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Err": {}, + "Source": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Sender": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Head": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } + ], + "Buckets": [ + { + "State": 1, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Current": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Start": 
"0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Err": {}, + "Source": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Sender": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Head": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } + ] +} +``` + +## Wallet + +### HasPassword + + +Perms: admin + +Inputs: `[]` + +Response: `true` + +### LockWallet + + +Perms: admin + +Inputs: `[]` + +Response: `{}` + +### SetPassword + + +Perms: admin + +Inputs: +```json +[ + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `{}` + +### UnLockWallet + + +Perms: admin + +Inputs: +```json +[ + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `{}` + +### WalletAddresses + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + "f01234" +] +``` + +### WalletBalance + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0"` + +### WalletDefaultAddress + + +Perms: write + +Inputs: `[]` + +Response: `"f01234"` + +### WalletDelete + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### WalletExport + + +Perms: admin + +Inputs: +```json +[ + "f01234", + "string value" +] +``` + +Response: +```json +{ + "Type": "bls", + "PrivateKey": "Ynl0ZSBhcnJheQ==" +} +``` + +### WalletHas + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### WalletImport + + +Perms: admin + +Inputs: +```json +[ + { + "Type": "bls", + "PrivateKey": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: `"f01234"` + +### WalletNewAddress + + +Perms: write + +Inputs: +```json +[ + 7 +] +``` + +Response: `"f01234"` + +### WalletSetDefault + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### WalletSign + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "Ynl0ZSBhcnJheQ==", + { + "Type": "message", + "Extra": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +### WalletSignMessage + + +Perms: sign + +Inputs: +```json +[ + "f01234", + { + 
"CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } +} +``` + +### WalletState + + +Perms: admin + +Inputs: `[]` + +Response: `123` + diff --git a/venus-shared/api/chain/v0/mining.go b/venus-shared/api/chain/v0/mining.go new file mode 100644 index 0000000000..7bdd8caa52 --- /dev/null +++ b/venus-shared/api/chain/v0/mining.go @@ -0,0 +1,15 @@ +package v0 + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IMining interface { + MinerGetBaseInfo(ctx context.Context, maddr address.Address, round abi.ChainEpoch, tsk types.TipSetKey) (*types.MiningBaseInfo, error) //perm:read + MinerCreateBlock(ctx context.Context, bt *types.BlockTemplate) (*types.BlockMsg, error) //perm:write +} diff --git a/venus-shared/api/chain/v0/mock/mock_fullnode.go b/venus-shared/api/chain/v0/mock/mock_fullnode.go new file mode 100644 index 0000000000..6ccc45e475 --- /dev/null +++ b/venus-shared/api/chain/v0/mock/mock_fullnode.go @@ -0,0 +1,2786 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/venus/venus-shared/api/chain/v0 (interfaces: FullNode) + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + time "time" + + address "github.com/filecoin-project/go-address" + bitfield "github.com/filecoin-project/go-bitfield" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" + miner "github.com/filecoin-project/go-state-types/builtin/v9/miner" + verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + crypto "github.com/filecoin-project/go-state-types/crypto" + dline "github.com/filecoin-project/go-state-types/dline" + network "github.com/filecoin-project/go-state-types/network" + miner0 "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + internal "github.com/filecoin-project/venus/venus-shared/internal" + types "github.com/filecoin-project/venus/venus-shared/types" + gomock "github.com/golang/mock/gomock" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + metrics "github.com/libp2p/go-libp2p/core/metrics" + network0 "github.com/libp2p/go-libp2p/core/network" + peer "github.com/libp2p/go-libp2p/core/peer" + protocol "github.com/libp2p/go-libp2p/core/protocol" +) + +// MockFullNode is a mock of FullNode interface. +type MockFullNode struct { + ctrl *gomock.Controller + recorder *MockFullNodeMockRecorder +} + +// MockFullNodeMockRecorder is the mock recorder for MockFullNode. +type MockFullNodeMockRecorder struct { + mock *MockFullNode +} + +// NewMockFullNode creates a new mock instance. +func NewMockFullNode(ctrl *gomock.Controller) *MockFullNode { + mock := &MockFullNode{ctrl: ctrl} + mock.recorder = &MockFullNodeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder { + return m.recorder +} + +// BeaconGetEntry mocks base method. 
+func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeaconGetEntry", arg0, arg1) + ret0, _ := ret[0].(*types.BeaconEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeaconGetEntry indicates an expected call of BeaconGetEntry. +func (mr *MockFullNodeMockRecorder) BeaconGetEntry(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeaconGetEntry", reflect.TypeOf((*MockFullNode)(nil).BeaconGetEntry), arg0, arg1) +} + +// BlockTime mocks base method. +func (m *MockFullNode) BlockTime(arg0 context.Context) time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockTime", arg0) + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// BlockTime indicates an expected call of BlockTime. +func (mr *MockFullNodeMockRecorder) BlockTime(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockTime", reflect.TypeOf((*MockFullNode)(nil).BlockTime), arg0) +} + +// ChainDeleteObj mocks base method. +func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainDeleteObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainDeleteObj indicates an expected call of ChainDeleteObj. +func (mr *MockFullNodeMockRecorder) ChainDeleteObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDeleteObj", reflect.TypeOf((*MockFullNode)(nil).ChainDeleteObj), arg0, arg1) +} + +// ChainExport mocks base method. 
+func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool, arg3 types.TipSetKey) (<-chan []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainExport", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(<-chan []byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainExport indicates an expected call of ChainExport. +func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3) +} + +// ChainGetBlock mocks base method. +func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlock indicates an expected call of ChainGetBlock. +func (mr *MockFullNodeMockRecorder) ChainGetBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlock", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlock), arg0, arg1) +} + +// ChainGetBlockMessages mocks base method. +func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) (*types.BlockMessages, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetBlockMessages", arg0, arg1) + ret0, _ := ret[0].(*types.BlockMessages) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages. 
+func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1) +} + +// ChainGetGenesis mocks base method. +func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetGenesis", arg0) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetGenesis indicates an expected call of ChainGetGenesis. +func (mr *MockFullNodeMockRecorder) ChainGetGenesis(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetGenesis", reflect.TypeOf((*MockFullNode)(nil).ChainGetGenesis), arg0) +} + +// ChainGetMessage mocks base method. +func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*internal.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessage", arg0, arg1) + ret0, _ := ret[0].(*internal.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessage indicates an expected call of ChainGetMessage. +func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) +} + +// ChainGetMessagesInTipset mocks base method. +func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]types.MessageCID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1) + ret0, _ := ret[0].([]types.MessageCID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset. 
+func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1) +} + +// ChainGetParentMessages mocks base method. +func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid) ([]types.MessageCID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentMessages", arg0, arg1) + ret0, _ := ret[0].([]types.MessageCID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentMessages indicates an expected call of ChainGetParentMessages. +func (mr *MockFullNodeMockRecorder) ChainGetParentMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentMessages), arg0, arg1) +} + +// ChainGetParentReceipts mocks base method. +func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid) ([]*types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentReceipts", arg0, arg1) + ret0, _ := ret[0].([]*types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts. +func (mr *MockFullNodeMockRecorder) ChainGetParentReceipts(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentReceipts), arg0, arg1) +} + +// ChainGetPath mocks base method. 
+func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSetKey) ([]*types.HeadChange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetPath", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.HeadChange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetPath indicates an expected call of ChainGetPath. +func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2) +} + +// ChainGetRandomnessFromBeacon mocks base method. +func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(abi.Randomness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon. +func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4) +} + +// ChainGetRandomnessFromTickets mocks base method. 
+func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(abi.Randomness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets. +func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4) +} + +// ChainGetReceipts mocks base method. +func (m *MockFullNode) ChainGetReceipts(arg0 context.Context, arg1 cid.Cid) ([]types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetReceipts", arg0, arg1) + ret0, _ := ret[0].([]types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetReceipts indicates an expected call of ChainGetReceipts. +func (mr *MockFullNodeMockRecorder) ChainGetReceipts(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetReceipts), arg0, arg1) +} + +// ChainGetTipSet mocks base method. +func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetTipSet", arg0, arg1) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetTipSet indicates an expected call of ChainGetTipSet. 
+func (mr *MockFullNodeMockRecorder) ChainGetTipSet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSet), arg0, arg1) +} + +// ChainGetTipSetByHeight mocks base method. +func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetTipSetByHeight", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight. +func (mr *MockFullNodeMockRecorder) ChainGetTipSetByHeight(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetByHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetByHeight), arg0, arg1, arg2) +} + +// ChainHasObj mocks base method. +func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHasObj indicates an expected call of ChainHasObj. +func (mr *MockFullNodeMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockFullNode)(nil).ChainHasObj), arg0, arg1) +} + +// ChainHead mocks base method. +func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHead", arg0) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHead indicates an expected call of ChainHead. 
+func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0) +} + +// ChainList mocks base method. +func (m *MockFullNode) ChainList(arg0 context.Context, arg1 types.TipSetKey, arg2 int) ([]types.TipSetKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainList", arg0, arg1, arg2) + ret0, _ := ret[0].([]types.TipSetKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainList indicates an expected call of ChainList. +func (mr *MockFullNodeMockRecorder) ChainList(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainList", reflect.TypeOf((*MockFullNode)(nil).ChainList), arg0, arg1, arg2) +} + +// ChainNotify mocks base method. +func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*types.HeadChange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainNotify", arg0) + ret0, _ := ret[0].(<-chan []*types.HeadChange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainNotify indicates an expected call of ChainNotify. +func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0) +} + +// ChainPutObj mocks base method. +func (m *MockFullNode) ChainPutObj(arg0 context.Context, arg1 blocks.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainPutObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainPutObj indicates an expected call of ChainPutObj. 
+func (mr *MockFullNodeMockRecorder) ChainPutObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainPutObj", reflect.TypeOf((*MockFullNode)(nil).ChainPutObj), arg0, arg1) +} + +// ChainReadObj mocks base method. +func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainReadObj", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainReadObj indicates an expected call of ChainReadObj. +func (mr *MockFullNodeMockRecorder) ChainReadObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainReadObj", reflect.TypeOf((*MockFullNode)(nil).ChainReadObj), arg0, arg1) +} + +// ChainSetHead mocks base method. +func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainSetHead", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainSetHead indicates an expected call of ChainSetHead. +func (mr *MockFullNodeMockRecorder) ChainSetHead(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSetHead", reflect.TypeOf((*MockFullNode)(nil).ChainSetHead), arg0, arg1) +} + +// ChainStatObj mocks base method. +func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (types.ObjStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainStatObj", arg0, arg1, arg2) + ret0, _ := ret[0].(types.ObjStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainStatObj indicates an expected call of ChainStatObj. 
+func (mr *MockFullNodeMockRecorder) ChainStatObj(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStatObj", reflect.TypeOf((*MockFullNode)(nil).ChainStatObj), arg0, arg1, arg2) +} + +// ChainSyncHandleNewTipSet mocks base method. +func (m *MockFullNode) ChainSyncHandleNewTipSet(arg0 context.Context, arg1 *types.ChainInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainSyncHandleNewTipSet", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainSyncHandleNewTipSet indicates an expected call of ChainSyncHandleNewTipSet. +func (mr *MockFullNodeMockRecorder) ChainSyncHandleNewTipSet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSyncHandleNewTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainSyncHandleNewTipSet), arg0, arg1) +} + +// ChainTipSetWeight mocks base method. +func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainTipSetWeight", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainTipSetWeight indicates an expected call of ChainTipSetWeight. +func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) +} + +// Concurrent mocks base method. +func (m *MockFullNode) Concurrent(arg0 context.Context) int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Concurrent", arg0) + ret0, _ := ret[0].(int64) + return ret0 +} + +// Concurrent indicates an expected call of Concurrent. 
+func (mr *MockFullNodeMockRecorder) Concurrent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Concurrent", reflect.TypeOf((*MockFullNode)(nil).Concurrent), arg0) +} + +// GasBatchEstimateMessageGas mocks base method. +func (m *MockFullNode) GasBatchEstimateMessageGas(arg0 context.Context, arg1 []*types.EstimateMessage, arg2 uint64, arg3 types.TipSetKey) ([]*types.EstimateResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasBatchEstimateMessageGas", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]*types.EstimateResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasBatchEstimateMessageGas indicates an expected call of GasBatchEstimateMessageGas. +func (mr *MockFullNodeMockRecorder) GasBatchEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasBatchEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasBatchEstimateMessageGas), arg0, arg1, arg2, arg3) +} + +// GasEstimateFeeCap mocks base method. +func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *internal.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateFeeCap", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap. +func (mr *MockFullNodeMockRecorder) GasEstimateFeeCap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateFeeCap", reflect.TypeOf((*MockFullNode)(nil).GasEstimateFeeCap), arg0, arg1, arg2, arg3) +} + +// GasEstimateGasLimit mocks base method. 
+func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *internal.Message, arg2 types.TipSetKey) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateGasLimit", arg0, arg1, arg2) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit. +func (mr *MockFullNodeMockRecorder) GasEstimateGasLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasLimit", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasLimit), arg0, arg1, arg2) +} + +// GasEstimateGasPremium mocks base method. +func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, arg2 address.Address, arg3 int64, arg4 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateGasPremium", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium. +func (mr *MockFullNodeMockRecorder) GasEstimateGasPremium(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasPremium", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasPremium), arg0, arg1, arg2, arg3, arg4) +} + +// GasEstimateMessageGas mocks base method. +func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *internal.Message, arg2 *types.MessageSendSpec, arg3 types.TipSetKey) (*internal.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateMessageGas", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*internal.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas. 
+func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) +} + +// GetActor mocks base method. +func (m *MockFullNode) GetActor(arg0 context.Context, arg1 address.Address) (*internal.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActor", arg0, arg1) + ret0, _ := ret[0].(*internal.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActor indicates an expected call of GetActor. +func (mr *MockFullNodeMockRecorder) GetActor(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActor", reflect.TypeOf((*MockFullNode)(nil).GetActor), arg0, arg1) +} + +// GetEntry mocks base method. +func (m *MockFullNode) GetEntry(arg0 context.Context, arg1 abi.ChainEpoch, arg2 uint64) (*types.BeaconEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEntry", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.BeaconEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEntry indicates an expected call of GetEntry. +func (mr *MockFullNodeMockRecorder) GetEntry(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEntry", reflect.TypeOf((*MockFullNode)(nil).GetEntry), arg0, arg1, arg2) +} + +// GetFullBlock mocks base method. +func (m *MockFullNode) GetFullBlock(arg0 context.Context, arg1 cid.Cid) (*types.FullBlock, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFullBlock", arg0, arg1) + ret0, _ := ret[0].(*types.FullBlock) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFullBlock indicates an expected call of GetFullBlock. 
+func (mr *MockFullNodeMockRecorder) GetFullBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFullBlock", reflect.TypeOf((*MockFullNode)(nil).GetFullBlock), arg0, arg1) +} + +// GetParentStateRootActor mocks base method. +func (m *MockFullNode) GetParentStateRootActor(arg0 context.Context, arg1 *types.TipSet, arg2 address.Address) (*internal.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetParentStateRootActor", arg0, arg1, arg2) + ret0, _ := ret[0].(*internal.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetParentStateRootActor indicates an expected call of GetParentStateRootActor. +func (mr *MockFullNodeMockRecorder) GetParentStateRootActor(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetParentStateRootActor", reflect.TypeOf((*MockFullNode)(nil).GetParentStateRootActor), arg0, arg1, arg2) +} + +// HasPassword mocks base method. +func (m *MockFullNode) HasPassword(arg0 context.Context) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasPassword", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// HasPassword indicates an expected call of HasPassword. +func (mr *MockFullNodeMockRecorder) HasPassword(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasPassword", reflect.TypeOf((*MockFullNode)(nil).HasPassword), arg0) +} + +// ID mocks base method. +func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ID", arg0) + ret0, _ := ret[0].(peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ID indicates an expected call of ID. 
+func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0) +} + +// ListActor mocks base method. +func (m *MockFullNode) ListActor(arg0 context.Context) (map[address.Address]*internal.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListActor", arg0) + ret0, _ := ret[0].(map[address.Address]*internal.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListActor indicates an expected call of ListActor. +func (mr *MockFullNodeMockRecorder) ListActor(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListActor", reflect.TypeOf((*MockFullNode)(nil).ListActor), arg0) +} + +// LockWallet mocks base method. +func (m *MockFullNode) LockWallet(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LockWallet", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// LockWallet indicates an expected call of LockWallet. +func (mr *MockFullNodeMockRecorder) LockWallet(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockWallet", reflect.TypeOf((*MockFullNode)(nil).LockWallet), arg0) +} + +// MessageWait mocks base method. +func (m *MockFullNode) MessageWait(arg0 context.Context, arg1 cid.Cid, arg2, arg3 abi.ChainEpoch) (*types.ChainMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessageWait", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.ChainMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessageWait indicates an expected call of MessageWait. 
+func (mr *MockFullNodeMockRecorder) MessageWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageWait", reflect.TypeOf((*MockFullNode)(nil).MessageWait), arg0, arg1, arg2, arg3) +} + +// MinerCreateBlock mocks base method. +func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *types.BlockTemplate) (*types.BlockMsg, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerCreateBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockMsg) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerCreateBlock indicates an expected call of MinerCreateBlock. +func (mr *MockFullNodeMockRecorder) MinerCreateBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerCreateBlock", reflect.TypeOf((*MockFullNode)(nil).MinerCreateBlock), arg0, arg1) +} + +// MinerGetBaseInfo mocks base method. +func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Address, arg2 abi.ChainEpoch, arg3 types.TipSetKey) (*types.MiningBaseInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerGetBaseInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.MiningBaseInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo. +func (mr *MockFullNodeMockRecorder) MinerGetBaseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerGetBaseInfo", reflect.TypeOf((*MockFullNode)(nil).MinerGetBaseInfo), arg0, arg1, arg2, arg3) +} + +// MpoolBatchPush mocks base method. 
+func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPush", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPush indicates an expected call of MpoolBatchPush. +func (mr *MockFullNodeMockRecorder) MpoolBatchPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPush", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPush), arg0, arg1) +} + +// MpoolBatchPushMessage mocks base method. +func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*internal.Message, arg2 *types.MessageSendSpec) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage. +func (mr *MockFullNodeMockRecorder) MpoolBatchPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushMessage), arg0, arg1, arg2) +} + +// MpoolBatchPushUntrusted mocks base method. +func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushUntrusted", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted. 
+func (mr *MockFullNodeMockRecorder) MpoolBatchPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushUntrusted), arg0, arg1) +} + +// MpoolClear mocks base method. +func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolClear", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolClear indicates an expected call of MpoolClear. +func (mr *MockFullNodeMockRecorder) MpoolClear(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolClear", reflect.TypeOf((*MockFullNode)(nil).MpoolClear), arg0, arg1) +} + +// MpoolDeleteByAdress mocks base method. +func (m *MockFullNode) MpoolDeleteByAdress(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolDeleteByAdress", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolDeleteByAdress indicates an expected call of MpoolDeleteByAdress. +func (mr *MockFullNodeMockRecorder) MpoolDeleteByAdress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolDeleteByAdress", reflect.TypeOf((*MockFullNode)(nil).MpoolDeleteByAdress), arg0, arg1) +} + +// MpoolGetConfig mocks base method. +func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetConfig", arg0) + ret0, _ := ret[0].(*types.MpoolConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetConfig indicates an expected call of MpoolGetConfig. 
+func (mr *MockFullNodeMockRecorder) MpoolGetConfig(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolGetConfig), arg0) +} + +// MpoolGetNonce mocks base method. +func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetNonce", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetNonce indicates an expected call of MpoolGetNonce. +func (mr *MockFullNodeMockRecorder) MpoolGetNonce(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetNonce", reflect.TypeOf((*MockFullNode)(nil).MpoolGetNonce), arg0, arg1) +} + +// MpoolPending mocks base method. +func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPending", arg0, arg1) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPending indicates an expected call of MpoolPending. +func (mr *MockFullNodeMockRecorder) MpoolPending(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPending", reflect.TypeOf((*MockFullNode)(nil).MpoolPending), arg0, arg1) +} + +// MpoolPublishByAddr mocks base method. +func (m *MockFullNode) MpoolPublishByAddr(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPublishByAddr", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolPublishByAddr indicates an expected call of MpoolPublishByAddr. 
+func (mr *MockFullNodeMockRecorder) MpoolPublishByAddr(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPublishByAddr", reflect.TypeOf((*MockFullNode)(nil).MpoolPublishByAddr), arg0, arg1) +} + +// MpoolPublishMessage mocks base method. +func (m *MockFullNode) MpoolPublishMessage(arg0 context.Context, arg1 *types.SignedMessage) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPublishMessage", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolPublishMessage indicates an expected call of MpoolPublishMessage. +func (mr *MockFullNodeMockRecorder) MpoolPublishMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPublishMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPublishMessage), arg0, arg1) +} + +// MpoolPush mocks base method. +func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPush", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPush indicates an expected call of MpoolPush. +func (mr *MockFullNodeMockRecorder) MpoolPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPush", reflect.TypeOf((*MockFullNode)(nil).MpoolPush), arg0, arg1) +} + +// MpoolPushMessage mocks base method. +func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *internal.Message, arg2 *types.MessageSendSpec) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushMessage indicates an expected call of MpoolPushMessage. 
+func (mr *MockFullNodeMockRecorder) MpoolPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPushMessage), arg0, arg1, arg2) +} + +// MpoolPushUntrusted mocks base method. +func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushUntrusted", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted. +func (mr *MockFullNodeMockRecorder) MpoolPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolPushUntrusted), arg0, arg1) +} + +// MpoolSelect mocks base method. +func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSelect", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSelect indicates an expected call of MpoolSelect. +func (mr *MockFullNodeMockRecorder) MpoolSelect(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelect", reflect.TypeOf((*MockFullNode)(nil).MpoolSelect), arg0, arg1, arg2) +} + +// MpoolSelects mocks base method. 
+func (m *MockFullNode) MpoolSelects(arg0 context.Context, arg1 types.TipSetKey, arg2 []float64) ([][]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSelects", arg0, arg1, arg2) + ret0, _ := ret[0].([][]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSelects indicates an expected call of MpoolSelects. +func (mr *MockFullNodeMockRecorder) MpoolSelects(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelects", reflect.TypeOf((*MockFullNode)(nil).MpoolSelects), arg0, arg1, arg2) +} + +// MpoolSetConfig mocks base method. +func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSetConfig", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolSetConfig indicates an expected call of MpoolSetConfig. +func (mr *MockFullNodeMockRecorder) MpoolSetConfig(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolSetConfig), arg0, arg1) +} + +// MpoolSub mocks base method. +func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan types.MpoolUpdate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSub", arg0) + ret0, _ := ret[0].(<-chan types.MpoolUpdate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSub indicates an expected call of MpoolSub. +func (mr *MockFullNodeMockRecorder) MpoolSub(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSub", reflect.TypeOf((*MockFullNode)(nil).MpoolSub), arg0) +} + +// MsigAddApprove mocks base method. 
+func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddApprove indicates an expected call of MsigAddApprove. +func (mr *MockFullNodeMockRecorder) MsigAddApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddApprove", reflect.TypeOf((*MockFullNode)(nil).MsigAddApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigAddCancel mocks base method. +func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddCancel indicates an expected call of MsigAddCancel. +func (mr *MockFullNodeMockRecorder) MsigAddCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddCancel", reflect.TypeOf((*MockFullNode)(nil).MsigAddCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigAddPropose mocks base method. +func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddPropose indicates an expected call of MsigAddPropose. 
+func (mr *MockFullNodeMockRecorder) MsigAddPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddPropose", reflect.TypeOf((*MockFullNode)(nil).MsigAddPropose), arg0, arg1, arg2, arg3, arg4) +} + +// MsigApprove mocks base method. +func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApprove", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApprove indicates an expected call of MsigApprove. +func (mr *MockFullNodeMockRecorder) MsigApprove(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApprove", reflect.TypeOf((*MockFullNode)(nil).MsigApprove), arg0, arg1, arg2, arg3) +} + +// MsigApproveTxnHash mocks base method. +func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApproveTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash. +func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApproveTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigApproveTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) +} + +// MsigCancel mocks base method. 
+func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancel indicates an expected call of MsigCancel. +func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3) +} + +// MsigCancelTxnHash mocks base method. +func (m *MockFullNode) MsigCancelTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancelTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancelTxnHash indicates an expected call of MsigCancelTxnHash. +func (mr *MockFullNodeMockRecorder) MsigCancelTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancelTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigCancelTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +} + +// MsigCreate mocks base method. +func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCreate", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCreate indicates an expected call of MsigCreate. 
+func (mr *MockFullNodeMockRecorder) MsigCreate(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCreate", reflect.TypeOf((*MockFullNode)(nil).MsigCreate), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigGetVested mocks base method. +func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, arg2, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetVested", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetVested indicates an expected call of MsigGetVested. +func (mr *MockFullNodeMockRecorder) MsigGetVested(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVested", reflect.TypeOf((*MockFullNode)(nil).MsigGetVested), arg0, arg1, arg2, arg3) +} + +// MsigPropose mocks base method. +func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigPropose", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigPropose indicates an expected call of MsigPropose. +func (mr *MockFullNodeMockRecorder) MsigPropose(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigPropose", reflect.TypeOf((*MockFullNode)(nil).MsigPropose), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigRemoveSigner mocks base method. 
+func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigRemoveSigner", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigRemoveSigner indicates an expected call of MsigRemoveSigner. +func (mr *MockFullNodeMockRecorder) MsigRemoveSigner(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigRemoveSigner", reflect.TypeOf((*MockFullNode)(nil).MsigRemoveSigner), arg0, arg1, arg2, arg3, arg4) +} + +// MsigSwapApprove mocks base method. +func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapApprove indicates an expected call of MsigSwapApprove. +func (mr *MockFullNodeMockRecorder) MsigSwapApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapApprove", reflect.TypeOf((*MockFullNode)(nil).MsigSwapApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigSwapCancel mocks base method. +func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapCancel indicates an expected call of MsigSwapCancel. 
+func (mr *MockFullNodeMockRecorder) MsigSwapCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapCancel", reflect.TypeOf((*MockFullNode)(nil).MsigSwapCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigSwapPropose mocks base method. +func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapPropose indicates an expected call of MsigSwapPropose. +func (mr *MockFullNodeMockRecorder) MsigSwapPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapPropose", reflect.TypeOf((*MockFullNode)(nil).MsigSwapPropose), arg0, arg1, arg2, arg3, arg4) +} + +// NetAddrsListen mocks base method. +func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAddrsListen", arg0) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAddrsListen indicates an expected call of NetAddrsListen. +func (mr *MockFullNodeMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockFullNode)(nil).NetAddrsListen), arg0) +} + +// NetAgentVersion mocks base method. +func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAgentVersion", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAgentVersion indicates an expected call of NetAgentVersion. 
+func (mr *MockFullNodeMockRecorder) NetAgentVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAgentVersion", reflect.TypeOf((*MockFullNode)(nil).NetAgentVersion), arg0, arg1) +} + +// NetAutoNatStatus mocks base method. +func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (types.NatInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAutoNatStatus", arg0) + ret0, _ := ret[0].(types.NatInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAutoNatStatus indicates an expected call of NetAutoNatStatus. +func (mr *MockFullNodeMockRecorder) NetAutoNatStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAutoNatStatus", reflect.TypeOf((*MockFullNode)(nil).NetAutoNatStatus), arg0) +} + +// NetBandwidthStats mocks base method. +func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStats", arg0) + ret0, _ := ret[0].(metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStats indicates an expected call of NetBandwidthStats. +func (mr *MockFullNodeMockRecorder) NetBandwidthStats(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStats", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStats), arg0) +} + +// NetBandwidthStatsByPeer mocks base method. +func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByPeer", arg0) + ret0, _ := ret[0].(map[string]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer. 
+func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByPeer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByPeer", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByPeer), arg0) +} + +// NetBandwidthStatsByProtocol mocks base method. +func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[protocol.ID]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByProtocol", arg0) + ret0, _ := ret[0].(map[protocol.ID]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol. +func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByProtocol(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByProtocol", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByProtocol), arg0) +} + +// NetConnect mocks base method. +func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetConnect indicates an expected call of NetConnect. +func (mr *MockFullNodeMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockFullNode)(nil).NetConnect), arg0, arg1) +} + +// NetConnectedness mocks base method. +func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (network0.Connectedness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnectedness", arg0, arg1) + ret0, _ := ret[0].(network0.Connectedness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetConnectedness indicates an expected call of NetConnectedness. 
+func (mr *MockFullNodeMockRecorder) NetConnectedness(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnectedness", reflect.TypeOf((*MockFullNode)(nil).NetConnectedness), arg0, arg1) +} + +// NetDisconnect mocks base method. +func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetDisconnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetDisconnect indicates an expected call of NetDisconnect. +func (mr *MockFullNodeMockRecorder) NetDisconnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetDisconnect", reflect.TypeOf((*MockFullNode)(nil).NetDisconnect), arg0, arg1) +} + +// NetFindPeer mocks base method. +func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetFindPeer indicates an expected call of NetFindPeer. +func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) +} + +// NetFindProvidersAsync mocks base method. +func (m *MockFullNode) NetFindProvidersAsync(arg0 context.Context, arg1 cid.Cid, arg2 int) <-chan peer.AddrInfo { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetFindProvidersAsync", arg0, arg1, arg2) + ret0, _ := ret[0].(<-chan peer.AddrInfo) + return ret0 +} + +// NetFindProvidersAsync indicates an expected call of NetFindProvidersAsync. 
+func (mr *MockFullNodeMockRecorder) NetFindProvidersAsync(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindProvidersAsync", reflect.TypeOf((*MockFullNode)(nil).NetFindProvidersAsync), arg0, arg1, arg2) +} + +// NetGetClosestPeers mocks base method. +func (m *MockFullNode) NetGetClosestPeers(arg0 context.Context, arg1 string) ([]peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetGetClosestPeers", arg0, arg1) + ret0, _ := ret[0].([]peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetGetClosestPeers indicates an expected call of NetGetClosestPeers. +func (mr *MockFullNodeMockRecorder) NetGetClosestPeers(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetGetClosestPeers", reflect.TypeOf((*MockFullNode)(nil).NetGetClosestPeers), arg0, arg1) +} + +// NetPeerInfo mocks base method. +func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*types.ExtendedPeerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeerInfo", arg0, arg1) + ret0, _ := ret[0].(*types.ExtendedPeerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeerInfo indicates an expected call of NetPeerInfo. +func (mr *MockFullNodeMockRecorder) NetPeerInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeerInfo", reflect.TypeOf((*MockFullNode)(nil).NetPeerInfo), arg0, arg1) +} + +// NetPeers mocks base method. +func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeers", arg0) + ret0, _ := ret[0].([]peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeers indicates an expected call of NetPeers. 
+func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0) +} + +// NetPing mocks base method. +func (m *MockFullNode) NetPing(arg0 context.Context, arg1 peer.ID) (time.Duration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPing", arg0, arg1) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPing indicates an expected call of NetPing. +func (mr *MockFullNodeMockRecorder) NetPing(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPing", reflect.TypeOf((*MockFullNode)(nil).NetPing), arg0, arg1) +} + +// NetProtectAdd mocks base method. +func (m *MockFullNode) NetProtectAdd(arg0 context.Context, arg1 []peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetProtectAdd", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetProtectAdd indicates an expected call of NetProtectAdd. +func (mr *MockFullNodeMockRecorder) NetProtectAdd(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectAdd", reflect.TypeOf((*MockFullNode)(nil).NetProtectAdd), arg0, arg1) +} + +// NetProtectList mocks base method. +func (m *MockFullNode) NetProtectList(arg0 context.Context) ([]peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetProtectList", arg0) + ret0, _ := ret[0].([]peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetProtectList indicates an expected call of NetProtectList. +func (mr *MockFullNodeMockRecorder) NetProtectList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectList", reflect.TypeOf((*MockFullNode)(nil).NetProtectList), arg0) +} + +// NetProtectRemove mocks base method. 
+func (m *MockFullNode) NetProtectRemove(arg0 context.Context, arg1 []peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetProtectRemove", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetProtectRemove indicates an expected call of NetProtectRemove. +func (mr *MockFullNodeMockRecorder) NetProtectRemove(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectRemove", reflect.TypeOf((*MockFullNode)(nil).NetProtectRemove), arg0, arg1) +} + +// NetPubsubScores mocks base method. +func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]types.PubsubScore, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPubsubScores", arg0) + ret0, _ := ret[0].([]types.PubsubScore) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPubsubScores indicates an expected call of NetPubsubScores. +func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) +} + +// PaychAllocateLane mocks base method. +func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAllocateLane", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAllocateLane indicates an expected call of PaychAllocateLane. +func (mr *MockFullNodeMockRecorder) PaychAllocateLane(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAllocateLane", reflect.TypeOf((*MockFullNode)(nil).PaychAllocateLane), arg0, arg1) +} + +// PaychAvailableFunds mocks base method. 
+func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Address) (*types.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFunds", arg0, arg1) + ret0, _ := ret[0].(*types.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFunds indicates an expected call of PaychAvailableFunds. +func (mr *MockFullNodeMockRecorder) PaychAvailableFunds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFunds", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFunds), arg0, arg1) +} + +// PaychAvailableFundsByFromTo mocks base method. +func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, arg2 address.Address) (*types.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFundsByFromTo", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo. +func (mr *MockFullNodeMockRecorder) PaychAvailableFundsByFromTo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFundsByFromTo", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFundsByFromTo), arg0, arg1, arg2) +} + +// PaychCollect mocks base method. +func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychCollect", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychCollect indicates an expected call of PaychCollect. 
+func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1) +} + +// PaychGet mocks base method. +func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*types.ChannelInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.ChannelInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychGet indicates an expected call of PaychGet. +func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3) +} + +// PaychGetWaitReady mocks base method. +func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychGetWaitReady", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychGetWaitReady indicates an expected call of PaychGetWaitReady. +func (mr *MockFullNodeMockRecorder) PaychGetWaitReady(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGetWaitReady", reflect.TypeOf((*MockFullNode)(nil).PaychGetWaitReady), arg0, arg1) +} + +// PaychList mocks base method. +func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychList", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychList indicates an expected call of PaychList. 
+func (mr *MockFullNodeMockRecorder) PaychList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychList", reflect.TypeOf((*MockFullNode)(nil).PaychList), arg0) +} + +// PaychNewPayment mocks base method. +func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address.Address, arg3 []types.VoucherSpec) (*types.PaymentInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychNewPayment", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.PaymentInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychNewPayment indicates an expected call of PaychNewPayment. +func (mr *MockFullNodeMockRecorder) PaychNewPayment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychNewPayment", reflect.TypeOf((*MockFullNode)(nil).PaychNewPayment), arg0, arg1, arg2, arg3) +} + +// PaychSettle mocks base method. +func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychSettle", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychSettle indicates an expected call of PaychSettle. +func (mr *MockFullNodeMockRecorder) PaychSettle(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychSettle", reflect.TypeOf((*MockFullNode)(nil).PaychSettle), arg0, arg1) +} + +// PaychStatus mocks base method. +func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) (*types.Status, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychStatus", arg0, arg1) + ret0, _ := ret[0].(*types.Status) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychStatus indicates an expected call of PaychStatus. 
+func (mr *MockFullNodeMockRecorder) PaychStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychStatus", reflect.TypeOf((*MockFullNode)(nil).PaychStatus), arg0, arg1) +} + +// PaychVoucherAdd mocks base method. +func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3 []byte, arg4 big.Int) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherAdd", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherAdd indicates an expected call of PaychVoucherAdd. +func (mr *MockFullNodeMockRecorder) PaychVoucherAdd(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherAdd", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherAdd), arg0, arg1, arg2, arg3, arg4) +} + +// PaychVoucherCheckSpendable mocks base method. +func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCheckSpendable", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable. +func (mr *MockFullNodeMockRecorder) PaychVoucherCheckSpendable(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckSpendable", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckSpendable), arg0, arg1, arg2, arg3, arg4) +} + +// PaychVoucherCheckValid mocks base method. 
+func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCheckValid", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid. +func (mr *MockFullNodeMockRecorder) PaychVoucherCheckValid(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckValid", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckValid), arg0, arg1, arg2) +} + +// PaychVoucherCreate mocks base method. +func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 uint64) (*types.VoucherCreateResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCreate", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.VoucherCreateResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherCreate indicates an expected call of PaychVoucherCreate. +func (mr *MockFullNodeMockRecorder) PaychVoucherCreate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCreate", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCreate), arg0, arg1, arg2, arg3) +} + +// PaychVoucherList mocks base method. +func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1) + ret0, _ := ret[0].([]*paych.SignedVoucher) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherList indicates an expected call of PaychVoucherList. 
+func (mr *MockFullNodeMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherList), arg0, arg1) +} + +// PaychVoucherSubmit mocks base method. +func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherSubmit", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit. +func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) +} + +// ProtocolParameters mocks base method. +func (m *MockFullNode) ProtocolParameters(arg0 context.Context) (*types.ProtocolParams, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProtocolParameters", arg0) + ret0, _ := ret[0].(*types.ProtocolParams) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProtocolParameters indicates an expected call of ProtocolParameters. +func (mr *MockFullNodeMockRecorder) ProtocolParameters(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProtocolParameters", reflect.TypeOf((*MockFullNode)(nil).ProtocolParameters), arg0) +} + +// ResolveToKeyAddr mocks base method. 
+func (m *MockFullNode) ResolveToKeyAddr(arg0 context.Context, arg1 address.Address, arg2 *types.TipSet) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResolveToKeyAddr", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResolveToKeyAddr indicates an expected call of ResolveToKeyAddr. +func (mr *MockFullNodeMockRecorder) ResolveToKeyAddr(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResolveToKeyAddr", reflect.TypeOf((*MockFullNode)(nil).ResolveToKeyAddr), arg0, arg1, arg2) +} + +// SetConcurrent mocks base method. +func (m *MockFullNode) SetConcurrent(arg0 context.Context, arg1 int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetConcurrent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetConcurrent indicates an expected call of SetConcurrent. +func (mr *MockFullNodeMockRecorder) SetConcurrent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetConcurrent", reflect.TypeOf((*MockFullNode)(nil).SetConcurrent), arg0, arg1) +} + +// SetPassword mocks base method. +func (m *MockFullNode) SetPassword(arg0 context.Context, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPassword", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetPassword indicates an expected call of SetPassword. +func (mr *MockFullNodeMockRecorder) SetPassword(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPassword", reflect.TypeOf((*MockFullNode)(nil).SetPassword), arg0, arg1) +} + +// StartTime mocks base method. 
+func (m *MockFullNode) StartTime(arg0 context.Context) (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartTime", arg0) + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StartTime indicates an expected call of StartTime. +func (mr *MockFullNodeMockRecorder) StartTime(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockFullNode)(nil).StartTime), arg0) +} + +// StateAccountKey mocks base method. +func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateAccountKey", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateAccountKey indicates an expected call of StateAccountKey. +func (mr *MockFullNodeMockRecorder) StateAccountKey(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockFullNode)(nil).StateAccountKey), arg0, arg1, arg2) +} + +// StateActorCodeCIDs mocks base method. +func (m *MockFullNode) StateActorCodeCIDs(arg0 context.Context, arg1 network.Version) (map[string]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateActorCodeCIDs", arg0, arg1) + ret0, _ := ret[0].(map[string]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateActorCodeCIDs indicates an expected call of StateActorCodeCIDs. +func (mr *MockFullNodeMockRecorder) StateActorCodeCIDs(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateActorCodeCIDs", reflect.TypeOf((*MockFullNode)(nil).StateActorCodeCIDs), arg0, arg1) +} + +// StateActorManifestCID mocks base method. 
+func (m *MockFullNode) StateActorManifestCID(arg0 context.Context, arg1 network.Version) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateActorManifestCID", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateActorManifestCID indicates an expected call of StateActorManifestCID. +func (mr *MockFullNodeMockRecorder) StateActorManifestCID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateActorManifestCID", reflect.TypeOf((*MockFullNode)(nil).StateActorManifestCID), arg0, arg1) +} + +// StateCall mocks base method. +func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *internal.Message, arg2 types.TipSetKey) (*types.InvocResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCall", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.InvocResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCall indicates an expected call of StateCall. +func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCall", reflect.TypeOf((*MockFullNode)(nil).StateCall), arg0, arg1, arg2) +} + +// StateCirculatingSupply mocks base method. +func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCirculatingSupply", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCirculatingSupply indicates an expected call of StateCirculatingSupply. 
+func (mr *MockFullNodeMockRecorder) StateCirculatingSupply(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCirculatingSupply", reflect.TypeOf((*MockFullNode)(nil).StateCirculatingSupply), arg0, arg1) +} + +// StateDealProviderCollateralBounds mocks base method. +func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, arg1 abi.PaddedPieceSize, arg2 bool, arg3 types.TipSetKey) (types.DealCollateralBounds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateDealProviderCollateralBounds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(types.DealCollateralBounds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds. +func (mr *MockFullNodeMockRecorder) StateDealProviderCollateralBounds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDealProviderCollateralBounds", reflect.TypeOf((*MockFullNode)(nil).StateDealProviderCollateralBounds), arg0, arg1, arg2, arg3) +} + +// StateGetActor mocks base method. +func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*internal.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2) + ret0, _ := ret[0].(*internal.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetActor indicates an expected call of StateGetActor. +func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) +} + +// StateGetAllocation mocks base method. 
+func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocation indicates an expected call of StateGetAllocation. +func (mr *MockFullNodeMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocation), arg0, arg1, arg2, arg3) +} + +// StateGetAllocationForPendingDeal mocks base method. +func (m *MockFullNode) StateGetAllocationForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocationForPendingDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(*verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocationForPendingDeal indicates an expected call of StateGetAllocationForPendingDeal. +func (mr *MockFullNodeMockRecorder) StateGetAllocationForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2) +} + +// StateGetAllocations mocks base method. 
+func (m *MockFullNode) StateGetAllocations(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocations", arg0, arg1, arg2) + ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocations indicates an expected call of StateGetAllocations. +func (mr *MockFullNodeMockRecorder) StateGetAllocations(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocations), arg0, arg1, arg2) +} + +// StateGetClaim mocks base method. +func (m *MockFullNode) StateGetClaim(arg0 context.Context, arg1 address.Address, arg2 verifreg.ClaimId, arg3 types.TipSetKey) (*verifreg.Claim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetClaim", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Claim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetClaim indicates an expected call of StateGetClaim. +func (mr *MockFullNodeMockRecorder) StateGetClaim(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetClaim", reflect.TypeOf((*MockFullNode)(nil).StateGetClaim), arg0, arg1, arg2, arg3) +} + +// StateGetClaims mocks base method. +func (m *MockFullNode) StateGetClaims(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetClaims", arg0, arg1, arg2) + ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetClaims indicates an expected call of StateGetClaims. 
+func (mr *MockFullNodeMockRecorder) StateGetClaims(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetClaims), arg0, arg1, arg2) +} + +// StateGetNetworkParams mocks base method. +func (m *MockFullNode) StateGetNetworkParams(arg0 context.Context) (*types.NetworkParams, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetNetworkParams", arg0) + ret0, _ := ret[0].(*types.NetworkParams) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetNetworkParams indicates an expected call of StateGetNetworkParams. +func (mr *MockFullNodeMockRecorder) StateGetNetworkParams(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetNetworkParams", reflect.TypeOf((*MockFullNode)(nil).StateGetNetworkParams), arg0) +} + +// StateGetReceipt mocks base method. +func (m *MockFullNode) StateGetReceipt(arg0 context.Context, arg1 cid.Cid, arg2 types.TipSetKey) (*types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetReceipt", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetReceipt indicates an expected call of StateGetReceipt. +func (mr *MockFullNodeMockRecorder) StateGetReceipt(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetReceipt", reflect.TypeOf((*MockFullNode)(nil).StateGetReceipt), arg0, arg1, arg2) +} + +// StateListActors mocks base method. 
+func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListActors", arg0, arg1) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListActors indicates an expected call of StateListActors. +func (mr *MockFullNodeMockRecorder) StateListActors(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListActors", reflect.TypeOf((*MockFullNode)(nil).StateListActors), arg0, arg1) +} + +// StateListMiners mocks base method. +func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListMiners", arg0, arg1) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListMiners indicates an expected call of StateListMiners. +func (mr *MockFullNodeMockRecorder) StateListMiners(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMiners", reflect.TypeOf((*MockFullNode)(nil).StateListMiners), arg0, arg1) +} + +// StateLookupID mocks base method. +func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateLookupID", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateLookupID indicates an expected call of StateLookupID. +func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2) +} + +// StateMarketBalance mocks base method. 
+func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (types.MarketBalance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(types.MarketBalance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketBalance indicates an expected call of StateMarketBalance. +func (mr *MockFullNodeMockRecorder) StateMarketBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketBalance", reflect.TypeOf((*MockFullNode)(nil).StateMarketBalance), arg0, arg1, arg2) +} + +// StateMarketDeals mocks base method. +func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetKey) (map[string]*types.MarketDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketDeals", arg0, arg1) + ret0, _ := ret[0].(map[string]*types.MarketDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketDeals indicates an expected call of StateMarketDeals. +func (mr *MockFullNodeMockRecorder) StateMarketDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketDeals", reflect.TypeOf((*MockFullNode)(nil).StateMarketDeals), arg0, arg1) +} + +// StateMarketParticipants mocks base method. +func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types.TipSetKey) (map[string]types.MarketBalance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketParticipants", arg0, arg1) + ret0, _ := ret[0].(map[string]types.MarketBalance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketParticipants indicates an expected call of StateMarketParticipants. 
+func (mr *MockFullNodeMockRecorder) StateMarketParticipants(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketParticipants", reflect.TypeOf((*MockFullNode)(nil).StateMarketParticipants), arg0, arg1) +} + +// StateMarketStorageDeal mocks base method. +func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*types.MarketDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketStorageDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.MarketDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal. +func (mr *MockFullNodeMockRecorder) StateMarketStorageDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDeal", reflect.TypeOf((*MockFullNode)(nil).StateMarketStorageDeal), arg0, arg1, arg2) +} + +// StateMinerActiveSectors mocks base method. +func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors. +func (mr *MockFullNodeMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerActiveSectors), arg0, arg1, arg2) +} + +// StateMinerAvailableBalance mocks base method. 
+func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. +func (mr *MockFullNodeMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + +// StateMinerDeadlines mocks base method. +func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]types.Deadline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerDeadlines", arg0, arg1, arg2) + ret0, _ := ret[0].([]types.Deadline) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerDeadlines indicates an expected call of StateMinerDeadlines. +func (mr *MockFullNodeMockRecorder) StateMinerDeadlines(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerDeadlines", reflect.TypeOf((*MockFullNode)(nil).StateMinerDeadlines), arg0, arg1, arg2) +} + +// StateMinerFaults mocks base method. +func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerFaults", arg0, arg1, arg2) + ret0, _ := ret[0].(bitfield.BitField) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerFaults indicates an expected call of StateMinerFaults. 
+func (mr *MockFullNodeMockRecorder) StateMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateMinerFaults), arg0, arg1, arg2) +} + +// StateMinerInfo mocks base method. +func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (types.MinerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) + ret0, _ := ret[0].(types.MinerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInfo indicates an expected call of StateMinerInfo. +func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockFullNode)(nil).StateMinerInfo), arg0, arg1, arg2) +} + +// StateMinerInitialPledgeCollateral mocks base method. +func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral. +func (mr *MockFullNodeMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockFullNode)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3) +} + +// StateMinerPartitions mocks base method. 
+func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 types.TipSetKey) ([]types.Partition, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPartitions", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]types.Partition) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPartitions indicates an expected call of StateMinerPartitions. +func (mr *MockFullNodeMockRecorder) StateMinerPartitions(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPartitions", reflect.TypeOf((*MockFullNode)(nil).StateMinerPartitions), arg0, arg1, arg2, arg3) +} + +// StateMinerPower mocks base method. +func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.MinerPower, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPower", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.MinerPower) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPower indicates an expected call of StateMinerPower. +func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPower), arg0, arg1, arg2) +} + +// StateMinerPreCommitDepositForPower mocks base method. +func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower. 
+func (mr *MockFullNodeMockRecorder) StateMinerPreCommitDepositForPower(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPreCommitDepositForPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPreCommitDepositForPower), arg0, arg1, arg2, arg3) +} + +// StateMinerProvingDeadline mocks base method. +func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*dline.Info, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerProvingDeadline", arg0, arg1, arg2) + ret0, _ := ret[0].(*dline.Info) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline. +func (mr *MockFullNodeMockRecorder) StateMinerProvingDeadline(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerProvingDeadline", reflect.TypeOf((*MockFullNode)(nil).StateMinerProvingDeadline), arg0, arg1, arg2) +} + +// StateMinerRecoveries mocks base method. +func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerRecoveries", arg0, arg1, arg2) + ret0, _ := ret[0].(bitfield.BitField) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerRecoveries indicates an expected call of StateMinerRecoveries. +func (mr *MockFullNodeMockRecorder) StateMinerRecoveries(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerRecoveries", reflect.TypeOf((*MockFullNode)(nil).StateMinerRecoveries), arg0, arg1, arg2) +} + +// StateMinerSectorAllocated mocks base method. 
+func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorAllocated", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated. +func (mr *MockFullNodeMockRecorder) StateMinerSectorAllocated(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorAllocated), arg0, arg1, arg2, arg3) +} + +// StateMinerSectorCount mocks base method. +func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (types.MinerSectors, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorCount", arg0, arg1, arg2) + ret0, _ := ret[0].(types.MinerSectors) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorCount indicates an expected call of StateMinerSectorCount. +func (mr *MockFullNodeMockRecorder) StateMinerSectorCount(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorCount", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorCount), arg0, arg1, arg2) +} + +// StateMinerSectorSize mocks base method. +func (m *MockFullNode) StateMinerSectorSize(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (abi.SectorSize, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorSize", arg0, arg1, arg2) + ret0, _ := ret[0].(abi.SectorSize) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorSize indicates an expected call of StateMinerSectorSize. 
+func (mr *MockFullNodeMockRecorder) StateMinerSectorSize(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorSize", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorSize), arg0, arg1, arg2) +} + +// StateMinerSectors mocks base method. +func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Address, arg2 *bitfield.BitField, arg3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectors", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectors indicates an expected call of StateMinerSectors. +func (mr *MockFullNodeMockRecorder) StateMinerSectors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectors), arg0, arg1, arg2, arg3) +} + +// StateMinerWorkerAddress mocks base method. +func (m *MockFullNode) StateMinerWorkerAddress(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerWorkerAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerWorkerAddress indicates an expected call of StateMinerWorkerAddress. +func (mr *MockFullNodeMockRecorder) StateMinerWorkerAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerWorkerAddress", reflect.TypeOf((*MockFullNode)(nil).StateMinerWorkerAddress), arg0, arg1, arg2) +} + +// StateNetworkName mocks base method. 
+func (m *MockFullNode) StateNetworkName(arg0 context.Context) (types.NetworkName, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkName", arg0) + ret0, _ := ret[0].(types.NetworkName) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkName indicates an expected call of StateNetworkName. +func (mr *MockFullNodeMockRecorder) StateNetworkName(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkName", reflect.TypeOf((*MockFullNode)(nil).StateNetworkName), arg0) +} + +// StateNetworkVersion mocks base method. +func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipSetKey) (network.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) + ret0, _ := ret[0].(network.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkVersion indicates an expected call of StateNetworkVersion. +func (mr *MockFullNodeMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockFullNode)(nil).StateNetworkVersion), arg0, arg1) +} + +// StateSearchMsg mocks base method. +func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 cid.Cid) (*types.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1) + ret0, _ := ret[0].(*types.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSearchMsg indicates an expected call of StateSearchMsg. +func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1) +} + +// StateSearchMsgLimited mocks base method. 
+func (m *MockFullNode) StateSearchMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 abi.ChainEpoch) (*types.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSearchMsgLimited", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSearchMsgLimited indicates an expected call of StateSearchMsgLimited. +func (mr *MockFullNodeMockRecorder) StateSearchMsgLimited(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsgLimited), arg0, arg1, arg2) +} + +// StateSectorExpiration mocks base method. +func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorExpiration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner0.SectorExpiration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorExpiration indicates an expected call of StateSectorExpiration. +func (mr *MockFullNodeMockRecorder) StateSectorExpiration(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorExpiration", reflect.TypeOf((*MockFullNode)(nil).StateSectorExpiration), arg0, arg1, arg2, arg3) +} + +// StateSectorGetInfo mocks base method. +func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorGetInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorGetInfo indicates an expected call of StateSectorGetInfo. 
+func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorGetInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorGetInfo), arg0, arg1, arg2, arg3) +} + +// StateSectorPartition mocks base method. +func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorLocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner0.SectorLocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPartition indicates an expected call of StateSectorPartition. +func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPartition", reflect.TypeOf((*MockFullNode)(nil).StateSectorPartition), arg0, arg1, arg2, arg3) +} + +// StateSectorPreCommitInfo mocks base method. +func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(miner.SectorPreCommitOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo. +func (mr *MockFullNodeMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) +} + +// StateVMCirculatingSupplyInternal mocks base method. 
+func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (types.CirculatingSupply, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) + ret0, _ := ret[0].(types.CirculatingSupply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal. +func (mr *MockFullNodeMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockFullNode)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) +} + +// StateVerifiedClientStatus mocks base method. +func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedClientStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus. +func (mr *MockFullNodeMockRecorder) StateVerifiedClientStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedClientStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedClientStatus), arg0, arg1, arg2) +} + +// StateVerifiedRegistryRootKey mocks base method. +func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedRegistryRootKey", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey. 
+func (mr *MockFullNodeMockRecorder) StateVerifiedRegistryRootKey(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedRegistryRootKey", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedRegistryRootKey), arg0, arg1) +} + +// StateVerifierStatus mocks base method. +func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifierStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifierStatus indicates an expected call of StateVerifierStatus. +func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifierStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifierStatus), arg0, arg1, arg2) +} + +// StateWaitMsg mocks base method. +func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64) (*types.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateWaitMsg indicates an expected call of StateWaitMsg. +func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2) +} + +// StateWaitMsgLimited mocks base method. 
+func (m *MockFullNode) StateWaitMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch) (*types.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateWaitMsgLimited", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateWaitMsgLimited indicates an expected call of StateWaitMsgLimited. +func (mr *MockFullNodeMockRecorder) StateWaitMsgLimited(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsgLimited), arg0, arg1, arg2, arg3) +} + +// SyncState mocks base method. +func (m *MockFullNode) SyncState(arg0 context.Context) (*types.SyncState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncState", arg0) + ret0, _ := ret[0].(*types.SyncState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncState indicates an expected call of SyncState. +func (mr *MockFullNodeMockRecorder) SyncState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockFullNode)(nil).SyncState), arg0) +} + +// SyncSubmitBlock mocks base method. +func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMsg) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncSubmitBlock", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncSubmitBlock indicates an expected call of SyncSubmitBlock. +func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1) +} + +// SyncerTracker mocks base method. 
+func (m *MockFullNode) SyncerTracker(arg0 context.Context) *types.TargetTracker { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncerTracker", arg0) + ret0, _ := ret[0].(*types.TargetTracker) + return ret0 +} + +// SyncerTracker indicates an expected call of SyncerTracker. +func (mr *MockFullNodeMockRecorder) SyncerTracker(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncerTracker", reflect.TypeOf((*MockFullNode)(nil).SyncerTracker), arg0) +} + +// UnLockWallet mocks base method. +func (m *MockFullNode) UnLockWallet(arg0 context.Context, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnLockWallet", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnLockWallet indicates an expected call of UnLockWallet. +func (mr *MockFullNodeMockRecorder) UnLockWallet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnLockWallet", reflect.TypeOf((*MockFullNode)(nil).UnLockWallet), arg0, arg1) +} + +// VerifyEntry mocks base method. +func (m *MockFullNode) VerifyEntry(arg0, arg1 *types.BeaconEntry, arg2 abi.ChainEpoch) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyEntry", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + return ret0 +} + +// VerifyEntry indicates an expected call of VerifyEntry. +func (mr *MockFullNodeMockRecorder) VerifyEntry(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyEntry", reflect.TypeOf((*MockFullNode)(nil).VerifyEntry), arg0, arg1, arg2) +} + +// Version mocks base method. +func (m *MockFullNode) Version(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. 
+func (mr *MockFullNodeMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockFullNode)(nil).Version), arg0) +} + +// WalletAddresses mocks base method. +func (m *MockFullNode) WalletAddresses(arg0 context.Context) []address.Address { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletAddresses", arg0) + ret0, _ := ret[0].([]address.Address) + return ret0 +} + +// WalletAddresses indicates an expected call of WalletAddresses. +func (mr *MockFullNodeMockRecorder) WalletAddresses(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletAddresses", reflect.TypeOf((*MockFullNode)(nil).WalletAddresses), arg0) +} + +// WalletBalance mocks base method. +func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletBalance", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletBalance indicates an expected call of WalletBalance. +func (mr *MockFullNodeMockRecorder) WalletBalance(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletBalance", reflect.TypeOf((*MockFullNode)(nil).WalletBalance), arg0, arg1) +} + +// WalletDefaultAddress mocks base method. +func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDefaultAddress", arg0) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletDefaultAddress indicates an expected call of WalletDefaultAddress. 
+func (mr *MockFullNodeMockRecorder) WalletDefaultAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDefaultAddress", reflect.TypeOf((*MockFullNode)(nil).WalletDefaultAddress), arg0) +} + +// WalletDelete mocks base method. +func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletDelete indicates an expected call of WalletDelete. +func (mr *MockFullNodeMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockFullNode)(nil).WalletDelete), arg0, arg1) +} + +// WalletExport mocks base method. +func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address, arg2 string) (*types.KeyInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletExport", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.KeyInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletExport indicates an expected call of WalletExport. +func (mr *MockFullNodeMockRecorder) WalletExport(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockFullNode)(nil).WalletExport), arg0, arg1, arg2) +} + +// WalletHas mocks base method. +func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas. 
+func (mr *MockFullNodeMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockFullNode)(nil).WalletHas), arg0, arg1) +} + +// WalletImport mocks base method. +func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletImport", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletImport indicates an expected call of WalletImport. +func (mr *MockFullNodeMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockFullNode)(nil).WalletImport), arg0, arg1) +} + +// WalletNewAddress mocks base method. +func (m *MockFullNode) WalletNewAddress(arg0 context.Context, arg1 byte) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletNewAddress", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletNewAddress indicates an expected call of WalletNewAddress. +func (mr *MockFullNodeMockRecorder) WalletNewAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNewAddress", reflect.TypeOf((*MockFullNode)(nil).WalletNewAddress), arg0, arg1) +} + +// WalletSetDefault mocks base method. +func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSetDefault", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletSetDefault indicates an expected call of WalletSetDefault. 
+func (mr *MockFullNodeMockRecorder) WalletSetDefault(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSetDefault", reflect.TypeOf((*MockFullNode)(nil).WalletSetDefault), arg0, arg1) +} + +// WalletSign mocks base method. +func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 types.MsgMeta) (*crypto.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*crypto.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSign indicates an expected call of WalletSign. +func (mr *MockFullNodeMockRecorder) WalletSign(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockFullNode)(nil).WalletSign), arg0, arg1, arg2, arg3) +} + +// WalletSignMessage mocks base method. +func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Address, arg2 *internal.Message) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSignMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSignMessage indicates an expected call of WalletSignMessage. +func (mr *MockFullNodeMockRecorder) WalletSignMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSignMessage", reflect.TypeOf((*MockFullNode)(nil).WalletSignMessage), arg0, arg1, arg2) +} + +// WalletState mocks base method. +func (m *MockFullNode) WalletState(arg0 context.Context) int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletState", arg0) + ret0, _ := ret[0].(int) + return ret0 +} + +// WalletState indicates an expected call of WalletState. 
+func (mr *MockFullNodeMockRecorder) WalletState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletState", reflect.TypeOf((*MockFullNode)(nil).WalletState), arg0) +} diff --git a/venus-shared/api/chain/v0/mpool.go b/venus-shared/api/chain/v0/mpool.go new file mode 100644 index 0000000000..911e72a9f4 --- /dev/null +++ b/venus-shared/api/chain/v0/mpool.go @@ -0,0 +1,36 @@ +package v0 + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IMessagePool interface { + MpoolDeleteByAdress(ctx context.Context, addr address.Address) error //perm:admin + MpoolPublishByAddr(context.Context, address.Address) error //perm:admin + MpoolPublishMessage(ctx context.Context, smsg *types.SignedMessage) error //perm:admin + MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) //perm:write + MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read + MpoolSetConfig(ctx context.Context, cfg *types.MpoolConfig) error //perm:admin + MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read + MpoolSelects(context.Context, types.TipSetKey, []float64) ([][]*types.SignedMessage, error) //perm:read + MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) //perm:read + MpoolClear(ctx context.Context, local bool) error //perm:write + MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) //perm:write + MpoolPushMessage(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) (*types.SignedMessage, error) //perm:sign + MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) //perm:write + MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) 
//perm:write + MpoolBatchPushMessage(ctx context.Context, msgs []*types.Message, spec *types.MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign + MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) //perm:read + MpoolSub(ctx context.Context) (<-chan types.MpoolUpdate, error) //perm:read + GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) //perm:read + GasBatchEstimateMessageGas(ctx context.Context, estimateMessages []*types.EstimateMessage, fromNonce uint64, tsk types.TipSetKey) ([]*types.EstimateResult, error) //perm:read + GasEstimateFeeCap(ctx context.Context, msg *types.Message, maxqueueblks int64, tsk types.TipSetKey) (big.Int, error) //perm:read + GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (big.Int, error) //perm:read + GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) //perm:read +} diff --git a/venus-shared/api/chain/v0/multisig.go b/venus-shared/api/chain/v0/multisig.go new file mode 100644 index 0000000000..324cd3846c --- /dev/null +++ b/venus-shared/api/chain/v0/multisig.go @@ -0,0 +1,35 @@ +package v0 + +import ( + "context" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IMultiSig interface { + // MsigCreate creates a multisig wallet + // It takes the following params: , , + //, , + MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign + MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) //perm:sign + MsigAddPropose(ctx context.Context, msig address.Address, src 
address.Address, newAdd address.Address, inc bool) (cid.Cid, error) //perm:sign + MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) //perm:sign + MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) //perm:sign + // MsigCancel cancels a previously-proposed multisig message + // It takes the following params: , , , , + // , , + MsigCancelTxnHash(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign + MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) //perm:sign + MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) //perm:sign + MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) //perm:sign + MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) //perm:sign + MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) //perm:sign + MsigCancel(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) //perm:sign + MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) //perm:sign + MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) //perm:read +} diff --git 
a/venus-shared/api/chain/v0/network.go b/venus-shared/api/chain/v0/network.go new file mode 100644 index 0000000000..dc187c692f --- /dev/null +++ b/venus-shared/api/chain/v0/network.go @@ -0,0 +1,47 @@ +package v0 + +import ( + "context" + "time" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/metrics" + network2 "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type INetwork interface { + NetFindProvidersAsync(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo //perm:read + NetGetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) //perm:read + NetConnectedness(context.Context, peer.ID) (network2.Connectedness, error) //perm:read + NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) //perm:read + NetConnect(ctx context.Context, pi peer.AddrInfo) error //perm:admin + NetPeers(ctx context.Context) ([]peer.AddrInfo, error) //perm:read + NetPeerInfo(ctx context.Context, p peer.ID) (*types.ExtendedPeerInfo, error) //perm:read + NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read + NetPing(ctx context.Context, p peer.ID) (time.Duration, error) //perm:read + NetAddrsListen(ctx context.Context) (peer.AddrInfo, error) //perm:read + NetDisconnect(ctx context.Context, p peer.ID) error //perm:admin + NetAutoNatStatus(context.Context) (types.NatInfo, error) //perm:read + NetPubsubScores(context.Context) ([]types.PubsubScore, error) //perm:read + ID(ctx context.Context) (peer.ID, error) //perm:read + + // NetBandwidthStats returns statistics about the nodes total bandwidth + // usage and current rate across all peers and protocols. 
+ NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read + + // NetBandwidthStatsByPeer returns statistics about the nodes bandwidth + // usage and current rate per peer + NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read + + // NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth + // usage and current rate per protocol + NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read + + NetProtectAdd(ctx context.Context, acl []peer.ID) error //perm:admin + NetProtectRemove(ctx context.Context, acl []peer.ID) error //perm:admin + NetProtectList(ctx context.Context) ([]peer.ID, error) //perm:read +} diff --git a/venus-shared/api/chain/v0/paych.go b/venus-shared/api/chain/v0/paych.go new file mode 100644 index 0000000000..0ad9d4d127 --- /dev/null +++ b/venus-shared/api/chain/v0/paych.go @@ -0,0 +1,82 @@ +package v0 + +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" +) + +type IPaychan interface { + // PaychGet creates a payment channel to a provider with a amount of FIL + // @from: the payment channel sender + // @to: the payment channel recipient + // @amt: the deposits funds in the payment channel + PaychGet(ctx context.Context, from, to address.Address, amt big.Int) (*types.ChannelInfo, error) //perm:sign + // PaychAvailableFunds get the status of an outbound payment channel + // @pch: payment channel address + PaychAvailableFunds(ctx context.Context, ch address.Address) (*types.ChannelAvailableFunds, error) //perm:sign + // PaychAvailableFundsByFromTo get the status of an outbound payment channel + // @from: the payment channel sender + // @to: he payment channel recipient + PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*types.ChannelAvailableFunds, error) 
//perm:sign + // PaychGetWaitReady waits until the create channel / add funds message with the sentinel + // @sentinel: given message CID arrives. + // @ch: the returned channel address can safely be used against the Manager methods. + PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) //perm:sign + // PaychAllocateLane Allocate late creates a lane within a payment channel so that calls to + // CreatePaymentVoucher will automatically make vouchers only for the difference in total + PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign + // PaychNewPayment aggregate vouchers into a new lane + // @from: the payment channel sender + // @to: the payment channel recipient + // @vouchers: the outstanding (non-redeemed) vouchers + PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []types.VoucherSpec) (*types.PaymentInfo, error) //perm:sign + // PaychList list the addresses of all channels that have been created + PaychList(ctx context.Context) ([]address.Address, error) //perm:read + // PaychStatus get the payment channel status + // @pch: payment channel address + PaychStatus(ctx context.Context, pch address.Address) (*types.Status, error) //perm:read + // PaychSettle update payment channel status to settle + // After a settlement period (currently 12 hours) either party to the payment channel can call collect on chain + // @pch: payment channel address + PaychSettle(ctx context.Context, addr address.Address) (cid.Cid, error) //perm:sign + // PaychCollect update payment channel status to collect + // Collect sends the value of submitted vouchers to the channel recipient (the provider), + // and refunds the remaining channel balance to the channel creator (the client). 
+ // @pch: payment channel address + PaychCollect(ctx context.Context, addr address.Address) (cid.Cid, error) //perm:sign + + // PaychVoucherCheckValid checks if the given voucher is valid (is or could become spendable at some point). + // If the channel is not in the store, fetches the channel from state (and checks that + // the channel To address is owned by the wallet). + // @pch: payment channel address + // @sv: voucher + PaychVoucherCheckValid(ctx context.Context, ch address.Address, sv *types.SignedVoucher) error //perm:read + // PaychVoucherCheckSpendable checks if the given voucher is currently spendable + // @pch: payment channel address + // @sv: voucher + PaychVoucherCheckSpendable(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (bool, error) //perm:read + // PaychVoucherAdd adds a voucher for an inbound channel. + // If the channel is not in the store, fetches the channel from state (and checks that + // the channel To address is owned by the wallet). + PaychVoucherAdd(ctx context.Context, ch address.Address, sv *types.SignedVoucher, proof []byte, minDelta big.Int) (big.Int, error) //perm:write + // PaychVoucherCreate creates a new signed voucher on the given payment channel + // with the given lane and amount. The value passed in is exactly the value + // that will be used to create the voucher, so if previous vouchers exist, the + // actual additional value of this voucher will only be the difference between + // the two. + // If there are insufficient funds in the channel to create the voucher, + // returns a nil voucher and the shortfall. 
+ PaychVoucherCreate(ctx context.Context, pch address.Address, amt big.Int, lane uint64) (*types.VoucherCreateResult, error) //perm:sign + // PaychVoucherList list vouchers in payment channel + // @pch: payment channel address + PaychVoucherList(ctx context.Context, pch address.Address) ([]*types.SignedVoucher, error) //perm:write + // PaychVoucherSubmit Submit voucher to chain to update payment channel state + // @pch: payment channel address + // @sv: voucher in payment channel + PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) //perm:sign +} diff --git a/venus-shared/api/chain/v0/proxy_gen.go b/venus-shared/api/chain/v0/proxy_gen.go new file mode 100644 index 0000000000..c63004c58b --- /dev/null +++ b/venus-shared/api/chain/v0/proxy_gen.go @@ -0,0 +1,862 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package v0 + +import ( + "context" + "time" + + address "github.com/filecoin-project/go-address" + bitfield "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/metrics" + network2 "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + lminer "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IBlockStoreStruct struct { + Internal struct { + ChainDeleteObj func(ctx context.Context, obj cid.Cid) error `perm:"admin"` + ChainHasObj func(ctx context.Context, obj cid.Cid) (bool, error) `perm:"read"` + ChainPutObj 
func(context.Context, blocks.Block) error `perm:"admin"` + ChainReadObj func(ctx context.Context, cid cid.Cid) ([]byte, error) `perm:"read"` + ChainStatObj func(ctx context.Context, obj cid.Cid, base cid.Cid) (types.ObjStat, error) `perm:"read"` + } +} + +func (s *IBlockStoreStruct) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { + return s.Internal.ChainDeleteObj(p0, p1) +} +func (s *IBlockStoreStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ChainHasObj(p0, p1) +} +func (s *IBlockStoreStruct) ChainPutObj(p0 context.Context, p1 blocks.Block) error { + return s.Internal.ChainPutObj(p0, p1) +} +func (s *IBlockStoreStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return s.Internal.ChainReadObj(p0, p1) +} +func (s *IBlockStoreStruct) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (types.ObjStat, error) { + return s.Internal.ChainStatObj(p0, p1, p2) +} + +type IAccountStruct struct { + Internal struct { + StateAccountKey func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"` + } +} + +func (s *IAccountStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateAccountKey(p0, p1, p2) +} + +type IActorStruct struct { + Internal struct { + ListActor func(ctx context.Context) (map[address.Address]*types.Actor, error) `perm:"read"` + StateGetActor func(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) `perm:"read"` + } +} + +func (s *IActorStruct) ListActor(p0 context.Context) (map[address.Address]*types.Actor, error) { + return s.Internal.ListActor(p0) +} +func (s *IActorStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return s.Internal.StateGetActor(p0, p1, p2) +} + +type IBeaconStruct struct { + Internal struct { + BeaconGetEntry func(ctx context.Context, epoch 
abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` + } +} + +func (s *IBeaconStruct) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { + return s.Internal.BeaconGetEntry(p0, p1) +} + +type IMinerStateStruct struct { + Internal struct { + StateCirculatingSupply func(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) `perm:"read"` + StateDealProviderCollateralBounds func(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (types.DealCollateralBounds, error) `perm:"read"` + StateGetAllocation func(ctx context.Context, clientAddr address.Address, allocationID types.AllocationId, tsk types.TipSetKey) (*types.Allocation, error) `perm:"read"` + StateGetAllocationForPendingDeal func(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*types.Allocation, error) `perm:"read"` + StateGetAllocations func(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[types.AllocationId]types.Allocation, error) `perm:"read"` + StateGetClaim func(ctx context.Context, providerAddr address.Address, claimID types.ClaimId, tsk types.TipSetKey) (*types.Claim, error) `perm:"read"` + StateGetClaims func(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[types.ClaimId]types.Claim, error) `perm:"read"` + StateListActors func(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) `perm:"read"` + StateListMiners func(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) `perm:"read"` + StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"` + StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MarketBalance, error) `perm:"read"` + StateMarketDeals func(ctx context.Context, tsk types.TipSetKey) (map[string]*types.MarketDeal, error) `perm:"read"` + StateMarketStorageDeal func(ctx context.Context, dealID abi.DealID, tsk 
types.TipSetKey) (*types.MarketDeal, error) `perm:"read"` + StateMinerActiveSectors func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) `perm:"read"` + StateMinerAvailableBalance func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + StateMinerDeadlines func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]types.Deadline, error) `perm:"read"` + StateMinerFaults func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + StateMinerInfo func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.MinerInfo, error) `perm:"read"` + StateMinerInitialPledgeCollateral func(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + StateMinerPartitions func(ctx context.Context, maddr address.Address, dlIdx uint64, tsk types.TipSetKey) ([]types.Partition, error) `perm:"read"` + StateMinerPower func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.MinerPower, error) `perm:"read"` + StateMinerPreCommitDepositForPower func(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + StateMinerProvingDeadline func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (*dline.Info, error) `perm:"read"` + StateMinerRecoveries func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + StateMinerSectorAllocated func(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) `perm:"read"` + StateMinerSectorCount func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MinerSectors, error) `perm:"read"` + StateMinerSectorSize func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (abi.SectorSize, error) 
`perm:"read"` + StateMinerSectors func(ctx context.Context, maddr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) `perm:"read"` + StateMinerWorkerAddress func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"` + StateSectorExpiration func(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorExpiration, error) `perm:"read"` + StateSectorGetInfo func(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*types.SectorOnChainInfo, error) `perm:"read"` + StateSectorPartition func(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorLocation, error) `perm:"read"` + StateSectorPreCommitInfo func(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (types.SectorPreCommitOnChainInfo, error) `perm:"read"` + StateVMCirculatingSupplyInternal func(ctx context.Context, tsk types.TipSetKey) (types.CirculatingSupply, error) `perm:"read"` + StateVerifiedClientStatus func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` + } +} + +func (s *IMinerStateStruct) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { + return s.Internal.StateCirculatingSupply(p0, p1) +} +func (s *IMinerStateStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (types.DealCollateralBounds, error) { + return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 types.AllocationId, p3 types.TipSetKey) (*types.Allocation, error) { + return s.Internal.StateGetAllocation(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 
abi.DealID, p2 types.TipSetKey) (*types.Allocation, error) { + return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2) +} +func (s *IMinerStateStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[types.AllocationId]types.Allocation, error) { + return s.Internal.StateGetAllocations(p0, p1, p2) +} +func (s *IMinerStateStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 types.ClaimId, p3 types.TipSetKey) (*types.Claim, error) { + return s.Internal.StateGetClaim(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[types.ClaimId]types.Claim, error) { + return s.Internal.StateGetClaims(p0, p1, p2) +} +func (s *IMinerStateStruct) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListActors(p0, p1) +} +func (s *IMinerStateStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListMiners(p0, p1) +} +func (s *IMinerStateStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateLookupID(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.MarketBalance, error) { + return s.Internal.StateMarketBalance(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]*types.MarketDeal, error) { + return s.Internal.StateMarketDeals(p0, p1) +} +func (s *IMinerStateStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*types.MarketDeal, error) { + return s.Internal.StateMarketStorageDeal(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*types.SectorOnChainInfo, error) { + return s.Internal.StateMinerActiveSectors(p0, 
p1, p2) +} +func (s *IMinerStateStruct) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (big.Int, error) { + return s.Internal.StateMinerAvailableBalance(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]types.Deadline, error) { + return s.Internal.StateMinerDeadlines(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return s.Internal.StateMinerFaults(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.MinerInfo, error) { + return s.Internal.StateMinerInfo(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 types.SectorPreCommitInfo, p3 types.TipSetKey) (big.Int, error) { + return s.Internal.StateMinerInitialPledgeCollateral(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]types.Partition, error) { + return s.Internal.StateMinerPartitions(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.MinerPower, error) { + return s.Internal.StateMinerPower(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 types.SectorPreCommitInfo, p3 types.TipSetKey) (big.Int, error) { + return s.Internal.StateMinerPreCommitDepositForPower(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return s.Internal.StateMinerProvingDeadline(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + 
return s.Internal.StateMinerRecoveries(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) { + return s.Internal.StateMinerSectorAllocated(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.MinerSectors, error) { + return s.Internal.StateMinerSectorCount(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerSectorSize(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (abi.SectorSize, error) { + return s.Internal.StateMinerSectorSize(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*types.SectorOnChainInfo, error) { + return s.Internal.StateMinerSectors(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerWorkerAddress(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateMinerWorkerAddress(p0, p1, p2) +} +func (s *IMinerStateStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) { + return s.Internal.StateSectorExpiration(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*types.SectorOnChainInfo, error) { + return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) { + return s.Internal.StateSectorPartition(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (types.SectorPreCommitOnChainInfo, error) { + return s.Internal.StateSectorPreCommitInfo(p0, p1, p2, 
p3) +} +func (s *IMinerStateStruct) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (types.CirculatingSupply, error) { + return s.Internal.StateVMCirculatingSupplyInternal(p0, p1) +} +func (s *IMinerStateStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifiedClientStatus(p0, p1, p2) +} + +type IChainInfoStruct struct { + Internal struct { + BlockTime func(ctx context.Context) time.Duration `perm:"read"` + ChainExport func(context.Context, abi.ChainEpoch, bool, types.TipSetKey) (<-chan []byte, error) `perm:"read"` + ChainGetBlock func(ctx context.Context, id cid.Cid) (*types.BlockHeader, error) `perm:"read"` + ChainGetBlockMessages func(ctx context.Context, bid cid.Cid) (*types.BlockMessages, error) `perm:"read"` + ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"` + ChainGetMessage func(ctx context.Context, msgID cid.Cid) (*types.Message, error) `perm:"read"` + ChainGetMessagesInTipset func(ctx context.Context, key types.TipSetKey) ([]types.MessageCID, error) `perm:"read"` + ChainGetParentMessages func(ctx context.Context, bcid cid.Cid) ([]types.MessageCID, error) `perm:"read"` + ChainGetParentReceipts func(ctx context.Context, bcid cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"` + ChainGetPath func(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*types.HeadChange, error) `perm:"read"` + ChainGetRandomnessFromBeacon func(ctx context.Context, key types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) `perm:"read"` + ChainGetRandomnessFromTickets func(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) `perm:"read"` + ChainGetReceipts func(ctx context.Context, id cid.Cid) ([]types.MessageReceipt, error) `perm:"read"` + 
ChainGetTipSet func(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) `perm:"read"` + ChainGetTipSetByHeight func(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) `perm:"read"` + ChainHead func(ctx context.Context) (*types.TipSet, error) `perm:"read"` + ChainList func(ctx context.Context, tsKey types.TipSetKey, count int) ([]types.TipSetKey, error) `perm:"read"` + ChainNotify func(ctx context.Context) (<-chan []*types.HeadChange, error) `perm:"read"` + ChainSetHead func(ctx context.Context, key types.TipSetKey) error `perm:"admin"` + GetActor func(ctx context.Context, addr address.Address) (*types.Actor, error) `perm:"read"` + GetEntry func(ctx context.Context, height abi.ChainEpoch, round uint64) (*types.BeaconEntry, error) `perm:"read"` + GetFullBlock func(ctx context.Context, id cid.Cid) (*types.FullBlock, error) `perm:"read"` + GetParentStateRootActor func(ctx context.Context, ts *types.TipSet, addr address.Address) (*types.Actor, error) `perm:"read"` + MessageWait func(ctx context.Context, msgCid cid.Cid, confidence, lookback abi.ChainEpoch) (*types.ChainMessage, error) `perm:"read"` + ProtocolParameters func(ctx context.Context) (*types.ProtocolParams, error) `perm:"read"` + ResolveToKeyAddr func(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) `perm:"read"` + StateActorCodeCIDs func(context.Context, network.Version) (map[string]cid.Cid, error) `perm:"read"` + StateActorManifestCID func(context.Context, network.Version) (cid.Cid, error) `perm:"read"` + StateCall func(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*types.InvocResult, error) `perm:"read"` + StateGetNetworkParams func(ctx context.Context) (*types.NetworkParams, error) `perm:"read"` + StateGetReceipt func(ctx context.Context, msg cid.Cid, from types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"` + StateNetworkName func(ctx context.Context) (types.NetworkName, error) `perm:"read"` + 
StateNetworkVersion func(ctx context.Context, tsk types.TipSetKey) (network.Version, error) `perm:"read"` + StateSearchMsg func(ctx context.Context, msg cid.Cid) (*types.MsgLookup, error) `perm:"read"` + StateSearchMsgLimited func(ctx context.Context, cid cid.Cid, limit abi.ChainEpoch) (*types.MsgLookup, error) `perm:"read"` + StateVerifiedRegistryRootKey func(ctx context.Context, tsk types.TipSetKey) (address.Address, error) `perm:"read"` + StateVerifierStatus func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` + StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*types.MsgLookup, error) `perm:"read"` + StateWaitMsgLimited func(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*types.MsgLookup, error) `perm:"read"` + VerifyEntry func(parent, child *types.BeaconEntry, height abi.ChainEpoch) bool `perm:"read"` + } +} + +func (s *IChainInfoStruct) BlockTime(p0 context.Context) time.Duration { + return s.Internal.BlockTime(p0) +} +func (s *IChainInfoStruct) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { + return s.Internal.ChainExport(p0, p1, p2, p3) +} +func (s *IChainInfoStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + return s.Internal.ChainGetBlock(p0, p1) +} +func (s *IChainInfoStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*types.BlockMessages, error) { + return s.Internal.ChainGetBlockMessages(p0, p1) +} +func (s *IChainInfoStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainGetGenesis(p0) +} +func (s *IChainInfoStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return s.Internal.ChainGetMessage(p0, p1) +} +func (s *IChainInfoStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]types.MessageCID, error) { + return s.Internal.ChainGetMessagesInTipset(p0, 
p1) +} +func (s *IChainInfoStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]types.MessageCID, error) { + return s.Internal.ChainGetParentMessages(p0, p1) +} +func (s *IChainInfoStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + return s.Internal.ChainGetParentReceipts(p0, p1) +} +func (s *IChainInfoStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*types.HeadChange, error) { + return s.Internal.ChainGetPath(p0, p1, p2) +} +func (s *IChainInfoStruct) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + return s.Internal.ChainGetRandomnessFromBeacon(p0, p1, p2, p3, p4) +} +func (s *IChainInfoStruct) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + return s.Internal.ChainGetRandomnessFromTickets(p0, p1, p2, p3, p4) +} +func (s *IChainInfoStruct) ChainGetReceipts(p0 context.Context, p1 cid.Cid) ([]types.MessageReceipt, error) { + return s.Internal.ChainGetReceipts(p0, p1) +} +func (s *IChainInfoStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSet(p0, p1) +} +func (s *IChainInfoStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) +} +func (s *IChainInfoStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainHead(p0) +} +func (s *IChainInfoStruct) ChainList(p0 context.Context, p1 types.TipSetKey, p2 int) ([]types.TipSetKey, error) { + return s.Internal.ChainList(p0, p1, p2) +} +func (s *IChainInfoStruct) ChainNotify(p0 context.Context) (<-chan []*types.HeadChange, error) { + return s.Internal.ChainNotify(p0) +} +func (s *IChainInfoStruct) 
ChainSetHead(p0 context.Context, p1 types.TipSetKey) error { + return s.Internal.ChainSetHead(p0, p1) +} +func (s *IChainInfoStruct) GetActor(p0 context.Context, p1 address.Address) (*types.Actor, error) { + return s.Internal.GetActor(p0, p1) +} +func (s *IChainInfoStruct) GetEntry(p0 context.Context, p1 abi.ChainEpoch, p2 uint64) (*types.BeaconEntry, error) { + return s.Internal.GetEntry(p0, p1, p2) +} +func (s *IChainInfoStruct) GetFullBlock(p0 context.Context, p1 cid.Cid) (*types.FullBlock, error) { + return s.Internal.GetFullBlock(p0, p1) +} +func (s *IChainInfoStruct) GetParentStateRootActor(p0 context.Context, p1 *types.TipSet, p2 address.Address) (*types.Actor, error) { + return s.Internal.GetParentStateRootActor(p0, p1, p2) +} +func (s *IChainInfoStruct) MessageWait(p0 context.Context, p1 cid.Cid, p2, p3 abi.ChainEpoch) (*types.ChainMessage, error) { + return s.Internal.MessageWait(p0, p1, p2, p3) +} +func (s *IChainInfoStruct) ProtocolParameters(p0 context.Context) (*types.ProtocolParams, error) { + return s.Internal.ProtocolParameters(p0) +} +func (s *IChainInfoStruct) ResolveToKeyAddr(p0 context.Context, p1 address.Address, p2 *types.TipSet) (address.Address, error) { + return s.Internal.ResolveToKeyAddr(p0, p1, p2) +} +func (s *IChainInfoStruct) StateActorCodeCIDs(p0 context.Context, p1 network.Version) (map[string]cid.Cid, error) { + return s.Internal.StateActorCodeCIDs(p0, p1) +} +func (s *IChainInfoStruct) StateActorManifestCID(p0 context.Context, p1 network.Version) (cid.Cid, error) { + return s.Internal.StateActorManifestCID(p0, p1) +} +func (s *IChainInfoStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*types.InvocResult, error) { + return s.Internal.StateCall(p0, p1, p2) +} +func (s *IChainInfoStruct) StateGetNetworkParams(p0 context.Context) (*types.NetworkParams, error) { + return s.Internal.StateGetNetworkParams(p0) +} +func (s *IChainInfoStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) 
(*types.MessageReceipt, error) { + return s.Internal.StateGetReceipt(p0, p1, p2) +} +func (s *IChainInfoStruct) StateNetworkName(p0 context.Context) (types.NetworkName, error) { + return s.Internal.StateNetworkName(p0) +} +func (s *IChainInfoStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { + return s.Internal.StateNetworkVersion(p0, p1) +} +func (s *IChainInfoStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*types.MsgLookup, error) { + return s.Internal.StateSearchMsg(p0, p1) +} +func (s *IChainInfoStruct) StateSearchMsgLimited(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*types.MsgLookup, error) { + return s.Internal.StateSearchMsgLimited(p0, p1, p2) +} +func (s *IChainInfoStruct) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { + return s.Internal.StateVerifiedRegistryRootKey(p0, p1) +} +func (s *IChainInfoStruct) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifierStatus(p0, p1, p2) +} +func (s *IChainInfoStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*types.MsgLookup, error) { + return s.Internal.StateWaitMsg(p0, p1, p2) +} +func (s *IChainInfoStruct) StateWaitMsgLimited(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*types.MsgLookup, error) { + return s.Internal.StateWaitMsgLimited(p0, p1, p2, p3) +} +func (s *IChainInfoStruct) VerifyEntry(p0, p1 *types.BeaconEntry, p2 abi.ChainEpoch) bool { + return s.Internal.VerifyEntry(p0, p1, p2) +} + +type IChainStruct struct { + IAccountStruct + IActorStruct + IBeaconStruct + IMinerStateStruct + IChainInfoStruct +} + +type IMarketStruct struct { + Internal struct { + StateMarketParticipants func(ctx context.Context, tsk types.TipSetKey) (map[string]types.MarketBalance, error) `perm:"read"` + } +} + +func (s *IMarketStruct) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) 
(map[string]types.MarketBalance, error) { + return s.Internal.StateMarketParticipants(p0, p1) +} + +type IMiningStruct struct { + Internal struct { + MinerCreateBlock func(ctx context.Context, bt *types.BlockTemplate) (*types.BlockMsg, error) `perm:"write"` + MinerGetBaseInfo func(ctx context.Context, maddr address.Address, round abi.ChainEpoch, tsk types.TipSetKey) (*types.MiningBaseInfo, error) `perm:"read"` + } +} + +func (s *IMiningStruct) MinerCreateBlock(p0 context.Context, p1 *types.BlockTemplate) (*types.BlockMsg, error) { + return s.Internal.MinerCreateBlock(p0, p1) +} +func (s *IMiningStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*types.MiningBaseInfo, error) { + return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3) +} + +type IMessagePoolStruct struct { + Internal struct { + GasBatchEstimateMessageGas func(ctx context.Context, estimateMessages []*types.EstimateMessage, fromNonce uint64, tsk types.TipSetKey) ([]*types.EstimateResult, error) `perm:"read"` + GasEstimateFeeCap func(ctx context.Context, msg *types.Message, maxqueueblks int64, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + GasEstimateGasLimit func(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) `perm:"read"` + GasEstimateGasPremium func(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + GasEstimateMessageGas func(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) `perm:"read"` + MpoolBatchPush func(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + MpoolBatchPushMessage func(ctx context.Context, msgs []*types.Message, spec *types.MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"` + MpoolBatchPushUntrusted func(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + MpoolClear 
func(ctx context.Context, local bool) error `perm:"write"` + MpoolDeleteByAdress func(ctx context.Context, addr address.Address) error `perm:"admin"` + MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"` + MpoolGetNonce func(ctx context.Context, addr address.Address) (uint64, error) `perm:"read"` + MpoolPending func(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"` + MpoolPublishByAddr func(context.Context, address.Address) error `perm:"admin"` + MpoolPublishMessage func(ctx context.Context, smsg *types.SignedMessage) error `perm:"admin"` + MpoolPush func(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) `perm:"write"` + MpoolPushMessage func(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"` + MpoolPushUntrusted func(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) `perm:"write"` + MpoolSelect func(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) `perm:"read"` + MpoolSelects func(context.Context, types.TipSetKey, []float64) ([][]*types.SignedMessage, error) `perm:"read"` + MpoolSetConfig func(ctx context.Context, cfg *types.MpoolConfig) error `perm:"admin"` + MpoolSub func(ctx context.Context) (<-chan types.MpoolUpdate, error) `perm:"read"` + } +} + +func (s *IMessagePoolStruct) GasBatchEstimateMessageGas(p0 context.Context, p1 []*types.EstimateMessage, p2 uint64, p3 types.TipSetKey) ([]*types.EstimateResult, error) { + return s.Internal.GasBatchEstimateMessageGas(p0, p1, p2, p3) +} +func (s *IMessagePoolStruct) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (big.Int, error) { + return s.Internal.GasEstimateFeeCap(p0, p1, p2, p3) +} +func (s *IMessagePoolStruct) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { + return s.Internal.GasEstimateGasLimit(p0, p1, p2) +} +func (s *IMessagePoolStruct) 
GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (big.Int, error) { + return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4) +} +func (s *IMessagePoolStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *types.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) +} +func (s *IMessagePoolStruct) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return s.Internal.MpoolBatchPush(p0, p1) +} +func (s *IMessagePoolStruct) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *types.MessageSendSpec) ([]*types.SignedMessage, error) { + return s.Internal.MpoolBatchPushMessage(p0, p1, p2) +} +func (s *IMessagePoolStruct) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return s.Internal.MpoolBatchPushUntrusted(p0, p1) +} +func (s *IMessagePoolStruct) MpoolClear(p0 context.Context, p1 bool) error { + return s.Internal.MpoolClear(p0, p1) +} +func (s *IMessagePoolStruct) MpoolDeleteByAdress(p0 context.Context, p1 address.Address) error { + return s.Internal.MpoolDeleteByAdress(p0, p1) +} +func (s *IMessagePoolStruct) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { + return s.Internal.MpoolGetConfig(p0) +} +func (s *IMessagePoolStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.MpoolGetNonce(p0, p1) +} +func (s *IMessagePoolStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + return s.Internal.MpoolPending(p0, p1) +} +func (s *IMessagePoolStruct) MpoolPublishByAddr(p0 context.Context, p1 address.Address) error { + return s.Internal.MpoolPublishByAddr(p0, p1) +} +func (s *IMessagePoolStruct) MpoolPublishMessage(p0 context.Context, p1 *types.SignedMessage) error { + return s.Internal.MpoolPublishMessage(p0, p1) +} +func (s 
*IMessagePoolStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPush(p0, p1) +} +func (s *IMessagePoolStruct) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *types.MessageSendSpec) (*types.SignedMessage, error) { + return s.Internal.MpoolPushMessage(p0, p1, p2) +} +func (s *IMessagePoolStruct) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPushUntrusted(p0, p1) +} +func (s *IMessagePoolStruct) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { + return s.Internal.MpoolSelect(p0, p1, p2) +} +func (s *IMessagePoolStruct) MpoolSelects(p0 context.Context, p1 types.TipSetKey, p2 []float64) ([][]*types.SignedMessage, error) { + return s.Internal.MpoolSelects(p0, p1, p2) +} +func (s *IMessagePoolStruct) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { + return s.Internal.MpoolSetConfig(p0, p1) +} +func (s *IMessagePoolStruct) MpoolSub(p0 context.Context) (<-chan types.MpoolUpdate, error) { + return s.Internal.MpoolSub(p0) +} + +type IMultiSigStruct struct { + Internal struct { + MsigAddApprove func(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) `perm:"sign"` + MsigAddCancel func(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) `perm:"sign"` + MsigAddPropose func(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) `perm:"sign"` + MsigApprove func(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) `perm:"sign"` + MsigApproveTxnHash func(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params 
[]byte) (cid.Cid, error) `perm:"sign"` + MsigCancel func(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) `perm:"sign"` + MsigCancelTxnHash func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"` + MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"` + MsigGetVested func(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) `perm:"read"` + MsigPropose func(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) `perm:"sign"` + MsigRemoveSigner func(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) `perm:"sign"` + MsigSwapApprove func(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) `perm:"sign"` + MsigSwapCancel func(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) `perm:"sign"` + MsigSwapPropose func(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) `perm:"sign"` + } +} + +func (s *IMultiSigStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) { + return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6) +} +func (s *IMultiSigStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) { + return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5) +} 
+func (s *IMultiSigStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { + return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4) +} +func (s *IMultiSigStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { + return s.Internal.MsigApprove(p0, p1, p2, p3) +} +func (s *IMultiSigStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) { + return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8) +} +func (s *IMultiSigStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { + return s.Internal.MsigCancel(p0, p1, p2, p3) +} +func (s *IMultiSigStruct) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) { + return s.Internal.MsigCancelTxnHash(p0, p1, p2, p3, p4, p5, p6, p7) +} +func (s *IMultiSigStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) { + return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6) +} +func (s *IMultiSigStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetVested(p0, p1, p2, p3) +} +func (s *IMultiSigStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) { + return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6) +} +func (s *IMultiSigStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { + return 
s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4) +} +func (s *IMultiSigStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) { + return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6) +} +func (s *IMultiSigStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) { + return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5) +} +func (s *IMultiSigStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) { + return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4) +} + +type INetworkStruct struct { + Internal struct { + ID func(ctx context.Context) (peer.ID, error) `perm:"read"` + NetAddrsListen func(ctx context.Context) (peer.AddrInfo, error) `perm:"read"` + NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"` + NetAutoNatStatus func(context.Context) (types.NatInfo, error) `perm:"read"` + NetBandwidthStats func(ctx context.Context) (metrics.Stats, error) `perm:"read"` + NetBandwidthStatsByPeer func(ctx context.Context) (map[string]metrics.Stats, error) `perm:"read"` + NetBandwidthStatsByProtocol func(ctx context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"` + NetConnect func(ctx context.Context, pi peer.AddrInfo) error `perm:"admin"` + NetConnectedness func(context.Context, peer.ID) (network2.Connectedness, error) `perm:"read"` + NetDisconnect func(ctx context.Context, p peer.ID) error `perm:"admin"` + NetFindPeer func(ctx context.Context, p peer.ID) (peer.AddrInfo, error) `perm:"read"` + NetFindProvidersAsync func(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo `perm:"read"` + NetGetClosestPeers func(ctx context.Context, key string) ([]peer.ID, error) `perm:"read"` + NetPeerInfo func(ctx context.Context, p 
peer.ID) (*types.ExtendedPeerInfo, error) `perm:"read"` + NetPeers func(ctx context.Context) ([]peer.AddrInfo, error) `perm:"read"` + NetPing func(ctx context.Context, p peer.ID) (time.Duration, error) `perm:"read"` + NetProtectAdd func(ctx context.Context, acl []peer.ID) error `perm:"admin"` + NetProtectList func(ctx context.Context) ([]peer.ID, error) `perm:"read"` + NetProtectRemove func(ctx context.Context, acl []peer.ID) error `perm:"admin"` + NetPubsubScores func(context.Context) ([]types.PubsubScore, error) `perm:"read"` + } +} + +func (s *INetworkStruct) ID(p0 context.Context) (peer.ID, error) { return s.Internal.ID(p0) } +func (s *INetworkStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { + return s.Internal.NetAddrsListen(p0) +} +func (s *INetworkStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) { + return s.Internal.NetAgentVersion(p0, p1) +} +func (s *INetworkStruct) NetAutoNatStatus(p0 context.Context) (types.NatInfo, error) { + return s.Internal.NetAutoNatStatus(p0) +} +func (s *INetworkStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) { + return s.Internal.NetBandwidthStats(p0) +} +func (s *INetworkStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) { + return s.Internal.NetBandwidthStatsByPeer(p0) +} +func (s *INetworkStruct) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) { + return s.Internal.NetBandwidthStatsByProtocol(p0) +} +func (s *INetworkStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error { + return s.Internal.NetConnect(p0, p1) +} +func (s *INetworkStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network2.Connectedness, error) { + return s.Internal.NetConnectedness(p0, p1) +} +func (s *INetworkStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error { + return s.Internal.NetDisconnect(p0, p1) +} +func (s *INetworkStruct) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) { 
+ return s.Internal.NetFindPeer(p0, p1) +} +func (s *INetworkStruct) NetFindProvidersAsync(p0 context.Context, p1 cid.Cid, p2 int) <-chan peer.AddrInfo { + return s.Internal.NetFindProvidersAsync(p0, p1, p2) +} +func (s *INetworkStruct) NetGetClosestPeers(p0 context.Context, p1 string) ([]peer.ID, error) { + return s.Internal.NetGetClosestPeers(p0, p1) +} +func (s *INetworkStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*types.ExtendedPeerInfo, error) { + return s.Internal.NetPeerInfo(p0, p1) +} +func (s *INetworkStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) { + return s.Internal.NetPeers(p0) +} +func (s *INetworkStruct) NetPing(p0 context.Context, p1 peer.ID) (time.Duration, error) { + return s.Internal.NetPing(p0, p1) +} +func (s *INetworkStruct) NetProtectAdd(p0 context.Context, p1 []peer.ID) error { + return s.Internal.NetProtectAdd(p0, p1) +} +func (s *INetworkStruct) NetProtectList(p0 context.Context) ([]peer.ID, error) { + return s.Internal.NetProtectList(p0) +} +func (s *INetworkStruct) NetProtectRemove(p0 context.Context, p1 []peer.ID) error { + return s.Internal.NetProtectRemove(p0, p1) +} +func (s *INetworkStruct) NetPubsubScores(p0 context.Context) ([]types.PubsubScore, error) { + return s.Internal.NetPubsubScores(p0) +} + +type IPaychanStruct struct { + Internal struct { + PaychAllocateLane func(ctx context.Context, ch address.Address) (uint64, error) `perm:"sign"` + PaychAvailableFunds func(ctx context.Context, ch address.Address) (*types.ChannelAvailableFunds, error) `perm:"sign"` + PaychAvailableFundsByFromTo func(ctx context.Context, from, to address.Address) (*types.ChannelAvailableFunds, error) `perm:"sign"` + PaychCollect func(ctx context.Context, addr address.Address) (cid.Cid, error) `perm:"sign"` + PaychGet func(ctx context.Context, from, to address.Address, amt big.Int) (*types.ChannelInfo, error) `perm:"sign"` + PaychGetWaitReady func(ctx context.Context, sentinel cid.Cid) (address.Address, error) `perm:"sign"` + 
PaychList func(ctx context.Context) ([]address.Address, error) `perm:"read"` + PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []types.VoucherSpec) (*types.PaymentInfo, error) `perm:"sign"` + PaychSettle func(ctx context.Context, addr address.Address) (cid.Cid, error) `perm:"sign"` + PaychStatus func(ctx context.Context, pch address.Address) (*types.Status, error) `perm:"read"` + PaychVoucherAdd func(ctx context.Context, ch address.Address, sv *types.SignedVoucher, proof []byte, minDelta big.Int) (big.Int, error) `perm:"write"` + PaychVoucherCheckSpendable func(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (bool, error) `perm:"read"` + PaychVoucherCheckValid func(ctx context.Context, ch address.Address, sv *types.SignedVoucher) error `perm:"read"` + PaychVoucherCreate func(ctx context.Context, pch address.Address, amt big.Int, lane uint64) (*types.VoucherCreateResult, error) `perm:"sign"` + PaychVoucherList func(ctx context.Context, pch address.Address) ([]*types.SignedVoucher, error) `perm:"write"` + PaychVoucherSubmit func(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) `perm:"sign"` + } +} + +func (s *IPaychanStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.PaychAllocateLane(p0, p1) +} +func (s *IPaychanStruct) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*types.ChannelAvailableFunds, error) { + return s.Internal.PaychAvailableFunds(p0, p1) +} +func (s *IPaychanStruct) PaychAvailableFundsByFromTo(p0 context.Context, p1, p2 address.Address) (*types.ChannelAvailableFunds, error) { + return s.Internal.PaychAvailableFundsByFromTo(p0, p1, p2) +} +func (s *IPaychanStruct) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return s.Internal.PaychCollect(p0, p1) +} +func (s *IPaychanStruct) PaychGet(p0 context.Context, p1, p2 
address.Address, p3 big.Int) (*types.ChannelInfo, error) { + return s.Internal.PaychGet(p0, p1, p2, p3) +} +func (s *IPaychanStruct) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) { + return s.Internal.PaychGetWaitReady(p0, p1) +} +func (s *IPaychanStruct) PaychList(p0 context.Context) ([]address.Address, error) { + return s.Internal.PaychList(p0) +} +func (s *IPaychanStruct) PaychNewPayment(p0 context.Context, p1, p2 address.Address, p3 []types.VoucherSpec) (*types.PaymentInfo, error) { + return s.Internal.PaychNewPayment(p0, p1, p2, p3) +} +func (s *IPaychanStruct) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return s.Internal.PaychSettle(p0, p1) +} +func (s *IPaychanStruct) PaychStatus(p0 context.Context, p1 address.Address) (*types.Status, error) { + return s.Internal.PaychStatus(p0, p1) +} +func (s *IPaychanStruct) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *types.SignedVoucher, p3 []byte, p4 big.Int) (big.Int, error) { + return s.Internal.PaychVoucherAdd(p0, p1, p2, p3, p4) +} +func (s *IPaychanStruct) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *types.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { + return s.Internal.PaychVoucherCheckSpendable(p0, p1, p2, p3, p4) +} +func (s *IPaychanStruct) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *types.SignedVoucher) error { + return s.Internal.PaychVoucherCheckValid(p0, p1, p2) +} +func (s *IPaychanStruct) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 big.Int, p3 uint64) (*types.VoucherCreateResult, error) { + return s.Internal.PaychVoucherCreate(p0, p1, p2, p3) +} +func (s *IPaychanStruct) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*types.SignedVoucher, error) { + return s.Internal.PaychVoucherList(p0, p1) +} +func (s *IPaychanStruct) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *types.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { + 
return s.Internal.PaychVoucherSubmit(p0, p1, p2, p3, p4) +} + +type ISyncerStruct struct { + Internal struct { + ChainSyncHandleNewTipSet func(ctx context.Context, ci *types.ChainInfo) error `perm:"write"` + ChainTipSetWeight func(ctx context.Context, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + Concurrent func(ctx context.Context) int64 `perm:"read"` + SetConcurrent func(ctx context.Context, concurrent int64) error `perm:"admin"` + SyncState func(ctx context.Context) (*types.SyncState, error) `perm:"read"` + SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"` + SyncerTracker func(ctx context.Context) *types.TargetTracker `perm:"read"` + } +} + +func (s *ISyncerStruct) ChainSyncHandleNewTipSet(p0 context.Context, p1 *types.ChainInfo) error { + return s.Internal.ChainSyncHandleNewTipSet(p0, p1) +} +func (s *ISyncerStruct) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (big.Int, error) { + return s.Internal.ChainTipSetWeight(p0, p1) +} +func (s *ISyncerStruct) Concurrent(p0 context.Context) int64 { return s.Internal.Concurrent(p0) } +func (s *ISyncerStruct) SetConcurrent(p0 context.Context, p1 int64) error { + return s.Internal.SetConcurrent(p0, p1) +} +func (s *ISyncerStruct) SyncState(p0 context.Context) (*types.SyncState, error) { + return s.Internal.SyncState(p0) +} +func (s *ISyncerStruct) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { + return s.Internal.SyncSubmitBlock(p0, p1) +} +func (s *ISyncerStruct) SyncerTracker(p0 context.Context) *types.TargetTracker { + return s.Internal.SyncerTracker(p0) +} + +type IWalletStruct struct { + Internal struct { + HasPassword func(ctx context.Context) bool `perm:"admin"` + LockWallet func(ctx context.Context) error `perm:"admin"` + SetPassword func(ctx context.Context, password []byte) error `perm:"admin"` + UnLockWallet func(ctx context.Context, password []byte) error `perm:"admin"` + WalletAddresses func(ctx context.Context) []address.Address 
`perm:"admin"` + WalletBalance func(ctx context.Context, addr address.Address) (abi.TokenAmount, error) `perm:"read"` + WalletDefaultAddress func(ctx context.Context) (address.Address, error) `perm:"write"` + WalletDelete func(ctx context.Context, addr address.Address) error `perm:"admin"` + WalletExport func(ctx context.Context, addr address.Address, password string) (*types.KeyInfo, error) `perm:"admin"` + WalletHas func(ctx context.Context, addr address.Address) (bool, error) `perm:"write"` + WalletImport func(ctx context.Context, key *types.KeyInfo) (address.Address, error) `perm:"admin"` + WalletNewAddress func(ctx context.Context, protocol address.Protocol) (address.Address, error) `perm:"write"` + WalletSetDefault func(ctx context.Context, addr address.Address) error `perm:"write"` + WalletSign func(ctx context.Context, k address.Address, msg []byte, meta types.MsgMeta) (*crypto.Signature, error) `perm:"sign"` + WalletSignMessage func(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) `perm:"sign"` + WalletState func(ctx context.Context) int `perm:"admin"` + } +} + +func (s *IWalletStruct) HasPassword(p0 context.Context) bool { return s.Internal.HasPassword(p0) } +func (s *IWalletStruct) LockWallet(p0 context.Context) error { return s.Internal.LockWallet(p0) } +func (s *IWalletStruct) SetPassword(p0 context.Context, p1 []byte) error { + return s.Internal.SetPassword(p0, p1) +} +func (s *IWalletStruct) UnLockWallet(p0 context.Context, p1 []byte) error { + return s.Internal.UnLockWallet(p0, p1) +} +func (s *IWalletStruct) WalletAddresses(p0 context.Context) []address.Address { + return s.Internal.WalletAddresses(p0) +} +func (s *IWalletStruct) WalletBalance(p0 context.Context, p1 address.Address) (abi.TokenAmount, error) { + return s.Internal.WalletBalance(p0, p1) +} +func (s *IWalletStruct) WalletDefaultAddress(p0 context.Context) (address.Address, error) { + return s.Internal.WalletDefaultAddress(p0) +} +func (s 
*IWalletStruct) WalletDelete(p0 context.Context, p1 address.Address) error { + return s.Internal.WalletDelete(p0, p1) +} +func (s *IWalletStruct) WalletExport(p0 context.Context, p1 address.Address, p2 string) (*types.KeyInfo, error) { + return s.Internal.WalletExport(p0, p1, p2) +} +func (s *IWalletStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.WalletHas(p0, p1) +} +func (s *IWalletStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + return s.Internal.WalletImport(p0, p1) +} +func (s *IWalletStruct) WalletNewAddress(p0 context.Context, p1 address.Protocol) (address.Address, error) { + return s.Internal.WalletNewAddress(p0, p1) +} +func (s *IWalletStruct) WalletSetDefault(p0 context.Context, p1 address.Address) error { + return s.Internal.WalletSetDefault(p0, p1) +} +func (s *IWalletStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte, p3 types.MsgMeta) (*crypto.Signature, error) { + return s.Internal.WalletSign(p0, p1, p2, p3) +} +func (s *IWalletStruct) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { + return s.Internal.WalletSignMessage(p0, p1, p2) +} +func (s *IWalletStruct) WalletState(p0 context.Context) int { return s.Internal.WalletState(p0) } + +type ICommonStruct struct { + Internal struct { + StartTime func(context.Context) (time.Time, error) `perm:"read"` + Version func(ctx context.Context) (types.Version, error) `perm:"read"` + } +} + +func (s *ICommonStruct) StartTime(p0 context.Context) (time.Time, error) { + return s.Internal.StartTime(p0) +} +func (s *ICommonStruct) Version(p0 context.Context) (types.Version, error) { + return s.Internal.Version(p0) +} + +type FullNodeStruct struct { + IBlockStoreStruct + IChainStruct + IMarketStruct + IMiningStruct + IMessagePoolStruct + IMultiSigStruct + INetworkStruct + IPaychanStruct + ISyncerStruct + IWalletStruct + ICommonStruct +} diff --git 
a/venus-shared/api/chain/v0/syncer.go b/venus-shared/api/chain/v0/syncer.go new file mode 100644 index 0000000000..beaa649387 --- /dev/null +++ b/venus-shared/api/chain/v0/syncer.go @@ -0,0 +1,19 @@ +package v0 + +import ( + "context" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type ISyncer interface { + ChainSyncHandleNewTipSet(ctx context.Context, ci *types.ChainInfo) error //perm:write + SetConcurrent(ctx context.Context, concurrent int64) error //perm:admin + SyncerTracker(ctx context.Context) *types.TargetTracker //perm:read + Concurrent(ctx context.Context) int64 //perm:read + ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (big.Int, error) //perm:read + SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write + SyncState(ctx context.Context) (*types.SyncState, error) //perm:read +} diff --git a/venus-shared/api/chain/v0/wallet.go b/venus-shared/api/chain/v0/wallet.go new file mode 100644 index 0000000000..fa9c5a251b --- /dev/null +++ b/venus-shared/api/chain/v0/wallet.go @@ -0,0 +1,30 @@ +package v0 + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IWallet interface { + WalletSign(ctx context.Context, k address.Address, msg []byte, meta types.MsgMeta) (*crypto.Signature, error) //perm:sign + WalletExport(ctx context.Context, addr address.Address, password string) (*types.KeyInfo, error) //perm:admin + WalletImport(ctx context.Context, key *types.KeyInfo) (address.Address, error) //perm:admin + WalletDelete(ctx context.Context, addr address.Address) error //perm:admin + WalletHas(ctx context.Context, addr address.Address) (bool, error) //perm:write + WalletNewAddress(ctx context.Context, protocol address.Protocol) (address.Address, error) //perm:write + 
WalletBalance(ctx context.Context, addr address.Address) (abi.TokenAmount, error) //perm:read + WalletDefaultAddress(ctx context.Context) (address.Address, error) //perm:write + WalletAddresses(ctx context.Context) []address.Address //perm:admin + WalletSetDefault(ctx context.Context, addr address.Address) error //perm:write + WalletSignMessage(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) //perm:sign + LockWallet(ctx context.Context) error //perm:admin + UnLockWallet(ctx context.Context, password []byte) error //perm:admin + SetPassword(ctx context.Context, password []byte) error //perm:admin + HasPassword(ctx context.Context) bool //perm:admin + WalletState(ctx context.Context) int //perm:admin +} diff --git a/venus-shared/api/chain/v1/blockstore.go b/venus-shared/api/chain/v1/blockstore.go new file mode 100644 index 0000000000..ddb74b74e7 --- /dev/null +++ b/venus-shared/api/chain/v1/blockstore.go @@ -0,0 +1,18 @@ +package v1 + +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/types" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +type IBlockStore interface { + ChainReadObj(ctx context.Context, cid cid.Cid) ([]byte, error) //perm:read + ChainDeleteObj(ctx context.Context, obj cid.Cid) error //perm:admin + ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) //perm:read + ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (types.ObjStat, error) //perm:read + // ChainPutObj puts a given object into the block store + ChainPutObj(context.Context, blocks.Block) error //perm:admin +} diff --git a/venus-shared/api/chain/v1/chain.go b/venus-shared/api/chain/v1/chain.go new file mode 100644 index 0000000000..273b4255bf --- /dev/null +++ b/venus-shared/api/chain/v1/chain.go @@ -0,0 +1,180 @@ +package v1 + +import ( + "context" + "encoding/json" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + 
"github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + + lminer "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IChain interface { + IAccount + IActor + IMinerState + IChainInfo +} + +type IAccount interface { + StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) //perm:read +} + +type IActor interface { + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read + ListActor(ctx context.Context) (map[address.Address]*types.Actor, error) //perm:read +} + +type IChainInfo interface { + BlockTime(ctx context.Context) time.Duration //perm:read + ChainList(ctx context.Context, tsKey types.TipSetKey, count int) ([]types.TipSetKey, error) //perm:read + ChainHead(ctx context.Context) (*types.TipSet, error) //perm:read + ChainSetHead(ctx context.Context, key types.TipSetKey) error //perm:admin + ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) //perm:read + ChainGetTipSetByHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) //perm:read + ChainGetTipSetAfterHeight(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) //perm:read + StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read + StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read + // StateGetBeaconEntry 
returns the beacon entry for the given filecoin epoch. If + // the entry has not yet been produced, the call will block until the entry + // becomes available + StateGetBeaconEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read + ChainGetBlock(ctx context.Context, id cid.Cid) (*types.BlockHeader, error) //perm:read + ChainGetMessage(ctx context.Context, msgID cid.Cid) (*types.Message, error) //perm:read + ChainGetBlockMessages(ctx context.Context, bid cid.Cid) (*types.BlockMessages, error) //perm:read + ChainGetMessagesInTipset(ctx context.Context, key types.TipSetKey) ([]types.MessageCID, error) //perm:read + ChainGetReceipts(ctx context.Context, id cid.Cid) ([]types.MessageReceipt, error) //perm:read + ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([]types.MessageCID, error) //perm:read + ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([]*types.MessageReceipt, error) //perm:read + StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read + StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read + ChainNotify(ctx context.Context) (<-chan []*types.HeadChange, error) //perm:read + GetFullBlock(ctx context.Context, id cid.Cid) (*types.FullBlock, error) //perm:read + GetActor(ctx context.Context, addr address.Address) (*types.Actor, error) //perm:read + GetParentStateRootActor(ctx context.Context, ts *types.TipSet, addr address.Address) (*types.Actor, error) //perm:read + GetEntry(ctx context.Context, height abi.ChainEpoch, round uint64) (*types.BeaconEntry, error) //perm:read + MessageWait(ctx context.Context, msgCid cid.Cid, confidence, lookback abi.ChainEpoch) (*types.ChainMessage, error) //perm:read + ProtocolParameters(ctx context.Context) (*types.ProtocolParams, error) //perm:read + ResolveToKeyAddr(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) 
//perm:read + StateNetworkName(ctx context.Context) (types.NetworkName, error) //perm:read + // StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed + // + // NOTE: If a replacing message is found on chain, this method will return + // a MsgLookup for the replacing message - the MsgLookup.Message will be a different + // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the + // result of the execution of the replacing message. + // + // If the caller wants to ensure that exactly the requested message was executed, + // they must check that MsgLookup.Message is equal to the provided 'cid', or set the + // `allowReplaced` parameter to false. Without this check, and with `allowReplaced` + // set to true, both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) //perm:read + // StateWaitMsg looks back up to limit epochs in the chain for a message. + // If not found, it blocks until the message arrives on chain, and gets to the + // indicated confidence depth. + // + // NOTE: If a replacing message is found on chain, this method will return + // a MsgLookup for the replacing message - the MsgLookup.Message will be a different + // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the + // result of the execution of the replacing message. 
+ // + // If the caller wants to ensure that exactly the requested message was executed, + // they must check that MsgLookup.Message is equal to the provided 'cid', or set the + // `allowReplaced` parameter to false. Without this check, and with `allowReplaced` + // set to true, both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) //perm:read + StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) //perm:read + VerifyEntry(parent, child *types.BeaconEntry, height abi.ChainEpoch) bool //perm:read + ChainExport(context.Context, abi.ChainEpoch, bool, types.TipSetKey) (<-chan []byte, error) //perm:read + ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*types.HeadChange, error) //perm:read + // StateGetNetworkParams return current network params + StateGetNetworkParams(ctx context.Context) (*types.NetworkParams, error) //perm:read + // StateActorCodeCIDs returns the CIDs of all the builtin actors for the given network version + StateActorCodeCIDs(context.Context, network.Version) (map[string]cid.Cid, error) //perm:read + // ChainGetGenesis returns the genesis tipset. 
+ ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read + // StateActorManifestCID returns the CID of the builtin actors manifest for the given network version + StateActorManifestCID(context.Context, network.Version) (cid.Cid, error) //perm:read + StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*types.InvocResult, error) //perm:read +} + +type IMinerState interface { + StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.ActorState, error) //perm:read + StateListMessages(ctx context.Context, match *types.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read + StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read + StateEncodeParams(ctx context.Context, toActCode cid.Cid, method abi.MethodNum, params json.RawMessage) ([]byte, error) //perm:read + StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) //perm:read + // StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector. + // Returns nil and no error if the sector isn't precommitted. + // + // Note that the sector number may be allocated while PreCommitInfo is nil. This means that either allocated sector + // numbers were compacted, and the sector number was marked as allocated in order to reduce size of the allocated + // sectors bitfield, or that the sector was precommitted, but the precommit has expired. 
+ StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*types.SectorPreCommitOnChainInfo, error) //perm:read + StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*types.SectorOnChainInfo, error) //perm:read + StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorLocation, error) //perm:read + StateMinerSectorSize(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (abi.SectorSize, error) //perm:read + StateMinerInfo(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.MinerInfo, error) //perm:read + StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (address.Address, error) //perm:read + StateMinerFaults(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) //perm:read + StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*types.Fault, error) //perm:read + StateMinerRecoveries(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) //perm:read + StateMinerProvingDeadline(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (*dline.Info, error) //perm:read + StateMinerPartitions(ctx context.Context, maddr address.Address, dlIdx uint64, tsk types.TipSetKey) ([]types.Partition, error) //perm:read + StateMinerDeadlines(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]types.Deadline, error) //perm:read + StateMinerSectors(ctx context.Context, maddr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) //perm:read + StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*types.MarketDeal, error) //perm:read + // StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a 
pending deal. Returns nil if + // pending allocation is not found. + StateGetAllocationForPendingDeal(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*types.Allocation, error) //perm:read + // StateGetAllocation returns the allocation for a given address and allocation ID. + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationID types.AllocationId, tsk types.TipSetKey) (*types.Allocation, error) //perm:read + // StateGetAllocations returns the all the allocations for a given client. + StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[types.AllocationId]types.Allocation, error) //perm:read + // StateGetClaim returns the claim for a given address and claim ID. + StateGetClaim(ctx context.Context, providerAddr address.Address, claimID types.ClaimId, tsk types.TipSetKey) (*types.Claim, error) //perm:read + // StateGetClaims returns the all the claims for a given provider. + StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[types.ClaimId]types.Claim, error) //perm:read + // StateComputeDataCID computes DataCID from a set of on-chain deals + StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) //perm:read + StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) //perm:read + StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) //perm:read + StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (types.CirculatingSupply, error) //perm:read + StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) //perm:read + StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]*types.MarketDeal, error) 
//perm:read + StateMinerActiveSectors(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) //perm:read + StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) //perm:read + // StateLookupRobustAddress returns the public key address of the given ID address for non-account addresses (multisig, miners etc) + StateLookupRobustAddress(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read + StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) //perm:read + StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) //perm:read + StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.MinerPower, error) //perm:read + StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (big.Int, error) //perm:read + StateSectorExpiration(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorExpiration, error) //perm:read + StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read + StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MinerSectors, error) //perm:read + StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MarketBalance, error) //perm:read + StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (types.DealCollateralBounds, error) //perm:read + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read + // StateMinerAllocated returns a bitfield containing all sector numbers marked as allocated in miner state + StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error) //perm:read 
+} diff --git a/venus-shared/api/chain/v1/client_gen.go b/venus-shared/api/chain/v1/client_gen.go new file mode 100644 index 0000000000..9bb8428fb6 --- /dev/null +++ b/venus-shared/api/chain/v1/client_gen.go @@ -0,0 +1,54 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package v1 + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = 1 +const APINamespace = "v1.FullNode" +const MethodNamespace = "Filecoin" + +// NewFullNodeRPC creates a new httpparse jsonrpc remotecli. +func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (FullNode, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res FullNodeStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} + +// DialFullNodeRPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. 
+func DialFullNodeRPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) (FullNode, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res FullNodeStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} diff --git a/venus-shared/api/chain/v1/common.go b/venus-shared/api/chain/v1/common.go new file mode 100644 index 0000000000..bd6ac33647 --- /dev/null +++ b/venus-shared/api/chain/v1/common.go @@ -0,0 +1,17 @@ +package v1 + +import ( + "context" + "time" + + "github.com/filecoin-project/venus/venus-shared/api" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type ICommon interface { + api.Version + + NodeStatus(ctx context.Context, inclChainStatus bool) (types.NodeStatus, error) //perm:read + // StartTime returns node start time + StartTime(context.Context) (time.Time, error) //perm:read +} diff --git a/venus-shared/api/chain/v1/fullnode.go b/venus-shared/api/chain/v1/fullnode.go new file mode 100644 index 0000000000..27efd8f6e4 --- /dev/null +++ b/venus-shared/api/chain/v1/fullnode.go @@ -0,0 +1,15 @@ +package v1 + +type FullNode interface { + IBlockStore + IChain + IMarket + IMining + IMessagePool + IMultiSig + INetwork + IPaychan + ISyncer + IWallet + ICommon +} diff --git a/venus-shared/api/chain/v1/market.go b/venus-shared/api/chain/v1/market.go new file mode 100644 index 0000000000..d3fd0cecf8 --- /dev/null +++ b/venus-shared/api/chain/v1/market.go @@ -0,0 +1,11 @@ +package v1 + +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/types" +) + 
+type IMarket interface { + StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]types.MarketBalance, error) //perm:read +} diff --git a/venus-shared/api/chain/v1/method.md b/venus-shared/api/chain/v1/method.md new file mode 100644 index 0000000000..0e5453c848 --- /dev/null +++ b/venus-shared/api/chain/v1/method.md @@ -0,0 +1,6483 @@ +# Groups + +* [Account](#account) + * [StateAccountKey](#stateaccountkey) +* [Actor](#actor) + * [ListActor](#listactor) + * [StateGetActor](#stategetactor) +* [BlockStore](#blockstore) + * [ChainDeleteObj](#chaindeleteobj) + * [ChainHasObj](#chainhasobj) + * [ChainPutObj](#chainputobj) + * [ChainReadObj](#chainreadobj) + * [ChainStatObj](#chainstatobj) +* [ChainInfo](#chaininfo) + * [BlockTime](#blocktime) + * [ChainExport](#chainexport) + * [ChainGetBlock](#chaingetblock) + * [ChainGetBlockMessages](#chaingetblockmessages) + * [ChainGetGenesis](#chaingetgenesis) + * [ChainGetMessage](#chaingetmessage) + * [ChainGetMessagesInTipset](#chaingetmessagesintipset) + * [ChainGetParentMessages](#chaingetparentmessages) + * [ChainGetParentReceipts](#chaingetparentreceipts) + * [ChainGetPath](#chaingetpath) + * [ChainGetReceipts](#chaingetreceipts) + * [ChainGetTipSet](#chaingettipset) + * [ChainGetTipSetAfterHeight](#chaingettipsetafterheight) + * [ChainGetTipSetByHeight](#chaingettipsetbyheight) + * [ChainHead](#chainhead) + * [ChainList](#chainlist) + * [ChainNotify](#chainnotify) + * [ChainSetHead](#chainsethead) + * [GetActor](#getactor) + * [GetEntry](#getentry) + * [GetFullBlock](#getfullblock) + * [GetParentStateRootActor](#getparentstaterootactor) + * [MessageWait](#messagewait) + * [ProtocolParameters](#protocolparameters) + * [ResolveToKeyAddr](#resolvetokeyaddr) + * [StateActorCodeCIDs](#stateactorcodecids) + * [StateActorManifestCID](#stateactormanifestcid) + * [StateCall](#statecall) + * [StateGetBeaconEntry](#stategetbeaconentry) + * [StateGetNetworkParams](#stategetnetworkparams) + * 
[StateGetRandomnessFromBeacon](#stategetrandomnessfrombeacon) + * [StateGetRandomnessFromTickets](#stategetrandomnessfromtickets) + * [StateNetworkName](#statenetworkname) + * [StateNetworkVersion](#statenetworkversion) + * [StateSearchMsg](#statesearchmsg) + * [StateVerifiedRegistryRootKey](#stateverifiedregistryrootkey) + * [StateVerifierStatus](#stateverifierstatus) + * [StateWaitMsg](#statewaitmsg) + * [VerifyEntry](#verifyentry) +* [Common](#common) + * [NodeStatus](#nodestatus) + * [StartTime](#starttime) + * [Version](#version) +* [Market](#market) + * [StateMarketParticipants](#statemarketparticipants) +* [MessagePool](#messagepool) + * [GasBatchEstimateMessageGas](#gasbatchestimatemessagegas) + * [GasEstimateFeeCap](#gasestimatefeecap) + * [GasEstimateGasLimit](#gasestimategaslimit) + * [GasEstimateGasPremium](#gasestimategaspremium) + * [GasEstimateMessageGas](#gasestimatemessagegas) + * [MpoolBatchPush](#mpoolbatchpush) + * [MpoolBatchPushMessage](#mpoolbatchpushmessage) + * [MpoolBatchPushUntrusted](#mpoolbatchpushuntrusted) + * [MpoolCheckMessages](#mpoolcheckmessages) + * [MpoolCheckPendingMessages](#mpoolcheckpendingmessages) + * [MpoolCheckReplaceMessages](#mpoolcheckreplacemessages) + * [MpoolClear](#mpoolclear) + * [MpoolDeleteByAdress](#mpooldeletebyadress) + * [MpoolGetConfig](#mpoolgetconfig) + * [MpoolGetNonce](#mpoolgetnonce) + * [MpoolPending](#mpoolpending) + * [MpoolPublishByAddr](#mpoolpublishbyaddr) + * [MpoolPublishMessage](#mpoolpublishmessage) + * [MpoolPush](#mpoolpush) + * [MpoolPushMessage](#mpoolpushmessage) + * [MpoolPushUntrusted](#mpoolpushuntrusted) + * [MpoolSelect](#mpoolselect) + * [MpoolSelects](#mpoolselects) + * [MpoolSetConfig](#mpoolsetconfig) + * [MpoolSub](#mpoolsub) +* [MinerState](#minerstate) + * [StateAllMinerFaults](#stateallminerfaults) + * [StateChangedActors](#statechangedactors) + * [StateCirculatingSupply](#statecirculatingsupply) + * [StateComputeDataCID](#statecomputedatacid) + * 
[StateDealProviderCollateralBounds](#statedealprovidercollateralbounds) + * [StateDecodeParams](#statedecodeparams) + * [StateEncodeParams](#stateencodeparams) + * [StateGetAllocation](#stategetallocation) + * [StateGetAllocationForPendingDeal](#stategetallocationforpendingdeal) + * [StateGetAllocations](#stategetallocations) + * [StateGetClaim](#stategetclaim) + * [StateGetClaims](#stategetclaims) + * [StateListActors](#statelistactors) + * [StateListMessages](#statelistmessages) + * [StateListMiners](#statelistminers) + * [StateLookupID](#statelookupid) + * [StateLookupRobustAddress](#statelookuprobustaddress) + * [StateMarketBalance](#statemarketbalance) + * [StateMarketDeals](#statemarketdeals) + * [StateMarketStorageDeal](#statemarketstoragedeal) + * [StateMinerActiveSectors](#statemineractivesectors) + * [StateMinerAllocated](#stateminerallocated) + * [StateMinerAvailableBalance](#statemineravailablebalance) + * [StateMinerDeadlines](#stateminerdeadlines) + * [StateMinerFaults](#stateminerfaults) + * [StateMinerInfo](#stateminerinfo) + * [StateMinerInitialPledgeCollateral](#stateminerinitialpledgecollateral) + * [StateMinerPartitions](#stateminerpartitions) + * [StateMinerPower](#stateminerpower) + * [StateMinerPreCommitDepositForPower](#stateminerprecommitdepositforpower) + * [StateMinerProvingDeadline](#stateminerprovingdeadline) + * [StateMinerRecoveries](#stateminerrecoveries) + * [StateMinerSectorAllocated](#stateminersectorallocated) + * [StateMinerSectorCount](#stateminersectorcount) + * [StateMinerSectorSize](#stateminersectorsize) + * [StateMinerSectors](#stateminersectors) + * [StateMinerWorkerAddress](#stateminerworkeraddress) + * [StateReadState](#statereadstate) + * [StateSectorExpiration](#statesectorexpiration) + * [StateSectorGetInfo](#statesectorgetinfo) + * [StateSectorPartition](#statesectorpartition) + * [StateSectorPreCommitInfo](#statesectorprecommitinfo) + * [StateVMCirculatingSupplyInternal](#statevmcirculatingsupplyinternal) + * 
[StateVerifiedClientStatus](#stateverifiedclientstatus) +* [Mining](#mining) + * [MinerCreateBlock](#minercreateblock) + * [MinerGetBaseInfo](#minergetbaseinfo) +* [MultiSig](#multisig) + * [MsigAddApprove](#msigaddapprove) + * [MsigAddCancel](#msigaddcancel) + * [MsigAddPropose](#msigaddpropose) + * [MsigApprove](#msigapprove) + * [MsigApproveTxnHash](#msigapprovetxnhash) + * [MsigCancel](#msigcancel) + * [MsigCancelTxnHash](#msigcanceltxnhash) + * [MsigCreate](#msigcreate) + * [MsigGetVested](#msiggetvested) + * [MsigPropose](#msigpropose) + * [MsigRemoveSigner](#msigremovesigner) + * [MsigSwapApprove](#msigswapapprove) + * [MsigSwapCancel](#msigswapcancel) + * [MsigSwapPropose](#msigswappropose) +* [Network](#network) + * [ID](#id) + * [NetAddrsListen](#netaddrslisten) + * [NetAgentVersion](#netagentversion) + * [NetAutoNatStatus](#netautonatstatus) + * [NetBandwidthStats](#netbandwidthstats) + * [NetBandwidthStatsByPeer](#netbandwidthstatsbypeer) + * [NetBandwidthStatsByProtocol](#netbandwidthstatsbyprotocol) + * [NetConnect](#netconnect) + * [NetConnectedness](#netconnectedness) + * [NetDisconnect](#netdisconnect) + * [NetFindPeer](#netfindpeer) + * [NetFindProvidersAsync](#netfindprovidersasync) + * [NetGetClosestPeers](#netgetclosestpeers) + * [NetPeerInfo](#netpeerinfo) + * [NetPeers](#netpeers) + * [NetPing](#netping) + * [NetProtectAdd](#netprotectadd) + * [NetProtectList](#netprotectlist) + * [NetProtectRemove](#netprotectremove) + * [NetPubsubScores](#netpubsubscores) +* [Paychan](#paychan) + * [PaychAllocateLane](#paychallocatelane) + * [PaychAvailableFunds](#paychavailablefunds) + * [PaychAvailableFundsByFromTo](#paychavailablefundsbyfromto) + * [PaychCollect](#paychcollect) + * [PaychFund](#paychfund) + * [PaychGet](#paychget) + * [PaychGetWaitReady](#paychgetwaitready) + * [PaychList](#paychlist) + * [PaychNewPayment](#paychnewpayment) + * [PaychSettle](#paychsettle) + * [PaychStatus](#paychstatus) + * [PaychVoucherAdd](#paychvoucheradd) + * 
[PaychVoucherCheckSpendable](#paychvouchercheckspendable) + * [PaychVoucherCheckValid](#paychvouchercheckvalid) + * [PaychVoucherCreate](#paychvouchercreate) + * [PaychVoucherList](#paychvoucherlist) + * [PaychVoucherSubmit](#paychvouchersubmit) +* [Syncer](#syncer) + * [ChainSyncHandleNewTipSet](#chainsynchandlenewtipset) + * [ChainTipSetWeight](#chaintipsetweight) + * [Concurrent](#concurrent) + * [SetConcurrent](#setconcurrent) + * [SyncState](#syncstate) + * [SyncSubmitBlock](#syncsubmitblock) + * [SyncerTracker](#syncertracker) +* [Wallet](#wallet) + * [HasPassword](#haspassword) + * [LockWallet](#lockwallet) + * [SetPassword](#setpassword) + * [UnLockWallet](#unlockwallet) + * [WalletAddresses](#walletaddresses) + * [WalletBalance](#walletbalance) + * [WalletDefaultAddress](#walletdefaultaddress) + * [WalletDelete](#walletdelete) + * [WalletExport](#walletexport) + * [WalletHas](#wallethas) + * [WalletImport](#walletimport) + * [WalletNewAddress](#walletnewaddress) + * [WalletSetDefault](#walletsetdefault) + * [WalletSign](#walletsign) + * [WalletSignMessage](#walletsignmessage) + * [WalletState](#walletstate) + +## Account + +### StateAccountKey + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +## Actor + +### ListActor + + +Perms: read + +Inputs: `[]` + +Response: `{}` + +### StateGetActor + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" +} +``` + 
+## BlockStore + +### ChainDeleteObj + + +Perms: admin + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `{}` + +### ChainHasObj + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `true` + +### ChainPutObj +ChainPutObj puts a given object into the block store + + +Perms: admin + +Inputs: +```json +[ + {} +] +``` + +Response: `{}` + +### ChainReadObj + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### ChainStatObj + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Size": 42, + "Links": 42 +} +``` + +## ChainInfo + +### BlockTime + + +Perms: read + +Inputs: `[]` + +Response: `60000000000` + +### ChainExport + + +Perms: read + +Inputs: +```json +[ + 10101, + true, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### ChainGetBlock + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" +} +``` + +### ChainGetBlockMessages + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "BlsMessages": [ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + ], + "SecpkMessages": [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "Cids": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] +} +``` + +### ChainGetGenesis +ChainGetGenesis returns the genesis tipset. 
+ + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainGetMessage + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" +} +``` + +### ChainGetMessagesInTipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +### ChainGetParentMessages + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +### ChainGetParentReceipts + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + 
+Response: +```json +[ + { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +] +``` + +### ChainGetPath + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "Type": "apply", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` + +### ChainGetReceipts + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +[ + { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +] +``` + +### ChainGetTipSet + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainGetTipSetAfterHeight + + +Perms: read + +Inputs: +```json +[ + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainGetTipSetByHeight + + +Perms: read + +Inputs: +```json +[ + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainHead + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + 
"Height": 0 +} +``` + +### ChainList + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 123 +] +``` + +Response: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +### ChainNotify + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "Type": "apply", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` + +### ChainSetHead + + +Perms: admin + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### GetActor + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" +} +``` + +### GetEntry + + +Perms: read + +Inputs: +```json +[ + 10101, + 42 +] +``` + +Response: +```json +{ + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +### GetFullBlock + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Header": { + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + 
"ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "BLSMessages": [ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + ], + "SECPMessages": [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] +} +``` + +### GetParentStateRootActor + + +Perms: read + +Inputs: +```json +[ + { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "f01234" +] +``` + +Response: +```json +{ + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" +} +``` + +### MessageWait + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 10101, + 10101 +] +``` + +Response: +```json +{ + "TS": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Message": { + "CID": { + "/": 
"bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Block": { + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +} +``` + +### ProtocolParameters + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Network": "string value", + "BlockTime": 60000000000, + "SupportedSectors": [ + { + "Size": 34359738368, + "MaxPieceSize": 1024 + } + ] +} +``` + +### ResolveToKeyAddr + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "Cids": null, + "Blocks": null, + "Height": 0 + } +] +``` + +Response: `"f01234"` + +### StateActorCodeCIDs +StateActorCodeCIDs returns the CIDs of all the builtin actors for the given network version + + +Perms: read + +Inputs: +```json +[ + 17 +] +``` + +Response: `{}` + +### StateActorManifestCID +StateActorManifestCID returns the CID of the builtin actors manifest for the given 
network version + + +Perms: read + +Inputs: +```json +[ + 17 +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### StateCall + + +Perms: read + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, + "ExecutionTrace": { + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 
123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] + }, + "Error": "string value", + "Duration": 60000000000 +} +``` + +### StateGetBeaconEntry +StateGetBeaconEntry returns the beacon entry for the given filecoin epoch. 
If +the entry has not yet been produced, the call will block until the entry +becomes available + + +Perms: read + +Inputs: +```json +[ + 10101 +] +``` + +Response: +```json +{ + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +### StateGetNetworkParams +StateGetNetworkParams return current network params + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "NetworkName": "mainnet", + "BlockDelaySecs": 42, + "ConsensusMinerMinPower": "0", + "SupportedProofTypes": [ + 8 + ], + "PreCommitChallengeDelay": 10101, + "ForkUpgradeParams": { + "UpgradeSmokeHeight": 10101, + "UpgradeBreezeHeight": 10101, + "UpgradeIgnitionHeight": 10101, + "UpgradeLiftoffHeight": 10101, + "UpgradeAssemblyHeight": 10101, + "UpgradeRefuelHeight": 10101, + "UpgradeTapeHeight": 10101, + "UpgradeKumquatHeight": 10101, + "BreezeGasTampingDuration": 10101, + "UpgradeCalicoHeight": 10101, + "UpgradePersianHeight": 10101, + "UpgradeOrangeHeight": 10101, + "UpgradeClausHeight": 10101, + "UpgradeTrustHeight": 10101, + "UpgradeNorwegianHeight": 10101, + "UpgradeTurboHeight": 10101, + "UpgradeHyperdriveHeight": 10101, + "UpgradeChocolateHeight": 10101, + "UpgradeOhSnapHeight": 10101, + "UpgradeSkyrHeight": 10101, + "UpgradeSharkHeight": 10101 + } +} +``` + +### StateGetRandomnessFromBeacon + + +Perms: read + +Inputs: +```json +[ + 2, + 10101, + "Ynl0ZSBhcnJheQ==", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"Bw=="` + +### StateGetRandomnessFromTickets + + +Perms: read + +Inputs: +```json +[ + 2, + 10101, + "Ynl0ZSBhcnJheQ==", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"Bw=="` + +### StateNetworkName + + +Perms: read + +Inputs: `[]` + +Response: `"mainnet"` + +### StateNetworkVersion + + +Perms: read + 
+Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `17` + +### StateSearchMsg +StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed + +NOTE: If a replacing message is found on chain, this method will return +a MsgLookup for the replacing message - the MsgLookup.Message will be a different +CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the +result of the execution of the replacing message. + +If the caller wants to ensure that exactly the requested message was executed, +they must check that MsgLookup.Message is equal to the provided 'cid', or set the +`allowReplaced` parameter to false. Without this check, and with `allowReplaced` +set to true, both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. + +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) 
+ + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 10101, + true +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### StateVerifiedRegistryRootKey + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +### StateVerifierStatus + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateWaitMsg +StateWaitMsg looks back up to limit epochs in the chain for a message. +If not found, it blocks until the message arrives on chain, and gets to the +indicated confidence depth. + +NOTE: If a replacing message is found on chain, this method will return +a MsgLookup for the replacing message - the MsgLookup.Message will be a different +CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the +result of the execution of the replacing message. + +If the caller wants to ensure that exactly the requested message was executed, +they must check that MsgLookup.Message is equal to the provided 'cid', or set the +`allowReplaced` parameter to false. 
Without this check, and with `allowReplaced` +set to true, both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. + +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 42, + 10101, + true +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### VerifyEntry + + +Perms: read + +Inputs: +```json +[ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + }, + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + }, + 10101 +] +``` + +Response: `true` + +## Common + +### NodeStatus + + +Perms: read + +Inputs: +```json +[ + true +] +``` + +Response: +```json +{ + "SyncStatus": { + "Epoch": 42, + "Behind": 42 + }, + "PeerStatus": { + "PeersToPublishMsgs": 123, + "PeersToPublishBlocks": 123 + }, + "ChainStatus": { + "BlocksPerTipsetLast100": 12.3, + "BlocksPerTipsetLastFinality": 12.3 + } +} +``` + +### StartTime +StartTime returns node start time + + +Perms: read + +Inputs: `[]` + +Response: `"0001-01-01T00:00:00Z"` + +### Version +Version provides information about API provider + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131840 +} +``` + +## Market + +### StateMarketParticipants + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": 
"bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "t026363": { + "Escrow": "0", + "Locked": "0" + } +} +``` + +## MessagePool + +### GasBatchEstimateMessageGas + + +Perms: read + +Inputs: +```json +[ + [ + { + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Spec": { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + } + } + ], + 42, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "Msg": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Err": "string value" + } +] +``` + +### GasEstimateFeeCap + + +Perms: read + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### GasEstimateGasLimit + + +Perms: read + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + 
"GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `9` + +### GasEstimateGasPremium + + +Perms: read + +Inputs: +```json +[ + 42, + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### GasEstimateMessageGas + + +Perms: read + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" +} +``` + +### MpoolBatchPush + + +Perms: write + +Inputs: +```json +[ + [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": 
"bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] +] +``` + +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +### MpoolBatchPushMessage + + +Perms: sign + +Inputs: +```json +[ + [ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + ], + { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + } +] +``` + +Response: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +### MpoolBatchPushUntrusted + + +Perms: write + +Inputs: +```json +[ + [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] +] +``` + +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +### MpoolCheckMessages +MpoolCheckMessages performs logical checks on a batch of messages + + +Perms: read + +Inputs: +```json +[ + [ + { + "Message": { + "CID": { + "/": 
"bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true + } + ] +] +``` + +Response: +```json +[ + [ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Code": 0, + "OK": true, + "Err": "string value", + "Hint": { + "abc": 123 + } + } + ] +] +``` + +### MpoolCheckPendingMessages +MpoolCheckPendingMessages performs logical checks for all pending messages from a given address + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +[ + [ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Code": 0, + "OK": true, + "Err": "string value", + "Hint": { + "abc": 123 + } + } + ] +] +``` + +### MpoolCheckReplaceMessages +MpoolCheckReplaceMessages performs logical checks on pending messages with replacement + + +Perms: read + +Inputs: +```json +[ + [ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } + ] +] +``` + +Response: +```json +[ + [ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Code": 0, + "OK": true, + "Err": "string value", + "Hint": { + "abc": 123 + } + } + ] +] +``` + +### MpoolClear + + +Perms: write + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### MpoolDeleteByAdress + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### MpoolGetConfig + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "PriorityAddrs": [ + "f01234" + ], + "SizeLimitHigh": 123, + "SizeLimitLow": 123, + "ReplaceByFeeRatio": 12.3, + 
"PruneCooldown": 60000000000, + "GasLimitOverestimation": 12.3 +} +``` + +### MpoolGetNonce + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `42` + +### MpoolPending + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +### MpoolPublishByAddr + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### MpoolPublishMessage + + +Perms: write + +Inputs: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +Response: `{}` + +### MpoolPush + + +Perms: write + +Inputs: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": 
"bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MpoolPushMessage + + +Perms: sign + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + } +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } +} +``` + +### MpoolPushUntrusted + + +Perms: write + +Inputs: +```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MpoolSelect + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 12.3 +] +``` + +Response: 
+```json +[ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +### MpoolSelects + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + 12.3 + ] +] +``` + +Response: +```json +[ + [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] +] +``` + +### MpoolSetConfig + + +Perms: admin + +Inputs: +```json +[ + { + "PriorityAddrs": [ + "f01234" + ], + "SizeLimitHigh": 123, + "SizeLimitLow": 123, + "ReplaceByFeeRatio": 12.3, + "PruneCooldown": 60000000000, + "GasLimitOverestimation": 12.3 + } +] +``` + +Response: `{}` + +### MpoolSub + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Type": 0, + "Message": { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": 
"bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +} +``` + +## MinerState + +### StateAllMinerFaults + + +Perms: read + +Inputs: +```json +[ + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "Miner": "f01234", + "Epoch": 10101 + } +] +``` + +### StateChangedActors + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "t01236": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" + } +} +``` + +### StateCirculatingSupply + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateComputeDataCID +StateComputeDataCID computes DataCID from a set of on-chain deals + + +Perms: read + +Inputs: +```json +[ + "f01234", + 8, + [ + 5432 + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### StateDealProviderCollateralBounds + + +Perms: read + +Inputs: +```json +[ + 1032, + true, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Min": "0", + "Max": "0" +} +``` + +### StateDecodeParams 
+ + +Perms: read + +Inputs: +```json +[ + "f01234", + 1, + "Ynl0ZSBhcnJheQ==", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### StateEncodeParams + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 1, + "json raw message" +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### StateGetAllocation +StateGetAllocation returns the allocation for a given address and allocation ID. + + +Perms: read + +Inputs: +```json +[ + "f01234", + 0, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Client": 1000, + "Provider": 1000, + "Data": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 1032, + "TermMin": 10101, + "TermMax": 10101, + "Expiration": 10101 +} +``` + +### StateGetAllocationForPendingDeal +StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if +pending allocation is not found. + + +Perms: read + +Inputs: +```json +[ + 5432, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Client": 1000, + "Provider": 1000, + "Data": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 1032, + "TermMin": 10101, + "TermMax": 10101, + "Expiration": 10101 +} +``` + +### StateGetAllocations +StateGetAllocations returns the all the allocations for a given client. 
+ + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### StateGetClaim +StateGetClaim returns the claim for a given address and claim ID. + + +Perms: read + +Inputs: +```json +[ + "f01234", + 0, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Provider": 1000, + "Client": 1000, + "Data": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 1032, + "TermMin": 10101, + "TermMax": 10101, + "TermStart": 10101, + "Sector": 9 +} +``` + +### StateGetClaims +StateGetClaims returns the all the claims for a given provider. + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### StateListActors + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + "f01234" +] +``` + +### StateListMessages + + +Perms: read + +Inputs: +```json +[ + { + "To": "f01234", + "From": "f01234" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 10101 +] +``` + +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +### StateListMiners + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 
{ + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + "f01234" +] +``` + +### StateLookupID + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +### StateLookupRobustAddress +StateLookupRobustAddress returns the public key address of the given ID address for non-account addresses (multisig, miners etc) + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +### StateMarketBalance + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Escrow": "0", + "Locked": "0" +} +``` + +### StateMarketDeals + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "t026363": { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101, + "VerifiedClaim": 0 + } + } +} +``` + +### StateMarketStorageDeal + + +Perms: read + +Inputs: +```json +[ + 5432, + [ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101, + "VerifiedClaim": 0 + } +} +``` + +### StateMinerActiveSectors + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "ReplacedSectorAge": 10101, + "ReplacedDayReward": "0", + "SectorKeyCID": null, + "SimpleQAPower": true + } +] +``` + +### StateMinerAllocated +StateMinerAllocated returns a bitfield containing all sector numbers marked as allocated in miner state + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + 0 +] +``` + +### StateMinerAvailableBalance + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": 
"bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerDeadlines + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "PostSubmissions": [ + 5, + 1 + ], + "DisputableProofCount": 42 + } +] +``` + +### StateMinerFaults + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + 5, + 1 +] +``` + +### StateMinerInfo + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Owner": "f01234", + "Worker": "f01234", + "NewWorker": "f01234", + "ControlAddresses": [ + "f01234" + ], + "WorkerChangeEpoch": 10101, + "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Multiaddrs": [ + "Ynl0ZSBhcnJheQ==" + ], + "WindowPoStProofType": 8, + "SectorSize": 34359738368, + "WindowPoStPartitionSectors": 42, + "ConsensusFaultElapsed": 10101, + "Beneficiary": "f01234", + "BeneficiaryTerm": { + "Quota": "0", + "UsedQuota": "0", + "Expiration": 10101 + }, + "PendingBeneficiaryTerm": { + "NewBeneficiary": "f01234", + "NewQuota": "0", + "NewExpiration": 10101, + "ApprovedByBeneficiary": true, + "ApprovedByNominee": true + } +} +``` + +### StateMinerInitialPledgeCollateral + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": [ + 5432 + ], + 
"Expiration": 10101, + "UnsealedCid": null + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerPartitions + + +Perms: read + +Inputs: +```json +[ + "f01234", + 42, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "AllSectors": [ + 5, + 1 + ], + "FaultySectors": [ + 5, + 1 + ], + "RecoveringSectors": [ + 5, + 1 + ], + "LiveSectors": [ + 5, + 1 + ], + "ActiveSectors": [ + 5, + 1 + ] + } +] +``` + +### StateMinerPower + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MinerPower": { + "RawBytePower": "0", + "QualityAdjPower": "0" + }, + "TotalPower": { + "RawBytePower": "0", + "QualityAdjPower": "0" + }, + "HasMinPower": true +} +``` + +### StateMinerPreCommitDepositForPower + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": [ + 5432 + ], + "Expiration": 10101, + "UnsealedCid": null + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerProvingDeadline + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + 
"CurrentEpoch": 10101, + "PeriodStart": 10101, + "Index": 42, + "Open": 10101, + "Close": 10101, + "Challenge": 10101, + "FaultCutoff": 10101, + "WPoStPeriodDeadlines": 42, + "WPoStProvingPeriod": 10101, + "WPoStChallengeWindow": 10101, + "WPoStChallengeLookback": 10101, + "FaultDeclarationCutoff": 10101 +} +``` + +### StateMinerRecoveries + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + 5, + 1 +] +``` + +### StateMinerSectorAllocated + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `true` + +### StateMinerSectorCount + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Live": 42, + "Active": 42, + "Faulty": 42 +} +``` + +### StateMinerSectorSize + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `34359738368` + +### StateMinerSectors + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + 0 + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, 
+ "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "ReplacedSectorAge": 10101, + "ReplacedDayReward": "0", + "SectorKeyCID": null, + "SimpleQAPower": true + } +] +``` + +### StateMinerWorkerAddress + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +### StateReadState + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Balance": "0", + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": {} +} +``` + +### StateSectorExpiration + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "OnTime": 10101, + "Early": 10101 +} +``` + +### StateSectorGetInfo + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "ReplacedSectorAge": 10101, + "ReplacedDayReward": "0", + "SectorKeyCID": 
null, + "SimpleQAPower": true +} +``` + +### StateSectorPartition + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Deadline": 42, + "Partition": 42 +} +``` + +### StateSectorPreCommitInfo +StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector. +Returns nil and no error if the sector isn't precommitted. + +Note that the sector number may be allocated while PreCommitInfo is nil. This means that either allocated sector +numbers were compacted, and the sector number was marked as allocated in order to reduce size of the allocated +sectors bitfield, or that the sector was precommitted, but the precommit has expired. + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Info": { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": [ + 5432 + ], + "Expiration": 10101, + "UnsealedCid": null + }, + "PreCommitDeposit": "0", + "PreCommitEpoch": 10101 +} +``` + +### StateVMCirculatingSupplyInternal + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "FilVested": "0", + "FilMined": "0", + "FilBurnt": "0", + "FilLocked": "0", + "FilCirculating": "0", + "FilReserveDisbursed": "0" +} +``` + +### StateVerifiedClientStatus + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +## Mining + +### MinerCreateBlock + + +Perms: write + +Inputs: +```json +[ + { + "Miner": "f01234", + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Ticket": { + "VRFProof": "Bw==" + }, + "Eproof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconValues": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "Messages": [ + { + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "Epoch": 10101, + "Timestamp": 42, + "WinningPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ] + } +] +``` + +Response: +```json +{ + "Header": { + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] +} +``` + +### MinerGetBaseInfo + + +Perms: read + +Inputs: +```json +[ + "f01234", + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MinerPower": "0", + "NetworkPower": "0", + "Sectors": [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], + "WorkerKey": "f01234", + "SectorSize": 34359738368, + "PrevBeaconEntry": { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "EligibleForMining": true +} +``` + +## MultiSig + +### MsigAddApprove + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + "f01234", + true +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigAddCancel + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + true +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, 
+ "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigAddPropose + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "f01234", + true +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigApprove + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigApproveTxnHash + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234", + "f01234", + "0", + "f01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigCancel + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": 
"0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigCancelTxnHash +MsigCancel cancels a previously-proposed multisig message +It takes the following params: \, \, \, \, +\, \, \ + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234", + "0", + "f01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigCreate + + +Perms: sign + +Inputs: +```json +[ + 42, + [ + "f01234" + ], + 10101, + "0", + "f01234", + "0" +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigGetVested + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### MsigPropose + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0", + "f01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": 
"0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigRemoveSigner + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "f01234", + true +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigSwapApprove + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigSwapCancel + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true +} +``` + +### MsigSwapPropose + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "ValidNonce": true 
+} +``` + +## Network + +### ID + + +Perms: read + +Inputs: `[]` + +Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` + +### NetAddrsListen + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetAgentVersion + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `"string value"` + +### NetAutoNatStatus + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Reachability": 1, + "PublicAddr": "string value" +} +``` + +### NetBandwidthStats +NetBandwidthStats returns statistics about the nodes total bandwidth +usage and current rate across all peers and protocols. + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "TotalIn": 9, + "TotalOut": 9, + "RateIn": 12.3, + "RateOut": 12.3 +} +``` + +### NetBandwidthStatsByPeer +NetBandwidthStatsByPeer returns statistics about the nodes bandwidth +usage and current rate per peer + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetBandwidthStatsByProtocol +NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth +usage and current rate per protocol + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "/fil/hello/1.0.0": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetConnect + + +Perms: admin + +Inputs: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` + +Response: `{}` + +### NetConnectedness + + +Perms: read + +Inputs: +```json +[ + 
"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `1` + +### NetDisconnect + + +Perms: admin + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `{}` + +### NetFindPeer + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetFindProvidersAsync + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 123 +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetGetClosestPeers + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +### NetPeerInfo + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Agent": "string value", + "Addrs": [ + "string value" + ], + "Protocols": [ + "string value" + ], + "ConnMgrMeta": { + "FirstSeen": "0001-01-01T00:00:00Z", + "Value": 123, + "Tags": { + "name": 42 + }, + "Conns": { + "name": "2021-03-08T22:52:18Z" + } + } +} +``` + +### NetPeers + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` + +### NetPing + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `60000000000` + +### 
NetProtectAdd + + +Perms: admin + +Inputs: +```json +[ + [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ] +] +``` + +Response: `{}` + +### NetProtectList + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +### NetProtectRemove + + +Perms: admin + +Inputs: +```json +[ + [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ] +] +``` + +Response: `{}` + +### NetPubsubScores + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Score": { + "Score": 12.3, + "Topics": { + "/blocks": { + "TimeInMesh": 60000000000, + "FirstMessageDeliveries": 122, + "MeshMessageDeliveries": 1234, + "InvalidMessageDeliveries": 3 + } + }, + "AppSpecificScore": 12.3, + "IPColocationFactor": 12.3, + "BehaviourPenalty": 12.3 + } + } +] +``` + +## Paychan + +### PaychAllocateLane +PaychAllocateLane Allocate late creates a lane within a payment channel so that calls to +CreatePaymentVoucher will automatically make vouchers only for the difference in total + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `42` + +### PaychAvailableFunds +PaychAvailableFunds get the status of an outbound payment channel +@pch: payment channel address + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Channel": "\u003cempty\u003e", + "From": "f01234", + "To": "f01234", + "ConfirmedAmt": "0", + "PendingAmt": "0", + "NonReservedAmt": "0", + "PendingAvailableAmt": "0", + "PendingWaitSentinel": null, + "QueuedAmt": "0", + "VoucherReedeemedAmt": "0" +} +``` + +### PaychAvailableFundsByFromTo +PaychAvailableFundsByFromTo get the status of an outbound payment channel +@from: the payment channel sender +@to: he payment channel recipient + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "Channel": "\u003cempty\u003e", + "From": "f01234", + "To": 
"f01234", + "ConfirmedAmt": "0", + "PendingAmt": "0", + "NonReservedAmt": "0", + "PendingAvailableAmt": "0", + "PendingWaitSentinel": null, + "QueuedAmt": "0", + "VoucherReedeemedAmt": "0" +} +``` + +### PaychCollect +PaychCollect update payment channel status to collect +Collect sends the value of submitted vouchers to the channel recipient (the provider), +and refunds the remaining channel balance to the channel creator (the client). +@pch: payment channel address + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### PaychFund +PaychFund gets or creates a payment channel between address pair. +The specified amount will be added to the channel through on-chain send for future use + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "Channel": "f01234", + "WaitSentinel": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + +### PaychGet +PaychGet gets or creates a payment channel between address pair +The specified amount will be reserved for use. If there aren't enough non-reserved funds +available, funds will be added through an on-chain message. +- When opts.OffChain is true, this call will not cause any messages to be sent to the chain (no automatic +channel creation/funds adding). If the operation can't be performed without sending a message an error will be +returned. Note that even when this option is specified, this call can be blocked by previous operations on the +channel waiting for on-chain operations. 
+ + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0", + { + "OffChain": true + } +] +``` + +Response: +```json +{ + "Channel": "f01234", + "WaitSentinel": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + +### PaychGetWaitReady +PaychGetWaitReady waits until the create channel / add funds message with the sentinel +@sentinel: given message CID arrives. +@ch: the returned channel address can safely be used against the Manager methods. + + +Perms: sign + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `"f01234"` + +### PaychList +PaychList list the addresses of all channels that have been created + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + "f01234" +] +``` + +### PaychNewPayment +PaychNewPayment aggregate vouchers into a new lane +@from: the payment channel sender +@to: the payment channel recipient +@vouchers: the outstanding (non-redeemed) vouchers + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + [ + { + "Amount": "0", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "MinSettle": 10101, + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] +] +``` + +Response: +```json +{ + "Channel": "f01234", + "WaitSentinel": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Vouchers": [ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] +} +``` + +### PaychSettle +PaychSettle update payment channel status to settle +After a settlement period (currently 12 hours) either party to the payment channel can call 
collect on chain +@pch: payment channel address + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### PaychStatus +PaychStatus get the payment channel status +@pch: payment channel address + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "ControlAddr": "f01234", + "Direction": 1 +} +``` + +### PaychVoucherAdd +PaychVoucherAdd adds a voucher for an inbound channel. +If the channel is not in the store, fetches the channel from state (and checks that +the channel To address is owned by the wallet). + + +Perms: write + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "0" +] +``` + +Response: `"0"` + +### PaychVoucherCheckSpendable +PaychVoucherCheckSpendable checks if the given voucher is currently spendable +@pch: payment channel address +@sv: voucher + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `true` + +### PaychVoucherCheckValid +PaychVoucherCheckValid checks if the given voucher is valid (is or could become spendable at some point). 
+If the channel is not in the store, fetches the channel from state (and checks that +the channel To address is owned by the wallet). +@pch: payment channel address +@sv: voucher + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +Response: `{}` + +### PaychVoucherCreate +PaychVoucherCreate creates a new signed voucher on the given payment channel +with the given lane and amount. The value passed in is exactly the value +that will be used to create the voucher, so if previous vouchers exist, the +actual additional value of this voucher will only be the difference between +the two. +If there are insufficient funds in the channel to create the voucher, +returns a nil voucher and the shortfall. 
+ + +Perms: sign + +Inputs: +```json +[ + "f01234", + "0", + 42 +] +``` + +Response: +```json +{ + "Voucher": { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Shortfall": "0" +} +``` + +### PaychVoucherList +PaychVoucherList list vouchers in payment channel +@pch: payment channel address + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +[ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +### PaychVoucherSubmit +PaychVoucherSubmit Submit voucher to chain to update payment channel state +@pch: payment channel address +@sv: voucher in payment channel + + +Perms: sign + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +## Syncer + +### ChainSyncHandleNewTipSet + + +Perms: write + +Inputs: +```json +[ + { + "Source": 
"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Sender": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Head": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` + +Response: `{}` + +### ChainTipSetWeight + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### Concurrent + + +Perms: read + +Inputs: `[]` + +Response: `9` + +### SetConcurrent + + +Perms: admin + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +### SyncState + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "ActiveSyncs": [ + { + "WorkerID": 42, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Target": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Stage": 1, + "Height": 10101, + "Start": "0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Message": "string value" + } + ], + "VMApplied": 42 +} +``` + +### SyncSubmitBlock + + +Perms: write + +Inputs: +```json +[ + { + "Header": { + "Miner": "f01234", + "Ticket": { + "VRFProof": "Bw==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Bw==" + }, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": 
"Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] + } +] +``` + +Response: `{}` + +### SyncerTracker + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "History": [ + { + "State": 1, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Current": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Start": "0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Err": {}, + "Source": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Sender": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Head": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } + ], + "Buckets": [ + { + "State": 1, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Current": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Start": "0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Err": {}, + "Source": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Sender": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Head": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } + ] +} +``` + +## Wallet + +### HasPassword + + +Perms: admin + +Inputs: `[]` + +Response: `true` + +### LockWallet + + +Perms: admin + +Inputs: `[]` + +Response: `{}` + +### SetPassword + + +Perms: admin + +Inputs: +```json +[ + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `{}` + +### UnLockWallet + + +Perms: admin + +Inputs: +```json +[ + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `{}` + +### WalletAddresses + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + "f01234" +] +``` + +### WalletBalance + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0"` + +### WalletDefaultAddress + + +Perms: write + +Inputs: `[]` + +Response: `"f01234"` + +### WalletDelete + + 
+Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### WalletExport + + +Perms: admin + +Inputs: +```json +[ + "f01234", + "string value" +] +``` + +Response: +```json +{ + "Type": "bls", + "PrivateKey": "Ynl0ZSBhcnJheQ==" +} +``` + +### WalletHas + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### WalletImport + + +Perms: admin + +Inputs: +```json +[ + { + "Type": "bls", + "PrivateKey": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: `"f01234"` + +### WalletNewAddress + + +Perms: write + +Inputs: +```json +[ + 7 +] +``` + +Response: `"f01234"` + +### WalletSetDefault + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### WalletSign + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "Ynl0ZSBhcnJheQ==", + { + "Type": "message", + "Extra": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +### WalletSignMessage + + +Perms: sign + +Inputs: +```json +[ + "f01234", + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Message": { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } +} +``` + +### WalletState + + +Perms: admin + +Inputs: `[]` + +Response: `123` + diff --git a/venus-shared/api/chain/v1/mining.go b/venus-shared/api/chain/v1/mining.go new file mode 100644 index 
0000000000..16fc35bd8f --- /dev/null +++ b/venus-shared/api/chain/v1/mining.go @@ -0,0 +1,15 @@ +package v1 + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IMining interface { + MinerGetBaseInfo(ctx context.Context, maddr address.Address, round abi.ChainEpoch, tsk types.TipSetKey) (*types.MiningBaseInfo, error) //perm:read + MinerCreateBlock(ctx context.Context, bt *types.BlockTemplate) (*types.BlockMsg, error) //perm:write +} diff --git a/venus-shared/api/chain/v1/mock/mock_fullnode.go b/venus-shared/api/chain/v1/mock/mock_fullnode.go new file mode 100644 index 0000000000..a772cd0a02 --- /dev/null +++ b/venus-shared/api/chain/v1/mock/mock_fullnode.go @@ -0,0 +1,2967 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/venus/venus-shared/api/chain/v1 (interfaces: FullNode) + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + json "encoding/json" + reflect "reflect" + time "time" + + address "github.com/filecoin-project/go-address" + bitfield "github.com/filecoin-project/go-bitfield" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" + miner "github.com/filecoin-project/go-state-types/builtin/v9/miner" + verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + crypto "github.com/filecoin-project/go-state-types/crypto" + dline "github.com/filecoin-project/go-state-types/dline" + network "github.com/filecoin-project/go-state-types/network" + miner0 "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + internal "github.com/filecoin-project/venus/venus-shared/internal" + types "github.com/filecoin-project/venus/venus-shared/types" + gomock "github.com/golang/mock/gomock" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + metrics "github.com/libp2p/go-libp2p/core/metrics" + network0 "github.com/libp2p/go-libp2p/core/network" + peer "github.com/libp2p/go-libp2p/core/peer" + protocol "github.com/libp2p/go-libp2p/core/protocol" +) + +// MockFullNode is a mock of FullNode interface. +type MockFullNode struct { + ctrl *gomock.Controller + recorder *MockFullNodeMockRecorder +} + +// MockFullNodeMockRecorder is the mock recorder for MockFullNode. +type MockFullNodeMockRecorder struct { + mock *MockFullNode +} + +// NewMockFullNode creates a new mock instance. +func NewMockFullNode(ctrl *gomock.Controller) *MockFullNode { + mock := &MockFullNode{ctrl: ctrl} + mock.recorder = &MockFullNodeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder { + return m.recorder +} + +// BlockTime mocks base method. 
+func (m *MockFullNode) BlockTime(arg0 context.Context) time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockTime", arg0) + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// BlockTime indicates an expected call of BlockTime. +func (mr *MockFullNodeMockRecorder) BlockTime(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockTime", reflect.TypeOf((*MockFullNode)(nil).BlockTime), arg0) +} + +// ChainDeleteObj mocks base method. +func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainDeleteObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainDeleteObj indicates an expected call of ChainDeleteObj. +func (mr *MockFullNodeMockRecorder) ChainDeleteObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDeleteObj", reflect.TypeOf((*MockFullNode)(nil).ChainDeleteObj), arg0, arg1) +} + +// ChainExport mocks base method. +func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool, arg3 types.TipSetKey) (<-chan []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainExport", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(<-chan []byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainExport indicates an expected call of ChainExport. +func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3) +} + +// ChainGetBlock mocks base method. 
+func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlock indicates an expected call of ChainGetBlock. +func (mr *MockFullNodeMockRecorder) ChainGetBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlock", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlock), arg0, arg1) +} + +// ChainGetBlockMessages mocks base method. +func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) (*types.BlockMessages, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetBlockMessages", arg0, arg1) + ret0, _ := ret[0].(*types.BlockMessages) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages. +func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1) +} + +// ChainGetGenesis mocks base method. +func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetGenesis", arg0) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetGenesis indicates an expected call of ChainGetGenesis. +func (mr *MockFullNodeMockRecorder) ChainGetGenesis(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetGenesis", reflect.TypeOf((*MockFullNode)(nil).ChainGetGenesis), arg0) +} + +// ChainGetMessage mocks base method. 
+func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*internal.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessage", arg0, arg1) + ret0, _ := ret[0].(*internal.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessage indicates an expected call of ChainGetMessage. +func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) +} + +// ChainGetMessagesInTipset mocks base method. +func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]types.MessageCID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1) + ret0, _ := ret[0].([]types.MessageCID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset. +func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1) +} + +// ChainGetParentMessages mocks base method. +func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid) ([]types.MessageCID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentMessages", arg0, arg1) + ret0, _ := ret[0].([]types.MessageCID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentMessages indicates an expected call of ChainGetParentMessages. 
+func (mr *MockFullNodeMockRecorder) ChainGetParentMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentMessages), arg0, arg1) +} + +// ChainGetParentReceipts mocks base method. +func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid) ([]*types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentReceipts", arg0, arg1) + ret0, _ := ret[0].([]*types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts. +func (mr *MockFullNodeMockRecorder) ChainGetParentReceipts(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentReceipts), arg0, arg1) +} + +// ChainGetPath mocks base method. +func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSetKey) ([]*types.HeadChange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetPath", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.HeadChange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetPath indicates an expected call of ChainGetPath. +func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2) +} + +// ChainGetReceipts mocks base method. 
+func (m *MockFullNode) ChainGetReceipts(arg0 context.Context, arg1 cid.Cid) ([]types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetReceipts", arg0, arg1) + ret0, _ := ret[0].([]types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetReceipts indicates an expected call of ChainGetReceipts. +func (mr *MockFullNodeMockRecorder) ChainGetReceipts(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetReceipts), arg0, arg1) +} + +// ChainGetTipSet mocks base method. +func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetTipSet", arg0, arg1) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetTipSet indicates an expected call of ChainGetTipSet. +func (mr *MockFullNodeMockRecorder) ChainGetTipSet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSet), arg0, arg1) +} + +// ChainGetTipSetAfterHeight mocks base method. +func (m *MockFullNode) ChainGetTipSetAfterHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetTipSetAfterHeight", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetTipSetAfterHeight indicates an expected call of ChainGetTipSetAfterHeight. 
+func (mr *MockFullNodeMockRecorder) ChainGetTipSetAfterHeight(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetAfterHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetAfterHeight), arg0, arg1, arg2) +} + +// ChainGetTipSetByHeight mocks base method. +func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetTipSetByHeight", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight. +func (mr *MockFullNodeMockRecorder) ChainGetTipSetByHeight(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetByHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetByHeight), arg0, arg1, arg2) +} + +// ChainHasObj mocks base method. +func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHasObj indicates an expected call of ChainHasObj. +func (mr *MockFullNodeMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockFullNode)(nil).ChainHasObj), arg0, arg1) +} + +// ChainHead mocks base method. +func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHead", arg0) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHead indicates an expected call of ChainHead. 
+func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0) +} + +// ChainList mocks base method. +func (m *MockFullNode) ChainList(arg0 context.Context, arg1 types.TipSetKey, arg2 int) ([]types.TipSetKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainList", arg0, arg1, arg2) + ret0, _ := ret[0].([]types.TipSetKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainList indicates an expected call of ChainList. +func (mr *MockFullNodeMockRecorder) ChainList(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainList", reflect.TypeOf((*MockFullNode)(nil).ChainList), arg0, arg1, arg2) +} + +// ChainNotify mocks base method. +func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*types.HeadChange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainNotify", arg0) + ret0, _ := ret[0].(<-chan []*types.HeadChange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainNotify indicates an expected call of ChainNotify. +func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0) +} + +// ChainPutObj mocks base method. +func (m *MockFullNode) ChainPutObj(arg0 context.Context, arg1 blocks.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainPutObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainPutObj indicates an expected call of ChainPutObj. 
+func (mr *MockFullNodeMockRecorder) ChainPutObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainPutObj", reflect.TypeOf((*MockFullNode)(nil).ChainPutObj), arg0, arg1) +} + +// ChainReadObj mocks base method. +func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainReadObj", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainReadObj indicates an expected call of ChainReadObj. +func (mr *MockFullNodeMockRecorder) ChainReadObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainReadObj", reflect.TypeOf((*MockFullNode)(nil).ChainReadObj), arg0, arg1) +} + +// ChainSetHead mocks base method. +func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainSetHead", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainSetHead indicates an expected call of ChainSetHead. +func (mr *MockFullNodeMockRecorder) ChainSetHead(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSetHead", reflect.TypeOf((*MockFullNode)(nil).ChainSetHead), arg0, arg1) +} + +// ChainStatObj mocks base method. +func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (types.ObjStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainStatObj", arg0, arg1, arg2) + ret0, _ := ret[0].(types.ObjStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainStatObj indicates an expected call of ChainStatObj. 
+func (mr *MockFullNodeMockRecorder) ChainStatObj(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStatObj", reflect.TypeOf((*MockFullNode)(nil).ChainStatObj), arg0, arg1, arg2) +} + +// ChainSyncHandleNewTipSet mocks base method. +func (m *MockFullNode) ChainSyncHandleNewTipSet(arg0 context.Context, arg1 *types.ChainInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainSyncHandleNewTipSet", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainSyncHandleNewTipSet indicates an expected call of ChainSyncHandleNewTipSet. +func (mr *MockFullNodeMockRecorder) ChainSyncHandleNewTipSet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSyncHandleNewTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainSyncHandleNewTipSet), arg0, arg1) +} + +// ChainTipSetWeight mocks base method. +func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainTipSetWeight", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainTipSetWeight indicates an expected call of ChainTipSetWeight. +func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) +} + +// Concurrent mocks base method. +func (m *MockFullNode) Concurrent(arg0 context.Context) int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Concurrent", arg0) + ret0, _ := ret[0].(int64) + return ret0 +} + +// Concurrent indicates an expected call of Concurrent. 
+func (mr *MockFullNodeMockRecorder) Concurrent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Concurrent", reflect.TypeOf((*MockFullNode)(nil).Concurrent), arg0) +} + +// GasBatchEstimateMessageGas mocks base method. +func (m *MockFullNode) GasBatchEstimateMessageGas(arg0 context.Context, arg1 []*types.EstimateMessage, arg2 uint64, arg3 types.TipSetKey) ([]*types.EstimateResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasBatchEstimateMessageGas", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]*types.EstimateResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasBatchEstimateMessageGas indicates an expected call of GasBatchEstimateMessageGas. +func (mr *MockFullNodeMockRecorder) GasBatchEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasBatchEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasBatchEstimateMessageGas), arg0, arg1, arg2, arg3) +} + +// GasEstimateFeeCap mocks base method. +func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *internal.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateFeeCap", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap. +func (mr *MockFullNodeMockRecorder) GasEstimateFeeCap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateFeeCap", reflect.TypeOf((*MockFullNode)(nil).GasEstimateFeeCap), arg0, arg1, arg2, arg3) +} + +// GasEstimateGasLimit mocks base method. 
+func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *internal.Message, arg2 types.TipSetKey) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateGasLimit", arg0, arg1, arg2) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit. +func (mr *MockFullNodeMockRecorder) GasEstimateGasLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasLimit", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasLimit), arg0, arg1, arg2) +} + +// GasEstimateGasPremium mocks base method. +func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, arg2 address.Address, arg3 int64, arg4 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateGasPremium", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium. +func (mr *MockFullNodeMockRecorder) GasEstimateGasPremium(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasPremium", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasPremium), arg0, arg1, arg2, arg3, arg4) +} + +// GasEstimateMessageGas mocks base method. +func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *internal.Message, arg2 *types.MessageSendSpec, arg3 types.TipSetKey) (*internal.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateMessageGas", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*internal.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas. 
+func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) +} + +// GetActor mocks base method. +func (m *MockFullNode) GetActor(arg0 context.Context, arg1 address.Address) (*internal.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActor", arg0, arg1) + ret0, _ := ret[0].(*internal.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActor indicates an expected call of GetActor. +func (mr *MockFullNodeMockRecorder) GetActor(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActor", reflect.TypeOf((*MockFullNode)(nil).GetActor), arg0, arg1) +} + +// GetEntry mocks base method. +func (m *MockFullNode) GetEntry(arg0 context.Context, arg1 abi.ChainEpoch, arg2 uint64) (*types.BeaconEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEntry", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.BeaconEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEntry indicates an expected call of GetEntry. +func (mr *MockFullNodeMockRecorder) GetEntry(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEntry", reflect.TypeOf((*MockFullNode)(nil).GetEntry), arg0, arg1, arg2) +} + +// GetFullBlock mocks base method. +func (m *MockFullNode) GetFullBlock(arg0 context.Context, arg1 cid.Cid) (*types.FullBlock, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFullBlock", arg0, arg1) + ret0, _ := ret[0].(*types.FullBlock) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFullBlock indicates an expected call of GetFullBlock. 
+func (mr *MockFullNodeMockRecorder) GetFullBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFullBlock", reflect.TypeOf((*MockFullNode)(nil).GetFullBlock), arg0, arg1) +} + +// GetParentStateRootActor mocks base method. +func (m *MockFullNode) GetParentStateRootActor(arg0 context.Context, arg1 *types.TipSet, arg2 address.Address) (*internal.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetParentStateRootActor", arg0, arg1, arg2) + ret0, _ := ret[0].(*internal.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetParentStateRootActor indicates an expected call of GetParentStateRootActor. +func (mr *MockFullNodeMockRecorder) GetParentStateRootActor(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetParentStateRootActor", reflect.TypeOf((*MockFullNode)(nil).GetParentStateRootActor), arg0, arg1, arg2) +} + +// HasPassword mocks base method. +func (m *MockFullNode) HasPassword(arg0 context.Context) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasPassword", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// HasPassword indicates an expected call of HasPassword. +func (mr *MockFullNodeMockRecorder) HasPassword(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasPassword", reflect.TypeOf((*MockFullNode)(nil).HasPassword), arg0) +} + +// ID mocks base method. +func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ID", arg0) + ret0, _ := ret[0].(peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ID indicates an expected call of ID. 
+func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0) +} + +// ListActor mocks base method. +func (m *MockFullNode) ListActor(arg0 context.Context) (map[address.Address]*internal.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListActor", arg0) + ret0, _ := ret[0].(map[address.Address]*internal.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListActor indicates an expected call of ListActor. +func (mr *MockFullNodeMockRecorder) ListActor(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListActor", reflect.TypeOf((*MockFullNode)(nil).ListActor), arg0) +} + +// LockWallet mocks base method. +func (m *MockFullNode) LockWallet(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LockWallet", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// LockWallet indicates an expected call of LockWallet. +func (mr *MockFullNodeMockRecorder) LockWallet(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockWallet", reflect.TypeOf((*MockFullNode)(nil).LockWallet), arg0) +} + +// MessageWait mocks base method. +func (m *MockFullNode) MessageWait(arg0 context.Context, arg1 cid.Cid, arg2, arg3 abi.ChainEpoch) (*types.ChainMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessageWait", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.ChainMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessageWait indicates an expected call of MessageWait. 
+func (mr *MockFullNodeMockRecorder) MessageWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageWait", reflect.TypeOf((*MockFullNode)(nil).MessageWait), arg0, arg1, arg2, arg3) +} + +// MinerCreateBlock mocks base method. +func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *types.BlockTemplate) (*types.BlockMsg, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerCreateBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockMsg) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerCreateBlock indicates an expected call of MinerCreateBlock. +func (mr *MockFullNodeMockRecorder) MinerCreateBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerCreateBlock", reflect.TypeOf((*MockFullNode)(nil).MinerCreateBlock), arg0, arg1) +} + +// MinerGetBaseInfo mocks base method. +func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Address, arg2 abi.ChainEpoch, arg3 types.TipSetKey) (*types.MiningBaseInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerGetBaseInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.MiningBaseInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo. +func (mr *MockFullNodeMockRecorder) MinerGetBaseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerGetBaseInfo", reflect.TypeOf((*MockFullNode)(nil).MinerGetBaseInfo), arg0, arg1, arg2, arg3) +} + +// MpoolBatchPush mocks base method. 
+func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPush", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPush indicates an expected call of MpoolBatchPush. +func (mr *MockFullNodeMockRecorder) MpoolBatchPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPush", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPush), arg0, arg1) +} + +// MpoolBatchPushMessage mocks base method. +func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*internal.Message, arg2 *types.MessageSendSpec) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage. +func (mr *MockFullNodeMockRecorder) MpoolBatchPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushMessage), arg0, arg1, arg2) +} + +// MpoolBatchPushUntrusted mocks base method. +func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushUntrusted", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted. 
+func (mr *MockFullNodeMockRecorder) MpoolBatchPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushUntrusted), arg0, arg1) +} + +// MpoolCheckMessages mocks base method. +func (m *MockFullNode) MpoolCheckMessages(arg0 context.Context, arg1 []*types.MessagePrototype) ([][]types.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckMessages", arg0, arg1) + ret0, _ := ret[0].([][]types.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckMessages indicates an expected call of MpoolCheckMessages. +func (mr *MockFullNodeMockRecorder) MpoolCheckMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckMessages), arg0, arg1) +} + +// MpoolCheckPendingMessages mocks base method. +func (m *MockFullNode) MpoolCheckPendingMessages(arg0 context.Context, arg1 address.Address) ([][]types.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckPendingMessages", arg0, arg1) + ret0, _ := ret[0].([][]types.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckPendingMessages indicates an expected call of MpoolCheckPendingMessages. +func (mr *MockFullNodeMockRecorder) MpoolCheckPendingMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckPendingMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckPendingMessages), arg0, arg1) +} + +// MpoolCheckReplaceMessages mocks base method. 
+func (m *MockFullNode) MpoolCheckReplaceMessages(arg0 context.Context, arg1 []*internal.Message) ([][]types.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckReplaceMessages", arg0, arg1) + ret0, _ := ret[0].([][]types.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckReplaceMessages indicates an expected call of MpoolCheckReplaceMessages. +func (mr *MockFullNodeMockRecorder) MpoolCheckReplaceMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckReplaceMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckReplaceMessages), arg0, arg1) +} + +// MpoolClear mocks base method. +func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolClear", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolClear indicates an expected call of MpoolClear. +func (mr *MockFullNodeMockRecorder) MpoolClear(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolClear", reflect.TypeOf((*MockFullNode)(nil).MpoolClear), arg0, arg1) +} + +// MpoolDeleteByAdress mocks base method. +func (m *MockFullNode) MpoolDeleteByAdress(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolDeleteByAdress", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolDeleteByAdress indicates an expected call of MpoolDeleteByAdress. +func (mr *MockFullNodeMockRecorder) MpoolDeleteByAdress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolDeleteByAdress", reflect.TypeOf((*MockFullNode)(nil).MpoolDeleteByAdress), arg0, arg1) +} + +// MpoolGetConfig mocks base method. 
+func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetConfig", arg0) + ret0, _ := ret[0].(*types.MpoolConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetConfig indicates an expected call of MpoolGetConfig. +func (mr *MockFullNodeMockRecorder) MpoolGetConfig(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolGetConfig), arg0) +} + +// MpoolGetNonce mocks base method. +func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetNonce", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetNonce indicates an expected call of MpoolGetNonce. +func (mr *MockFullNodeMockRecorder) MpoolGetNonce(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetNonce", reflect.TypeOf((*MockFullNode)(nil).MpoolGetNonce), arg0, arg1) +} + +// MpoolPending mocks base method. +func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPending", arg0, arg1) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPending indicates an expected call of MpoolPending. +func (mr *MockFullNodeMockRecorder) MpoolPending(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPending", reflect.TypeOf((*MockFullNode)(nil).MpoolPending), arg0, arg1) +} + +// MpoolPublishByAddr mocks base method. 
+func (m *MockFullNode) MpoolPublishByAddr(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPublishByAddr", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolPublishByAddr indicates an expected call of MpoolPublishByAddr. +func (mr *MockFullNodeMockRecorder) MpoolPublishByAddr(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPublishByAddr", reflect.TypeOf((*MockFullNode)(nil).MpoolPublishByAddr), arg0, arg1) +} + +// MpoolPublishMessage mocks base method. +func (m *MockFullNode) MpoolPublishMessage(arg0 context.Context, arg1 *types.SignedMessage) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPublishMessage", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolPublishMessage indicates an expected call of MpoolPublishMessage. +func (mr *MockFullNodeMockRecorder) MpoolPublishMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPublishMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPublishMessage), arg0, arg1) +} + +// MpoolPush mocks base method. +func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPush", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPush indicates an expected call of MpoolPush. +func (mr *MockFullNodeMockRecorder) MpoolPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPush", reflect.TypeOf((*MockFullNode)(nil).MpoolPush), arg0, arg1) +} + +// MpoolPushMessage mocks base method. 
+func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *internal.Message, arg2 *types.MessageSendSpec) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushMessage indicates an expected call of MpoolPushMessage. +func (mr *MockFullNodeMockRecorder) MpoolPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPushMessage), arg0, arg1, arg2) +} + +// MpoolPushUntrusted mocks base method. +func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushUntrusted", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted. +func (mr *MockFullNodeMockRecorder) MpoolPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolPushUntrusted), arg0, arg1) +} + +// MpoolSelect mocks base method. +func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSelect", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSelect indicates an expected call of MpoolSelect. 
+func (mr *MockFullNodeMockRecorder) MpoolSelect(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelect", reflect.TypeOf((*MockFullNode)(nil).MpoolSelect), arg0, arg1, arg2) +} + +// MpoolSelects mocks base method. +func (m *MockFullNode) MpoolSelects(arg0 context.Context, arg1 types.TipSetKey, arg2 []float64) ([][]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSelects", arg0, arg1, arg2) + ret0, _ := ret[0].([][]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSelects indicates an expected call of MpoolSelects. +func (mr *MockFullNodeMockRecorder) MpoolSelects(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelects", reflect.TypeOf((*MockFullNode)(nil).MpoolSelects), arg0, arg1, arg2) +} + +// MpoolSetConfig mocks base method. +func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSetConfig", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolSetConfig indicates an expected call of MpoolSetConfig. +func (mr *MockFullNodeMockRecorder) MpoolSetConfig(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolSetConfig), arg0, arg1) +} + +// MpoolSub mocks base method. +func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan types.MpoolUpdate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSub", arg0) + ret0, _ := ret[0].(<-chan types.MpoolUpdate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSub indicates an expected call of MpoolSub. 
+func (mr *MockFullNodeMockRecorder) MpoolSub(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSub", reflect.TypeOf((*MockFullNode)(nil).MpoolSub), arg0) +} + +// MsigAddApprove mocks base method. +func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddApprove indicates an expected call of MsigAddApprove. +func (mr *MockFullNodeMockRecorder) MsigAddApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddApprove", reflect.TypeOf((*MockFullNode)(nil).MsigAddApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigAddCancel mocks base method. +func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddCancel indicates an expected call of MsigAddCancel. +func (mr *MockFullNodeMockRecorder) MsigAddCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddCancel", reflect.TypeOf((*MockFullNode)(nil).MsigAddCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigAddPropose mocks base method. 
+func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddPropose indicates an expected call of MsigAddPropose. +func (mr *MockFullNodeMockRecorder) MsigAddPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddPropose", reflect.TypeOf((*MockFullNode)(nil).MsigAddPropose), arg0, arg1, arg2, arg3, arg4) +} + +// MsigApprove mocks base method. +func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApprove", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApprove indicates an expected call of MsigApprove. +func (mr *MockFullNodeMockRecorder) MsigApprove(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApprove", reflect.TypeOf((*MockFullNode)(nil).MsigApprove), arg0, arg1, arg2, arg3) +} + +// MsigApproveTxnHash mocks base method. +func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApproveTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash. 
+func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApproveTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigApproveTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) +} + +// MsigCancel mocks base method. +func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancel indicates an expected call of MsigCancel. +func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3) +} + +// MsigCancelTxnHash mocks base method. +func (m *MockFullNode) MsigCancelTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancelTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancelTxnHash indicates an expected call of MsigCancelTxnHash. +func (mr *MockFullNodeMockRecorder) MsigCancelTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancelTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigCancelTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +} + +// MsigCreate mocks base method. 
+func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCreate", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCreate indicates an expected call of MsigCreate. +func (mr *MockFullNodeMockRecorder) MsigCreate(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCreate", reflect.TypeOf((*MockFullNode)(nil).MsigCreate), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigGetVested mocks base method. +func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, arg2, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetVested", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetVested indicates an expected call of MsigGetVested. +func (mr *MockFullNodeMockRecorder) MsigGetVested(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVested", reflect.TypeOf((*MockFullNode)(nil).MsigGetVested), arg0, arg1, arg2, arg3) +} + +// MsigPropose mocks base method. +func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigPropose", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigPropose indicates an expected call of MsigPropose. 
+func (mr *MockFullNodeMockRecorder) MsigPropose(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigPropose", reflect.TypeOf((*MockFullNode)(nil).MsigPropose), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigRemoveSigner mocks base method. +func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigRemoveSigner", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigRemoveSigner indicates an expected call of MsigRemoveSigner. +func (mr *MockFullNodeMockRecorder) MsigRemoveSigner(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigRemoveSigner", reflect.TypeOf((*MockFullNode)(nil).MsigRemoveSigner), arg0, arg1, arg2, arg3, arg4) +} + +// MsigSwapApprove mocks base method. +func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapApprove indicates an expected call of MsigSwapApprove. +func (mr *MockFullNodeMockRecorder) MsigSwapApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapApprove", reflect.TypeOf((*MockFullNode)(nil).MsigSwapApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigSwapCancel mocks base method. 
+func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapCancel indicates an expected call of MsigSwapCancel. +func (mr *MockFullNodeMockRecorder) MsigSwapCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapCancel", reflect.TypeOf((*MockFullNode)(nil).MsigSwapCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigSwapPropose mocks base method. +func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (*types.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*types.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapPropose indicates an expected call of MsigSwapPropose. +func (mr *MockFullNodeMockRecorder) MsigSwapPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapPropose", reflect.TypeOf((*MockFullNode)(nil).MsigSwapPropose), arg0, arg1, arg2, arg3, arg4) +} + +// NetAddrsListen mocks base method. +func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAddrsListen", arg0) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAddrsListen indicates an expected call of NetAddrsListen. 
+func (mr *MockFullNodeMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockFullNode)(nil).NetAddrsListen), arg0) +} + +// NetAgentVersion mocks base method. +func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAgentVersion", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAgentVersion indicates an expected call of NetAgentVersion. +func (mr *MockFullNodeMockRecorder) NetAgentVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAgentVersion", reflect.TypeOf((*MockFullNode)(nil).NetAgentVersion), arg0, arg1) +} + +// NetAutoNatStatus mocks base method. +func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (types.NatInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAutoNatStatus", arg0) + ret0, _ := ret[0].(types.NatInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAutoNatStatus indicates an expected call of NetAutoNatStatus. +func (mr *MockFullNodeMockRecorder) NetAutoNatStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAutoNatStatus", reflect.TypeOf((*MockFullNode)(nil).NetAutoNatStatus), arg0) +} + +// NetBandwidthStats mocks base method. +func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStats", arg0) + ret0, _ := ret[0].(metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStats indicates an expected call of NetBandwidthStats. 
+func (mr *MockFullNodeMockRecorder) NetBandwidthStats(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStats", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStats), arg0) +} + +// NetBandwidthStatsByPeer mocks base method. +func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByPeer", arg0) + ret0, _ := ret[0].(map[string]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer. +func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByPeer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByPeer", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByPeer), arg0) +} + +// NetBandwidthStatsByProtocol mocks base method. +func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[protocol.ID]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByProtocol", arg0) + ret0, _ := ret[0].(map[protocol.ID]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol. +func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByProtocol(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByProtocol", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByProtocol), arg0) +} + +// NetConnect mocks base method. +func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetConnect indicates an expected call of NetConnect. 
+func (mr *MockFullNodeMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockFullNode)(nil).NetConnect), arg0, arg1) +} + +// NetConnectedness mocks base method. +func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (network0.Connectedness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnectedness", arg0, arg1) + ret0, _ := ret[0].(network0.Connectedness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetConnectedness indicates an expected call of NetConnectedness. +func (mr *MockFullNodeMockRecorder) NetConnectedness(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnectedness", reflect.TypeOf((*MockFullNode)(nil).NetConnectedness), arg0, arg1) +} + +// NetDisconnect mocks base method. +func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetDisconnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetDisconnect indicates an expected call of NetDisconnect. +func (mr *MockFullNodeMockRecorder) NetDisconnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetDisconnect", reflect.TypeOf((*MockFullNode)(nil).NetDisconnect), arg0, arg1) +} + +// NetFindPeer mocks base method. +func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetFindPeer indicates an expected call of NetFindPeer. 
+func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) +} + +// NetFindProvidersAsync mocks base method. +func (m *MockFullNode) NetFindProvidersAsync(arg0 context.Context, arg1 cid.Cid, arg2 int) <-chan peer.AddrInfo { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetFindProvidersAsync", arg0, arg1, arg2) + ret0, _ := ret[0].(<-chan peer.AddrInfo) + return ret0 +} + +// NetFindProvidersAsync indicates an expected call of NetFindProvidersAsync. +func (mr *MockFullNodeMockRecorder) NetFindProvidersAsync(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindProvidersAsync", reflect.TypeOf((*MockFullNode)(nil).NetFindProvidersAsync), arg0, arg1, arg2) +} + +// NetGetClosestPeers mocks base method. +func (m *MockFullNode) NetGetClosestPeers(arg0 context.Context, arg1 string) ([]peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetGetClosestPeers", arg0, arg1) + ret0, _ := ret[0].([]peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetGetClosestPeers indicates an expected call of NetGetClosestPeers. +func (mr *MockFullNodeMockRecorder) NetGetClosestPeers(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetGetClosestPeers", reflect.TypeOf((*MockFullNode)(nil).NetGetClosestPeers), arg0, arg1) +} + +// NetPeerInfo mocks base method. +func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*types.ExtendedPeerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeerInfo", arg0, arg1) + ret0, _ := ret[0].(*types.ExtendedPeerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeerInfo indicates an expected call of NetPeerInfo. 
+func (mr *MockFullNodeMockRecorder) NetPeerInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeerInfo", reflect.TypeOf((*MockFullNode)(nil).NetPeerInfo), arg0, arg1) +} + +// NetPeers mocks base method. +func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeers", arg0) + ret0, _ := ret[0].([]peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeers indicates an expected call of NetPeers. +func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0) +} + +// NetPing mocks base method. +func (m *MockFullNode) NetPing(arg0 context.Context, arg1 peer.ID) (time.Duration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPing", arg0, arg1) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPing indicates an expected call of NetPing. +func (mr *MockFullNodeMockRecorder) NetPing(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPing", reflect.TypeOf((*MockFullNode)(nil).NetPing), arg0, arg1) +} + +// NetProtectAdd mocks base method. +func (m *MockFullNode) NetProtectAdd(arg0 context.Context, arg1 []peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetProtectAdd", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetProtectAdd indicates an expected call of NetProtectAdd. +func (mr *MockFullNodeMockRecorder) NetProtectAdd(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectAdd", reflect.TypeOf((*MockFullNode)(nil).NetProtectAdd), arg0, arg1) +} + +// NetProtectList mocks base method. 
+func (m *MockFullNode) NetProtectList(arg0 context.Context) ([]peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetProtectList", arg0) + ret0, _ := ret[0].([]peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetProtectList indicates an expected call of NetProtectList. +func (mr *MockFullNodeMockRecorder) NetProtectList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectList", reflect.TypeOf((*MockFullNode)(nil).NetProtectList), arg0) +} + +// NetProtectRemove mocks base method. +func (m *MockFullNode) NetProtectRemove(arg0 context.Context, arg1 []peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetProtectRemove", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetProtectRemove indicates an expected call of NetProtectRemove. +func (mr *MockFullNodeMockRecorder) NetProtectRemove(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectRemove", reflect.TypeOf((*MockFullNode)(nil).NetProtectRemove), arg0, arg1) +} + +// NetPubsubScores mocks base method. +func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]types.PubsubScore, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPubsubScores", arg0) + ret0, _ := ret[0].([]types.PubsubScore) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPubsubScores indicates an expected call of NetPubsubScores. +func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) +} + +// NodeStatus mocks base method. 
+func (m *MockFullNode) NodeStatus(arg0 context.Context, arg1 bool) (types.NodeStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeStatus", arg0, arg1) + ret0, _ := ret[0].(types.NodeStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeStatus indicates an expected call of NodeStatus. +func (mr *MockFullNodeMockRecorder) NodeStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStatus", reflect.TypeOf((*MockFullNode)(nil).NodeStatus), arg0, arg1) +} + +// PaychAllocateLane mocks base method. +func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAllocateLane", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAllocateLane indicates an expected call of PaychAllocateLane. +func (mr *MockFullNodeMockRecorder) PaychAllocateLane(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAllocateLane", reflect.TypeOf((*MockFullNode)(nil).PaychAllocateLane), arg0, arg1) +} + +// PaychAvailableFunds mocks base method. +func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Address) (*types.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFunds", arg0, arg1) + ret0, _ := ret[0].(*types.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFunds indicates an expected call of PaychAvailableFunds. +func (mr *MockFullNodeMockRecorder) PaychAvailableFunds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFunds", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFunds), arg0, arg1) +} + +// PaychAvailableFundsByFromTo mocks base method. 
+func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, arg2 address.Address) (*types.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFundsByFromTo", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo. +func (mr *MockFullNodeMockRecorder) PaychAvailableFundsByFromTo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFundsByFromTo", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFundsByFromTo), arg0, arg1, arg2) +} + +// PaychCollect mocks base method. +func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychCollect", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychCollect indicates an expected call of PaychCollect. +func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1) +} + +// PaychFund mocks base method. +func (m *MockFullNode) PaychFund(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*types.ChannelInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychFund", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.ChannelInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychFund indicates an expected call of PaychFund. 
+func (mr *MockFullNodeMockRecorder) PaychFund(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychFund", reflect.TypeOf((*MockFullNode)(nil).PaychFund), arg0, arg1, arg2, arg3) +} + +// PaychGet mocks base method. +func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 types.PaychGetOpts) (*types.ChannelInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*types.ChannelInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychGet indicates an expected call of PaychGet. +func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3, arg4) +} + +// PaychGetWaitReady mocks base method. +func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychGetWaitReady", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychGetWaitReady indicates an expected call of PaychGetWaitReady. +func (mr *MockFullNodeMockRecorder) PaychGetWaitReady(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGetWaitReady", reflect.TypeOf((*MockFullNode)(nil).PaychGetWaitReady), arg0, arg1) +} + +// PaychList mocks base method. +func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychList", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychList indicates an expected call of PaychList. 
+func (mr *MockFullNodeMockRecorder) PaychList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychList", reflect.TypeOf((*MockFullNode)(nil).PaychList), arg0) +} + +// PaychNewPayment mocks base method. +func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address.Address, arg3 []types.VoucherSpec) (*types.PaymentInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychNewPayment", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.PaymentInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychNewPayment indicates an expected call of PaychNewPayment. +func (mr *MockFullNodeMockRecorder) PaychNewPayment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychNewPayment", reflect.TypeOf((*MockFullNode)(nil).PaychNewPayment), arg0, arg1, arg2, arg3) +} + +// PaychSettle mocks base method. +func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychSettle", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychSettle indicates an expected call of PaychSettle. +func (mr *MockFullNodeMockRecorder) PaychSettle(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychSettle", reflect.TypeOf((*MockFullNode)(nil).PaychSettle), arg0, arg1) +} + +// PaychStatus mocks base method. +func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) (*types.Status, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychStatus", arg0, arg1) + ret0, _ := ret[0].(*types.Status) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychStatus indicates an expected call of PaychStatus. 
+func (mr *MockFullNodeMockRecorder) PaychStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychStatus", reflect.TypeOf((*MockFullNode)(nil).PaychStatus), arg0, arg1) +} + +// PaychVoucherAdd mocks base method. +func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3 []byte, arg4 big.Int) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherAdd", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherAdd indicates an expected call of PaychVoucherAdd. +func (mr *MockFullNodeMockRecorder) PaychVoucherAdd(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherAdd", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherAdd), arg0, arg1, arg2, arg3, arg4) +} + +// PaychVoucherCheckSpendable mocks base method. +func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCheckSpendable", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable. +func (mr *MockFullNodeMockRecorder) PaychVoucherCheckSpendable(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckSpendable", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckSpendable), arg0, arg1, arg2, arg3, arg4) +} + +// PaychVoucherCheckValid mocks base method. 
+func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCheckValid", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid. +func (mr *MockFullNodeMockRecorder) PaychVoucherCheckValid(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckValid", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckValid), arg0, arg1, arg2) +} + +// PaychVoucherCreate mocks base method. +func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 uint64) (*types.VoucherCreateResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCreate", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.VoucherCreateResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherCreate indicates an expected call of PaychVoucherCreate. +func (mr *MockFullNodeMockRecorder) PaychVoucherCreate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCreate", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCreate), arg0, arg1, arg2, arg3) +} + +// PaychVoucherList mocks base method. +func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1) + ret0, _ := ret[0].([]*paych.SignedVoucher) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherList indicates an expected call of PaychVoucherList. 
+func (mr *MockFullNodeMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherList), arg0, arg1) +} + +// PaychVoucherSubmit mocks base method. +func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherSubmit", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit. +func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) +} + +// ProtocolParameters mocks base method. +func (m *MockFullNode) ProtocolParameters(arg0 context.Context) (*types.ProtocolParams, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProtocolParameters", arg0) + ret0, _ := ret[0].(*types.ProtocolParams) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProtocolParameters indicates an expected call of ProtocolParameters. +func (mr *MockFullNodeMockRecorder) ProtocolParameters(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProtocolParameters", reflect.TypeOf((*MockFullNode)(nil).ProtocolParameters), arg0) +} + +// ResolveToKeyAddr mocks base method. 
+func (m *MockFullNode) ResolveToKeyAddr(arg0 context.Context, arg1 address.Address, arg2 *types.TipSet) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResolveToKeyAddr", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResolveToKeyAddr indicates an expected call of ResolveToKeyAddr. +func (mr *MockFullNodeMockRecorder) ResolveToKeyAddr(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResolveToKeyAddr", reflect.TypeOf((*MockFullNode)(nil).ResolveToKeyAddr), arg0, arg1, arg2) +} + +// SetConcurrent mocks base method. +func (m *MockFullNode) SetConcurrent(arg0 context.Context, arg1 int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetConcurrent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetConcurrent indicates an expected call of SetConcurrent. +func (mr *MockFullNodeMockRecorder) SetConcurrent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetConcurrent", reflect.TypeOf((*MockFullNode)(nil).SetConcurrent), arg0, arg1) +} + +// SetPassword mocks base method. +func (m *MockFullNode) SetPassword(arg0 context.Context, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPassword", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetPassword indicates an expected call of SetPassword. +func (mr *MockFullNodeMockRecorder) SetPassword(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPassword", reflect.TypeOf((*MockFullNode)(nil).SetPassword), arg0, arg1) +} + +// StartTime mocks base method. 
+func (m *MockFullNode) StartTime(arg0 context.Context) (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartTime", arg0) + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StartTime indicates an expected call of StartTime. +func (mr *MockFullNodeMockRecorder) StartTime(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockFullNode)(nil).StartTime), arg0) +} + +// StateAccountKey mocks base method. +func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateAccountKey", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateAccountKey indicates an expected call of StateAccountKey. +func (mr *MockFullNodeMockRecorder) StateAccountKey(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockFullNode)(nil).StateAccountKey), arg0, arg1, arg2) +} + +// StateActorCodeCIDs mocks base method. +func (m *MockFullNode) StateActorCodeCIDs(arg0 context.Context, arg1 network.Version) (map[string]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateActorCodeCIDs", arg0, arg1) + ret0, _ := ret[0].(map[string]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateActorCodeCIDs indicates an expected call of StateActorCodeCIDs. +func (mr *MockFullNodeMockRecorder) StateActorCodeCIDs(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateActorCodeCIDs", reflect.TypeOf((*MockFullNode)(nil).StateActorCodeCIDs), arg0, arg1) +} + +// StateActorManifestCID mocks base method. 
+func (m *MockFullNode) StateActorManifestCID(arg0 context.Context, arg1 network.Version) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateActorManifestCID", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateActorManifestCID indicates an expected call of StateActorManifestCID. +func (mr *MockFullNodeMockRecorder) StateActorManifestCID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateActorManifestCID", reflect.TypeOf((*MockFullNode)(nil).StateActorManifestCID), arg0, arg1) +} + +// StateAllMinerFaults mocks base method. +func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) ([]*types.Fault, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateAllMinerFaults", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.Fault) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateAllMinerFaults indicates an expected call of StateAllMinerFaults. +func (mr *MockFullNodeMockRecorder) StateAllMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAllMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateAllMinerFaults), arg0, arg1, arg2) +} + +// StateCall mocks base method. +func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *internal.Message, arg2 types.TipSetKey) (*types.InvocResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCall", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.InvocResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCall indicates an expected call of StateCall. 
+func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCall", reflect.TypeOf((*MockFullNode)(nil).StateCall), arg0, arg1, arg2) +} + +// StateChangedActors mocks base method. +func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]internal.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2) + ret0, _ := ret[0].(map[string]internal.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateChangedActors indicates an expected call of StateChangedActors. +func (mr *MockFullNodeMockRecorder) StateChangedActors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateChangedActors", reflect.TypeOf((*MockFullNode)(nil).StateChangedActors), arg0, arg1, arg2) +} + +// StateCirculatingSupply mocks base method. +func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCirculatingSupply", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCirculatingSupply indicates an expected call of StateCirculatingSupply. +func (mr *MockFullNodeMockRecorder) StateCirculatingSupply(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCirculatingSupply", reflect.TypeOf((*MockFullNode)(nil).StateCirculatingSupply), arg0, arg1) +} + +// StateComputeDataCID mocks base method. 
+func (m *MockFullNode) StateComputeDataCID(arg0 context.Context, arg1 address.Address, arg2 abi.RegisteredSealProof, arg3 []abi.DealID, arg4 types.TipSetKey) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateComputeDataCID", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateComputeDataCID indicates an expected call of StateComputeDataCID. +func (mr *MockFullNodeMockRecorder) StateComputeDataCID(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateComputeDataCID", reflect.TypeOf((*MockFullNode)(nil).StateComputeDataCID), arg0, arg1, arg2, arg3, arg4) +} + +// StateDealProviderCollateralBounds mocks base method. +func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, arg1 abi.PaddedPieceSize, arg2 bool, arg3 types.TipSetKey) (types.DealCollateralBounds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateDealProviderCollateralBounds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(types.DealCollateralBounds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds. +func (mr *MockFullNodeMockRecorder) StateDealProviderCollateralBounds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDealProviderCollateralBounds", reflect.TypeOf((*MockFullNode)(nil).StateDealProviderCollateralBounds), arg0, arg1, arg2, arg3) +} + +// StateDecodeParams mocks base method. 
+func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Address, arg2 abi.MethodNum, arg3 []byte, arg4 types.TipSetKey) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateDecodeParams", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateDecodeParams indicates an expected call of StateDecodeParams. +func (mr *MockFullNodeMockRecorder) StateDecodeParams(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDecodeParams", reflect.TypeOf((*MockFullNode)(nil).StateDecodeParams), arg0, arg1, arg2, arg3, arg4) +} + +// StateEncodeParams mocks base method. +func (m *MockFullNode) StateEncodeParams(arg0 context.Context, arg1 cid.Cid, arg2 abi.MethodNum, arg3 json.RawMessage) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateEncodeParams", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateEncodeParams indicates an expected call of StateEncodeParams. +func (mr *MockFullNodeMockRecorder) StateEncodeParams(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateEncodeParams", reflect.TypeOf((*MockFullNode)(nil).StateEncodeParams), arg0, arg1, arg2, arg3) +} + +// StateGetActor mocks base method. +func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*internal.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2) + ret0, _ := ret[0].(*internal.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetActor indicates an expected call of StateGetActor. 
+func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) +} + +// StateGetAllocation mocks base method. +func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocation indicates an expected call of StateGetAllocation. +func (mr *MockFullNodeMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocation), arg0, arg1, arg2, arg3) +} + +// StateGetAllocationForPendingDeal mocks base method. +func (m *MockFullNode) StateGetAllocationForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocationForPendingDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(*verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocationForPendingDeal indicates an expected call of StateGetAllocationForPendingDeal. +func (mr *MockFullNodeMockRecorder) StateGetAllocationForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2) +} + +// StateGetAllocations mocks base method. 
+func (m *MockFullNode) StateGetAllocations(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocations", arg0, arg1, arg2) + ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocations indicates an expected call of StateGetAllocations. +func (mr *MockFullNodeMockRecorder) StateGetAllocations(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocations), arg0, arg1, arg2) +} + +// StateGetBeaconEntry mocks base method. +func (m *MockFullNode) StateGetBeaconEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetBeaconEntry", arg0, arg1) + ret0, _ := ret[0].(*types.BeaconEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetBeaconEntry indicates an expected call of StateGetBeaconEntry. +func (mr *MockFullNodeMockRecorder) StateGetBeaconEntry(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetBeaconEntry", reflect.TypeOf((*MockFullNode)(nil).StateGetBeaconEntry), arg0, arg1) +} + +// StateGetClaim mocks base method. +func (m *MockFullNode) StateGetClaim(arg0 context.Context, arg1 address.Address, arg2 verifreg.ClaimId, arg3 types.TipSetKey) (*verifreg.Claim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetClaim", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Claim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetClaim indicates an expected call of StateGetClaim. 
+func (mr *MockFullNodeMockRecorder) StateGetClaim(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetClaim", reflect.TypeOf((*MockFullNode)(nil).StateGetClaim), arg0, arg1, arg2, arg3) +} + +// StateGetClaims mocks base method. +func (m *MockFullNode) StateGetClaims(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetClaims", arg0, arg1, arg2) + ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetClaims indicates an expected call of StateGetClaims. +func (mr *MockFullNodeMockRecorder) StateGetClaims(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetClaims), arg0, arg1, arg2) +} + +// StateGetNetworkParams mocks base method. +func (m *MockFullNode) StateGetNetworkParams(arg0 context.Context) (*types.NetworkParams, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetNetworkParams", arg0) + ret0, _ := ret[0].(*types.NetworkParams) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetNetworkParams indicates an expected call of StateGetNetworkParams. +func (mr *MockFullNodeMockRecorder) StateGetNetworkParams(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetNetworkParams", reflect.TypeOf((*MockFullNode)(nil).StateGetNetworkParams), arg0) +} + +// StateGetRandomnessFromBeacon mocks base method. 
+func (m *MockFullNode) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(abi.Randomness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetRandomnessFromBeacon indicates an expected call of StateGetRandomnessFromBeacon. +func (mr *MockFullNodeMockRecorder) StateGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4) +} + +// StateGetRandomnessFromTickets mocks base method. +func (m *MockFullNode) StateGetRandomnessFromTickets(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(abi.Randomness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetRandomnessFromTickets indicates an expected call of StateGetRandomnessFromTickets. +func (mr *MockFullNodeMockRecorder) StateGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4) +} + +// StateListActors mocks base method. 
+func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListActors", arg0, arg1) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListActors indicates an expected call of StateListActors. +func (mr *MockFullNodeMockRecorder) StateListActors(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListActors", reflect.TypeOf((*MockFullNode)(nil).StateListActors), arg0, arg1) +} + +// StateListMessages mocks base method. +func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *types.MessageMatch, arg2 types.TipSetKey, arg3 abi.ChainEpoch) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListMessages", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListMessages indicates an expected call of StateListMessages. +func (mr *MockFullNodeMockRecorder) StateListMessages(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMessages", reflect.TypeOf((*MockFullNode)(nil).StateListMessages), arg0, arg1, arg2, arg3) +} + +// StateListMiners mocks base method. +func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListMiners", arg0, arg1) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListMiners indicates an expected call of StateListMiners. 
+func (mr *MockFullNodeMockRecorder) StateListMiners(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMiners", reflect.TypeOf((*MockFullNode)(nil).StateListMiners), arg0, arg1) +} + +// StateLookupID mocks base method. +func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateLookupID", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateLookupID indicates an expected call of StateLookupID. +func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2) +} + +// StateLookupRobustAddress mocks base method. +func (m *MockFullNode) StateLookupRobustAddress(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateLookupRobustAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateLookupRobustAddress indicates an expected call of StateLookupRobustAddress. +func (mr *MockFullNodeMockRecorder) StateLookupRobustAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupRobustAddress", reflect.TypeOf((*MockFullNode)(nil).StateLookupRobustAddress), arg0, arg1, arg2) +} + +// StateMarketBalance mocks base method. 
+func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (types.MarketBalance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(types.MarketBalance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketBalance indicates an expected call of StateMarketBalance. +func (mr *MockFullNodeMockRecorder) StateMarketBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketBalance", reflect.TypeOf((*MockFullNode)(nil).StateMarketBalance), arg0, arg1, arg2) +} + +// StateMarketDeals mocks base method. +func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetKey) (map[string]*types.MarketDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketDeals", arg0, arg1) + ret0, _ := ret[0].(map[string]*types.MarketDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketDeals indicates an expected call of StateMarketDeals. +func (mr *MockFullNodeMockRecorder) StateMarketDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketDeals", reflect.TypeOf((*MockFullNode)(nil).StateMarketDeals), arg0, arg1) +} + +// StateMarketParticipants mocks base method. +func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types.TipSetKey) (map[string]types.MarketBalance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketParticipants", arg0, arg1) + ret0, _ := ret[0].(map[string]types.MarketBalance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketParticipants indicates an expected call of StateMarketParticipants. 
+func (mr *MockFullNodeMockRecorder) StateMarketParticipants(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketParticipants", reflect.TypeOf((*MockFullNode)(nil).StateMarketParticipants), arg0, arg1) +} + +// StateMarketStorageDeal mocks base method. +func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*types.MarketDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketStorageDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.MarketDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal. +func (mr *MockFullNodeMockRecorder) StateMarketStorageDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDeal", reflect.TypeOf((*MockFullNode)(nil).StateMarketStorageDeal), arg0, arg1, arg2) +} + +// StateMinerActiveSectors mocks base method. +func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors. +func (mr *MockFullNodeMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerActiveSectors), arg0, arg1, arg2) +} + +// StateMinerAllocated mocks base method. 
+func (m *MockFullNode) StateMinerAllocated(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*bitfield.BitField, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAllocated", arg0, arg1, arg2) + ret0, _ := ret[0].(*bitfield.BitField) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAllocated indicates an expected call of StateMinerAllocated. +func (mr *MockFullNodeMockRecorder) StateMinerAllocated(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerAllocated), arg0, arg1, arg2) +} + +// StateMinerAvailableBalance mocks base method. +func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. +func (mr *MockFullNodeMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + +// StateMinerDeadlines mocks base method. +func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]types.Deadline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerDeadlines", arg0, arg1, arg2) + ret0, _ := ret[0].([]types.Deadline) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerDeadlines indicates an expected call of StateMinerDeadlines. 
+func (mr *MockFullNodeMockRecorder) StateMinerDeadlines(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerDeadlines", reflect.TypeOf((*MockFullNode)(nil).StateMinerDeadlines), arg0, arg1, arg2) +} + +// StateMinerFaults mocks base method. +func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerFaults", arg0, arg1, arg2) + ret0, _ := ret[0].(bitfield.BitField) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerFaults indicates an expected call of StateMinerFaults. +func (mr *MockFullNodeMockRecorder) StateMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateMinerFaults), arg0, arg1, arg2) +} + +// StateMinerInfo mocks base method. +func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (types.MinerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) + ret0, _ := ret[0].(types.MinerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInfo indicates an expected call of StateMinerInfo. +func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockFullNode)(nil).StateMinerInfo), arg0, arg1, arg2) +} + +// StateMinerInitialPledgeCollateral mocks base method. 
+func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral. +func (mr *MockFullNodeMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockFullNode)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3) +} + +// StateMinerPartitions mocks base method. +func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 types.TipSetKey) ([]types.Partition, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPartitions", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]types.Partition) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPartitions indicates an expected call of StateMinerPartitions. +func (mr *MockFullNodeMockRecorder) StateMinerPartitions(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPartitions", reflect.TypeOf((*MockFullNode)(nil).StateMinerPartitions), arg0, arg1, arg2, arg3) +} + +// StateMinerPower mocks base method. +func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.MinerPower, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPower", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.MinerPower) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPower indicates an expected call of StateMinerPower. 
+func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPower), arg0, arg1, arg2) +} + +// StateMinerPreCommitDepositForPower mocks base method. +func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower. +func (mr *MockFullNodeMockRecorder) StateMinerPreCommitDepositForPower(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPreCommitDepositForPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPreCommitDepositForPower), arg0, arg1, arg2, arg3) +} + +// StateMinerProvingDeadline mocks base method. +func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*dline.Info, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerProvingDeadline", arg0, arg1, arg2) + ret0, _ := ret[0].(*dline.Info) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline. +func (mr *MockFullNodeMockRecorder) StateMinerProvingDeadline(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerProvingDeadline", reflect.TypeOf((*MockFullNode)(nil).StateMinerProvingDeadline), arg0, arg1, arg2) +} + +// StateMinerRecoveries mocks base method. 
+func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerRecoveries", arg0, arg1, arg2) + ret0, _ := ret[0].(bitfield.BitField) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerRecoveries indicates an expected call of StateMinerRecoveries. +func (mr *MockFullNodeMockRecorder) StateMinerRecoveries(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerRecoveries", reflect.TypeOf((*MockFullNode)(nil).StateMinerRecoveries), arg0, arg1, arg2) +} + +// StateMinerSectorAllocated mocks base method. +func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorAllocated", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated. +func (mr *MockFullNodeMockRecorder) StateMinerSectorAllocated(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorAllocated), arg0, arg1, arg2, arg3) +} + +// StateMinerSectorCount mocks base method. +func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (types.MinerSectors, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorCount", arg0, arg1, arg2) + ret0, _ := ret[0].(types.MinerSectors) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorCount indicates an expected call of StateMinerSectorCount. 
+func (mr *MockFullNodeMockRecorder) StateMinerSectorCount(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorCount", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorCount), arg0, arg1, arg2) +} + +// StateMinerSectorSize mocks base method. +func (m *MockFullNode) StateMinerSectorSize(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (abi.SectorSize, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorSize", arg0, arg1, arg2) + ret0, _ := ret[0].(abi.SectorSize) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorSize indicates an expected call of StateMinerSectorSize. +func (mr *MockFullNodeMockRecorder) StateMinerSectorSize(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorSize", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorSize), arg0, arg1, arg2) +} + +// StateMinerSectors mocks base method. +func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Address, arg2 *bitfield.BitField, arg3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectors", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectors indicates an expected call of StateMinerSectors. +func (mr *MockFullNodeMockRecorder) StateMinerSectors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectors), arg0, arg1, arg2, arg3) +} + +// StateMinerWorkerAddress mocks base method. 
+func (m *MockFullNode) StateMinerWorkerAddress(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerWorkerAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerWorkerAddress indicates an expected call of StateMinerWorkerAddress. +func (mr *MockFullNodeMockRecorder) StateMinerWorkerAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerWorkerAddress", reflect.TypeOf((*MockFullNode)(nil).StateMinerWorkerAddress), arg0, arg1, arg2) +} + +// StateNetworkName mocks base method. +func (m *MockFullNode) StateNetworkName(arg0 context.Context) (types.NetworkName, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkName", arg0) + ret0, _ := ret[0].(types.NetworkName) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkName indicates an expected call of StateNetworkName. +func (mr *MockFullNodeMockRecorder) StateNetworkName(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkName", reflect.TypeOf((*MockFullNode)(nil).StateNetworkName), arg0) +} + +// StateNetworkVersion mocks base method. +func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipSetKey) (network.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) + ret0, _ := ret[0].(network.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkVersion indicates an expected call of StateNetworkVersion. 
+func (mr *MockFullNodeMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockFullNode)(nil).StateNetworkVersion), arg0, arg1) +} + +// StateReadState mocks base method. +func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.ActorState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateReadState", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ActorState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateReadState indicates an expected call of StateReadState. +func (mr *MockFullNodeMockRecorder) StateReadState(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReadState", reflect.TypeOf((*MockFullNode)(nil).StateReadState), arg0, arg1, arg2) +} + +// StateSearchMsg mocks base method. +func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid, arg3 abi.ChainEpoch, arg4 bool) (*types.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*types.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSearchMsg indicates an expected call of StateSearchMsg. +func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1, arg2, arg3, arg4) +} + +// StateSectorExpiration mocks base method. 
+func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorExpiration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner0.SectorExpiration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorExpiration indicates an expected call of StateSectorExpiration. +func (mr *MockFullNodeMockRecorder) StateSectorExpiration(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorExpiration", reflect.TypeOf((*MockFullNode)(nil).StateSectorExpiration), arg0, arg1, arg2, arg3) +} + +// StateSectorGetInfo mocks base method. +func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorGetInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorGetInfo indicates an expected call of StateSectorGetInfo. +func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorGetInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorGetInfo), arg0, arg1, arg2, arg3) +} + +// StateSectorPartition mocks base method. +func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorLocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner0.SectorLocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPartition indicates an expected call of StateSectorPartition. 
+func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPartition", reflect.TypeOf((*MockFullNode)(nil).StateSectorPartition), arg0, arg1, arg2, arg3) +} + +// StateSectorPreCommitInfo mocks base method. +func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorPreCommitOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo. +func (mr *MockFullNodeMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) +} + +// StateVMCirculatingSupplyInternal mocks base method. +func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (types.CirculatingSupply, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) + ret0, _ := ret[0].(types.CirculatingSupply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal. +func (mr *MockFullNodeMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockFullNode)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) +} + +// StateVerifiedClientStatus mocks base method. 
+func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedClientStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus. +func (mr *MockFullNodeMockRecorder) StateVerifiedClientStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedClientStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedClientStatus), arg0, arg1, arg2) +} + +// StateVerifiedRegistryRootKey mocks base method. +func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedRegistryRootKey", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey. +func (mr *MockFullNodeMockRecorder) StateVerifiedRegistryRootKey(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedRegistryRootKey", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedRegistryRootKey), arg0, arg1) +} + +// StateVerifierStatus mocks base method. +func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifierStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifierStatus indicates an expected call of StateVerifierStatus. 
+func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifierStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifierStatus), arg0, arg1, arg2) +} + +// StateWaitMsg mocks base method. +func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch, arg4 bool) (*types.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*types.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateWaitMsg indicates an expected call of StateWaitMsg. +func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2, arg3, arg4) +} + +// SyncState mocks base method. +func (m *MockFullNode) SyncState(arg0 context.Context) (*types.SyncState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncState", arg0) + ret0, _ := ret[0].(*types.SyncState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncState indicates an expected call of SyncState. +func (mr *MockFullNodeMockRecorder) SyncState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockFullNode)(nil).SyncState), arg0) +} + +// SyncSubmitBlock mocks base method. +func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMsg) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncSubmitBlock", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncSubmitBlock indicates an expected call of SyncSubmitBlock. 
+func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1) +} + +// SyncerTracker mocks base method. +func (m *MockFullNode) SyncerTracker(arg0 context.Context) *types.TargetTracker { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncerTracker", arg0) + ret0, _ := ret[0].(*types.TargetTracker) + return ret0 +} + +// SyncerTracker indicates an expected call of SyncerTracker. +func (mr *MockFullNodeMockRecorder) SyncerTracker(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncerTracker", reflect.TypeOf((*MockFullNode)(nil).SyncerTracker), arg0) +} + +// UnLockWallet mocks base method. +func (m *MockFullNode) UnLockWallet(arg0 context.Context, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnLockWallet", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnLockWallet indicates an expected call of UnLockWallet. +func (mr *MockFullNodeMockRecorder) UnLockWallet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnLockWallet", reflect.TypeOf((*MockFullNode)(nil).UnLockWallet), arg0, arg1) +} + +// VerifyEntry mocks base method. +func (m *MockFullNode) VerifyEntry(arg0, arg1 *types.BeaconEntry, arg2 abi.ChainEpoch) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyEntry", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + return ret0 +} + +// VerifyEntry indicates an expected call of VerifyEntry. +func (mr *MockFullNodeMockRecorder) VerifyEntry(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyEntry", reflect.TypeOf((*MockFullNode)(nil).VerifyEntry), arg0, arg1, arg2) +} + +// Version mocks base method. 
+func (m *MockFullNode) Version(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockFullNodeMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockFullNode)(nil).Version), arg0) +} + +// WalletAddresses mocks base method. +func (m *MockFullNode) WalletAddresses(arg0 context.Context) []address.Address { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletAddresses", arg0) + ret0, _ := ret[0].([]address.Address) + return ret0 +} + +// WalletAddresses indicates an expected call of WalletAddresses. +func (mr *MockFullNodeMockRecorder) WalletAddresses(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletAddresses", reflect.TypeOf((*MockFullNode)(nil).WalletAddresses), arg0) +} + +// WalletBalance mocks base method. +func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletBalance", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletBalance indicates an expected call of WalletBalance. +func (mr *MockFullNodeMockRecorder) WalletBalance(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletBalance", reflect.TypeOf((*MockFullNode)(nil).WalletBalance), arg0, arg1) +} + +// WalletDefaultAddress mocks base method. 
+func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDefaultAddress", arg0) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletDefaultAddress indicates an expected call of WalletDefaultAddress. +func (mr *MockFullNodeMockRecorder) WalletDefaultAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDefaultAddress", reflect.TypeOf((*MockFullNode)(nil).WalletDefaultAddress), arg0) +} + +// WalletDelete mocks base method. +func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletDelete indicates an expected call of WalletDelete. +func (mr *MockFullNodeMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockFullNode)(nil).WalletDelete), arg0, arg1) +} + +// WalletExport mocks base method. +func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address, arg2 string) (*types.KeyInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletExport", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.KeyInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletExport indicates an expected call of WalletExport. +func (mr *MockFullNodeMockRecorder) WalletExport(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockFullNode)(nil).WalletExport), arg0, arg1, arg2) +} + +// WalletHas mocks base method. 
+func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas. +func (mr *MockFullNodeMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockFullNode)(nil).WalletHas), arg0, arg1) +} + +// WalletImport mocks base method. +func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletImport", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletImport indicates an expected call of WalletImport. +func (mr *MockFullNodeMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockFullNode)(nil).WalletImport), arg0, arg1) +} + +// WalletNewAddress mocks base method. +func (m *MockFullNode) WalletNewAddress(arg0 context.Context, arg1 byte) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletNewAddress", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletNewAddress indicates an expected call of WalletNewAddress. +func (mr *MockFullNodeMockRecorder) WalletNewAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNewAddress", reflect.TypeOf((*MockFullNode)(nil).WalletNewAddress), arg0, arg1) +} + +// WalletSetDefault mocks base method. 
+func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSetDefault", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletSetDefault indicates an expected call of WalletSetDefault. +func (mr *MockFullNodeMockRecorder) WalletSetDefault(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSetDefault", reflect.TypeOf((*MockFullNode)(nil).WalletSetDefault), arg0, arg1) +} + +// WalletSign mocks base method. +func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 types.MsgMeta) (*crypto.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*crypto.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSign indicates an expected call of WalletSign. +func (mr *MockFullNodeMockRecorder) WalletSign(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockFullNode)(nil).WalletSign), arg0, arg1, arg2, arg3) +} + +// WalletSignMessage mocks base method. +func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Address, arg2 *internal.Message) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSignMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSignMessage indicates an expected call of WalletSignMessage. +func (mr *MockFullNodeMockRecorder) WalletSignMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSignMessage", reflect.TypeOf((*MockFullNode)(nil).WalletSignMessage), arg0, arg1, arg2) +} + +// WalletState mocks base method. 
+func (m *MockFullNode) WalletState(arg0 context.Context) int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletState", arg0) + ret0, _ := ret[0].(int) + return ret0 +} + +// WalletState indicates an expected call of WalletState. +func (mr *MockFullNodeMockRecorder) WalletState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletState", reflect.TypeOf((*MockFullNode)(nil).WalletState), arg0) +} diff --git a/venus-shared/api/chain/v1/mpool.go b/venus-shared/api/chain/v1/mpool.go new file mode 100644 index 0000000000..945d8337e6 --- /dev/null +++ b/venus-shared/api/chain/v1/mpool.go @@ -0,0 +1,42 @@ +package v1 + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IMessagePool interface { + MpoolDeleteByAdress(ctx context.Context, addr address.Address) error //perm:admin + MpoolPublishByAddr(context.Context, address.Address) error //perm:write + MpoolPublishMessage(ctx context.Context, smsg *types.SignedMessage) error //perm:write + MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) //perm:write + MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read + MpoolSetConfig(ctx context.Context, cfg *types.MpoolConfig) error //perm:admin + MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read + MpoolSelects(context.Context, types.TipSetKey, []float64) ([][]*types.SignedMessage, error) //perm:read + MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) //perm:read + MpoolClear(ctx context.Context, local bool) error //perm:write + MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) //perm:write + MpoolPushMessage(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) 
(*types.SignedMessage, error) //perm:sign + MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) //perm:write + MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) //perm:write + MpoolBatchPushMessage(ctx context.Context, msgs []*types.Message, spec *types.MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign + MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) //perm:read + MpoolSub(ctx context.Context) (<-chan types.MpoolUpdate, error) //perm:read + GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) //perm:read + GasBatchEstimateMessageGas(ctx context.Context, estimateMessages []*types.EstimateMessage, fromNonce uint64, tsk types.TipSetKey) ([]*types.EstimateResult, error) //perm:read + GasEstimateFeeCap(ctx context.Context, msg *types.Message, maxqueueblks int64, tsk types.TipSetKey) (big.Int, error) //perm:read + GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (big.Int, error) //perm:read + GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) //perm:read + // MpoolCheckMessages performs logical checks on a batch of messages + MpoolCheckMessages(ctx context.Context, protos []*types.MessagePrototype) ([][]types.MessageCheckStatus, error) //perm:read + // MpoolCheckPendingMessages performs logical checks for all pending messages from a given address + MpoolCheckPendingMessages(ctx context.Context, addr address.Address) ([][]types.MessageCheckStatus, error) //perm:read + // MpoolCheckReplaceMessages performs logical checks on pending messages with replacement + MpoolCheckReplaceMessages(ctx context.Context, msg []*types.Message) ([][]types.MessageCheckStatus, error) //perm:read +} diff --git a/venus-shared/api/chain/v1/multisig.go 
b/venus-shared/api/chain/v1/multisig.go new file mode 100644 index 0000000000..48d145cbb6 --- /dev/null +++ b/venus-shared/api/chain/v1/multisig.go @@ -0,0 +1,30 @@ +package v1 + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IMultiSig interface { + MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (*types.MessagePrototype, error) //perm:sign + MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*types.MessagePrototype, error) //perm:sign + MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (*types.MessagePrototype, error) //perm:sign + MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (*types.MessagePrototype, error) //perm:sign + MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (*types.MessagePrototype, error) //perm:sign + // MsigCancelTxnHash cancels a previously-proposed multisig message + // It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>, + // <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message> + MsigCancelTxnHash(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*types.MessagePrototype, error) //perm:sign + MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (*types.MessagePrototype, error) //perm:sign + MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (*types.MessagePrototype, error) //perm:sign + 
MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (*types.MessagePrototype, error) //perm:sign + MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*types.MessagePrototype, error) //perm:sign + MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*types.MessagePrototype, error) //perm:sign + MsigCancel(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*types.MessagePrototype, error) //perm:sign + MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*types.MessagePrototype, error) //perm:sign + MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) //perm:read +} diff --git a/venus-shared/api/chain/v1/network.go b/venus-shared/api/chain/v1/network.go new file mode 100644 index 0000000000..e8428a2612 --- /dev/null +++ b/venus-shared/api/chain/v1/network.go @@ -0,0 +1,47 @@ +package v1 + +import ( + "context" + "time" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/metrics" + network2 "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type INetwork interface { + NetFindProvidersAsync(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo //perm:read + NetGetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) //perm:read + NetConnectedness(context.Context, peer.ID) (network2.Connectedness, error) //perm:read + NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) //perm:read + NetConnect(ctx context.Context, pi peer.AddrInfo) error //perm:admin 
+ NetPeers(ctx context.Context) ([]peer.AddrInfo, error) //perm:read + NetPeerInfo(ctx context.Context, p peer.ID) (*types.ExtendedPeerInfo, error) //perm:read + NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read + NetPing(ctx context.Context, p peer.ID) (time.Duration, error) //perm:read + NetAddrsListen(ctx context.Context) (peer.AddrInfo, error) //perm:read + NetDisconnect(ctx context.Context, p peer.ID) error //perm:admin + NetAutoNatStatus(context.Context) (types.NatInfo, error) //perm:read + NetPubsubScores(context.Context) ([]types.PubsubScore, error) //perm:read + ID(ctx context.Context) (peer.ID, error) //perm:read + + // NetBandwidthStats returns statistics about the nodes total bandwidth + // usage and current rate across all peers and protocols. + NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read + + // NetBandwidthStatsByPeer returns statistics about the nodes bandwidth + // usage and current rate per peer + NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read + + // NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth + // usage and current rate per protocol + NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read + + NetProtectAdd(ctx context.Context, acl []peer.ID) error //perm:admin + NetProtectRemove(ctx context.Context, acl []peer.ID) error //perm:admin + NetProtectList(ctx context.Context) ([]peer.ID, error) //perm:read +} diff --git a/venus-shared/api/chain/v1/paych.go b/venus-shared/api/chain/v1/paych.go new file mode 100644 index 0000000000..b899ec4487 --- /dev/null +++ b/venus-shared/api/chain/v1/paych.go @@ -0,0 +1,88 @@ +package v1 + +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" +) + +type IPaychan interface { + // PaychGet gets or 
creates a payment channel between address pair + // The specified amount will be reserved for use. If there aren't enough non-reserved funds + // available, funds will be added through an on-chain message. + // - When opts.OffChain is true, this call will not cause any messages to be sent to the chain (no automatic + // channel creation/funds adding). If the operation can't be performed without sending a message an error will be + // returned. Note that even when this option is specified, this call can be blocked by previous operations on the + // channel waiting for on-chain operations. + PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt, opts types.PaychGetOpts) (*types.ChannelInfo, error) //perm:sign + // PaychFund gets or creates a payment channel between address pair. + // The specified amount will be added to the channel through on-chain send for future use + PaychFund(ctx context.Context, from, to address.Address, amt types.BigInt) (*types.ChannelInfo, error) //perm:sign + // PaychAvailableFunds get the status of an outbound payment channel + // @pch: payment channel address + PaychAvailableFunds(ctx context.Context, ch address.Address) (*types.ChannelAvailableFunds, error) //perm:sign + // PaychAvailableFundsByFromTo get the status of an outbound payment channel + // @from: the payment channel sender + // @to: the payment channel recipient + PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*types.ChannelAvailableFunds, error) //perm:sign + // PaychGetWaitReady waits until the create channel / add funds message with the sentinel + // @sentinel: given message CID arrives. + // @ch: the returned channel address can safely be used against the Manager methods. 
+ PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) //perm:sign + // PaychAllocateLane creates a lane within a payment channel so that calls to + // CreatePaymentVoucher will automatically make vouchers only for the difference in total + PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign + // PaychNewPayment aggregate vouchers into a new lane + // @from: the payment channel sender + // @to: the payment channel recipient + // @vouchers: the outstanding (non-redeemed) vouchers + PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []types.VoucherSpec) (*types.PaymentInfo, error) //perm:sign + // PaychList list the addresses of all channels that have been created + PaychList(ctx context.Context) ([]address.Address, error) //perm:read + // PaychStatus get the payment channel status + // @pch: payment channel address + PaychStatus(ctx context.Context, pch address.Address) (*types.Status, error) //perm:read + // PaychSettle update payment channel status to settle + // After a settlement period (currently 12 hours) either party to the payment channel can call collect on chain + // @pch: payment channel address + PaychSettle(ctx context.Context, addr address.Address) (cid.Cid, error) //perm:sign + // PaychCollect update payment channel status to collect + // Collect sends the value of submitted vouchers to the channel recipient (the provider), + // and refunds the remaining channel balance to the channel creator (the client). + // @pch: payment channel address + PaychCollect(ctx context.Context, addr address.Address) (cid.Cid, error) //perm:sign + + // PaychVoucherCheckValid checks if the given voucher is valid (is or could become spendable at some point). + // If the channel is not in the store, fetches the channel from state (and checks that + // the channel To address is owned by the wallet). 
+ // @pch: payment channel address + // @sv: voucher + PaychVoucherCheckValid(ctx context.Context, ch address.Address, sv *types.SignedVoucher) error //perm:read + // PaychVoucherCheckSpendable checks if the given voucher is currently spendable + // @pch: payment channel address + // @sv: voucher + PaychVoucherCheckSpendable(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (bool, error) //perm:read + // PaychVoucherAdd adds a voucher for an inbound channel. + // If the channel is not in the store, fetches the channel from state (and checks that + // the channel To address is owned by the wallet). + PaychVoucherAdd(ctx context.Context, ch address.Address, sv *types.SignedVoucher, proof []byte, minDelta big.Int) (big.Int, error) //perm:write + // PaychVoucherCreate creates a new signed voucher on the given payment channel + // with the given lane and amount. The value passed in is exactly the value + // that will be used to create the voucher, so if previous vouchers exist, the + // actual additional value of this voucher will only be the difference between + // the two. + // If there are insufficient funds in the channel to create the voucher, + // returns a nil voucher and the shortfall. 
+ PaychVoucherCreate(ctx context.Context, pch address.Address, amt big.Int, lane uint64) (*types.VoucherCreateResult, error) //perm:sign + // PaychVoucherList list vouchers in payment channel + // @pch: payment channel address + PaychVoucherList(ctx context.Context, pch address.Address) ([]*types.SignedVoucher, error) //perm:write + // PaychVoucherSubmit Submit voucher to chain to update payment channel state + // @pch: payment channel address + // @sv: voucher in payment channel + PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) //perm:sign +} diff --git a/venus-shared/api/chain/v1/proxy_gen.go b/venus-shared/api/chain/v1/proxy_gen.go new file mode 100644 index 0000000000..fb82828c51 --- /dev/null +++ b/venus-shared/api/chain/v1/proxy_gen.go @@ -0,0 +1,904 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package v1 + +import ( + "context" + "encoding/json" + "time" + + address "github.com/filecoin-project/go-address" + bitfield "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/metrics" + network2 "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + lminer "github.com/filecoin-project/venus/venus-shared/actors/builtin/miner" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type IBlockStoreStruct struct { + Internal struct { + ChainDeleteObj func(ctx context.Context, obj cid.Cid) error `perm:"admin"` + ChainHasObj func(ctx context.Context, obj cid.Cid) (bool, error) `perm:"read"` + 
ChainPutObj func(context.Context, blocks.Block) error `perm:"admin"` + ChainReadObj func(ctx context.Context, cid cid.Cid) ([]byte, error) `perm:"read"` + ChainStatObj func(ctx context.Context, obj cid.Cid, base cid.Cid) (types.ObjStat, error) `perm:"read"` + } +} + +func (s *IBlockStoreStruct) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { + return s.Internal.ChainDeleteObj(p0, p1) +} +func (s *IBlockStoreStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ChainHasObj(p0, p1) +} +func (s *IBlockStoreStruct) ChainPutObj(p0 context.Context, p1 blocks.Block) error { + return s.Internal.ChainPutObj(p0, p1) +} +func (s *IBlockStoreStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return s.Internal.ChainReadObj(p0, p1) +} +func (s *IBlockStoreStruct) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (types.ObjStat, error) { + return s.Internal.ChainStatObj(p0, p1, p2) +} + +type IAccountStruct struct { + Internal struct { + StateAccountKey func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"` + } +} + +func (s *IAccountStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateAccountKey(p0, p1, p2) +} + +type IActorStruct struct { + Internal struct { + ListActor func(ctx context.Context) (map[address.Address]*types.Actor, error) `perm:"read"` + StateGetActor func(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) `perm:"read"` + } +} + +func (s *IActorStruct) ListActor(p0 context.Context) (map[address.Address]*types.Actor, error) { + return s.Internal.ListActor(p0) +} +func (s *IActorStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return s.Internal.StateGetActor(p0, p1, p2) +} + +type IMinerStateStruct struct { + Internal struct { + StateAllMinerFaults func(ctx 
context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*types.Fault, error) `perm:"read"` + StateChangedActors func(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) `perm:"read"` + StateCirculatingSupply func(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) `perm:"read"` + StateComputeDataCID func(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) `perm:"read"` + StateDealProviderCollateralBounds func(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (types.DealCollateralBounds, error) `perm:"read"` + StateDecodeParams func(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) `perm:"read"` + StateEncodeParams func(ctx context.Context, toActCode cid.Cid, method abi.MethodNum, params json.RawMessage) ([]byte, error) `perm:"read"` + StateGetAllocation func(ctx context.Context, clientAddr address.Address, allocationID types.AllocationId, tsk types.TipSetKey) (*types.Allocation, error) `perm:"read"` + StateGetAllocationForPendingDeal func(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*types.Allocation, error) `perm:"read"` + StateGetAllocations func(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[types.AllocationId]types.Allocation, error) `perm:"read"` + StateGetClaim func(ctx context.Context, providerAddr address.Address, claimID types.ClaimId, tsk types.TipSetKey) (*types.Claim, error) `perm:"read"` + StateGetClaims func(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[types.ClaimId]types.Claim, error) `perm:"read"` + StateListActors func(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) `perm:"read"` + StateListMessages func(ctx context.Context, match *types.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"` 
+ StateListMiners func(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) `perm:"read"` + StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"` + StateLookupRobustAddress func(context.Context, address.Address, types.TipSetKey) (address.Address, error) `perm:"read"` + StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MarketBalance, error) `perm:"read"` + StateMarketDeals func(ctx context.Context, tsk types.TipSetKey) (map[string]*types.MarketDeal, error) `perm:"read"` + StateMarketStorageDeal func(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*types.MarketDeal, error) `perm:"read"` + StateMinerActiveSectors func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) `perm:"read"` + StateMinerAllocated func(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error) `perm:"read"` + StateMinerAvailableBalance func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + StateMinerDeadlines func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]types.Deadline, error) `perm:"read"` + StateMinerFaults func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + StateMinerInfo func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.MinerInfo, error) `perm:"read"` + StateMinerInitialPledgeCollateral func(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + StateMinerPartitions func(ctx context.Context, maddr address.Address, dlIdx uint64, tsk types.TipSetKey) ([]types.Partition, error) `perm:"read"` + StateMinerPower func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.MinerPower, error) `perm:"read"` + StateMinerPreCommitDepositForPower 
func(ctx context.Context, maddr address.Address, pci types.SectorPreCommitInfo, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + StateMinerProvingDeadline func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (*dline.Info, error) `perm:"read"` + StateMinerRecoveries func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + StateMinerSectorAllocated func(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) `perm:"read"` + StateMinerSectorCount func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.MinerSectors, error) `perm:"read"` + StateMinerSectorSize func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (abi.SectorSize, error) `perm:"read"` + StateMinerSectors func(ctx context.Context, maddr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*types.SectorOnChainInfo, error) `perm:"read"` + StateMinerWorkerAddress func(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"` + StateReadState func(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.ActorState, error) `perm:"read"` + StateSectorExpiration func(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorExpiration, error) `perm:"read"` + StateSectorGetInfo func(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*types.SectorOnChainInfo, error) `perm:"read"` + StateSectorPartition func(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorLocation, error) `perm:"read"` + StateSectorPreCommitInfo func(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*types.SectorPreCommitOnChainInfo, error) `perm:"read"` + StateVMCirculatingSupplyInternal func(ctx context.Context, tsk 
types.TipSetKey) (types.CirculatingSupply, error) `perm:"read"` + StateVerifiedClientStatus func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` + } +} + +func (s *IMinerStateStruct) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*types.Fault, error) { + return s.Internal.StateAllMinerFaults(p0, p1, p2) +} +func (s *IMinerStateStruct) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) { + return s.Internal.StateChangedActors(p0, p1, p2) +} +func (s *IMinerStateStruct) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { + return s.Internal.StateCirculatingSupply(p0, p1) +} +func (s *IMinerStateStruct) StateComputeDataCID(p0 context.Context, p1 address.Address, p2 abi.RegisteredSealProof, p3 []abi.DealID, p4 types.TipSetKey) (cid.Cid, error) { + return s.Internal.StateComputeDataCID(p0, p1, p2, p3, p4) +} +func (s *IMinerStateStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (types.DealCollateralBounds, error) { + return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) { + return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4) +} +func (s *IMinerStateStruct) StateEncodeParams(p0 context.Context, p1 cid.Cid, p2 abi.MethodNum, p3 json.RawMessage) ([]byte, error) { + return s.Internal.StateEncodeParams(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 types.AllocationId, p3 types.TipSetKey) (*types.Allocation, error) { + return s.Internal.StateGetAllocation(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) 
(*types.Allocation, error) { + return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2) +} +func (s *IMinerStateStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[types.AllocationId]types.Allocation, error) { + return s.Internal.StateGetAllocations(p0, p1, p2) +} +func (s *IMinerStateStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 types.ClaimId, p3 types.TipSetKey) (*types.Claim, error) { + return s.Internal.StateGetClaim(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[types.ClaimId]types.Claim, error) { + return s.Internal.StateGetClaims(p0, p1, p2) +} +func (s *IMinerStateStruct) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListActors(p0, p1) +} +func (s *IMinerStateStruct) StateListMessages(p0 context.Context, p1 *types.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) { + return s.Internal.StateListMessages(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListMiners(p0, p1) +} +func (s *IMinerStateStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateLookupID(p0, p1, p2) +} +func (s *IMinerStateStruct) StateLookupRobustAddress(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateLookupRobustAddress(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.MarketBalance, error) { + return s.Internal.StateMarketBalance(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]*types.MarketDeal, error) { + return s.Internal.StateMarketDeals(p0, p1) +} +func (s 
*IMinerStateStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*types.MarketDeal, error) { + return s.Internal.StateMarketStorageDeal(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*types.SectorOnChainInfo, error) { + return s.Internal.StateMinerActiveSectors(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerAllocated(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*bitfield.BitField, error) { + return s.Internal.StateMinerAllocated(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (big.Int, error) { + return s.Internal.StateMinerAvailableBalance(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]types.Deadline, error) { + return s.Internal.StateMinerDeadlines(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return s.Internal.StateMinerFaults(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.MinerInfo, error) { + return s.Internal.StateMinerInfo(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 types.SectorPreCommitInfo, p3 types.TipSetKey) (big.Int, error) { + return s.Internal.StateMinerInitialPledgeCollateral(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]types.Partition, error) { + return s.Internal.StateMinerPartitions(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.MinerPower, error) { + return s.Internal.StateMinerPower(p0, p1, p2) +} +func (s 
*IMinerStateStruct) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 types.SectorPreCommitInfo, p3 types.TipSetKey) (big.Int, error) { + return s.Internal.StateMinerPreCommitDepositForPower(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return s.Internal.StateMinerProvingDeadline(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return s.Internal.StateMinerRecoveries(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) { + return s.Internal.StateMinerSectorAllocated(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.MinerSectors, error) { + return s.Internal.StateMinerSectorCount(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerSectorSize(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (abi.SectorSize, error) { + return s.Internal.StateMinerSectorSize(p0, p1, p2) +} +func (s *IMinerStateStruct) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*types.SectorOnChainInfo, error) { + return s.Internal.StateMinerSectors(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateMinerWorkerAddress(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateMinerWorkerAddress(p0, p1, p2) +} +func (s *IMinerStateStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.ActorState, error) { + return s.Internal.StateReadState(p0, p1, p2) +} +func (s *IMinerStateStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) 
(*lminer.SectorExpiration, error) { + return s.Internal.StateSectorExpiration(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*types.SectorOnChainInfo, error) { + return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) { + return s.Internal.StateSectorPartition(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*types.SectorPreCommitOnChainInfo, error) { + return s.Internal.StateSectorPreCommitInfo(p0, p1, p2, p3) +} +func (s *IMinerStateStruct) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (types.CirculatingSupply, error) { + return s.Internal.StateVMCirculatingSupplyInternal(p0, p1) +} +func (s *IMinerStateStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifiedClientStatus(p0, p1, p2) +} + +type IChainInfoStruct struct { + Internal struct { + BlockTime func(ctx context.Context) time.Duration `perm:"read"` + ChainExport func(context.Context, abi.ChainEpoch, bool, types.TipSetKey) (<-chan []byte, error) `perm:"read"` + ChainGetBlock func(ctx context.Context, id cid.Cid) (*types.BlockHeader, error) `perm:"read"` + ChainGetBlockMessages func(ctx context.Context, bid cid.Cid) (*types.BlockMessages, error) `perm:"read"` + ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"` + ChainGetMessage func(ctx context.Context, msgID cid.Cid) (*types.Message, error) `perm:"read"` + ChainGetMessagesInTipset func(ctx context.Context, key types.TipSetKey) ([]types.MessageCID, error) `perm:"read"` + ChainGetParentMessages func(ctx context.Context, bcid cid.Cid) 
([]types.MessageCID, error) `perm:"read"` + ChainGetParentReceipts func(ctx context.Context, bcid cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"` + ChainGetPath func(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*types.HeadChange, error) `perm:"read"` + ChainGetReceipts func(ctx context.Context, id cid.Cid) ([]types.MessageReceipt, error) `perm:"read"` + ChainGetTipSet func(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) `perm:"read"` + ChainGetTipSetAfterHeight func(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) `perm:"read"` + ChainGetTipSetByHeight func(ctx context.Context, height abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) `perm:"read"` + ChainHead func(ctx context.Context) (*types.TipSet, error) `perm:"read"` + ChainList func(ctx context.Context, tsKey types.TipSetKey, count int) ([]types.TipSetKey, error) `perm:"read"` + ChainNotify func(ctx context.Context) (<-chan []*types.HeadChange, error) `perm:"read"` + ChainSetHead func(ctx context.Context, key types.TipSetKey) error `perm:"admin"` + GetActor func(ctx context.Context, addr address.Address) (*types.Actor, error) `perm:"read"` + GetEntry func(ctx context.Context, height abi.ChainEpoch, round uint64) (*types.BeaconEntry, error) `perm:"read"` + GetFullBlock func(ctx context.Context, id cid.Cid) (*types.FullBlock, error) `perm:"read"` + GetParentStateRootActor func(ctx context.Context, ts *types.TipSet, addr address.Address) (*types.Actor, error) `perm:"read"` + MessageWait func(ctx context.Context, msgCid cid.Cid, confidence, lookback abi.ChainEpoch) (*types.ChainMessage, error) `perm:"read"` + ProtocolParameters func(ctx context.Context) (*types.ProtocolParams, error) `perm:"read"` + ResolveToKeyAddr func(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) `perm:"read"` + StateActorCodeCIDs func(context.Context, network.Version) (map[string]cid.Cid, error) `perm:"read"` + 
StateActorManifestCID func(context.Context, network.Version) (cid.Cid, error) `perm:"read"` + StateCall func(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*types.InvocResult, error) `perm:"read"` + StateGetBeaconEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` + StateGetNetworkParams func(ctx context.Context) (*types.NetworkParams, error) `perm:"read"` + StateGetRandomnessFromBeacon func(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) `perm:"read"` + StateGetRandomnessFromTickets func(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) `perm:"read"` + StateNetworkName func(ctx context.Context) (types.NetworkName, error) `perm:"read"` + StateNetworkVersion func(ctx context.Context, tsk types.TipSetKey) (network.Version, error) `perm:"read"` + StateSearchMsg func(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) `perm:"read"` + StateVerifiedRegistryRootKey func(ctx context.Context, tsk types.TipSetKey) (address.Address, error) `perm:"read"` + StateVerifierStatus func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` + StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*types.MsgLookup, error) `perm:"read"` + VerifyEntry func(parent, child *types.BeaconEntry, height abi.ChainEpoch) bool `perm:"read"` + } +} + +func (s *IChainInfoStruct) BlockTime(p0 context.Context) time.Duration { + return s.Internal.BlockTime(p0) +} +func (s *IChainInfoStruct) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { + return s.Internal.ChainExport(p0, p1, p2, p3) +} +func (s 
*IChainInfoStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + return s.Internal.ChainGetBlock(p0, p1) +} +func (s *IChainInfoStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*types.BlockMessages, error) { + return s.Internal.ChainGetBlockMessages(p0, p1) +} +func (s *IChainInfoStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainGetGenesis(p0) +} +func (s *IChainInfoStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return s.Internal.ChainGetMessage(p0, p1) +} +func (s *IChainInfoStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]types.MessageCID, error) { + return s.Internal.ChainGetMessagesInTipset(p0, p1) +} +func (s *IChainInfoStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]types.MessageCID, error) { + return s.Internal.ChainGetParentMessages(p0, p1) +} +func (s *IChainInfoStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + return s.Internal.ChainGetParentReceipts(p0, p1) +} +func (s *IChainInfoStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*types.HeadChange, error) { + return s.Internal.ChainGetPath(p0, p1, p2) +} +func (s *IChainInfoStruct) ChainGetReceipts(p0 context.Context, p1 cid.Cid) ([]types.MessageReceipt, error) { + return s.Internal.ChainGetReceipts(p0, p1) +} +func (s *IChainInfoStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSet(p0, p1) +} +func (s *IChainInfoStruct) ChainGetTipSetAfterHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSetAfterHeight(p0, p1, p2) +} +func (s *IChainInfoStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) +} +func (s 
*IChainInfoStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainHead(p0) +} +func (s *IChainInfoStruct) ChainList(p0 context.Context, p1 types.TipSetKey, p2 int) ([]types.TipSetKey, error) { + return s.Internal.ChainList(p0, p1, p2) +} +func (s *IChainInfoStruct) ChainNotify(p0 context.Context) (<-chan []*types.HeadChange, error) { + return s.Internal.ChainNotify(p0) +} +func (s *IChainInfoStruct) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error { + return s.Internal.ChainSetHead(p0, p1) +} +func (s *IChainInfoStruct) GetActor(p0 context.Context, p1 address.Address) (*types.Actor, error) { + return s.Internal.GetActor(p0, p1) +} +func (s *IChainInfoStruct) GetEntry(p0 context.Context, p1 abi.ChainEpoch, p2 uint64) (*types.BeaconEntry, error) { + return s.Internal.GetEntry(p0, p1, p2) +} +func (s *IChainInfoStruct) GetFullBlock(p0 context.Context, p1 cid.Cid) (*types.FullBlock, error) { + return s.Internal.GetFullBlock(p0, p1) +} +func (s *IChainInfoStruct) GetParentStateRootActor(p0 context.Context, p1 *types.TipSet, p2 address.Address) (*types.Actor, error) { + return s.Internal.GetParentStateRootActor(p0, p1, p2) +} +func (s *IChainInfoStruct) MessageWait(p0 context.Context, p1 cid.Cid, p2, p3 abi.ChainEpoch) (*types.ChainMessage, error) { + return s.Internal.MessageWait(p0, p1, p2, p3) +} +func (s *IChainInfoStruct) ProtocolParameters(p0 context.Context) (*types.ProtocolParams, error) { + return s.Internal.ProtocolParameters(p0) +} +func (s *IChainInfoStruct) ResolveToKeyAddr(p0 context.Context, p1 address.Address, p2 *types.TipSet) (address.Address, error) { + return s.Internal.ResolveToKeyAddr(p0, p1, p2) +} +func (s *IChainInfoStruct) StateActorCodeCIDs(p0 context.Context, p1 network.Version) (map[string]cid.Cid, error) { + return s.Internal.StateActorCodeCIDs(p0, p1) +} +func (s *IChainInfoStruct) StateActorManifestCID(p0 context.Context, p1 network.Version) (cid.Cid, error) { + return 
s.Internal.StateActorManifestCID(p0, p1) +} +func (s *IChainInfoStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*types.InvocResult, error) { + return s.Internal.StateCall(p0, p1, p2) +} +func (s *IChainInfoStruct) StateGetBeaconEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { + return s.Internal.StateGetBeaconEntry(p0, p1) +} +func (s *IChainInfoStruct) StateGetNetworkParams(p0 context.Context) (*types.NetworkParams, error) { + return s.Internal.StateGetNetworkParams(p0) +} +func (s *IChainInfoStruct) StateGetRandomnessFromBeacon(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) { + return s.Internal.StateGetRandomnessFromBeacon(p0, p1, p2, p3, p4) +} +func (s *IChainInfoStruct) StateGetRandomnessFromTickets(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) { + return s.Internal.StateGetRandomnessFromTickets(p0, p1, p2, p3, p4) +} +func (s *IChainInfoStruct) StateNetworkName(p0 context.Context) (types.NetworkName, error) { + return s.Internal.StateNetworkName(p0) +} +func (s *IChainInfoStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { + return s.Internal.StateNetworkVersion(p0, p1) +} +func (s *IChainInfoStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*types.MsgLookup, error) { + return s.Internal.StateSearchMsg(p0, p1, p2, p3, p4) +} +func (s *IChainInfoStruct) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { + return s.Internal.StateVerifiedRegistryRootKey(p0, p1) +} +func (s *IChainInfoStruct) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifierStatus(p0, p1, p2) +} +func (s *IChainInfoStruct) StateWaitMsg(p0 context.Context, 
p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*types.MsgLookup, error) { + return s.Internal.StateWaitMsg(p0, p1, p2, p3, p4) +} +func (s *IChainInfoStruct) VerifyEntry(p0, p1 *types.BeaconEntry, p2 abi.ChainEpoch) bool { + return s.Internal.VerifyEntry(p0, p1, p2) +} + +type IChainStruct struct { + IAccountStruct + IActorStruct + IMinerStateStruct + IChainInfoStruct +} + +type IMarketStruct struct { + Internal struct { + StateMarketParticipants func(ctx context.Context, tsk types.TipSetKey) (map[string]types.MarketBalance, error) `perm:"read"` + } +} + +func (s *IMarketStruct) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]types.MarketBalance, error) { + return s.Internal.StateMarketParticipants(p0, p1) +} + +type IMiningStruct struct { + Internal struct { + MinerCreateBlock func(ctx context.Context, bt *types.BlockTemplate) (*types.BlockMsg, error) `perm:"write"` + MinerGetBaseInfo func(ctx context.Context, maddr address.Address, round abi.ChainEpoch, tsk types.TipSetKey) (*types.MiningBaseInfo, error) `perm:"read"` + } +} + +func (s *IMiningStruct) MinerCreateBlock(p0 context.Context, p1 *types.BlockTemplate) (*types.BlockMsg, error) { + return s.Internal.MinerCreateBlock(p0, p1) +} +func (s *IMiningStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*types.MiningBaseInfo, error) { + return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3) +} + +type IMessagePoolStruct struct { + Internal struct { + GasBatchEstimateMessageGas func(ctx context.Context, estimateMessages []*types.EstimateMessage, fromNonce uint64, tsk types.TipSetKey) ([]*types.EstimateResult, error) `perm:"read"` + GasEstimateFeeCap func(ctx context.Context, msg *types.Message, maxqueueblks int64, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + GasEstimateGasLimit func(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) `perm:"read"` + GasEstimateGasPremium func(ctx context.Context, 
nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + GasEstimateMessageGas func(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) `perm:"read"` + MpoolBatchPush func(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + MpoolBatchPushMessage func(ctx context.Context, msgs []*types.Message, spec *types.MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"` + MpoolBatchPushUntrusted func(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + MpoolCheckMessages func(ctx context.Context, protos []*types.MessagePrototype) ([][]types.MessageCheckStatus, error) `perm:"read"` + MpoolCheckPendingMessages func(ctx context.Context, addr address.Address) ([][]types.MessageCheckStatus, error) `perm:"read"` + MpoolCheckReplaceMessages func(ctx context.Context, msg []*types.Message) ([][]types.MessageCheckStatus, error) `perm:"read"` + MpoolClear func(ctx context.Context, local bool) error `perm:"write"` + MpoolDeleteByAdress func(ctx context.Context, addr address.Address) error `perm:"admin"` + MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"` + MpoolGetNonce func(ctx context.Context, addr address.Address) (uint64, error) `perm:"read"` + MpoolPending func(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"` + MpoolPublishByAddr func(context.Context, address.Address) error `perm:"write"` + MpoolPublishMessage func(ctx context.Context, smsg *types.SignedMessage) error `perm:"write"` + MpoolPush func(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) `perm:"write"` + MpoolPushMessage func(ctx context.Context, msg *types.Message, spec *types.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"` + MpoolPushUntrusted func(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) `perm:"write"` + 
MpoolSelect func(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) `perm:"read"` + MpoolSelects func(context.Context, types.TipSetKey, []float64) ([][]*types.SignedMessage, error) `perm:"read"` + MpoolSetConfig func(ctx context.Context, cfg *types.MpoolConfig) error `perm:"admin"` + MpoolSub func(ctx context.Context) (<-chan types.MpoolUpdate, error) `perm:"read"` + } +} + +func (s *IMessagePoolStruct) GasBatchEstimateMessageGas(p0 context.Context, p1 []*types.EstimateMessage, p2 uint64, p3 types.TipSetKey) ([]*types.EstimateResult, error) { + return s.Internal.GasBatchEstimateMessageGas(p0, p1, p2, p3) +} +func (s *IMessagePoolStruct) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (big.Int, error) { + return s.Internal.GasEstimateFeeCap(p0, p1, p2, p3) +} +func (s *IMessagePoolStruct) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { + return s.Internal.GasEstimateGasLimit(p0, p1, p2) +} +func (s *IMessagePoolStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (big.Int, error) { + return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4) +} +func (s *IMessagePoolStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *types.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) +} +func (s *IMessagePoolStruct) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return s.Internal.MpoolBatchPush(p0, p1) +} +func (s *IMessagePoolStruct) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *types.MessageSendSpec) ([]*types.SignedMessage, error) { + return s.Internal.MpoolBatchPushMessage(p0, p1, p2) +} +func (s *IMessagePoolStruct) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return s.Internal.MpoolBatchPushUntrusted(p0, p1) 
+} +func (s *IMessagePoolStruct) MpoolCheckMessages(p0 context.Context, p1 []*types.MessagePrototype) ([][]types.MessageCheckStatus, error) { + return s.Internal.MpoolCheckMessages(p0, p1) +} +func (s *IMessagePoolStruct) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]types.MessageCheckStatus, error) { + return s.Internal.MpoolCheckPendingMessages(p0, p1) +} +func (s *IMessagePoolStruct) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]types.MessageCheckStatus, error) { + return s.Internal.MpoolCheckReplaceMessages(p0, p1) +} +func (s *IMessagePoolStruct) MpoolClear(p0 context.Context, p1 bool) error { + return s.Internal.MpoolClear(p0, p1) +} +func (s *IMessagePoolStruct) MpoolDeleteByAdress(p0 context.Context, p1 address.Address) error { + return s.Internal.MpoolDeleteByAdress(p0, p1) +} +func (s *IMessagePoolStruct) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { + return s.Internal.MpoolGetConfig(p0) +} +func (s *IMessagePoolStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.MpoolGetNonce(p0, p1) +} +func (s *IMessagePoolStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + return s.Internal.MpoolPending(p0, p1) +} +func (s *IMessagePoolStruct) MpoolPublishByAddr(p0 context.Context, p1 address.Address) error { + return s.Internal.MpoolPublishByAddr(p0, p1) +} +func (s *IMessagePoolStruct) MpoolPublishMessage(p0 context.Context, p1 *types.SignedMessage) error { + return s.Internal.MpoolPublishMessage(p0, p1) +} +func (s *IMessagePoolStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPush(p0, p1) +} +func (s *IMessagePoolStruct) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *types.MessageSendSpec) (*types.SignedMessage, error) { + return s.Internal.MpoolPushMessage(p0, p1, p2) +} +func (s *IMessagePoolStruct) MpoolPushUntrusted(p0 
context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPushUntrusted(p0, p1) +} +func (s *IMessagePoolStruct) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { + return s.Internal.MpoolSelect(p0, p1, p2) +} +func (s *IMessagePoolStruct) MpoolSelects(p0 context.Context, p1 types.TipSetKey, p2 []float64) ([][]*types.SignedMessage, error) { + return s.Internal.MpoolSelects(p0, p1, p2) +} +func (s *IMessagePoolStruct) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { + return s.Internal.MpoolSetConfig(p0, p1) +} +func (s *IMessagePoolStruct) MpoolSub(p0 context.Context) (<-chan types.MpoolUpdate, error) { + return s.Internal.MpoolSub(p0) +} + +type IMultiSigStruct struct { + Internal struct { + MsigAddApprove func(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (*types.MessagePrototype, error) `perm:"sign"` + MsigAddCancel func(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (*types.MessagePrototype, error) `perm:"sign"` + MsigAddPropose func(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (*types.MessagePrototype, error) `perm:"sign"` + MsigApprove func(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*types.MessagePrototype, error) `perm:"sign"` + MsigApproveTxnHash func(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*types.MessagePrototype, error) `perm:"sign"` + MsigCancel func(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*types.MessagePrototype, error) `perm:"sign"` + MsigCancelTxnHash func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, 
[]byte) (*types.MessagePrototype, error) `perm:"sign"` + MsigCreate func(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (*types.MessagePrototype, error) `perm:"sign"` + MsigGetVested func(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) `perm:"read"` + MsigPropose func(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*types.MessagePrototype, error) `perm:"sign"` + MsigRemoveSigner func(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*types.MessagePrototype, error) `perm:"sign"` + MsigSwapApprove func(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (*types.MessagePrototype, error) `perm:"sign"` + MsigSwapCancel func(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (*types.MessagePrototype, error) `perm:"sign"` + MsigSwapPropose func(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (*types.MessagePrototype, error) `perm:"sign"` + } +} + +func (s *IMultiSigStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*types.MessagePrototype, error) { + return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6) +} +func (s *IMultiSigStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*types.MessagePrototype, error) { + return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5) +} +func (s *IMultiSigStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 
address.Address, p4 bool) (*types.MessagePrototype, error) { + return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4) +} +func (s *IMultiSigStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*types.MessagePrototype, error) { + return s.Internal.MsigApprove(p0, p1, p2, p3) +} +func (s *IMultiSigStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*types.MessagePrototype, error) { + return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8) +} +func (s *IMultiSigStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*types.MessagePrototype, error) { + return s.Internal.MsigCancel(p0, p1, p2, p3) +} +func (s *IMultiSigStruct) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*types.MessagePrototype, error) { + return s.Internal.MsigCancelTxnHash(p0, p1, p2, p3, p4, p5, p6, p7) +} +func (s *IMultiSigStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*types.MessagePrototype, error) { + return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6) +} +func (s *IMultiSigStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetVested(p0, p1, p2, p3) +} +func (s *IMultiSigStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*types.MessagePrototype, error) { + return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6) +} +func (s *IMultiSigStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*types.MessagePrototype, error) { + return 
s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4) +} +func (s *IMultiSigStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*types.MessagePrototype, error) { + return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6) +} +func (s *IMultiSigStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*types.MessagePrototype, error) { + return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5) +} +func (s *IMultiSigStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*types.MessagePrototype, error) { + return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4) +} + +type INetworkStruct struct { + Internal struct { + ID func(ctx context.Context) (peer.ID, error) `perm:"read"` + NetAddrsListen func(ctx context.Context) (peer.AddrInfo, error) `perm:"read"` + NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"` + NetAutoNatStatus func(context.Context) (types.NatInfo, error) `perm:"read"` + NetBandwidthStats func(ctx context.Context) (metrics.Stats, error) `perm:"read"` + NetBandwidthStatsByPeer func(ctx context.Context) (map[string]metrics.Stats, error) `perm:"read"` + NetBandwidthStatsByProtocol func(ctx context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"` + NetConnect func(ctx context.Context, pi peer.AddrInfo) error `perm:"admin"` + NetConnectedness func(context.Context, peer.ID) (network2.Connectedness, error) `perm:"read"` + NetDisconnect func(ctx context.Context, p peer.ID) error `perm:"admin"` + NetFindPeer func(ctx context.Context, p peer.ID) (peer.AddrInfo, error) `perm:"read"` + NetFindProvidersAsync func(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo `perm:"read"` + NetGetClosestPeers func(ctx context.Context, key string) ([]peer.ID, error) `perm:"read"` 
+ NetPeerInfo func(ctx context.Context, p peer.ID) (*types.ExtendedPeerInfo, error) `perm:"read"` + NetPeers func(ctx context.Context) ([]peer.AddrInfo, error) `perm:"read"` + NetPing func(ctx context.Context, p peer.ID) (time.Duration, error) `perm:"read"` + NetProtectAdd func(ctx context.Context, acl []peer.ID) error `perm:"admin"` + NetProtectList func(ctx context.Context) ([]peer.ID, error) `perm:"read"` + NetProtectRemove func(ctx context.Context, acl []peer.ID) error `perm:"admin"` + NetPubsubScores func(context.Context) ([]types.PubsubScore, error) `perm:"read"` + } +} + +func (s *INetworkStruct) ID(p0 context.Context) (peer.ID, error) { return s.Internal.ID(p0) } +func (s *INetworkStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { + return s.Internal.NetAddrsListen(p0) +} +func (s *INetworkStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) { + return s.Internal.NetAgentVersion(p0, p1) +} +func (s *INetworkStruct) NetAutoNatStatus(p0 context.Context) (types.NatInfo, error) { + return s.Internal.NetAutoNatStatus(p0) +} +func (s *INetworkStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) { + return s.Internal.NetBandwidthStats(p0) +} +func (s *INetworkStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) { + return s.Internal.NetBandwidthStatsByPeer(p0) +} +func (s *INetworkStruct) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) { + return s.Internal.NetBandwidthStatsByProtocol(p0) +} +func (s *INetworkStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error { + return s.Internal.NetConnect(p0, p1) +} +func (s *INetworkStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network2.Connectedness, error) { + return s.Internal.NetConnectedness(p0, p1) +} +func (s *INetworkStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error { + return s.Internal.NetDisconnect(p0, p1) +} +func (s *INetworkStruct) NetFindPeer(p0 
context.Context, p1 peer.ID) (peer.AddrInfo, error) { + return s.Internal.NetFindPeer(p0, p1) +} +func (s *INetworkStruct) NetFindProvidersAsync(p0 context.Context, p1 cid.Cid, p2 int) <-chan peer.AddrInfo { + return s.Internal.NetFindProvidersAsync(p0, p1, p2) +} +func (s *INetworkStruct) NetGetClosestPeers(p0 context.Context, p1 string) ([]peer.ID, error) { + return s.Internal.NetGetClosestPeers(p0, p1) +} +func (s *INetworkStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*types.ExtendedPeerInfo, error) { + return s.Internal.NetPeerInfo(p0, p1) +} +func (s *INetworkStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) { + return s.Internal.NetPeers(p0) +} +func (s *INetworkStruct) NetPing(p0 context.Context, p1 peer.ID) (time.Duration, error) { + return s.Internal.NetPing(p0, p1) +} +func (s *INetworkStruct) NetProtectAdd(p0 context.Context, p1 []peer.ID) error { + return s.Internal.NetProtectAdd(p0, p1) +} +func (s *INetworkStruct) NetProtectList(p0 context.Context) ([]peer.ID, error) { + return s.Internal.NetProtectList(p0) +} +func (s *INetworkStruct) NetProtectRemove(p0 context.Context, p1 []peer.ID) error { + return s.Internal.NetProtectRemove(p0, p1) +} +func (s *INetworkStruct) NetPubsubScores(p0 context.Context) ([]types.PubsubScore, error) { + return s.Internal.NetPubsubScores(p0) +} + +type IPaychanStruct struct { + Internal struct { + PaychAllocateLane func(ctx context.Context, ch address.Address) (uint64, error) `perm:"sign"` + PaychAvailableFunds func(ctx context.Context, ch address.Address) (*types.ChannelAvailableFunds, error) `perm:"sign"` + PaychAvailableFundsByFromTo func(ctx context.Context, from, to address.Address) (*types.ChannelAvailableFunds, error) `perm:"sign"` + PaychCollect func(ctx context.Context, addr address.Address) (cid.Cid, error) `perm:"sign"` + PaychFund func(ctx context.Context, from, to address.Address, amt types.BigInt) (*types.ChannelInfo, error) `perm:"sign"` + PaychGet func(ctx context.Context, from, to 
address.Address, amt types.BigInt, opts types.PaychGetOpts) (*types.ChannelInfo, error) `perm:"sign"` + PaychGetWaitReady func(ctx context.Context, sentinel cid.Cid) (address.Address, error) `perm:"sign"` + PaychList func(ctx context.Context) ([]address.Address, error) `perm:"read"` + PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []types.VoucherSpec) (*types.PaymentInfo, error) `perm:"sign"` + PaychSettle func(ctx context.Context, addr address.Address) (cid.Cid, error) `perm:"sign"` + PaychStatus func(ctx context.Context, pch address.Address) (*types.Status, error) `perm:"read"` + PaychVoucherAdd func(ctx context.Context, ch address.Address, sv *types.SignedVoucher, proof []byte, minDelta big.Int) (big.Int, error) `perm:"write"` + PaychVoucherCheckSpendable func(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (bool, error) `perm:"read"` + PaychVoucherCheckValid func(ctx context.Context, ch address.Address, sv *types.SignedVoucher) error `perm:"read"` + PaychVoucherCreate func(ctx context.Context, pch address.Address, amt big.Int, lane uint64) (*types.VoucherCreateResult, error) `perm:"sign"` + PaychVoucherList func(ctx context.Context, pch address.Address) ([]*types.SignedVoucher, error) `perm:"write"` + PaychVoucherSubmit func(ctx context.Context, ch address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) `perm:"sign"` + } +} + +func (s *IPaychanStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.PaychAllocateLane(p0, p1) +} +func (s *IPaychanStruct) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*types.ChannelAvailableFunds, error) { + return s.Internal.PaychAvailableFunds(p0, p1) +} +func (s *IPaychanStruct) PaychAvailableFundsByFromTo(p0 context.Context, p1, p2 address.Address) (*types.ChannelAvailableFunds, error) { + return s.Internal.PaychAvailableFundsByFromTo(p0, p1, p2) +} +func (s 
*IPaychanStruct) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return s.Internal.PaychCollect(p0, p1) +} +func (s *IPaychanStruct) PaychFund(p0 context.Context, p1, p2 address.Address, p3 types.BigInt) (*types.ChannelInfo, error) { + return s.Internal.PaychFund(p0, p1, p2, p3) +} +func (s *IPaychanStruct) PaychGet(p0 context.Context, p1, p2 address.Address, p3 types.BigInt, p4 types.PaychGetOpts) (*types.ChannelInfo, error) { + return s.Internal.PaychGet(p0, p1, p2, p3, p4) +} +func (s *IPaychanStruct) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) { + return s.Internal.PaychGetWaitReady(p0, p1) +} +func (s *IPaychanStruct) PaychList(p0 context.Context) ([]address.Address, error) { + return s.Internal.PaychList(p0) +} +func (s *IPaychanStruct) PaychNewPayment(p0 context.Context, p1, p2 address.Address, p3 []types.VoucherSpec) (*types.PaymentInfo, error) { + return s.Internal.PaychNewPayment(p0, p1, p2, p3) +} +func (s *IPaychanStruct) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return s.Internal.PaychSettle(p0, p1) +} +func (s *IPaychanStruct) PaychStatus(p0 context.Context, p1 address.Address) (*types.Status, error) { + return s.Internal.PaychStatus(p0, p1) +} +func (s *IPaychanStruct) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *types.SignedVoucher, p3 []byte, p4 big.Int) (big.Int, error) { + return s.Internal.PaychVoucherAdd(p0, p1, p2, p3, p4) +} +func (s *IPaychanStruct) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *types.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { + return s.Internal.PaychVoucherCheckSpendable(p0, p1, p2, p3, p4) +} +func (s *IPaychanStruct) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *types.SignedVoucher) error { + return s.Internal.PaychVoucherCheckValid(p0, p1, p2) +} +func (s *IPaychanStruct) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 big.Int, p3 uint64) 
(*types.VoucherCreateResult, error) { + return s.Internal.PaychVoucherCreate(p0, p1, p2, p3) +} +func (s *IPaychanStruct) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*types.SignedVoucher, error) { + return s.Internal.PaychVoucherList(p0, p1) +} +func (s *IPaychanStruct) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *types.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { + return s.Internal.PaychVoucherSubmit(p0, p1, p2, p3, p4) +} + +type ISyncerStruct struct { + Internal struct { + ChainSyncHandleNewTipSet func(ctx context.Context, ci *types.ChainInfo) error `perm:"write"` + ChainTipSetWeight func(ctx context.Context, tsk types.TipSetKey) (big.Int, error) `perm:"read"` + Concurrent func(ctx context.Context) int64 `perm:"read"` + SetConcurrent func(ctx context.Context, concurrent int64) error `perm:"admin"` + SyncState func(ctx context.Context) (*types.SyncState, error) `perm:"read"` + SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"` + SyncerTracker func(ctx context.Context) *types.TargetTracker `perm:"read"` + } +} + +func (s *ISyncerStruct) ChainSyncHandleNewTipSet(p0 context.Context, p1 *types.ChainInfo) error { + return s.Internal.ChainSyncHandleNewTipSet(p0, p1) +} +func (s *ISyncerStruct) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (big.Int, error) { + return s.Internal.ChainTipSetWeight(p0, p1) +} +func (s *ISyncerStruct) Concurrent(p0 context.Context) int64 { return s.Internal.Concurrent(p0) } +func (s *ISyncerStruct) SetConcurrent(p0 context.Context, p1 int64) error { + return s.Internal.SetConcurrent(p0, p1) +} +func (s *ISyncerStruct) SyncState(p0 context.Context) (*types.SyncState, error) { + return s.Internal.SyncState(p0) +} +func (s *ISyncerStruct) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { + return s.Internal.SyncSubmitBlock(p0, p1) +} +func (s *ISyncerStruct) SyncerTracker(p0 context.Context) *types.TargetTracker { + return 
s.Internal.SyncerTracker(p0) +} + +type IWalletStruct struct { + Internal struct { + HasPassword func(ctx context.Context) bool `perm:"admin"` + LockWallet func(ctx context.Context) error `perm:"admin"` + SetPassword func(ctx context.Context, password []byte) error `perm:"admin"` + UnLockWallet func(ctx context.Context, password []byte) error `perm:"admin"` + WalletAddresses func(ctx context.Context) []address.Address `perm:"admin"` + WalletBalance func(ctx context.Context, addr address.Address) (abi.TokenAmount, error) `perm:"read"` + WalletDefaultAddress func(ctx context.Context) (address.Address, error) `perm:"write"` + WalletDelete func(ctx context.Context, addr address.Address) error `perm:"admin"` + WalletExport func(ctx context.Context, addr address.Address, password string) (*types.KeyInfo, error) `perm:"admin"` + WalletHas func(ctx context.Context, addr address.Address) (bool, error) `perm:"write"` + WalletImport func(ctx context.Context, key *types.KeyInfo) (address.Address, error) `perm:"admin"` + WalletNewAddress func(ctx context.Context, protocol address.Protocol) (address.Address, error) `perm:"write"` + WalletSetDefault func(ctx context.Context, addr address.Address) error `perm:"write"` + WalletSign func(ctx context.Context, k address.Address, msg []byte, meta types.MsgMeta) (*crypto.Signature, error) `perm:"sign"` + WalletSignMessage func(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) `perm:"sign"` + WalletState func(ctx context.Context) int `perm:"admin"` + } +} + +func (s *IWalletStruct) HasPassword(p0 context.Context) bool { return s.Internal.HasPassword(p0) } +func (s *IWalletStruct) LockWallet(p0 context.Context) error { return s.Internal.LockWallet(p0) } +func (s *IWalletStruct) SetPassword(p0 context.Context, p1 []byte) error { + return s.Internal.SetPassword(p0, p1) +} +func (s *IWalletStruct) UnLockWallet(p0 context.Context, p1 []byte) error { + return s.Internal.UnLockWallet(p0, p1) +} +func (s 
*IWalletStruct) WalletAddresses(p0 context.Context) []address.Address { + return s.Internal.WalletAddresses(p0) +} +func (s *IWalletStruct) WalletBalance(p0 context.Context, p1 address.Address) (abi.TokenAmount, error) { + return s.Internal.WalletBalance(p0, p1) +} +func (s *IWalletStruct) WalletDefaultAddress(p0 context.Context) (address.Address, error) { + return s.Internal.WalletDefaultAddress(p0) +} +func (s *IWalletStruct) WalletDelete(p0 context.Context, p1 address.Address) error { + return s.Internal.WalletDelete(p0, p1) +} +func (s *IWalletStruct) WalletExport(p0 context.Context, p1 address.Address, p2 string) (*types.KeyInfo, error) { + return s.Internal.WalletExport(p0, p1, p2) +} +func (s *IWalletStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.WalletHas(p0, p1) +} +func (s *IWalletStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + return s.Internal.WalletImport(p0, p1) +} +func (s *IWalletStruct) WalletNewAddress(p0 context.Context, p1 address.Protocol) (address.Address, error) { + return s.Internal.WalletNewAddress(p0, p1) +} +func (s *IWalletStruct) WalletSetDefault(p0 context.Context, p1 address.Address) error { + return s.Internal.WalletSetDefault(p0, p1) +} +func (s *IWalletStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte, p3 types.MsgMeta) (*crypto.Signature, error) { + return s.Internal.WalletSign(p0, p1, p2, p3) +} +func (s *IWalletStruct) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { + return s.Internal.WalletSignMessage(p0, p1, p2) +} +func (s *IWalletStruct) WalletState(p0 context.Context) int { return s.Internal.WalletState(p0) } + +type ICommonStruct struct { + Internal struct { + NodeStatus func(ctx context.Context, inclChainStatus bool) (types.NodeStatus, error) `perm:"read"` + StartTime func(context.Context) (time.Time, error) `perm:"read"` + Version func(ctx 
context.Context) (types.Version, error) `perm:"read"` + } +} + +func (s *ICommonStruct) NodeStatus(p0 context.Context, p1 bool) (types.NodeStatus, error) { + return s.Internal.NodeStatus(p0, p1) +} +func (s *ICommonStruct) StartTime(p0 context.Context) (time.Time, error) { + return s.Internal.StartTime(p0) +} +func (s *ICommonStruct) Version(p0 context.Context) (types.Version, error) { + return s.Internal.Version(p0) +} + +type FullNodeStruct struct { + IBlockStoreStruct + IChainStruct + IMarketStruct + IMiningStruct + IMessagePoolStruct + IMultiSigStruct + INetworkStruct + IPaychanStruct + ISyncerStruct + IWalletStruct + ICommonStruct +} diff --git a/venus-shared/api/chain/v1/syncer.go b/venus-shared/api/chain/v1/syncer.go new file mode 100644 index 0000000000..fe2b71c916 --- /dev/null +++ b/venus-shared/api/chain/v1/syncer.go @@ -0,0 +1,19 @@ +package v1 + +import ( + "context" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type ISyncer interface { + ChainSyncHandleNewTipSet(ctx context.Context, ci *types.ChainInfo) error //perm:write + SetConcurrent(ctx context.Context, concurrent int64) error //perm:admin + SyncerTracker(ctx context.Context) *types.TargetTracker //perm:read + Concurrent(ctx context.Context) int64 //perm:read + ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (big.Int, error) //perm:read + SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write + SyncState(ctx context.Context) (*types.SyncState, error) //perm:read +} diff --git a/venus-shared/api/chain/v1/wallet.go b/venus-shared/api/chain/v1/wallet.go new file mode 100644 index 0000000000..335dcdb303 --- /dev/null +++ b/venus-shared/api/chain/v1/wallet.go @@ -0,0 +1,30 @@ +package v1 + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + 
"github.com/filecoin-project/venus/venus-shared/types" +) + +type IWallet interface { + WalletSign(ctx context.Context, k address.Address, msg []byte, meta types.MsgMeta) (*crypto.Signature, error) //perm:sign + WalletExport(ctx context.Context, addr address.Address, password string) (*types.KeyInfo, error) //perm:admin + WalletImport(ctx context.Context, key *types.KeyInfo) (address.Address, error) //perm:admin + WalletDelete(ctx context.Context, addr address.Address) error //perm:admin + WalletHas(ctx context.Context, addr address.Address) (bool, error) //perm:write + WalletNewAddress(ctx context.Context, protocol address.Protocol) (address.Address, error) //perm:write + WalletBalance(ctx context.Context, addr address.Address) (abi.TokenAmount, error) //perm:read + WalletDefaultAddress(ctx context.Context) (address.Address, error) //perm:write + WalletAddresses(ctx context.Context) []address.Address //perm:admin + WalletSetDefault(ctx context.Context, addr address.Address) error //perm:write + WalletSignMessage(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) //perm:sign + LockWallet(ctx context.Context) error //perm:admin + UnLockWallet(ctx context.Context, password []byte) error //perm:admin + SetPassword(ctx context.Context, password []byte) error //perm:admin + HasPassword(ctx context.Context) bool //perm:admin + WalletState(ctx context.Context) int //perm:admin +} diff --git a/venus-shared/api/chain/version.go b/venus-shared/api/chain/version.go new file mode 100644 index 0000000000..ac7b660a61 --- /dev/null +++ b/venus-shared/api/chain/version.go @@ -0,0 +1,9 @@ +package chain + +import "github.com/filecoin-project/venus/venus-shared/types" + +// semver versions of the rpc api exposed +var ( + FullAPIVersion0 = types.NewVer(1, 5, 0) + FullAPIVersion1 = types.NewVer(2, 3, 0) +) diff --git a/venus-shared/api/endpoint.go b/venus-shared/api/endpoint.go new file mode 100644 index 0000000000..58f705b0ab --- /dev/null +++ 
b/venus-shared/api/endpoint.go @@ -0,0 +1,28 @@ +package api + +import ( + "fmt" + "net/url" +) + +func Endpoint(raw string, ver uint32) (string, error) { + u, err := url.Parse(raw) + if err != nil { + return "", fmt.Errorf("invalid url: %w", err) + } + + if u.Scheme == "" { + return "", fmt.Errorf("scheme is required") + } + + if u.Host == "" { + return "", fmt.Errorf("host is required") + } + + // raw url contains more than just scheme://host(:prot) + if u.Path != "" && u.Path != "/" { + return raw, nil + } + + return fmt.Sprintf("%s://%s/rpc/v%d", u.Scheme, u.Host, ver), nil +} diff --git a/venus-shared/api/endpoint_test.go b/venus-shared/api/endpoint_test.go new file mode 100644 index 0000000000..2e5e459b11 --- /dev/null +++ b/venus-shared/api/endpoint_test.go @@ -0,0 +1,94 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseHost(t *testing.T) { + cases := []struct { + raw string + ver uint32 + expected string + }{ + // valid + { + raw: "http://api.venus.io:1234", + ver: 1, + expected: "http://api.venus.io:1234/rpc/v1", + }, + { + raw: "http://api.venus.io:1234/", + ver: 1, + expected: "http://api.venus.io:1234/rpc/v1", + }, + { + raw: "http://api.venus.io:1234/rpc", + ver: 1, + expected: "http://api.venus.io:1234/rpc", + }, + + { + raw: "http://api.venus.io:1234/rpc/v0", + ver: 1, + expected: "http://api.venus.io:1234/rpc/v0", + }, + + // invalid: no scheme + { + raw: "://api.venus.io:1234", + ver: 1, + expected: "", + }, + { + raw: "://api.venus.io:1234/", + ver: 1, + expected: "", + }, + { + raw: "://api.venus.io:1234/rpc", + ver: 1, + expected: "", + }, + { + raw: "://api.venus.io:1234/rpc/v0", + ver: 1, + expected: "", + }, + + // invalid: no scheme 2 + { + raw: "api.venus.io:1234", + ver: 1, + expected: "", + }, + { + raw: "api.venus.io:1234/", + ver: 1, + expected: "", + }, + { + raw: "api.venus.io:1234/rpc", + ver: 1, + expected: "", + }, + { + raw: "api.venus.io:1234/rpc/v0", + ver: 1, + expected: "", + 
}, + } + + for i := range cases { + c := cases[i] + got, err := Endpoint(c.raw, c.ver) + if c.expected == "" { + require.Errorf(t, err, "%s is expected to be invalid, got %s", c.raw, got) + continue + } + + require.NoErrorf(t, err, "%s is expected to be valid", c.raw) + require.Equal(t, c.expected, got, "converted endpoint") + } +} diff --git a/venus-shared/api/gateway/v0/api.go b/venus-shared/api/gateway/v0/api.go new file mode 100644 index 0000000000..5119e730fa --- /dev/null +++ b/venus-shared/api/gateway/v0/api.go @@ -0,0 +1,13 @@ +package gateway + +import ( + "github.com/filecoin-project/venus/venus-shared/api" +) + +type IGateway interface { + IProofEvent + IWalletEvent + IMarketEvent + + api.Version +} diff --git a/venus-shared/api/gateway/v0/client_gen.go b/venus-shared/api/gateway/v0/client_gen.go new file mode 100644 index 0000000000..19b8c80707 --- /dev/null +++ b/venus-shared/api/gateway/v0/client_gen.go @@ -0,0 +1,54 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package gateway + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = 0 +const APINamespace = "gateway.IGateway" +const MethodNamespace = "Gateway" + +// NewIGatewayRPC creates a new httpparse jsonrpc remotecli. +func NewIGatewayRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (IGateway, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res IGatewayStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) 
+ + return &res, closer, err +} + +// DialIGatewayRPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. +func DialIGatewayRPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) (IGateway, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res IGatewayStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} diff --git a/venus-shared/api/gateway/v0/market_event.go b/venus-shared/api/gateway/v0/market_event.go new file mode 100644 index 0000000000..e01af80b2c --- /dev/null +++ b/venus-shared/api/gateway/v0/market_event.go @@ -0,0 +1,30 @@ +package gateway + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IMarketEvent interface { + IMarketClient + IMarketServiceProvider +} + +type IMarketClient interface { + ListMarketConnectionsState(ctx context.Context) ([]gtypes.MarketConnectionState, error) //perm:admin + IsUnsealed(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size abi.PaddedPieceSize) (bool, error) //perm:admin + SectorsUnsealPiece(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size 
abi.PaddedPieceSize, dest string) error //perm:admin +} + +type IMarketServiceProvider interface { + ResponseMarketEvent(ctx context.Context, resp *gtypes.ResponseEvent) error //perm:read + ListenMarketEvent(ctx context.Context, policy *gtypes.MarketRegisterPolicy) (<-chan *gtypes.RequestEvent, error) //perm:read +} diff --git a/venus-shared/api/gateway/v0/method.md b/venus-shared/api/gateway/v0/method.md new file mode 100644 index 0000000000..7d2bab0304 --- /dev/null +++ b/venus-shared/api/gateway/v0/method.md @@ -0,0 +1,498 @@ +# Groups + +* [Gateway](#gateway) + * [Version](#version) +* [MarketClient](#marketclient) + * [IsUnsealed](#isunsealed) + * [ListMarketConnectionsState](#listmarketconnectionsstate) + * [SectorsUnsealPiece](#sectorsunsealpiece) +* [MarketServiceProvider](#marketserviceprovider) + * [ListenMarketEvent](#listenmarketevent) + * [ResponseMarketEvent](#responsemarketevent) +* [ProofClient](#proofclient) + * [ComputeProof](#computeproof) + * [ListConnectedMiners](#listconnectedminers) + * [ListMinerConnection](#listminerconnection) +* [ProofServiceProvider](#proofserviceprovider) + * [ListenProofEvent](#listenproofevent) + * [ResponseProofEvent](#responseproofevent) +* [WalletClient](#walletclient) + * [ListWalletInfo](#listwalletinfo) + * [ListWalletInfoByWallet](#listwalletinfobywallet) + * [WalletHas](#wallethas) + * [WalletSign](#walletsign) +* [WalletServiceProvider](#walletserviceprovider) + * [AddNewAddress](#addnewaddress) + * [ListenWalletEvent](#listenwalletevent) + * [RemoveAddress](#removeaddress) + * [ResponseWalletEvent](#responsewalletevent) + * [SupportNewAccount](#supportnewaccount) + +## Gateway + +### Version +Version provides information about API provider + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131840 +} +``` + +## MarketClient + +### IsUnsealed + + +Perms: admin + +Inputs: +```json +[ + "f01234", + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 10, + 1032 +] +``` + +Response: `true` + +### ListMarketConnectionsState + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "Addr": "f01234", + "Conn": { + "Connections": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ], + "ConnectionCount": 123 + } + } +] +``` + +### SectorsUnsealPiece + + +Perms: admin + +Inputs: +```json +[ + "f01234", + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 10, + 1032, + "string value" +] +``` + +Response: `{}` + +## MarketServiceProvider + +### ListenMarketEvent + + +Perms: read + +Inputs: +```json +[ + { + "Miner": "f01234" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### ResponseMarketEvent + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +## ProofClient + +### ComputeProof + + +Perms: admin + +Inputs: +```json +[ + "f01234", + [ + { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], + "Bw==" +] +``` + +Response: +```json +[ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } +] +``` + +### ListConnectedMiners + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + "f01234" +] +``` + +### ListMinerConnection + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Connections": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + 
"Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ], + "ConnectionCount": 123 +} +``` + +## ProofServiceProvider + +### ListenProofEvent + + +Perms: read + +Inputs: +```json +[ + { + "MinerAddress": "f01234" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### ResponseProofEvent + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +## WalletClient + +### ListWalletInfo + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "Account": "string value", + "SupportAccounts": [ + "string value" + ], + "ConnectStates": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ] + } +] +``` + +### ListWalletInfoByWallet + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Account": "string value", + "SupportAccounts": [ + "string value" + ], + "ConnectStates": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ] +} +``` + +### WalletHas + + +Perms: admin + +Inputs: +```json +[ + "string value", + "f01234" +] +``` + +Response: `true` + +### WalletSign + + +Perms: admin + +Inputs: +```json +[ + "string value", + "f01234", + "Ynl0ZSBhcnJheQ==", + { + "Type": "message", + "Extra": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +## WalletServiceProvider + +### AddNewAddress + + +Perms: read + +Inputs: +```json +[ + "e26f1e5c-47f7-4561-a11d-18fab6e748af", + [ + "f01234" + ] +] +``` + +Response: `{}` + +### ListenWalletEvent + + +Perms: read + 
+Inputs: +```json +[ + { + "SupportAccounts": [ + "string value" + ], + "SignBytes": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### RemoveAddress + + +Perms: read + +Inputs: +```json +[ + "e26f1e5c-47f7-4561-a11d-18fab6e748af", + [ + "f01234" + ] +] +``` + +Response: `{}` + +### ResponseWalletEvent + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +### SupportNewAccount + + +Perms: read + +Inputs: +```json +[ + "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "string value" +] +``` + +Response: `{}` + diff --git a/venus-shared/api/gateway/v0/mock/mock_igateway.go b/venus-shared/api/gateway/v0/mock/mock_igateway.go new file mode 100644 index 0000000000..3214a11827 --- /dev/null +++ b/venus-shared/api/gateway/v0/mock/mock_igateway.go @@ -0,0 +1,336 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/venus/venus-shared/api/gateway/v0 (interfaces: IGateway) + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + proof "github.com/filecoin-project/go-state-types/proof" + storage "github.com/filecoin-project/specs-storage/storage" + types "github.com/filecoin-project/venus/venus-shared/types" + gateway "github.com/filecoin-project/venus/venus-shared/types/gateway" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" +) + +// MockIGateway is a mock of IGateway interface. +type MockIGateway struct { + ctrl *gomock.Controller + recorder *MockIGatewayMockRecorder +} + +// MockIGatewayMockRecorder is the mock recorder for MockIGateway. 
+type MockIGatewayMockRecorder struct { + mock *MockIGateway +} + +// NewMockIGateway creates a new mock instance. +func NewMockIGateway(ctrl *gomock.Controller) *MockIGateway { + mock := &MockIGateway{ctrl: ctrl} + mock.recorder = &MockIGatewayMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIGateway) EXPECT() *MockIGatewayMockRecorder { + return m.recorder +} + +// AddNewAddress mocks base method. +func (m *MockIGateway) AddNewAddress(arg0 context.Context, arg1 types.UUID, arg2 []address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddNewAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddNewAddress indicates an expected call of AddNewAddress. +func (mr *MockIGatewayMockRecorder) AddNewAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNewAddress", reflect.TypeOf((*MockIGateway)(nil).AddNewAddress), arg0, arg1, arg2) +} + +// ComputeProof mocks base method. +func (m *MockIGateway) ComputeProof(arg0 context.Context, arg1 address.Address, arg2 []proof.SectorInfo, arg3 abi.PoStRandomness) ([]proof.PoStProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ComputeProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]proof.PoStProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ComputeProof indicates an expected call of ComputeProof. +func (mr *MockIGatewayMockRecorder) ComputeProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ComputeProof", reflect.TypeOf((*MockIGateway)(nil).ComputeProof), arg0, arg1, arg2, arg3) +} + +// IsUnsealed mocks base method. 
+func (m *MockIGateway) IsUnsealed(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 storage.SectorRef, arg4 types.PaddedByteIndex, arg5 abi.PaddedPieceSize) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsUnsealed indicates an expected call of IsUnsealed. +func (mr *MockIGatewayMockRecorder) IsUnsealed(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockIGateway)(nil).IsUnsealed), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// ListConnectedMiners mocks base method. +func (m *MockIGateway) ListConnectedMiners(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListConnectedMiners", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListConnectedMiners indicates an expected call of ListConnectedMiners. +func (mr *MockIGatewayMockRecorder) ListConnectedMiners(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListConnectedMiners", reflect.TypeOf((*MockIGateway)(nil).ListConnectedMiners), arg0) +} + +// ListMarketConnectionsState mocks base method. +func (m *MockIGateway) ListMarketConnectionsState(arg0 context.Context) ([]gateway.MarketConnectionState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMarketConnectionsState", arg0) + ret0, _ := ret[0].([]gateway.MarketConnectionState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMarketConnectionsState indicates an expected call of ListMarketConnectionsState. 
+func (mr *MockIGatewayMockRecorder) ListMarketConnectionsState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMarketConnectionsState", reflect.TypeOf((*MockIGateway)(nil).ListMarketConnectionsState), arg0) +} + +// ListMinerConnection mocks base method. +func (m *MockIGateway) ListMinerConnection(arg0 context.Context, arg1 address.Address) (*gateway.MinerState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMinerConnection", arg0, arg1) + ret0, _ := ret[0].(*gateway.MinerState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMinerConnection indicates an expected call of ListMinerConnection. +func (mr *MockIGatewayMockRecorder) ListMinerConnection(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMinerConnection", reflect.TypeOf((*MockIGateway)(nil).ListMinerConnection), arg0, arg1) +} + +// ListWalletInfo mocks base method. +func (m *MockIGateway) ListWalletInfo(arg0 context.Context) ([]*gateway.WalletDetail, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListWalletInfo", arg0) + ret0, _ := ret[0].([]*gateway.WalletDetail) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListWalletInfo indicates an expected call of ListWalletInfo. +func (mr *MockIGatewayMockRecorder) ListWalletInfo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWalletInfo", reflect.TypeOf((*MockIGateway)(nil).ListWalletInfo), arg0) +} + +// ListWalletInfoByWallet mocks base method. 
+func (m *MockIGateway) ListWalletInfoByWallet(arg0 context.Context, arg1 string) (*gateway.WalletDetail, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListWalletInfoByWallet", arg0, arg1) + ret0, _ := ret[0].(*gateway.WalletDetail) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListWalletInfoByWallet indicates an expected call of ListWalletInfoByWallet. +func (mr *MockIGatewayMockRecorder) ListWalletInfoByWallet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWalletInfoByWallet", reflect.TypeOf((*MockIGateway)(nil).ListWalletInfoByWallet), arg0, arg1) +} + +// ListenMarketEvent mocks base method. +func (m *MockIGateway) ListenMarketEvent(arg0 context.Context, arg1 *gateway.MarketRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenMarketEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenMarketEvent indicates an expected call of ListenMarketEvent. +func (mr *MockIGatewayMockRecorder) ListenMarketEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenMarketEvent", reflect.TypeOf((*MockIGateway)(nil).ListenMarketEvent), arg0, arg1) +} + +// ListenProofEvent mocks base method. +func (m *MockIGateway) ListenProofEvent(arg0 context.Context, arg1 *gateway.ProofRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenProofEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenProofEvent indicates an expected call of ListenProofEvent. 
+func (mr *MockIGatewayMockRecorder) ListenProofEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenProofEvent", reflect.TypeOf((*MockIGateway)(nil).ListenProofEvent), arg0, arg1) +} + +// ListenWalletEvent mocks base method. +func (m *MockIGateway) ListenWalletEvent(arg0 context.Context, arg1 *gateway.WalletRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenWalletEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenWalletEvent indicates an expected call of ListenWalletEvent. +func (mr *MockIGatewayMockRecorder) ListenWalletEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenWalletEvent", reflect.TypeOf((*MockIGateway)(nil).ListenWalletEvent), arg0, arg1) +} + +// RemoveAddress mocks base method. +func (m *MockIGateway) RemoveAddress(arg0 context.Context, arg1 types.UUID, arg2 []address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveAddress indicates an expected call of RemoveAddress. +func (mr *MockIGatewayMockRecorder) RemoveAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAddress", reflect.TypeOf((*MockIGateway)(nil).RemoveAddress), arg0, arg1, arg2) +} + +// ResponseMarketEvent mocks base method. +func (m *MockIGateway) ResponseMarketEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseMarketEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseMarketEvent indicates an expected call of ResponseMarketEvent. 
+func (mr *MockIGatewayMockRecorder) ResponseMarketEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseMarketEvent", reflect.TypeOf((*MockIGateway)(nil).ResponseMarketEvent), arg0, arg1) +} + +// ResponseProofEvent mocks base method. +func (m *MockIGateway) ResponseProofEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseProofEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseProofEvent indicates an expected call of ResponseProofEvent. +func (mr *MockIGatewayMockRecorder) ResponseProofEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseProofEvent", reflect.TypeOf((*MockIGateway)(nil).ResponseProofEvent), arg0, arg1) +} + +// ResponseWalletEvent mocks base method. +func (m *MockIGateway) ResponseWalletEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseWalletEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseWalletEvent indicates an expected call of ResponseWalletEvent. +func (mr *MockIGatewayMockRecorder) ResponseWalletEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseWalletEvent", reflect.TypeOf((*MockIGateway)(nil).ResponseWalletEvent), arg0, arg1) +} + +// SectorsUnsealPiece mocks base method. +func (m *MockIGateway) SectorsUnsealPiece(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 storage.SectorRef, arg4 types.PaddedByteIndex, arg5 abi.PaddedPieceSize, arg6 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SectorsUnsealPiece", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(error) + return ret0 +} + +// SectorsUnsealPiece indicates an expected call of SectorsUnsealPiece. 
+func (mr *MockIGatewayMockRecorder) SectorsUnsealPiece(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SectorsUnsealPiece", reflect.TypeOf((*MockIGateway)(nil).SectorsUnsealPiece), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// SupportNewAccount mocks base method. +func (m *MockIGateway) SupportNewAccount(arg0 context.Context, arg1 types.UUID, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SupportNewAccount", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SupportNewAccount indicates an expected call of SupportNewAccount. +func (mr *MockIGatewayMockRecorder) SupportNewAccount(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportNewAccount", reflect.TypeOf((*MockIGateway)(nil).SupportNewAccount), arg0, arg1, arg2) +} + +// Version mocks base method. +func (m *MockIGateway) Version(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockIGatewayMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockIGateway)(nil).Version), arg0) +} + +// WalletHas mocks base method. +func (m *MockIGateway) WalletHas(arg0 context.Context, arg1 string, arg2 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas. 
+func (mr *MockIGatewayMockRecorder) WalletHas(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockIGateway)(nil).WalletHas), arg0, arg1, arg2) +} + +// WalletSign mocks base method. +func (m *MockIGateway) WalletSign(arg0 context.Context, arg1 string, arg2 address.Address, arg3 []byte, arg4 types.MsgMeta) (*crypto.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*crypto.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSign indicates an expected call of WalletSign. +func (mr *MockIGatewayMockRecorder) WalletSign(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockIGateway)(nil).WalletSign), arg0, arg1, arg2, arg3, arg4) +} diff --git a/venus-shared/api/gateway/v0/proof_event.go b/venus-shared/api/gateway/v0/proof_event.go new file mode 100644 index 0000000000..38c9200747 --- /dev/null +++ b/venus-shared/api/gateway/v0/proof_event.go @@ -0,0 +1,27 @@ +package gateway + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IProofEvent interface { + IProofClient + IProofServiceProvider +} + +type IProofClient interface { + ListConnectedMiners(ctx context.Context) ([]address.Address, error) //perm:admin + ListMinerConnection(ctx context.Context, addr address.Address) (*gtypes.MinerState, error) //perm:admin + ComputeProof(ctx context.Context, miner address.Address, sectorInfos []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:admin +} + +type IProofServiceProvider interface { + 
ResponseProofEvent(ctx context.Context, resp *gtypes.ResponseEvent) error //perm:read + ListenProofEvent(ctx context.Context, policy *gtypes.ProofRegisterPolicy) (<-chan *gtypes.RequestEvent, error) //perm:read +} diff --git a/venus-shared/api/gateway/v0/proxy_gen.go b/venus-shared/api/gateway/v0/proxy_gen.go new file mode 100644 index 0000000000..06aab7cc61 --- /dev/null +++ b/venus-shared/api/gateway/v0/proxy_gen.go @@ -0,0 +1,157 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package gateway + +import ( + "context" + + address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/specs-storage/storage" + cid "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IProofClientStruct struct { + Internal struct { + ComputeProof func(ctx context.Context, miner address.Address, sectorInfos []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) `perm:"admin"` + ListConnectedMiners func(ctx context.Context) ([]address.Address, error) `perm:"admin"` + ListMinerConnection func(ctx context.Context, addr address.Address) (*gtypes.MinerState, error) `perm:"admin"` + } +} + +func (s *IProofClientStruct) ComputeProof(p0 context.Context, p1 address.Address, p2 []builtin.SectorInfo, p3 abi.PoStRandomness) ([]builtin.PoStProof, error) { + return s.Internal.ComputeProof(p0, p1, p2, p3) +} +func (s *IProofClientStruct) ListConnectedMiners(p0 context.Context) ([]address.Address, error) { + return s.Internal.ListConnectedMiners(p0) +} +func (s *IProofClientStruct) ListMinerConnection(p0 context.Context, p1 address.Address) (*gtypes.MinerState, error) { + return s.Internal.ListMinerConnection(p0, p1) +} + +type 
IProofServiceProviderStruct struct { + Internal struct { + ListenProofEvent func(ctx context.Context, policy *gtypes.ProofRegisterPolicy) (<-chan *gtypes.RequestEvent, error) `perm:"read"` + ResponseProofEvent func(ctx context.Context, resp *gtypes.ResponseEvent) error `perm:"read"` + } +} + +func (s *IProofServiceProviderStruct) ListenProofEvent(p0 context.Context, p1 *gtypes.ProofRegisterPolicy) (<-chan *gtypes.RequestEvent, error) { + return s.Internal.ListenProofEvent(p0, p1) +} +func (s *IProofServiceProviderStruct) ResponseProofEvent(p0 context.Context, p1 *gtypes.ResponseEvent) error { + return s.Internal.ResponseProofEvent(p0, p1) +} + +type IProofEventStruct struct { + IProofClientStruct + IProofServiceProviderStruct +} + +type IWalletClientStruct struct { + Internal struct { + ListWalletInfo func(ctx context.Context) ([]*gtypes.WalletDetail, error) `perm:"admin"` + ListWalletInfoByWallet func(ctx context.Context, wallet string) (*gtypes.WalletDetail, error) `perm:"admin"` + WalletHas func(ctx context.Context, account string, addr address.Address) (bool, error) `perm:"admin"` + WalletSign func(ctx context.Context, account string, addr address.Address, toSign []byte, meta types.MsgMeta) (*crypto.Signature, error) `perm:"admin"` + } +} + +func (s *IWalletClientStruct) ListWalletInfo(p0 context.Context) ([]*gtypes.WalletDetail, error) { + return s.Internal.ListWalletInfo(p0) +} +func (s *IWalletClientStruct) ListWalletInfoByWallet(p0 context.Context, p1 string) (*gtypes.WalletDetail, error) { + return s.Internal.ListWalletInfoByWallet(p0, p1) +} +func (s *IWalletClientStruct) WalletHas(p0 context.Context, p1 string, p2 address.Address) (bool, error) { + return s.Internal.WalletHas(p0, p1, p2) +} +func (s *IWalletClientStruct) WalletSign(p0 context.Context, p1 string, p2 address.Address, p3 []byte, p4 types.MsgMeta) (*crypto.Signature, error) { + return s.Internal.WalletSign(p0, p1, p2, p3, p4) +} + +type IWalletServiceProviderStruct struct { + Internal struct 
{ + AddNewAddress func(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error `perm:"read"` + ListenWalletEvent func(ctx context.Context, policy *gtypes.WalletRegisterPolicy) (<-chan *gtypes.RequestEvent, error) `perm:"read"` + RemoveAddress func(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error `perm:"read"` + ResponseWalletEvent func(ctx context.Context, resp *gtypes.ResponseEvent) error `perm:"read"` + SupportNewAccount func(ctx context.Context, channelID types.UUID, account string) error `perm:"read"` + } +} + +func (s *IWalletServiceProviderStruct) AddNewAddress(p0 context.Context, p1 types.UUID, p2 []address.Address) error { + return s.Internal.AddNewAddress(p0, p1, p2) +} +func (s *IWalletServiceProviderStruct) ListenWalletEvent(p0 context.Context, p1 *gtypes.WalletRegisterPolicy) (<-chan *gtypes.RequestEvent, error) { + return s.Internal.ListenWalletEvent(p0, p1) +} +func (s *IWalletServiceProviderStruct) RemoveAddress(p0 context.Context, p1 types.UUID, p2 []address.Address) error { + return s.Internal.RemoveAddress(p0, p1, p2) +} +func (s *IWalletServiceProviderStruct) ResponseWalletEvent(p0 context.Context, p1 *gtypes.ResponseEvent) error { + return s.Internal.ResponseWalletEvent(p0, p1) +} +func (s *IWalletServiceProviderStruct) SupportNewAccount(p0 context.Context, p1 types.UUID, p2 string) error { + return s.Internal.SupportNewAccount(p0, p1, p2) +} + +type IWalletEventStruct struct { + IWalletClientStruct + IWalletServiceProviderStruct +} + +type IMarketClientStruct struct { + Internal struct { + IsUnsealed func(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size abi.PaddedPieceSize) (bool, error) `perm:"admin"` + ListMarketConnectionsState func(ctx context.Context) ([]gtypes.MarketConnectionState, error) `perm:"admin"` + SectorsUnsealPiece func(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, 
offset types.PaddedByteIndex, size abi.PaddedPieceSize, dest string) error `perm:"admin"` + } +} + +func (s *IMarketClientStruct) IsUnsealed(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 storage.SectorRef, p4 types.PaddedByteIndex, p5 abi.PaddedPieceSize) (bool, error) { + return s.Internal.IsUnsealed(p0, p1, p2, p3, p4, p5) +} +func (s *IMarketClientStruct) ListMarketConnectionsState(p0 context.Context) ([]gtypes.MarketConnectionState, error) { + return s.Internal.ListMarketConnectionsState(p0) +} +func (s *IMarketClientStruct) SectorsUnsealPiece(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 storage.SectorRef, p4 types.PaddedByteIndex, p5 abi.PaddedPieceSize, p6 string) error { + return s.Internal.SectorsUnsealPiece(p0, p1, p2, p3, p4, p5, p6) +} + +type IMarketServiceProviderStruct struct { + Internal struct { + ListenMarketEvent func(ctx context.Context, policy *gtypes.MarketRegisterPolicy) (<-chan *gtypes.RequestEvent, error) `perm:"read"` + ResponseMarketEvent func(ctx context.Context, resp *gtypes.ResponseEvent) error `perm:"read"` + } +} + +func (s *IMarketServiceProviderStruct) ListenMarketEvent(p0 context.Context, p1 *gtypes.MarketRegisterPolicy) (<-chan *gtypes.RequestEvent, error) { + return s.Internal.ListenMarketEvent(p0, p1) +} +func (s *IMarketServiceProviderStruct) ResponseMarketEvent(p0 context.Context, p1 *gtypes.ResponseEvent) error { + return s.Internal.ResponseMarketEvent(p0, p1) +} + +type IMarketEventStruct struct { + IMarketClientStruct + IMarketServiceProviderStruct +} + +type IGatewayStruct struct { + IProofEventStruct + IWalletEventStruct + IMarketEventStruct + + Internal struct { + Version func(ctx context.Context) (types.Version, error) `perm:"read"` + } +} + +func (s *IGatewayStruct) Version(p0 context.Context) (types.Version, error) { + return s.Internal.Version(p0) +} diff --git a/venus-shared/api/gateway/v0/wallet_event.go b/venus-shared/api/gateway/v0/wallet_event.go new file mode 100644 index 
0000000000..6b3b00de17 --- /dev/null +++ b/venus-shared/api/gateway/v0/wallet_event.go @@ -0,0 +1,31 @@ +package gateway + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/venus/venus-shared/types" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IWalletEvent interface { + IWalletClient + IWalletServiceProvider +} + +type IWalletClient interface { + ListWalletInfo(ctx context.Context) ([]*gtypes.WalletDetail, error) //perm:admin + ListWalletInfoByWallet(ctx context.Context, wallet string) (*gtypes.WalletDetail, error) //perm:admin + WalletHas(ctx context.Context, account string, addr address.Address) (bool, error) //perm:admin + WalletSign(ctx context.Context, account string, addr address.Address, toSign []byte, meta types.MsgMeta) (*crypto.Signature, error) //perm:admin +} + +type IWalletServiceProvider interface { + ResponseWalletEvent(ctx context.Context, resp *gtypes.ResponseEvent) error //perm:read + ListenWalletEvent(ctx context.Context, policy *gtypes.WalletRegisterPolicy) (<-chan *gtypes.RequestEvent, error) //perm:read + SupportNewAccount(ctx context.Context, channelID types.UUID, account string) error //perm:read + AddNewAddress(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error //perm:read + RemoveAddress(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error //perm:read +} diff --git a/venus-shared/api/gateway/v1/api.go b/venus-shared/api/gateway/v1/api.go new file mode 100644 index 0000000000..5119e730fa --- /dev/null +++ b/venus-shared/api/gateway/v1/api.go @@ -0,0 +1,13 @@ +package gateway + +import ( + "github.com/filecoin-project/venus/venus-shared/api" +) + +type IGateway interface { + IProofEvent + IWalletEvent + IMarketEvent + + api.Version +} diff --git a/venus-shared/api/gateway/v1/client_gen.go b/venus-shared/api/gateway/v1/client_gen.go new file mode 100644 
index 0000000000..4ea10610a6 --- /dev/null +++ b/venus-shared/api/gateway/v1/client_gen.go @@ -0,0 +1,54 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package gateway + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = 1 +const APINamespace = "gateway.IGateway" +const MethodNamespace = "Gateway" + +// NewIGatewayRPC creates a new httpparse jsonrpc remotecli. +func NewIGatewayRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (IGateway, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res IGatewayStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} + +// DialIGatewayRPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. +func DialIGatewayRPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) (IGateway, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res IGatewayStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) 
+ + return &res, closer, err +} diff --git a/venus-shared/api/gateway/v1/market_event.go b/venus-shared/api/gateway/v1/market_event.go new file mode 100644 index 0000000000..e01af80b2c --- /dev/null +++ b/venus-shared/api/gateway/v1/market_event.go @@ -0,0 +1,30 @@ +package gateway + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IMarketEvent interface { + IMarketClient + IMarketServiceProvider +} + +type IMarketClient interface { + ListMarketConnectionsState(ctx context.Context) ([]gtypes.MarketConnectionState, error) //perm:admin + IsUnsealed(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size abi.PaddedPieceSize) (bool, error) //perm:admin + SectorsUnsealPiece(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size abi.PaddedPieceSize, dest string) error //perm:admin +} + +type IMarketServiceProvider interface { + ResponseMarketEvent(ctx context.Context, resp *gtypes.ResponseEvent) error //perm:read + ListenMarketEvent(ctx context.Context, policy *gtypes.MarketRegisterPolicy) (<-chan *gtypes.RequestEvent, error) //perm:read +} diff --git a/venus-shared/api/gateway/v1/method.md b/venus-shared/api/gateway/v1/method.md new file mode 100644 index 0000000000..c65366faad --- /dev/null +++ b/venus-shared/api/gateway/v1/method.md @@ -0,0 +1,501 @@ +# Groups + +* [Gateway](#gateway) + * [Version](#version) +* [MarketClient](#marketclient) + * [IsUnsealed](#isunsealed) + * [ListMarketConnectionsState](#listmarketconnectionsstate) + * [SectorsUnsealPiece](#sectorsunsealpiece) +* [MarketServiceProvider](#marketserviceprovider) + * 
[ListenMarketEvent](#listenmarketevent) + * [ResponseMarketEvent](#responsemarketevent) +* [ProofClient](#proofclient) + * [ComputeProof](#computeproof) + * [ListConnectedMiners](#listconnectedminers) + * [ListMinerConnection](#listminerconnection) +* [ProofServiceProvider](#proofserviceprovider) + * [ListenProofEvent](#listenproofevent) + * [ResponseProofEvent](#responseproofevent) +* [WalletClient](#walletclient) + * [ListWalletInfo](#listwalletinfo) + * [ListWalletInfoByWallet](#listwalletinfobywallet) + * [WalletHas](#wallethas) + * [WalletSign](#walletsign) +* [WalletServiceProvider](#walletserviceprovider) + * [AddNewAddress](#addnewaddress) + * [ListenWalletEvent](#listenwalletevent) + * [RemoveAddress](#removeaddress) + * [ResponseWalletEvent](#responsewalletevent) + * [SupportNewAccount](#supportnewaccount) + +## Gateway + +### Version +Version provides information about API provider + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131840 +} +``` + +## MarketClient + +### IsUnsealed + + +Perms: admin + +Inputs: +```json +[ + "f01234", + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 10, + 1032 +] +``` + +Response: `true` + +### ListMarketConnectionsState + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "Addr": "f01234", + "Conn": { + "Connections": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ], + "ConnectionCount": 123 + } + } +] +``` + +### SectorsUnsealPiece + + +Perms: admin + +Inputs: +```json +[ + "f01234", + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 10, + 1032, + "string value" +] +``` + +Response: `{}` + +## MarketServiceProvider + 
+### ListenMarketEvent + + +Perms: read + +Inputs: +```json +[ + { + "Miner": "f01234" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### ResponseMarketEvent + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +## ProofClient + +### ComputeProof + + +Perms: admin + +Inputs: +```json +[ + "f01234", + [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], + "Bw==", + 10101, + 17 +] +``` + +Response: +```json +[ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } +] +``` + +### ListConnectedMiners + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + "f01234" +] +``` + +### ListMinerConnection + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Connections": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ], + "ConnectionCount": 123 +} +``` + +## ProofServiceProvider + +### ListenProofEvent + + +Perms: read + +Inputs: +```json +[ + { + "MinerAddress": "f01234" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### ResponseProofEvent + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +## WalletClient + +### ListWalletInfo + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "Account": "string value", + "SupportAccounts": [ + "string value" + ], + "ConnectStates": [ + { + "Addrs": [ + "f01234" + 
], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ] + } +] +``` + +### ListWalletInfoByWallet + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Account": "string value", + "SupportAccounts": [ + "string value" + ], + "ConnectStates": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ] +} +``` + +### WalletHas + + +Perms: admin + +Inputs: +```json +[ + "string value", + "f01234" +] +``` + +Response: `true` + +### WalletSign + + +Perms: admin + +Inputs: +```json +[ + "string value", + "f01234", + "Ynl0ZSBhcnJheQ==", + { + "Type": "message", + "Extra": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +## WalletServiceProvider + +### AddNewAddress + + +Perms: read + +Inputs: +```json +[ + "e26f1e5c-47f7-4561-a11d-18fab6e748af", + [ + "f01234" + ] +] +``` + +Response: `{}` + +### ListenWalletEvent + + +Perms: read + +Inputs: +```json +[ + { + "SupportAccounts": [ + "string value" + ], + "SignBytes": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### RemoveAddress + + +Perms: read + +Inputs: +```json +[ + "e26f1e5c-47f7-4561-a11d-18fab6e748af", + [ + "f01234" + ] +] +``` + +Response: `{}` + +### ResponseWalletEvent + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +### SupportNewAccount + + +Perms: read + +Inputs: +```json +[ + "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "string value" +] +``` + +Response: `{}` + diff --git a/venus-shared/api/gateway/v1/mock/mock_igateway.go 
b/venus-shared/api/gateway/v1/mock/mock_igateway.go new file mode 100644 index 0000000000..28e59d6a6f --- /dev/null +++ b/venus-shared/api/gateway/v1/mock/mock_igateway.go @@ -0,0 +1,337 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/venus/venus-shared/api/gateway/v1 (interfaces: IGateway) + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + network "github.com/filecoin-project/go-state-types/network" + proof "github.com/filecoin-project/go-state-types/proof" + storage "github.com/filecoin-project/specs-storage/storage" + types "github.com/filecoin-project/venus/venus-shared/types" + gateway "github.com/filecoin-project/venus/venus-shared/types/gateway" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" +) + +// MockIGateway is a mock of IGateway interface. +type MockIGateway struct { + ctrl *gomock.Controller + recorder *MockIGatewayMockRecorder +} + +// MockIGatewayMockRecorder is the mock recorder for MockIGateway. +type MockIGatewayMockRecorder struct { + mock *MockIGateway +} + +// NewMockIGateway creates a new mock instance. +func NewMockIGateway(ctrl *gomock.Controller) *MockIGateway { + mock := &MockIGateway{ctrl: ctrl} + mock.recorder = &MockIGatewayMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIGateway) EXPECT() *MockIGatewayMockRecorder { + return m.recorder +} + +// AddNewAddress mocks base method. 
+func (m *MockIGateway) AddNewAddress(arg0 context.Context, arg1 types.UUID, arg2 []address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddNewAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddNewAddress indicates an expected call of AddNewAddress. +func (mr *MockIGatewayMockRecorder) AddNewAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNewAddress", reflect.TypeOf((*MockIGateway)(nil).AddNewAddress), arg0, arg1, arg2) +} + +// ComputeProof mocks base method. +func (m *MockIGateway) ComputeProof(arg0 context.Context, arg1 address.Address, arg2 []proof.ExtendedSectorInfo, arg3 abi.PoStRandomness, arg4 abi.ChainEpoch, arg5 network.Version) ([]proof.PoStProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ComputeProof", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].([]proof.PoStProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ComputeProof indicates an expected call of ComputeProof. +func (mr *MockIGatewayMockRecorder) ComputeProof(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ComputeProof", reflect.TypeOf((*MockIGateway)(nil).ComputeProof), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// IsUnsealed mocks base method. +func (m *MockIGateway) IsUnsealed(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 storage.SectorRef, arg4 types.PaddedByteIndex, arg5 abi.PaddedPieceSize) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsUnsealed indicates an expected call of IsUnsealed. 
+func (mr *MockIGatewayMockRecorder) IsUnsealed(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockIGateway)(nil).IsUnsealed), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// ListConnectedMiners mocks base method. +func (m *MockIGateway) ListConnectedMiners(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListConnectedMiners", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListConnectedMiners indicates an expected call of ListConnectedMiners. +func (mr *MockIGatewayMockRecorder) ListConnectedMiners(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListConnectedMiners", reflect.TypeOf((*MockIGateway)(nil).ListConnectedMiners), arg0) +} + +// ListMarketConnectionsState mocks base method. +func (m *MockIGateway) ListMarketConnectionsState(arg0 context.Context) ([]gateway.MarketConnectionState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMarketConnectionsState", arg0) + ret0, _ := ret[0].([]gateway.MarketConnectionState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMarketConnectionsState indicates an expected call of ListMarketConnectionsState. +func (mr *MockIGatewayMockRecorder) ListMarketConnectionsState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMarketConnectionsState", reflect.TypeOf((*MockIGateway)(nil).ListMarketConnectionsState), arg0) +} + +// ListMinerConnection mocks base method. 
+func (m *MockIGateway) ListMinerConnection(arg0 context.Context, arg1 address.Address) (*gateway.MinerState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMinerConnection", arg0, arg1) + ret0, _ := ret[0].(*gateway.MinerState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMinerConnection indicates an expected call of ListMinerConnection. +func (mr *MockIGatewayMockRecorder) ListMinerConnection(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMinerConnection", reflect.TypeOf((*MockIGateway)(nil).ListMinerConnection), arg0, arg1) +} + +// ListWalletInfo mocks base method. +func (m *MockIGateway) ListWalletInfo(arg0 context.Context) ([]*gateway.WalletDetail, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListWalletInfo", arg0) + ret0, _ := ret[0].([]*gateway.WalletDetail) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListWalletInfo indicates an expected call of ListWalletInfo. +func (mr *MockIGatewayMockRecorder) ListWalletInfo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWalletInfo", reflect.TypeOf((*MockIGateway)(nil).ListWalletInfo), arg0) +} + +// ListWalletInfoByWallet mocks base method. +func (m *MockIGateway) ListWalletInfoByWallet(arg0 context.Context, arg1 string) (*gateway.WalletDetail, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListWalletInfoByWallet", arg0, arg1) + ret0, _ := ret[0].(*gateway.WalletDetail) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListWalletInfoByWallet indicates an expected call of ListWalletInfoByWallet. 
+func (mr *MockIGatewayMockRecorder) ListWalletInfoByWallet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWalletInfoByWallet", reflect.TypeOf((*MockIGateway)(nil).ListWalletInfoByWallet), arg0, arg1) +} + +// ListenMarketEvent mocks base method. +func (m *MockIGateway) ListenMarketEvent(arg0 context.Context, arg1 *gateway.MarketRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenMarketEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenMarketEvent indicates an expected call of ListenMarketEvent. +func (mr *MockIGatewayMockRecorder) ListenMarketEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenMarketEvent", reflect.TypeOf((*MockIGateway)(nil).ListenMarketEvent), arg0, arg1) +} + +// ListenProofEvent mocks base method. +func (m *MockIGateway) ListenProofEvent(arg0 context.Context, arg1 *gateway.ProofRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenProofEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenProofEvent indicates an expected call of ListenProofEvent. +func (mr *MockIGatewayMockRecorder) ListenProofEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenProofEvent", reflect.TypeOf((*MockIGateway)(nil).ListenProofEvent), arg0, arg1) +} + +// ListenWalletEvent mocks base method. 
+func (m *MockIGateway) ListenWalletEvent(arg0 context.Context, arg1 *gateway.WalletRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenWalletEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenWalletEvent indicates an expected call of ListenWalletEvent. +func (mr *MockIGatewayMockRecorder) ListenWalletEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenWalletEvent", reflect.TypeOf((*MockIGateway)(nil).ListenWalletEvent), arg0, arg1) +} + +// RemoveAddress mocks base method. +func (m *MockIGateway) RemoveAddress(arg0 context.Context, arg1 types.UUID, arg2 []address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveAddress indicates an expected call of RemoveAddress. +func (mr *MockIGatewayMockRecorder) RemoveAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAddress", reflect.TypeOf((*MockIGateway)(nil).RemoveAddress), arg0, arg1, arg2) +} + +// ResponseMarketEvent mocks base method. +func (m *MockIGateway) ResponseMarketEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseMarketEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseMarketEvent indicates an expected call of ResponseMarketEvent. +func (mr *MockIGatewayMockRecorder) ResponseMarketEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseMarketEvent", reflect.TypeOf((*MockIGateway)(nil).ResponseMarketEvent), arg0, arg1) +} + +// ResponseProofEvent mocks base method. 
+func (m *MockIGateway) ResponseProofEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseProofEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseProofEvent indicates an expected call of ResponseProofEvent. +func (mr *MockIGatewayMockRecorder) ResponseProofEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseProofEvent", reflect.TypeOf((*MockIGateway)(nil).ResponseProofEvent), arg0, arg1) +} + +// ResponseWalletEvent mocks base method. +func (m *MockIGateway) ResponseWalletEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseWalletEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseWalletEvent indicates an expected call of ResponseWalletEvent. +func (mr *MockIGatewayMockRecorder) ResponseWalletEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseWalletEvent", reflect.TypeOf((*MockIGateway)(nil).ResponseWalletEvent), arg0, arg1) +} + +// SectorsUnsealPiece mocks base method. +func (m *MockIGateway) SectorsUnsealPiece(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 storage.SectorRef, arg4 types.PaddedByteIndex, arg5 abi.PaddedPieceSize, arg6 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SectorsUnsealPiece", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(error) + return ret0 +} + +// SectorsUnsealPiece indicates an expected call of SectorsUnsealPiece. 
+func (mr *MockIGatewayMockRecorder) SectorsUnsealPiece(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SectorsUnsealPiece", reflect.TypeOf((*MockIGateway)(nil).SectorsUnsealPiece), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// SupportNewAccount mocks base method. +func (m *MockIGateway) SupportNewAccount(arg0 context.Context, arg1 types.UUID, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SupportNewAccount", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SupportNewAccount indicates an expected call of SupportNewAccount. +func (mr *MockIGatewayMockRecorder) SupportNewAccount(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportNewAccount", reflect.TypeOf((*MockIGateway)(nil).SupportNewAccount), arg0, arg1, arg2) +} + +// Version mocks base method. +func (m *MockIGateway) Version(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockIGatewayMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockIGateway)(nil).Version), arg0) +} + +// WalletHas mocks base method. +func (m *MockIGateway) WalletHas(arg0 context.Context, arg1 string, arg2 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas. 
+func (mr *MockIGatewayMockRecorder) WalletHas(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockIGateway)(nil).WalletHas), arg0, arg1, arg2) +} + +// WalletSign mocks base method. +func (m *MockIGateway) WalletSign(arg0 context.Context, arg1 string, arg2 address.Address, arg3 []byte, arg4 types.MsgMeta) (*crypto.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*crypto.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSign indicates an expected call of WalletSign. +func (mr *MockIGatewayMockRecorder) WalletSign(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockIGateway)(nil).WalletSign), arg0, arg1, arg2, arg3, arg4) +} diff --git a/venus-shared/api/gateway/v1/proof_event.go b/venus-shared/api/gateway/v1/proof_event.go new file mode 100644 index 0000000000..3b60fb7199 --- /dev/null +++ b/venus-shared/api/gateway/v1/proof_event.go @@ -0,0 +1,28 @@ +package gateway + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IProofEvent interface { + IProofClient + IProofServiceProvider +} + +type IProofClient interface { + ListConnectedMiners(ctx context.Context) ([]address.Address, error) //perm:admin + ListMinerConnection(ctx context.Context, addr address.Address) (*gtypes.MinerState, error) //perm:admin + ComputeProof(ctx context.Context, miner address.Address, sectorInfos []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, height abi.ChainEpoch, nwVersion 
network.Version) ([]builtin.PoStProof, error) //perm:admin +} + +type IProofServiceProvider interface { + ResponseProofEvent(ctx context.Context, resp *gtypes.ResponseEvent) error //perm:read + ListenProofEvent(ctx context.Context, policy *gtypes.ProofRegisterPolicy) (<-chan *gtypes.RequestEvent, error) //perm:read +} diff --git a/venus-shared/api/gateway/v1/proxy_gen.go b/venus-shared/api/gateway/v1/proxy_gen.go new file mode 100644 index 0000000000..3531b441d3 --- /dev/null +++ b/venus-shared/api/gateway/v1/proxy_gen.go @@ -0,0 +1,158 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package gateway + +import ( + "context" + + address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/specs-storage/storage" + cid "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IProofClientStruct struct { + Internal struct { + ComputeProof func(ctx context.Context, miner address.Address, sectorInfos []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, height abi.ChainEpoch, nwVersion network.Version) ([]builtin.PoStProof, error) `perm:"admin"` + ListConnectedMiners func(ctx context.Context) ([]address.Address, error) `perm:"admin"` + ListMinerConnection func(ctx context.Context, addr address.Address) (*gtypes.MinerState, error) `perm:"admin"` + } +} + +func (s *IProofClientStruct) ComputeProof(p0 context.Context, p1 address.Address, p2 []builtin.ExtendedSectorInfo, p3 abi.PoStRandomness, p4 abi.ChainEpoch, p5 network.Version) ([]builtin.PoStProof, error) { + return s.Internal.ComputeProof(p0, p1, p2, p3, p4, p5) +} +func (s *IProofClientStruct) ListConnectedMiners(p0 
context.Context) ([]address.Address, error) { + return s.Internal.ListConnectedMiners(p0) +} +func (s *IProofClientStruct) ListMinerConnection(p0 context.Context, p1 address.Address) (*gtypes.MinerState, error) { + return s.Internal.ListMinerConnection(p0, p1) +} + +type IProofServiceProviderStruct struct { + Internal struct { + ListenProofEvent func(ctx context.Context, policy *gtypes.ProofRegisterPolicy) (<-chan *gtypes.RequestEvent, error) `perm:"read"` + ResponseProofEvent func(ctx context.Context, resp *gtypes.ResponseEvent) error `perm:"read"` + } +} + +func (s *IProofServiceProviderStruct) ListenProofEvent(p0 context.Context, p1 *gtypes.ProofRegisterPolicy) (<-chan *gtypes.RequestEvent, error) { + return s.Internal.ListenProofEvent(p0, p1) +} +func (s *IProofServiceProviderStruct) ResponseProofEvent(p0 context.Context, p1 *gtypes.ResponseEvent) error { + return s.Internal.ResponseProofEvent(p0, p1) +} + +type IProofEventStruct struct { + IProofClientStruct + IProofServiceProviderStruct +} + +type IWalletClientStruct struct { + Internal struct { + ListWalletInfo func(ctx context.Context) ([]*gtypes.WalletDetail, error) `perm:"admin"` + ListWalletInfoByWallet func(ctx context.Context, wallet string) (*gtypes.WalletDetail, error) `perm:"admin"` + WalletHas func(ctx context.Context, account string, addr address.Address) (bool, error) `perm:"admin"` + WalletSign func(ctx context.Context, account string, addr address.Address, toSign []byte, meta types.MsgMeta) (*crypto.Signature, error) `perm:"admin"` + } +} + +func (s *IWalletClientStruct) ListWalletInfo(p0 context.Context) ([]*gtypes.WalletDetail, error) { + return s.Internal.ListWalletInfo(p0) +} +func (s *IWalletClientStruct) ListWalletInfoByWallet(p0 context.Context, p1 string) (*gtypes.WalletDetail, error) { + return s.Internal.ListWalletInfoByWallet(p0, p1) +} +func (s *IWalletClientStruct) WalletHas(p0 context.Context, p1 string, p2 address.Address) (bool, error) { + return s.Internal.WalletHas(p0, p1, p2) 
+} +func (s *IWalletClientStruct) WalletSign(p0 context.Context, p1 string, p2 address.Address, p3 []byte, p4 types.MsgMeta) (*crypto.Signature, error) { + return s.Internal.WalletSign(p0, p1, p2, p3, p4) +} + +type IWalletServiceProviderStruct struct { + Internal struct { + AddNewAddress func(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error `perm:"read"` + ListenWalletEvent func(ctx context.Context, policy *gtypes.WalletRegisterPolicy) (<-chan *gtypes.RequestEvent, error) `perm:"read"` + RemoveAddress func(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error `perm:"read"` + ResponseWalletEvent func(ctx context.Context, resp *gtypes.ResponseEvent) error `perm:"read"` + SupportNewAccount func(ctx context.Context, channelID types.UUID, account string) error `perm:"read"` + } +} + +func (s *IWalletServiceProviderStruct) AddNewAddress(p0 context.Context, p1 types.UUID, p2 []address.Address) error { + return s.Internal.AddNewAddress(p0, p1, p2) +} +func (s *IWalletServiceProviderStruct) ListenWalletEvent(p0 context.Context, p1 *gtypes.WalletRegisterPolicy) (<-chan *gtypes.RequestEvent, error) { + return s.Internal.ListenWalletEvent(p0, p1) +} +func (s *IWalletServiceProviderStruct) RemoveAddress(p0 context.Context, p1 types.UUID, p2 []address.Address) error { + return s.Internal.RemoveAddress(p0, p1, p2) +} +func (s *IWalletServiceProviderStruct) ResponseWalletEvent(p0 context.Context, p1 *gtypes.ResponseEvent) error { + return s.Internal.ResponseWalletEvent(p0, p1) +} +func (s *IWalletServiceProviderStruct) SupportNewAccount(p0 context.Context, p1 types.UUID, p2 string) error { + return s.Internal.SupportNewAccount(p0, p1, p2) +} + +type IWalletEventStruct struct { + IWalletClientStruct + IWalletServiceProviderStruct +} + +type IMarketClientStruct struct { + Internal struct { + IsUnsealed func(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size 
abi.PaddedPieceSize) (bool, error) `perm:"admin"` + ListMarketConnectionsState func(ctx context.Context) ([]gtypes.MarketConnectionState, error) `perm:"admin"` + SectorsUnsealPiece func(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size abi.PaddedPieceSize, dest string) error `perm:"admin"` + } +} + +func (s *IMarketClientStruct) IsUnsealed(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 storage.SectorRef, p4 types.PaddedByteIndex, p5 abi.PaddedPieceSize) (bool, error) { + return s.Internal.IsUnsealed(p0, p1, p2, p3, p4, p5) +} +func (s *IMarketClientStruct) ListMarketConnectionsState(p0 context.Context) ([]gtypes.MarketConnectionState, error) { + return s.Internal.ListMarketConnectionsState(p0) +} +func (s *IMarketClientStruct) SectorsUnsealPiece(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 storage.SectorRef, p4 types.PaddedByteIndex, p5 abi.PaddedPieceSize, p6 string) error { + return s.Internal.SectorsUnsealPiece(p0, p1, p2, p3, p4, p5, p6) +} + +type IMarketServiceProviderStruct struct { + Internal struct { + ListenMarketEvent func(ctx context.Context, policy *gtypes.MarketRegisterPolicy) (<-chan *gtypes.RequestEvent, error) `perm:"read"` + ResponseMarketEvent func(ctx context.Context, resp *gtypes.ResponseEvent) error `perm:"read"` + } +} + +func (s *IMarketServiceProviderStruct) ListenMarketEvent(p0 context.Context, p1 *gtypes.MarketRegisterPolicy) (<-chan *gtypes.RequestEvent, error) { + return s.Internal.ListenMarketEvent(p0, p1) +} +func (s *IMarketServiceProviderStruct) ResponseMarketEvent(p0 context.Context, p1 *gtypes.ResponseEvent) error { + return s.Internal.ResponseMarketEvent(p0, p1) +} + +type IMarketEventStruct struct { + IMarketClientStruct + IMarketServiceProviderStruct +} + +type IGatewayStruct struct { + IProofEventStruct + IWalletEventStruct + IMarketEventStruct + + Internal struct { + Version func(ctx context.Context) (types.Version, error) `perm:"read"` 
+ } +} + +func (s *IGatewayStruct) Version(p0 context.Context) (types.Version, error) { + return s.Internal.Version(p0) +} diff --git a/venus-shared/api/gateway/v1/wallet_event.go b/venus-shared/api/gateway/v1/wallet_event.go new file mode 100644 index 0000000000..6b3b00de17 --- /dev/null +++ b/venus-shared/api/gateway/v1/wallet_event.go @@ -0,0 +1,31 @@ +package gateway + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/venus/venus-shared/types" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IWalletEvent interface { + IWalletClient + IWalletServiceProvider +} + +type IWalletClient interface { + ListWalletInfo(ctx context.Context) ([]*gtypes.WalletDetail, error) //perm:admin + ListWalletInfoByWallet(ctx context.Context, wallet string) (*gtypes.WalletDetail, error) //perm:admin + WalletHas(ctx context.Context, account string, addr address.Address) (bool, error) //perm:admin + WalletSign(ctx context.Context, account string, addr address.Address, toSign []byte, meta types.MsgMeta) (*crypto.Signature, error) //perm:admin +} + +type IWalletServiceProvider interface { + ResponseWalletEvent(ctx context.Context, resp *gtypes.ResponseEvent) error //perm:read + ListenWalletEvent(ctx context.Context, policy *gtypes.WalletRegisterPolicy) (<-chan *gtypes.RequestEvent, error) //perm:read + SupportNewAccount(ctx context.Context, channelID types.UUID, account string) error //perm:read + AddNewAddress(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error //perm:read + RemoveAddress(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error //perm:read +} diff --git a/venus-shared/api/gateway/v2/api.go b/venus-shared/api/gateway/v2/api.go new file mode 100644 index 0000000000..5119e730fa --- /dev/null +++ b/venus-shared/api/gateway/v2/api.go @@ -0,0 +1,13 @@ +package gateway + +import ( + 
"github.com/filecoin-project/venus/venus-shared/api" +) + +type IGateway interface { + IProofEvent + IWalletEvent + IMarketEvent + + api.Version +} diff --git a/venus-shared/api/gateway/v2/client_gen.go b/venus-shared/api/gateway/v2/client_gen.go new file mode 100644 index 0000000000..a245833b54 --- /dev/null +++ b/venus-shared/api/gateway/v2/client_gen.go @@ -0,0 +1,54 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package gateway + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = 2 +const APINamespace = "gateway.IGateway" +const MethodNamespace = "Gateway" + +// NewIGatewayRPC creates a new httpparse jsonrpc remotecli. +func NewIGatewayRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (IGateway, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res IGatewayStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} + +// DialIGatewayRPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. 
+func DialIGatewayRPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) (IGateway, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res IGatewayStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} diff --git a/venus-shared/api/gateway/v2/market_event.go b/venus-shared/api/gateway/v2/market_event.go new file mode 100644 index 0000000000..e01af80b2c --- /dev/null +++ b/venus-shared/api/gateway/v2/market_event.go @@ -0,0 +1,30 @@ +package gateway + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IMarketEvent interface { + IMarketClient + IMarketServiceProvider +} + +type IMarketClient interface { + ListMarketConnectionsState(ctx context.Context) ([]gtypes.MarketConnectionState, error) //perm:admin + IsUnsealed(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size abi.PaddedPieceSize) (bool, error) //perm:admin + SectorsUnsealPiece(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size abi.PaddedPieceSize, dest string) error //perm:admin +} + +type IMarketServiceProvider interface { + ResponseMarketEvent(ctx context.Context, resp *gtypes.ResponseEvent) error 
//perm:read + ListenMarketEvent(ctx context.Context, policy *gtypes.MarketRegisterPolicy) (<-chan *gtypes.RequestEvent, error) //perm:read +} diff --git a/venus-shared/api/gateway/v2/method.md b/venus-shared/api/gateway/v2/method.md new file mode 100644 index 0000000000..c65366faad --- /dev/null +++ b/venus-shared/api/gateway/v2/method.md @@ -0,0 +1,501 @@ +# Groups + +* [Gateway](#gateway) + * [Version](#version) +* [MarketClient](#marketclient) + * [IsUnsealed](#isunsealed) + * [ListMarketConnectionsState](#listmarketconnectionsstate) + * [SectorsUnsealPiece](#sectorsunsealpiece) +* [MarketServiceProvider](#marketserviceprovider) + * [ListenMarketEvent](#listenmarketevent) + * [ResponseMarketEvent](#responsemarketevent) +* [ProofClient](#proofclient) + * [ComputeProof](#computeproof) + * [ListConnectedMiners](#listconnectedminers) + * [ListMinerConnection](#listminerconnection) +* [ProofServiceProvider](#proofserviceprovider) + * [ListenProofEvent](#listenproofevent) + * [ResponseProofEvent](#responseproofevent) +* [WalletClient](#walletclient) + * [ListWalletInfo](#listwalletinfo) + * [ListWalletInfoByWallet](#listwalletinfobywallet) + * [WalletHas](#wallethas) + * [WalletSign](#walletsign) +* [WalletServiceProvider](#walletserviceprovider) + * [AddNewAddress](#addnewaddress) + * [ListenWalletEvent](#listenwalletevent) + * [RemoveAddress](#removeaddress) + * [ResponseWalletEvent](#responsewalletevent) + * [SupportNewAccount](#supportnewaccount) + +## Gateway + +### Version +Version provides information about API provider + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131840 +} +``` + +## MarketClient + +### IsUnsealed + + +Perms: admin + +Inputs: +```json +[ + "f01234", + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 10, + 1032 +] +``` + +Response: `true` + +### ListMarketConnectionsState + + +Perms: 
admin + +Inputs: `[]` + +Response: +```json +[ + { + "Addr": "f01234", + "Conn": { + "Connections": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ], + "ConnectionCount": 123 + } + } +] +``` + +### SectorsUnsealPiece + + +Perms: admin + +Inputs: +```json +[ + "f01234", + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 10, + 1032, + "string value" +] +``` + +Response: `{}` + +## MarketServiceProvider + +### ListenMarketEvent + + +Perms: read + +Inputs: +```json +[ + { + "Miner": "f01234" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### ResponseMarketEvent + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +## ProofClient + +### ComputeProof + + +Perms: admin + +Inputs: +```json +[ + "f01234", + [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], + "Bw==", + 10101, + 17 +] +``` + +Response: +```json +[ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } +] +``` + +### ListConnectedMiners + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + "f01234" +] +``` + +### ListMinerConnection + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Connections": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ], + "ConnectionCount": 123 +} +``` + +## ProofServiceProvider + +### ListenProofEvent + + +Perms: read 
+ +Inputs: +```json +[ + { + "MinerAddress": "f01234" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### ResponseProofEvent + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +## WalletClient + +### ListWalletInfo + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "Account": "string value", + "SupportAccounts": [ + "string value" + ], + "ConnectStates": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ] + } +] +``` + +### ListWalletInfoByWallet + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Account": "string value", + "SupportAccounts": [ + "string value" + ], + "ConnectStates": [ + { + "Addrs": [ + "f01234" + ], + "ChannelId": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Ip": "string value", + "RequestCount": 123, + "CreateTime": "0001-01-01T00:00:00Z" + } + ] +} +``` + +### WalletHas + + +Perms: admin + +Inputs: +```json +[ + "string value", + "f01234" +] +``` + +Response: `true` + +### WalletSign + + +Perms: admin + +Inputs: +```json +[ + "string value", + "f01234", + "Ynl0ZSBhcnJheQ==", + { + "Type": "message", + "Extra": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +## WalletServiceProvider + +### AddNewAddress + + +Perms: read + +Inputs: +```json +[ + "e26f1e5c-47f7-4561-a11d-18fab6e748af", + [ + "f01234" + ] +] +``` + +Response: `{}` + +### ListenWalletEvent + + +Perms: read + +Inputs: +```json +[ + { + "SupportAccounts": [ + "string value" + ], + "SignBytes": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + 
"Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### RemoveAddress + + +Perms: read + +Inputs: +```json +[ + "e26f1e5c-47f7-4561-a11d-18fab6e748af", + [ + "f01234" + ] +] +``` + +Response: `{}` + +### ResponseWalletEvent + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +### SupportNewAccount + + +Perms: read + +Inputs: +```json +[ + "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "string value" +] +``` + +Response: `{}` + diff --git a/venus-shared/api/gateway/v2/mock/mock_igateway.go b/venus-shared/api/gateway/v2/mock/mock_igateway.go new file mode 100644 index 0000000000..4e4391e9e4 --- /dev/null +++ b/venus-shared/api/gateway/v2/mock/mock_igateway.go @@ -0,0 +1,337 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/venus/venus-shared/api/gateway/v2 (interfaces: IGateway) + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + network "github.com/filecoin-project/go-state-types/network" + proof "github.com/filecoin-project/go-state-types/proof" + storage "github.com/filecoin-project/specs-storage/storage" + types "github.com/filecoin-project/venus/venus-shared/types" + gateway "github.com/filecoin-project/venus/venus-shared/types/gateway" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" +) + +// MockIGateway is a mock of IGateway interface. +type MockIGateway struct { + ctrl *gomock.Controller + recorder *MockIGatewayMockRecorder +} + +// MockIGatewayMockRecorder is the mock recorder for MockIGateway. +type MockIGatewayMockRecorder struct { + mock *MockIGateway +} + +// NewMockIGateway creates a new mock instance. 
+func NewMockIGateway(ctrl *gomock.Controller) *MockIGateway { + mock := &MockIGateway{ctrl: ctrl} + mock.recorder = &MockIGatewayMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIGateway) EXPECT() *MockIGatewayMockRecorder { + return m.recorder +} + +// AddNewAddress mocks base method. +func (m *MockIGateway) AddNewAddress(arg0 context.Context, arg1 types.UUID, arg2 []address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddNewAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddNewAddress indicates an expected call of AddNewAddress. +func (mr *MockIGatewayMockRecorder) AddNewAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNewAddress", reflect.TypeOf((*MockIGateway)(nil).AddNewAddress), arg0, arg1, arg2) +} + +// ComputeProof mocks base method. +func (m *MockIGateway) ComputeProof(arg0 context.Context, arg1 address.Address, arg2 []proof.ExtendedSectorInfo, arg3 abi.PoStRandomness, arg4 abi.ChainEpoch, arg5 network.Version) ([]proof.PoStProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ComputeProof", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].([]proof.PoStProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ComputeProof indicates an expected call of ComputeProof. +func (mr *MockIGatewayMockRecorder) ComputeProof(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ComputeProof", reflect.TypeOf((*MockIGateway)(nil).ComputeProof), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// IsUnsealed mocks base method. 
+func (m *MockIGateway) IsUnsealed(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 storage.SectorRef, arg4 types.PaddedByteIndex, arg5 abi.PaddedPieceSize) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsUnsealed indicates an expected call of IsUnsealed. +func (mr *MockIGatewayMockRecorder) IsUnsealed(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockIGateway)(nil).IsUnsealed), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// ListConnectedMiners mocks base method. +func (m *MockIGateway) ListConnectedMiners(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListConnectedMiners", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListConnectedMiners indicates an expected call of ListConnectedMiners. +func (mr *MockIGatewayMockRecorder) ListConnectedMiners(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListConnectedMiners", reflect.TypeOf((*MockIGateway)(nil).ListConnectedMiners), arg0) +} + +// ListMarketConnectionsState mocks base method. +func (m *MockIGateway) ListMarketConnectionsState(arg0 context.Context) ([]gateway.MarketConnectionState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMarketConnectionsState", arg0) + ret0, _ := ret[0].([]gateway.MarketConnectionState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMarketConnectionsState indicates an expected call of ListMarketConnectionsState. 
+func (mr *MockIGatewayMockRecorder) ListMarketConnectionsState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMarketConnectionsState", reflect.TypeOf((*MockIGateway)(nil).ListMarketConnectionsState), arg0) +} + +// ListMinerConnection mocks base method. +func (m *MockIGateway) ListMinerConnection(arg0 context.Context, arg1 address.Address) (*gateway.MinerState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMinerConnection", arg0, arg1) + ret0, _ := ret[0].(*gateway.MinerState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMinerConnection indicates an expected call of ListMinerConnection. +func (mr *MockIGatewayMockRecorder) ListMinerConnection(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMinerConnection", reflect.TypeOf((*MockIGateway)(nil).ListMinerConnection), arg0, arg1) +} + +// ListWalletInfo mocks base method. +func (m *MockIGateway) ListWalletInfo(arg0 context.Context) ([]*gateway.WalletDetail, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListWalletInfo", arg0) + ret0, _ := ret[0].([]*gateway.WalletDetail) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListWalletInfo indicates an expected call of ListWalletInfo. +func (mr *MockIGatewayMockRecorder) ListWalletInfo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWalletInfo", reflect.TypeOf((*MockIGateway)(nil).ListWalletInfo), arg0) +} + +// ListWalletInfoByWallet mocks base method. 
+func (m *MockIGateway) ListWalletInfoByWallet(arg0 context.Context, arg1 string) (*gateway.WalletDetail, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListWalletInfoByWallet", arg0, arg1) + ret0, _ := ret[0].(*gateway.WalletDetail) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListWalletInfoByWallet indicates an expected call of ListWalletInfoByWallet. +func (mr *MockIGatewayMockRecorder) ListWalletInfoByWallet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWalletInfoByWallet", reflect.TypeOf((*MockIGateway)(nil).ListWalletInfoByWallet), arg0, arg1) +} + +// ListenMarketEvent mocks base method. +func (m *MockIGateway) ListenMarketEvent(arg0 context.Context, arg1 *gateway.MarketRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenMarketEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenMarketEvent indicates an expected call of ListenMarketEvent. +func (mr *MockIGatewayMockRecorder) ListenMarketEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenMarketEvent", reflect.TypeOf((*MockIGateway)(nil).ListenMarketEvent), arg0, arg1) +} + +// ListenProofEvent mocks base method. +func (m *MockIGateway) ListenProofEvent(arg0 context.Context, arg1 *gateway.ProofRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenProofEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenProofEvent indicates an expected call of ListenProofEvent. 
+func (mr *MockIGatewayMockRecorder) ListenProofEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenProofEvent", reflect.TypeOf((*MockIGateway)(nil).ListenProofEvent), arg0, arg1) +} + +// ListenWalletEvent mocks base method. +func (m *MockIGateway) ListenWalletEvent(arg0 context.Context, arg1 *gateway.WalletRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenWalletEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenWalletEvent indicates an expected call of ListenWalletEvent. +func (mr *MockIGatewayMockRecorder) ListenWalletEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenWalletEvent", reflect.TypeOf((*MockIGateway)(nil).ListenWalletEvent), arg0, arg1) +} + +// RemoveAddress mocks base method. +func (m *MockIGateway) RemoveAddress(arg0 context.Context, arg1 types.UUID, arg2 []address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveAddress indicates an expected call of RemoveAddress. +func (mr *MockIGatewayMockRecorder) RemoveAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAddress", reflect.TypeOf((*MockIGateway)(nil).RemoveAddress), arg0, arg1, arg2) +} + +// ResponseMarketEvent mocks base method. +func (m *MockIGateway) ResponseMarketEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseMarketEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseMarketEvent indicates an expected call of ResponseMarketEvent. 
+func (mr *MockIGatewayMockRecorder) ResponseMarketEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseMarketEvent", reflect.TypeOf((*MockIGateway)(nil).ResponseMarketEvent), arg0, arg1) +} + +// ResponseProofEvent mocks base method. +func (m *MockIGateway) ResponseProofEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseProofEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseProofEvent indicates an expected call of ResponseProofEvent. +func (mr *MockIGatewayMockRecorder) ResponseProofEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseProofEvent", reflect.TypeOf((*MockIGateway)(nil).ResponseProofEvent), arg0, arg1) +} + +// ResponseWalletEvent mocks base method. +func (m *MockIGateway) ResponseWalletEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseWalletEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseWalletEvent indicates an expected call of ResponseWalletEvent. +func (mr *MockIGatewayMockRecorder) ResponseWalletEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseWalletEvent", reflect.TypeOf((*MockIGateway)(nil).ResponseWalletEvent), arg0, arg1) +} + +// SectorsUnsealPiece mocks base method. +func (m *MockIGateway) SectorsUnsealPiece(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 storage.SectorRef, arg4 types.PaddedByteIndex, arg5 abi.PaddedPieceSize, arg6 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SectorsUnsealPiece", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(error) + return ret0 +} + +// SectorsUnsealPiece indicates an expected call of SectorsUnsealPiece. 
+func (mr *MockIGatewayMockRecorder) SectorsUnsealPiece(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SectorsUnsealPiece", reflect.TypeOf((*MockIGateway)(nil).SectorsUnsealPiece), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// SupportNewAccount mocks base method. +func (m *MockIGateway) SupportNewAccount(arg0 context.Context, arg1 types.UUID, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SupportNewAccount", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SupportNewAccount indicates an expected call of SupportNewAccount. +func (mr *MockIGatewayMockRecorder) SupportNewAccount(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportNewAccount", reflect.TypeOf((*MockIGateway)(nil).SupportNewAccount), arg0, arg1, arg2) +} + +// Version mocks base method. +func (m *MockIGateway) Version(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockIGatewayMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockIGateway)(nil).Version), arg0) +} + +// WalletHas mocks base method. +func (m *MockIGateway) WalletHas(arg0 context.Context, arg1 address.Address, arg2 []string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas. 
+func (mr *MockIGatewayMockRecorder) WalletHas(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockIGateway)(nil).WalletHas), arg0, arg1, arg2) +} + +// WalletSign mocks base method. +func (m *MockIGateway) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []string, arg3 []byte, arg4 types.MsgMeta) (*crypto.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*crypto.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSign indicates an expected call of WalletSign. +func (mr *MockIGatewayMockRecorder) WalletSign(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockIGateway)(nil).WalletSign), arg0, arg1, arg2, arg3, arg4) +} diff --git a/venus-shared/api/gateway/v2/proof_event.go b/venus-shared/api/gateway/v2/proof_event.go new file mode 100644 index 0000000000..3b60fb7199 --- /dev/null +++ b/venus-shared/api/gateway/v2/proof_event.go @@ -0,0 +1,28 @@ +package gateway + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IProofEvent interface { + IProofClient + IProofServiceProvider +} + +type IProofClient interface { + ListConnectedMiners(ctx context.Context) ([]address.Address, error) //perm:admin + ListMinerConnection(ctx context.Context, addr address.Address) (*gtypes.MinerState, error) //perm:admin + ComputeProof(ctx context.Context, miner address.Address, sectorInfos []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, height abi.ChainEpoch, nwVersion 
network.Version) ([]builtin.PoStProof, error) //perm:admin +} + +type IProofServiceProvider interface { + ResponseProofEvent(ctx context.Context, resp *gtypes.ResponseEvent) error //perm:read + ListenProofEvent(ctx context.Context, policy *gtypes.ProofRegisterPolicy) (<-chan *gtypes.RequestEvent, error) //perm:read +} diff --git a/venus-shared/api/gateway/v2/proxy_gen.go b/venus-shared/api/gateway/v2/proxy_gen.go new file mode 100644 index 0000000000..b89a4087c0 --- /dev/null +++ b/venus-shared/api/gateway/v2/proxy_gen.go @@ -0,0 +1,158 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package gateway + +import ( + "context" + + address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/specs-storage/storage" + cid "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IProofClientStruct struct { + Internal struct { + ComputeProof func(ctx context.Context, miner address.Address, sectorInfos []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, height abi.ChainEpoch, nwVersion network.Version) ([]builtin.PoStProof, error) `perm:"admin"` + ListConnectedMiners func(ctx context.Context) ([]address.Address, error) `perm:"admin"` + ListMinerConnection func(ctx context.Context, addr address.Address) (*gtypes.MinerState, error) `perm:"admin"` + } +} + +func (s *IProofClientStruct) ComputeProof(p0 context.Context, p1 address.Address, p2 []builtin.ExtendedSectorInfo, p3 abi.PoStRandomness, p4 abi.ChainEpoch, p5 network.Version) ([]builtin.PoStProof, error) { + return s.Internal.ComputeProof(p0, p1, p2, p3, p4, p5) +} +func (s *IProofClientStruct) ListConnectedMiners(p0 
context.Context) ([]address.Address, error) { + return s.Internal.ListConnectedMiners(p0) +} +func (s *IProofClientStruct) ListMinerConnection(p0 context.Context, p1 address.Address) (*gtypes.MinerState, error) { + return s.Internal.ListMinerConnection(p0, p1) +} + +type IProofServiceProviderStruct struct { + Internal struct { + ListenProofEvent func(ctx context.Context, policy *gtypes.ProofRegisterPolicy) (<-chan *gtypes.RequestEvent, error) `perm:"read"` + ResponseProofEvent func(ctx context.Context, resp *gtypes.ResponseEvent) error `perm:"read"` + } +} + +func (s *IProofServiceProviderStruct) ListenProofEvent(p0 context.Context, p1 *gtypes.ProofRegisterPolicy) (<-chan *gtypes.RequestEvent, error) { + return s.Internal.ListenProofEvent(p0, p1) +} +func (s *IProofServiceProviderStruct) ResponseProofEvent(p0 context.Context, p1 *gtypes.ResponseEvent) error { + return s.Internal.ResponseProofEvent(p0, p1) +} + +type IProofEventStruct struct { + IProofClientStruct + IProofServiceProviderStruct +} + +type IWalletClientStruct struct { + Internal struct { + ListWalletInfo func(ctx context.Context) ([]*gtypes.WalletDetail, error) `perm:"admin"` + ListWalletInfoByWallet func(ctx context.Context, wallet string) (*gtypes.WalletDetail, error) `perm:"admin"` + WalletHas func(ctx context.Context, addr address.Address, accounts []string) (bool, error) `perm:"admin"` + WalletSign func(ctx context.Context, addr address.Address, accounts []string, toSign []byte, meta types.MsgMeta) (*crypto.Signature, error) `perm:"admin"` + } +} + +func (s *IWalletClientStruct) ListWalletInfo(p0 context.Context) ([]*gtypes.WalletDetail, error) { + return s.Internal.ListWalletInfo(p0) +} +func (s *IWalletClientStruct) ListWalletInfoByWallet(p0 context.Context, p1 string) (*gtypes.WalletDetail, error) { + return s.Internal.ListWalletInfoByWallet(p0, p1) +} +func (s *IWalletClientStruct) WalletHas(p0 context.Context, p1 address.Address, p2 []string) (bool, error) { + return s.Internal.WalletHas(p0, 
p1, p2) +} +func (s *IWalletClientStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []string, p3 []byte, p4 types.MsgMeta) (*crypto.Signature, error) { + return s.Internal.WalletSign(p0, p1, p2, p3, p4) +} + +type IWalletServiceProviderStruct struct { + Internal struct { + AddNewAddress func(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error `perm:"read"` + ListenWalletEvent func(ctx context.Context, policy *gtypes.WalletRegisterPolicy) (<-chan *gtypes.RequestEvent, error) `perm:"read"` + RemoveAddress func(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error `perm:"read"` + ResponseWalletEvent func(ctx context.Context, resp *gtypes.ResponseEvent) error `perm:"read"` + SupportNewAccount func(ctx context.Context, channelID types.UUID, account string) error `perm:"read"` + } +} + +func (s *IWalletServiceProviderStruct) AddNewAddress(p0 context.Context, p1 types.UUID, p2 []address.Address) error { + return s.Internal.AddNewAddress(p0, p1, p2) +} +func (s *IWalletServiceProviderStruct) ListenWalletEvent(p0 context.Context, p1 *gtypes.WalletRegisterPolicy) (<-chan *gtypes.RequestEvent, error) { + return s.Internal.ListenWalletEvent(p0, p1) +} +func (s *IWalletServiceProviderStruct) RemoveAddress(p0 context.Context, p1 types.UUID, p2 []address.Address) error { + return s.Internal.RemoveAddress(p0, p1, p2) +} +func (s *IWalletServiceProviderStruct) ResponseWalletEvent(p0 context.Context, p1 *gtypes.ResponseEvent) error { + return s.Internal.ResponseWalletEvent(p0, p1) +} +func (s *IWalletServiceProviderStruct) SupportNewAccount(p0 context.Context, p1 types.UUID, p2 string) error { + return s.Internal.SupportNewAccount(p0, p1, p2) +} + +type IWalletEventStruct struct { + IWalletClientStruct + IWalletServiceProviderStruct +} + +type IMarketClientStruct struct { + Internal struct { + IsUnsealed func(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset 
types.PaddedByteIndex, size abi.PaddedPieceSize) (bool, error) `perm:"admin"` + ListMarketConnectionsState func(ctx context.Context) ([]gtypes.MarketConnectionState, error) `perm:"admin"` + SectorsUnsealPiece func(ctx context.Context, miner address.Address, pieceCid cid.Cid, sector storage.SectorRef, offset types.PaddedByteIndex, size abi.PaddedPieceSize, dest string) error `perm:"admin"` + } +} + +func (s *IMarketClientStruct) IsUnsealed(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 storage.SectorRef, p4 types.PaddedByteIndex, p5 abi.PaddedPieceSize) (bool, error) { + return s.Internal.IsUnsealed(p0, p1, p2, p3, p4, p5) +} +func (s *IMarketClientStruct) ListMarketConnectionsState(p0 context.Context) ([]gtypes.MarketConnectionState, error) { + return s.Internal.ListMarketConnectionsState(p0) +} +func (s *IMarketClientStruct) SectorsUnsealPiece(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 storage.SectorRef, p4 types.PaddedByteIndex, p5 abi.PaddedPieceSize, p6 string) error { + return s.Internal.SectorsUnsealPiece(p0, p1, p2, p3, p4, p5, p6) +} + +type IMarketServiceProviderStruct struct { + Internal struct { + ListenMarketEvent func(ctx context.Context, policy *gtypes.MarketRegisterPolicy) (<-chan *gtypes.RequestEvent, error) `perm:"read"` + ResponseMarketEvent func(ctx context.Context, resp *gtypes.ResponseEvent) error `perm:"read"` + } +} + +func (s *IMarketServiceProviderStruct) ListenMarketEvent(p0 context.Context, p1 *gtypes.MarketRegisterPolicy) (<-chan *gtypes.RequestEvent, error) { + return s.Internal.ListenMarketEvent(p0, p1) +} +func (s *IMarketServiceProviderStruct) ResponseMarketEvent(p0 context.Context, p1 *gtypes.ResponseEvent) error { + return s.Internal.ResponseMarketEvent(p0, p1) +} + +type IMarketEventStruct struct { + IMarketClientStruct + IMarketServiceProviderStruct +} + +type IGatewayStruct struct { + IProofEventStruct + IWalletEventStruct + IMarketEventStruct + + Internal struct { + Version func(ctx context.Context) 
(types.Version, error) `perm:"read"` + } +} + +func (s *IGatewayStruct) Version(p0 context.Context) (types.Version, error) { + return s.Internal.Version(p0) +} diff --git a/venus-shared/api/gateway/v2/wallet_event.go b/venus-shared/api/gateway/v2/wallet_event.go new file mode 100644 index 0000000000..4e4a19428e --- /dev/null +++ b/venus-shared/api/gateway/v2/wallet_event.go @@ -0,0 +1,31 @@ +package gateway + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/venus/venus-shared/types" + gtypes "github.com/filecoin-project/venus/venus-shared/types/gateway" +) + +type IWalletEvent interface { + IWalletClient + IWalletServiceProvider +} + +type IWalletClient interface { + ListWalletInfo(ctx context.Context) ([]*gtypes.WalletDetail, error) //perm:admin + ListWalletInfoByWallet(ctx context.Context, wallet string) (*gtypes.WalletDetail, error) //perm:admin + WalletHas(ctx context.Context, addr address.Address, accounts []string) (bool, error) //perm:admin + WalletSign(ctx context.Context, addr address.Address, accounts []string, toSign []byte, meta types.MsgMeta) (*crypto.Signature, error) //perm:admin +} + +type IWalletServiceProvider interface { + ResponseWalletEvent(ctx context.Context, resp *gtypes.ResponseEvent) error //perm:read + ListenWalletEvent(ctx context.Context, policy *gtypes.WalletRegisterPolicy) (<-chan *gtypes.RequestEvent, error) //perm:read + SupportNewAccount(ctx context.Context, channelID types.UUID, account string) error //perm:read + AddNewAddress(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error //perm:read + RemoveAddress(ctx context.Context, channelID types.UUID, newAddrs []address.Address) error //perm:read +} diff --git a/venus-shared/api/market/api.go b/venus-shared/api/market/api.go new file mode 100644 index 0000000000..0c0f2439e8 --- /dev/null +++ b/venus-shared/api/market/api.go @@ -0,0 +1,184 @@ +package 
market + +import ( + "context" + "time" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/venus/venus-shared/api" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/gateway" + "github.com/filecoin-project/venus/venus-shared/types/market" +) + +type IMarket interface { + ActorList(context.Context) ([]market.User, error) //perm:read + ActorExist(ctx context.Context, addr address.Address) (bool, error) //perm:read + ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read + + MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write + MarketImportPublishedDeal(ctx context.Context, deal market.MinerDeal) error //perm:write + MarketListDeals(ctx context.Context, addrs []address.Address) ([]*types.MarketDeal, error) //perm:read + MarketListRetrievalDeals(ctx context.Context) ([]market.ProviderDealState, error) //perm:read + MarketGetDealUpdates(ctx context.Context) (<-chan market.MinerDeal, error) //perm:read + MarketListIncompleteDeals(ctx context.Context, mAddr address.Address) ([]market.MinerDeal, error) //perm:read + MarketSetAsk(ctx context.Context, mAddr address.Address, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin + MarketGetAsk(ctx context.Context, mAddr address.Address) (*market.SignedStorageAsk, error) //perm:read + MarketListAsk(ctx context.Context) 
([]*market.SignedStorageAsk, error) //perm:read + MarketSetRetrievalAsk(ctx context.Context, mAddr address.Address, rask *retrievalmarket.Ask) error //perm:admin + MarketGetRetrievalAsk(ctx context.Context, mAddr address.Address) (*retrievalmarket.Ask, error) //perm:read + MarketListRetrievalAsk(ctx context.Context) ([]*market.RetrievalAsk, error) //perm:read + MarketListDataTransfers(ctx context.Context) ([]market.DataTransferChannel, error) //perm:write + MarketDataTransferUpdates(ctx context.Context) (<-chan market.DataTransferChannel, error) //perm:write + // MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write + // MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write + MarketPendingDeals(ctx context.Context) ([]market.PendingDealInfo, error) //perm:write + MarketPublishPendingDeals(ctx context.Context) error //perm:admin + + PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read + PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read + PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read + PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) //perm:read + + DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin + OfflineDealImport(ctx context.Context, deal market.MinerDeal) error //perm:admin + + DealsConsiderOnlineStorageDeals(context.Context, address.Address) (bool, error) //perm:read + DealsSetConsiderOnlineStorageDeals(context.Context, address.Address, bool) error //perm:write + DealsConsiderOnlineRetrievalDeals(context.Context, address.Address) 
(bool, error) //perm:read + DealsSetConsiderOnlineRetrievalDeals(context.Context, address.Address, bool) error //perm:write + DealsPieceCidBlocklist(context.Context, address.Address) ([]cid.Cid, error) //perm:read + DealsSetPieceCidBlocklist(context.Context, address.Address, []cid.Cid) error //perm:write + DealsConsiderOfflineStorageDeals(context.Context, address.Address) (bool, error) //perm:read + DealsSetConsiderOfflineStorageDeals(context.Context, address.Address, bool) error //perm:write + DealsConsiderOfflineRetrievalDeals(context.Context, address.Address) (bool, error) //perm:read + DealsSetConsiderOfflineRetrievalDeals(context.Context, address.Address, bool) error //perm:write + DealsConsiderVerifiedStorageDeals(context.Context, address.Address) (bool, error) //perm:read + DealsSetConsiderVerifiedStorageDeals(context.Context, address.Address, bool) error //perm:write + DealsConsiderUnverifiedStorageDeals(context.Context, address.Address) (bool, error) //perm:read + DealsSetConsiderUnverifiedStorageDeals(context.Context, address.Address, bool) error //perm:write + // SectorGetExpectedSealDuration gets the time that a newly-created sector + // waits for more deals before it starts sealing + SectorGetExpectedSealDuration(context.Context, address.Address) (time.Duration, error) //perm:read + // SectorSetExpectedSealDuration sets the expected time for a sector to seal + SectorSetExpectedSealDuration(context.Context, address.Address, time.Duration) error //perm:write + DealsMaxStartDelay(context.Context, address.Address) (time.Duration, error) //perm:read + DealsSetMaxStartDelay(context.Context, address.Address, time.Duration) error //perm:write + MarketDataTransferPath(context.Context, address.Address) (string, error) //perm:admin + MarketSetDataTransferPath(context.Context, address.Address, string) error //perm:admin + DealsPublishMsgPeriod(context.Context, address.Address) (time.Duration, error) //perm:read + DealsSetPublishMsgPeriod(context.Context, 
address.Address, time.Duration) error //perm:write + MarketMaxDealsPerPublishMsg(context.Context, address.Address) (uint64, error) //perm:read + MarketSetMaxDealsPerPublishMsg(context.Context, address.Address, uint64) error //perm:write + DealsMaxProviderCollateralMultiplier(context.Context, address.Address) (uint64, error) //perm:read + DealsSetMaxProviderCollateralMultiplier(context.Context, address.Address, uint64) error //perm:write + DealsMaxPublishFee(context.Context, address.Address) (types.FIL, error) //perm:read + DealsSetMaxPublishFee(context.Context, address.Address, types.FIL) error //perm:write + MarketMaxBalanceAddFee(context.Context, address.Address) (types.FIL, error) //perm:read + MarketSetMaxBalanceAddFee(context.Context, address.Address, types.FIL) error //perm:write + + // messager + MessagerWaitMessage(ctx context.Context, mid cid.Cid) (*types.MsgLookup, error) //perm:read + MessagerPushMessage(ctx context.Context, msg *types.Message, meta *types.MessageSendSpec) (cid.Cid, error) //perm:write + MessagerGetMessage(ctx context.Context, mid cid.Cid) (*types.Message, error) //perm:read + + MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign + MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign + MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign + MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign + MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign + + NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read + ID(context.Context) (peer.ID, error) //perm:read + + // DagstoreListShards returns information about all shards known to the + // DAG store. Only available on nodes running the markets subsystem. 
+ DagstoreListShards(ctx context.Context) ([]market.DagstoreShardInfo, error) //perm:admin + + // DagstoreInitializeShard initializes an uninitialized shard. + // + // Initialization consists of fetching the shard's data (deal payload) from + // the storage subsystem, generating an index, and persisting the index + // to facilitate later retrievals, and/or to publish to external sources. + // + // This operation is intended to complement the initial migration. The + // migration registers a shard for every unique piece CID, with lazy + // initialization. Thus, shards are not initialized immediately to avoid + // IO activity competing with proving. Instead, shard are initialized + // when first accessed. This method forces the initialization of a shard by + // accessing it and immediately releasing it. This is useful to warm up the + // cache to facilitate subsequent retrievals, and to generate the indexes + // to publish them externally. + // + // This operation fails if the shard is not in ShardStateNew state. + // It blocks until initialization finishes. + DagstoreInitializeShard(ctx context.Context, key string) error //perm:admin + + // DagstoreRecoverShard attempts to recover a failed shard. + // + // This operation fails if the shard is not in ShardStateErrored state. + // It blocks until recovery finishes. If recovery failed, it returns the + // error. + DagstoreRecoverShard(ctx context.Context, key string) error //perm:admin + + // DagstoreInitializeAll initializes all uninitialized shards in bulk, + // according to the policy passed in the parameters. + // + // It is recommended to set a maximum concurrency to avoid extreme + // IO pressure if the storage subsystem has a large amount of deals. + // + // It returns a stream of events to report progress. 
+ DagstoreInitializeAll(ctx context.Context, params market.DagstoreInitializeAllParams) (<-chan market.DagstoreInitializeAllEvent, error) //perm:admin + + // DagstoreInitializeStorage initializes all pieces in specify storage + DagstoreInitializeStorage(context.Context, string, market.DagstoreInitializeAllParams) (<-chan market.DagstoreInitializeAllEvent, error) //perm:admin + + // DagstoreGC runs garbage collection on the DAG store. + DagstoreGC(ctx context.Context) ([]market.DagstoreShardResult, error) //perm:admin + + MarkDealsAsPacking(ctx context.Context, miner address.Address, deals []abi.DealID) error //perm:write + UpdateDealOnPacking(ctx context.Context, miner address.Address, dealID abi.DealID, sectorid abi.SectorNumber, offset abi.PaddedPieceSize) error //perm:write + UpdateDealStatus(ctx context.Context, miner address.Address, dealID abi.DealID, pieceStatus market.PieceStatus) error //perm:write + GetDeals(ctx context.Context, miner address.Address, pageIndex, pageSize int) ([]*market.DealInfo, error) //perm:read + AssignUnPackedDeals(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, spec *market.GetDealSpec) ([]*market.DealInfoIncludePath, error) //perm:write + GetUnPackedDeals(ctx context.Context, miner address.Address, spec *market.GetDealSpec) ([]*market.DealInfoIncludePath, error) //perm:read + UpdateStorageDealStatus(ctx context.Context, dealProposalCid cid.Cid, state storagemarket.StorageDealStatus, pieceState market.PieceStatus) error //perm:write + // market event + ResponseMarketEvent(ctx context.Context, resp *gateway.ResponseEvent) error //perm:read + ListenMarketEvent(ctx context.Context, policy *gateway.MarketRegisterPolicy) (<-chan *gateway.RequestEvent, error) //perm:read + + // Paych + PaychVoucherList(ctx context.Context, pch address.Address) ([]*paych.SignedVoucher, error) //perm:read + + ImportV1Data(ctx context.Context, src string) error //perm:write + + AddFsPieceStorage(ctx context.Context, name string, path string, 
readonly bool) error //perm:admin + + AddS3PieceStorage(ctx context.Context, name, endpoit, bucket, subdir, accessKey, secretKey, token string, readonly bool) error //perm:admin + + RemovePieceStorage(ctx context.Context, name string) error //perm:admin + + ListPieceStorageInfos(ctx context.Context) market.PieceStorageInfos //perm:read + + // GetStorageDealStatistic get storage deal statistic information + // if set miner address to address.Undef, return all storage deal info + GetStorageDealStatistic(ctx context.Context, miner address.Address) (*market.StorageDealStatistic, error) //perm:read + + // GetRetrievalDealStatistic get retrieval deal statistic information + // todo address undefined is invalid, it is currently not possible to directly associate an order with a miner + GetRetrievalDealStatistic(ctx context.Context, miner address.Address) (*market.RetrievalDealStatistic, error) //perm:read + + api.Version +} diff --git a/venus-shared/api/market/client/api.go b/venus-shared/api/market/client/api.go new file mode 100644 index 0000000000..d638314945 --- /dev/null +++ b/venus-shared/api/market/client/api.go @@ -0,0 +1,93 @@ +package client + +import ( + "context" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/venus-shared/api" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/market" + "github.com/filecoin-project/venus/venus-shared/types/market/client" +) + +type IMarketClient interface { + // ClientImport imports file under the specified path into filestore. 
+ ClientImport(ctx context.Context, ref client.FileRef) (*client.ImportRes, error) //perm:admin + // ClientRemoveImport removes file import + ClientRemoveImport(ctx context.Context, importID client.ImportID) error //perm:admin + // ClientStartDeal proposes a deal with a miner. + ClientStartDeal(ctx context.Context, params *client.StartDealParams) (*cid.Cid, error) //perm:admin + // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. + ClientStatelessDeal(ctx context.Context, params *client.StartDealParams) (*cid.Cid, error) //perm:write + // ClientGetDealInfo returns the latest information about a given deal. + ClientGetDealInfo(context.Context, cid.Cid) (*client.DealInfo, error) //perm:read + // ClientListDeals returns information about the deals made by the local client. + ClientListDeals(ctx context.Context) ([]client.DealInfo, error) //perm:write + // ClientGetDealUpdates returns the status of updated deals + ClientGetDealUpdates(ctx context.Context) (<-chan client.DealInfo, error) //perm:write + // ClientGetDealStatus returns status given a code + ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read + // ClientHasLocal indicates whether a certain CID is locally stored. + ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write + // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). + ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]client.QueryOffer, error) //perm:read + // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. + ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (client.QueryOffer, error) //perm:read + // ClientRetrieve initiates the retrieval of a file, as specified in the order. 
+ ClientRetrieve(ctx context.Context, params client.RetrievalOrder) (*client.RestrievalRes, error) //perm:admin + // ClientRetrieveWait waits for retrieval to be complete + ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin + // ClientExport exports a file stored in the local filestore to a system file + ClientExport(ctx context.Context, exportRef client.ExportRef, fileRef client.FileRef) error //perm:admin + ClientListRetrievals(ctx context.Context) ([]client.RetrievalInfo, error) //perm:write + // ClientGetRetrievalUpdates returns status of updated retrieval deals + ClientGetRetrievalUpdates(ctx context.Context) (<-chan client.RetrievalInfo, error) //perm:write + // ClientQueryAsk returns a signed StorageAsk from the specified miner. + ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read + // ClientCalcCommP calculates the CommP and data size of the specified CID + ClientDealPieceCID(ctx context.Context, root cid.Cid) (client.DataCIDSize, error) //perm:read + // ClientCalcCommP calculates the CommP for a specified file + ClientCalcCommP(ctx context.Context, inpath string) (*client.CommPRet, error) //perm:write + // ClientGenCar generates a CAR file for the specified file. 
+ ClientGenCar(ctx context.Context, ref client.FileRef, outpath string) error //perm:write + // ClientDealSize calculates real deal data size + ClientDealSize(ctx context.Context, root cid.Cid) (client.DataSize, error) //perm:read + // ClientListTransfers returns the status of all ongoing transfers of data + ClientListDataTransfers(ctx context.Context) ([]market.DataTransferChannel, error) //perm:write + ClientDataTransferUpdates(ctx context.Context) (<-chan market.DataTransferChannel, error) //perm:write + // ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write + // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write + // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel + // which are stuck due to insufficient funds + ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write + + // ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID + ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write + + // ClientUnimport removes references to the specified file from filestore + // ClientUnimport(path string) + + // ClientListImports lists imported files and their root CIDs + ClientListImports(ctx context.Context) ([]client.Import, error) //perm:write + DefaultAddress(ctx context.Context) (address.Address, error) //perm:read + + MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:write + MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) 
//perm:read + MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:write + MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:write + MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:write + + MessagerWaitMessage(ctx context.Context, mid cid.Cid) (*types.MsgLookup, error) //perm:read + MessagerPushMessage(ctx context.Context, msg *types.Message, meta *types.MessageSendSpec) (cid.Cid, error) //perm:write + MessagerGetMessage(ctx context.Context, mid cid.Cid) (*types.Message, error) //perm:read + + api.Version +} diff --git a/venus-shared/api/market/client/client_gen.go b/venus-shared/api/market/client/client_gen.go new file mode 100644 index 0000000000..58b8b06eb3 --- /dev/null +++ b/venus-shared/api/market/client/client_gen.go @@ -0,0 +1,54 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package client + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = 0 +const APINamespace = "client.IMarketClient" +const MethodNamespace = "VENUS_MARKET_CLIENT" + +// NewIMarketClientRPC creates a new httpparse jsonrpc remotecli. +func NewIMarketClientRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (IMarketClient, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res IMarketClientStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) 
+ + return &res, closer, err +} + +// DialIMarketClientRPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. +func DialIMarketClientRPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) (IMarketClient, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res IMarketClientStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} diff --git a/venus-shared/api/market/client/method.md b/venus-shared/api/market/client/method.md new file mode 100644 index 0000000000..df16d9c3b5 --- /dev/null +++ b/venus-shared/api/market/client/method.md @@ -0,0 +1,1266 @@ +# Groups + +* [MarketClient](#marketclient) + * [ClientCalcCommP](#clientcalccommp) + * [ClientCancelDataTransfer](#clientcanceldatatransfer) + * [ClientCancelRetrievalDeal](#clientcancelretrievaldeal) + * [ClientDataTransferUpdates](#clientdatatransferupdates) + * [ClientDealPieceCID](#clientdealpiececid) + * [ClientDealSize](#clientdealsize) + * [ClientExport](#clientexport) + * [ClientFindData](#clientfinddata) + * [ClientGenCar](#clientgencar) + * [ClientGetDealInfo](#clientgetdealinfo) + * [ClientGetDealStatus](#clientgetdealstatus) + * [ClientGetDealUpdates](#clientgetdealupdates) + * [ClientGetRetrievalUpdates](#clientgetretrievalupdates) + * [ClientHasLocal](#clienthaslocal) + * [ClientImport](#clientimport) + * [ClientListDataTransfers](#clientlistdatatransfers) + * [ClientListDeals](#clientlistdeals) + * [ClientListImports](#clientlistimports) + * 
[ClientListRetrievals](#clientlistretrievals) + * [ClientMinerQueryOffer](#clientminerqueryoffer) + * [ClientQueryAsk](#clientqueryask) + * [ClientRemoveImport](#clientremoveimport) + * [ClientRestartDataTransfer](#clientrestartdatatransfer) + * [ClientRetrieve](#clientretrieve) + * [ClientRetrieveTryRestartInsufficientFunds](#clientretrievetryrestartinsufficientfunds) + * [ClientRetrieveWait](#clientretrievewait) + * [ClientStartDeal](#clientstartdeal) + * [ClientStatelessDeal](#clientstatelessdeal) + * [DefaultAddress](#defaultaddress) + * [MarketAddBalance](#marketaddbalance) + * [MarketGetReserved](#marketgetreserved) + * [MarketReleaseFunds](#marketreleasefunds) + * [MarketReserveFunds](#marketreservefunds) + * [MarketWithdraw](#marketwithdraw) + * [MessagerGetMessage](#messagergetmessage) + * [MessagerPushMessage](#messagerpushmessage) + * [MessagerWaitMessage](#messagerwaitmessage) + * [Version](#version) + +## MarketClient + +### ClientCalcCommP +ClientCalcCommP calculates the CommP for a specified file + + +Perms: write + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 1024 +} +``` + +### ClientCancelDataTransfer +ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + +### ClientCancelRetrievalDeal +ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID + + +Perms: write + +Inputs: +```json +[ + 5 +] +``` + +Response: `{}` + +### ClientDataTransferUpdates + + +Perms: write + +Inputs: `[]` + +Response: +```json +{ + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + 
"OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } +} +``` + +### ClientDealPieceCID +ClientCalcCommP calculates the CommP and data size of the specified CID + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "PayloadSize": 9, + "PieceSize": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + +### ClientDealSize +ClientDealSize calculates real deal data size + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "PayloadSize": 9, + "PieceSize": 1032 +} +``` + +### ClientExport +ClientExport exports a file stored in the local filestore to a system file + + +Perms: admin + +Inputs: +```json +[ + { + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DAGs": [ + { + "DataSelector": "/ipld/a/b/c", + "ExportMerkleProof": true + } + ], + "FromLocalCAR": "string value", + "DealID": 5 + }, + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: `{}` + +### ClientFindData +ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). 
+ + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + null +] +``` + +Response: +```json +[ + { + "Err": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "MinPrice": "0", + "UnsealPrice": "0", + "PricePerByte": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } + } +] +``` + +### ClientGenCar +ClientGenCar generates a CAR file for the specified file. + + +Perms: write + +Inputs: +```json +[ + { + "Path": "string value", + "IsCAR": true + }, + "string value" +] +``` + +Response: `{}` + +### ClientGetDealInfo +ClientGetDealInfo returns the latest information about a given deal. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "DealStages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + }, + "Provider": "f01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "Verified": true, + "TransferChannelID": { + 
"Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +} +``` + +### ClientGetDealStatus +ClientGetDealStatus returns status given a code + + +Perms: read + +Inputs: +```json +[ + 42 +] +``` + +Response: `"string value"` + +### ClientGetDealUpdates +ClientGetDealUpdates returns the status of updated deals + + +Perms: write + +Inputs: `[]` + +Response: +```json +{ + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "DealStages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + }, + "Provider": "f01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "Verified": true, 
+ "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +} +``` + +### ClientGetRetrievalUpdates +ClientGetRetrievalUpdates returns status of updated retrieval deals + + +Perms: write + +Inputs: `[]` + +Response: +```json +{ + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + "Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": 
"0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Event": 5 +} +``` + +### ClientHasLocal +ClientHasLocal indicates whether a certain CID is locally stored. + + +Perms: write + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `true` + +### ClientImport +ClientImport imports file under the specified path into filestore. + + +Perms: admin + +Inputs: +```json +[ + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: +```json +{ + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ImportID": 1234 +} +``` + +### ClientListDataTransfers +ClientListTransfers returns the status of all ongoing transfers of data + + +Perms: write + +Inputs: `[]` + +Response: +```json +[ + { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +] +``` + +### ClientListDeals +ClientListDeals returns information about the deals made by the local client. 
+ + +Perms: write + +Inputs: `[]` + +Response: +```json +[ + { + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "DealStages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + }, + "Provider": "f01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "Verified": true, + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } + } +] +``` + +### ClientListImports +ClientUnimport removes references to the specified file from filestore +ClientUnimport(path string) + + +Perms: write + +Inputs: `[]` + +Response: +```json +[ + { + "Key": 1234, + 
"Err": "string value", + "Root": null, + "Source": "string value", + "FilePath": "string value", + "CARPath": "string value" + } +] +``` + +### ClientListRetrievals + + +Perms: write + +Inputs: `[]` + +Response: +```json +[ + { + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + "Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Event": 5 + } +] +``` + +### ClientMinerQueryOffer +ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. 
+ + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + null +] +``` + +Response: +```json +{ + "Err": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "MinPrice": "0", + "UnsealPrice": "0", + "PricePerByte": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } +} +``` + +### ClientQueryAsk +ClientQueryAsk returns a signed StorageAsk from the specified miner. + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "f01234" +] +``` + +Response: +```json +{ + "Price": "0", + "VerifiedPrice": "0", + "MinPieceSize": 1032, + "MaxPieceSize": 1032, + "Miner": "f01234", + "Timestamp": 10101, + "Expiry": 10101, + "SeqNo": 42 +} +``` + +### ClientRemoveImport +ClientRemoveImport removes file import + + +Perms: admin + +Inputs: +```json +[ + 1234 +] +``` + +Response: `{}` + +### ClientRestartDataTransfer +ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + +### ClientRetrieve +ClientRetrieve initiates the retrieval of a file, as specified in the order. 
+ + +Perms: admin + +Inputs: +```json +[ + { + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "DataSelector": "/ipld/a/b/c", + "Size": 42, + "Total": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Client": "f01234", + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + }, + "RemoteStore": "102334ec-35a3-4b36-be9f-02883844503a" + } +] +``` + +Response: +```json +{ + "DealID": 5 +} +``` + +### ClientRetrieveTryRestartInsufficientFunds +ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel +which are stuck due to insufficient funds + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### ClientRetrieveWait +ClientRetrieveWait waits for retrieval to be complete + + +Perms: admin + +Inputs: +```json +[ + 5 +] +``` + +Response: `{}` + +### ClientStartDeal +ClientStartDeal proposes a deal with a miner. + + +Perms: admin + +Inputs: +```json +[ + { + "Data": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "Wallet": "f01234", + "Miner": "f01234", + "EpochPrice": "0", + "MinBlocksDuration": 42, + "ProviderCollateral": "0", + "DealStartEpoch": 10101, + "FastRetrieval": true, + "VerifiedDeal": true + } +] +``` + +Response: `null` + +### ClientStatelessDeal +ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. 
+ + +Perms: write + +Inputs: +```json +[ + { + "Data": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "Wallet": "f01234", + "Miner": "f01234", + "EpochPrice": "0", + "MinBlocksDuration": 42, + "ProviderCollateral": "0", + "DealStartEpoch": 10101, + "FastRetrieval": true, + "VerifiedDeal": true + } +] +``` + +Response: `null` + +### DefaultAddress + + +Perms: read + +Inputs: `[]` + +Response: `"f01234"` + +### MarketAddBalance + + +Perms: write + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MarketGetReserved + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0"` + +### MarketReleaseFunds + + +Perms: write + +Inputs: +```json +[ + "f01234", + "0" +] +``` + +Response: `{}` + +### MarketReserveFunds + + +Perms: write + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MarketWithdraw + + +Perms: write + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MessagerGetMessage + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" +} +``` + +### MessagerPushMessage + + +Perms: write + +Inputs: +```json +[ + { + "CID": { + "/": 
"bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MessagerWaitMessage + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### Version +Version provides information about API provider + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131840 +} +``` + diff --git a/venus-shared/api/market/client/mock/mock_imarketclient.go b/venus-shared/api/market/client/mock/mock_imarketclient.go new file mode 100644 index 0000000000..345d04bde5 --- /dev/null +++ b/venus-shared/api/market/client/mock/mock_imarketclient.go @@ -0,0 +1,607 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/venus/venus-shared/api/market/client (interfaces: IMarketClient) + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" + big "github.com/filecoin-project/go-state-types/big" + internal "github.com/filecoin-project/venus/venus-shared/internal" + types "github.com/filecoin-project/venus/venus-shared/types" + market "github.com/filecoin-project/venus/venus-shared/types/market" + client "github.com/filecoin-project/venus/venus-shared/types/market/client" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// MockIMarketClient is a mock of IMarketClient interface. +type MockIMarketClient struct { + ctrl *gomock.Controller + recorder *MockIMarketClientMockRecorder +} + +// MockIMarketClientMockRecorder is the mock recorder for MockIMarketClient. +type MockIMarketClientMockRecorder struct { + mock *MockIMarketClient +} + +// NewMockIMarketClient creates a new mock instance. +func NewMockIMarketClient(ctrl *gomock.Controller) *MockIMarketClient { + mock := &MockIMarketClient{ctrl: ctrl} + mock.recorder = &MockIMarketClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIMarketClient) EXPECT() *MockIMarketClientMockRecorder { + return m.recorder +} + +// ClientCalcCommP mocks base method. +func (m *MockIMarketClient) ClientCalcCommP(arg0 context.Context, arg1 string) (*client.CommPRet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1) + ret0, _ := ret[0].(*client.CommPRet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientCalcCommP indicates an expected call of ClientCalcCommP. 
+func (mr *MockIMarketClientMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockIMarketClient)(nil).ClientCalcCommP), arg0, arg1) +} + +// ClientCancelDataTransfer mocks base method. +func (m *MockIMarketClient) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer. +func (mr *MockIMarketClientMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockIMarketClient)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3) +} + +// ClientCancelRetrievalDeal mocks base method. +func (m *MockIMarketClient) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal. +func (mr *MockIMarketClientMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockIMarketClient)(nil).ClientCancelRetrievalDeal), arg0, arg1) +} + +// ClientDataTransferUpdates mocks base method. 
+func (m *MockIMarketClient) ClientDataTransferUpdates(arg0 context.Context) (<-chan market.DataTransferChannel, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0) + ret0, _ := ret[0].(<-chan market.DataTransferChannel) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates. +func (mr *MockIMarketClientMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockIMarketClient)(nil).ClientDataTransferUpdates), arg0) +} + +// ClientDealPieceCID mocks base method. +func (m *MockIMarketClient) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (client.DataCIDSize, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1) + ret0, _ := ret[0].(client.DataCIDSize) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientDealPieceCID indicates an expected call of ClientDealPieceCID. +func (mr *MockIMarketClientMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockIMarketClient)(nil).ClientDealPieceCID), arg0, arg1) +} + +// ClientDealSize mocks base method. +func (m *MockIMarketClient) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (client.DataSize, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1) + ret0, _ := ret[0].(client.DataSize) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientDealSize indicates an expected call of ClientDealSize. 
+func (mr *MockIMarketClientMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockIMarketClient)(nil).ClientDealSize), arg0, arg1) +} + +// ClientExport mocks base method. +func (m *MockIMarketClient) ClientExport(arg0 context.Context, arg1 client.ExportRef, arg2 client.FileRef) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientExport", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientExport indicates an expected call of ClientExport. +func (mr *MockIMarketClientMockRecorder) ClientExport(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientExport", reflect.TypeOf((*MockIMarketClient)(nil).ClientExport), arg0, arg1, arg2) +} + +// ClientFindData mocks base method. +func (m *MockIMarketClient) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]client.QueryOffer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2) + ret0, _ := ret[0].([]client.QueryOffer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientFindData indicates an expected call of ClientFindData. +func (mr *MockIMarketClientMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockIMarketClient)(nil).ClientFindData), arg0, arg1, arg2) +} + +// ClientGenCar mocks base method. +func (m *MockIMarketClient) ClientGenCar(arg0 context.Context, arg1 client.FileRef, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientGenCar indicates an expected call of ClientGenCar. 
+func (mr *MockIMarketClientMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockIMarketClient)(nil).ClientGenCar), arg0, arg1, arg2) +} + +// ClientGetDealInfo mocks base method. +func (m *MockIMarketClient) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*client.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1) + ret0, _ := ret[0].(*client.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetDealInfo indicates an expected call of ClientGetDealInfo. +func (mr *MockIMarketClientMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockIMarketClient)(nil).ClientGetDealInfo), arg0, arg1) +} + +// ClientGetDealStatus mocks base method. +func (m *MockIMarketClient) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetDealStatus indicates an expected call of ClientGetDealStatus. +func (mr *MockIMarketClientMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockIMarketClient)(nil).ClientGetDealStatus), arg0, arg1) +} + +// ClientGetDealUpdates mocks base method. 
+func (m *MockIMarketClient) ClientGetDealUpdates(arg0 context.Context) (<-chan client.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0) + ret0, _ := ret[0].(<-chan client.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates. +func (mr *MockIMarketClientMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockIMarketClient)(nil).ClientGetDealUpdates), arg0) +} + +// ClientGetRetrievalUpdates mocks base method. +func (m *MockIMarketClient) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan client.RetrievalInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0) + ret0, _ := ret[0].(<-chan client.RetrievalInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates. +func (mr *MockIMarketClientMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockIMarketClient)(nil).ClientGetRetrievalUpdates), arg0) +} + +// ClientHasLocal mocks base method. +func (m *MockIMarketClient) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientHasLocal indicates an expected call of ClientHasLocal. 
+func (mr *MockIMarketClientMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockIMarketClient)(nil).ClientHasLocal), arg0, arg1) +} + +// ClientImport mocks base method. +func (m *MockIMarketClient) ClientImport(arg0 context.Context, arg1 client.FileRef) (*client.ImportRes, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientImport", arg0, arg1) + ret0, _ := ret[0].(*client.ImportRes) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientImport indicates an expected call of ClientImport. +func (mr *MockIMarketClientMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockIMarketClient)(nil).ClientImport), arg0, arg1) +} + +// ClientListDataTransfers mocks base method. +func (m *MockIMarketClient) ClientListDataTransfers(arg0 context.Context) ([]market.DataTransferChannel, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0) + ret0, _ := ret[0].([]market.DataTransferChannel) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListDataTransfers indicates an expected call of ClientListDataTransfers. +func (mr *MockIMarketClientMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockIMarketClient)(nil).ClientListDataTransfers), arg0) +} + +// ClientListDeals mocks base method. +func (m *MockIMarketClient) ClientListDeals(arg0 context.Context) ([]client.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListDeals", arg0) + ret0, _ := ret[0].([]client.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListDeals indicates an expected call of ClientListDeals. 
+func (mr *MockIMarketClientMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockIMarketClient)(nil).ClientListDeals), arg0) +} + +// ClientListImports mocks base method. +func (m *MockIMarketClient) ClientListImports(arg0 context.Context) ([]client.Import, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListImports", arg0) + ret0, _ := ret[0].([]client.Import) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListImports indicates an expected call of ClientListImports. +func (mr *MockIMarketClientMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockIMarketClient)(nil).ClientListImports), arg0) +} + +// ClientListRetrievals mocks base method. +func (m *MockIMarketClient) ClientListRetrievals(arg0 context.Context) ([]client.RetrievalInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListRetrievals", arg0) + ret0, _ := ret[0].([]client.RetrievalInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListRetrievals indicates an expected call of ClientListRetrievals. +func (mr *MockIMarketClientMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockIMarketClient)(nil).ClientListRetrievals), arg0) +} + +// ClientMinerQueryOffer mocks base method. 
+func (m *MockIMarketClient) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (client.QueryOffer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(client.QueryOffer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer. +func (mr *MockIMarketClientMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockIMarketClient)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3) +} + +// ClientQueryAsk mocks base method. +func (m *MockIMarketClient) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2) + ret0, _ := ret[0].(*storagemarket.StorageAsk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientQueryAsk indicates an expected call of ClientQueryAsk. +func (mr *MockIMarketClientMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockIMarketClient)(nil).ClientQueryAsk), arg0, arg1, arg2) +} + +// ClientRemoveImport mocks base method. +func (m *MockIMarketClient) ClientRemoveImport(arg0 context.Context, arg1 client.ImportID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRemoveImport indicates an expected call of ClientRemoveImport. 
+func (mr *MockIMarketClientMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockIMarketClient)(nil).ClientRemoveImport), arg0, arg1) +} + +// ClientRestartDataTransfer mocks base method. +func (m *MockIMarketClient) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer. +func (mr *MockIMarketClientMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockIMarketClient)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3) +} + +// ClientRetrieve mocks base method. +func (m *MockIMarketClient) ClientRetrieve(arg0 context.Context, arg1 client.RetrievalOrder) (*client.RestrievalRes, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1) + ret0, _ := ret[0].(*client.RestrievalRes) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientRetrieve indicates an expected call of ClientRetrieve. +func (mr *MockIMarketClientMockRecorder) ClientRetrieve(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockIMarketClient)(nil).ClientRetrieve), arg0, arg1) +} + +// ClientRetrieveTryRestartInsufficientFunds mocks base method. 
+func (m *MockIMarketClient) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds. +func (mr *MockIMarketClientMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockIMarketClient)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) +} + +// ClientRetrieveWait mocks base method. +func (m *MockIMarketClient) ClientRetrieveWait(arg0 context.Context, arg1 retrievalmarket.DealID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRetrieveWait", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRetrieveWait indicates an expected call of ClientRetrieveWait. +func (mr *MockIMarketClientMockRecorder) ClientRetrieveWait(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWait", reflect.TypeOf((*MockIMarketClient)(nil).ClientRetrieveWait), arg0, arg1) +} + +// ClientStartDeal mocks base method. +func (m *MockIMarketClient) ClientStartDeal(arg0 context.Context, arg1 *client.StartDealParams) (*cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1) + ret0, _ := ret[0].(*cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientStartDeal indicates an expected call of ClientStartDeal. 
+func (mr *MockIMarketClientMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockIMarketClient)(nil).ClientStartDeal), arg0, arg1) +} + +// ClientStatelessDeal mocks base method. +func (m *MockIMarketClient) ClientStatelessDeal(arg0 context.Context, arg1 *client.StartDealParams) (*cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1) + ret0, _ := ret[0].(*cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientStatelessDeal indicates an expected call of ClientStatelessDeal. +func (mr *MockIMarketClientMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockIMarketClient)(nil).ClientStatelessDeal), arg0, arg1) +} + +// DefaultAddress mocks base method. +func (m *MockIMarketClient) DefaultAddress(arg0 context.Context) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DefaultAddress", arg0) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DefaultAddress indicates an expected call of DefaultAddress. +func (mr *MockIMarketClientMockRecorder) DefaultAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DefaultAddress", reflect.TypeOf((*MockIMarketClient)(nil).DefaultAddress), arg0) +} + +// MarketAddBalance mocks base method. +func (m *MockIMarketClient) MarketAddBalance(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketAddBalance", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketAddBalance indicates an expected call of MarketAddBalance. 
+func (mr *MockIMarketClientMockRecorder) MarketAddBalance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketAddBalance", reflect.TypeOf((*MockIMarketClient)(nil).MarketAddBalance), arg0, arg1, arg2, arg3) +} + +// MarketGetReserved mocks base method. +func (m *MockIMarketClient) MarketGetReserved(arg0 context.Context, arg1 address.Address) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketGetReserved", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketGetReserved indicates an expected call of MarketGetReserved. +func (mr *MockIMarketClientMockRecorder) MarketGetReserved(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetReserved", reflect.TypeOf((*MockIMarketClient)(nil).MarketGetReserved), arg0, arg1) +} + +// MarketReleaseFunds mocks base method. +func (m *MockIMarketClient) MarketReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 big.Int) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketReleaseFunds", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketReleaseFunds indicates an expected call of MarketReleaseFunds. +func (mr *MockIMarketClientMockRecorder) MarketReleaseFunds(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReleaseFunds", reflect.TypeOf((*MockIMarketClient)(nil).MarketReleaseFunds), arg0, arg1, arg2) +} + +// MarketReserveFunds mocks base method. 
+func (m *MockIMarketClient) MarketReserveFunds(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketReserveFunds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketReserveFunds indicates an expected call of MarketReserveFunds. +func (mr *MockIMarketClientMockRecorder) MarketReserveFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReserveFunds", reflect.TypeOf((*MockIMarketClient)(nil).MarketReserveFunds), arg0, arg1, arg2, arg3) +} + +// MarketWithdraw mocks base method. +func (m *MockIMarketClient) MarketWithdraw(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketWithdraw", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketWithdraw indicates an expected call of MarketWithdraw. +func (mr *MockIMarketClientMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketWithdraw", reflect.TypeOf((*MockIMarketClient)(nil).MarketWithdraw), arg0, arg1, arg2, arg3) +} + +// MessagerGetMessage mocks base method. +func (m *MockIMarketClient) MessagerGetMessage(arg0 context.Context, arg1 cid.Cid) (*internal.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessagerGetMessage", arg0, arg1) + ret0, _ := ret[0].(*internal.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessagerGetMessage indicates an expected call of MessagerGetMessage. 
+func (mr *MockIMarketClientMockRecorder) MessagerGetMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessagerGetMessage", reflect.TypeOf((*MockIMarketClient)(nil).MessagerGetMessage), arg0, arg1) +} + +// MessagerPushMessage mocks base method. +func (m *MockIMarketClient) MessagerPushMessage(arg0 context.Context, arg1 *internal.Message, arg2 *types.MessageSendSpec) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessagerPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessagerPushMessage indicates an expected call of MessagerPushMessage. +func (mr *MockIMarketClientMockRecorder) MessagerPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessagerPushMessage", reflect.TypeOf((*MockIMarketClient)(nil).MessagerPushMessage), arg0, arg1, arg2) +} + +// MessagerWaitMessage mocks base method. +func (m *MockIMarketClient) MessagerWaitMessage(arg0 context.Context, arg1 cid.Cid) (*types.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessagerWaitMessage", arg0, arg1) + ret0, _ := ret[0].(*types.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessagerWaitMessage indicates an expected call of MessagerWaitMessage. +func (mr *MockIMarketClientMockRecorder) MessagerWaitMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessagerWaitMessage", reflect.TypeOf((*MockIMarketClient)(nil).MessagerWaitMessage), arg0, arg1) +} + +// Version mocks base method. 
+func (m *MockIMarketClient) Version(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockIMarketClientMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockIMarketClient)(nil).Version), arg0) +} diff --git a/venus-shared/api/market/client/proxy_gen.go b/venus-shared/api/market/client/proxy_gen.go new file mode 100644 index 0000000000..e0a9bee577 --- /dev/null +++ b/venus-shared/api/market/client/proxy_gen.go @@ -0,0 +1,175 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package client + +import ( + "context" + + address "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/market" + "github.com/filecoin-project/venus/venus-shared/types/market/client" +) + +type IMarketClientStruct struct { + Internal struct { + ClientCalcCommP func(ctx context.Context, inpath string) (*client.CommPRet, error) `perm:"write"` + ClientCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"` + ClientCancelRetrievalDeal func(ctx context.Context, dealid retrievalmarket.DealID) error `perm:"write"` + ClientDataTransferUpdates func(ctx context.Context) (<-chan market.DataTransferChannel, error) `perm:"write"` + ClientDealPieceCID func(ctx context.Context, root cid.Cid) 
(client.DataCIDSize, error) `perm:"read"` + ClientDealSize func(ctx context.Context, root cid.Cid) (client.DataSize, error) `perm:"read"` + ClientExport func(ctx context.Context, exportRef client.ExportRef, fileRef client.FileRef) error `perm:"admin"` + ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]client.QueryOffer, error) `perm:"read"` + ClientGenCar func(ctx context.Context, ref client.FileRef, outpath string) error `perm:"write"` + ClientGetDealInfo func(context.Context, cid.Cid) (*client.DealInfo, error) `perm:"read"` + ClientGetDealStatus func(ctx context.Context, statusCode uint64) (string, error) `perm:"read"` + ClientGetDealUpdates func(ctx context.Context) (<-chan client.DealInfo, error) `perm:"write"` + ClientGetRetrievalUpdates func(ctx context.Context) (<-chan client.RetrievalInfo, error) `perm:"write"` + ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"` + ClientImport func(ctx context.Context, ref client.FileRef) (*client.ImportRes, error) `perm:"admin"` + ClientListDataTransfers func(ctx context.Context) ([]market.DataTransferChannel, error) `perm:"write"` + ClientListDeals func(ctx context.Context) ([]client.DealInfo, error) `perm:"write"` + ClientListImports func(ctx context.Context) ([]client.Import, error) `perm:"write"` + ClientListRetrievals func(ctx context.Context) ([]client.RetrievalInfo, error) `perm:"write"` + ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (client.QueryOffer, error) `perm:"read"` + ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` + ClientRemoveImport func(ctx context.Context, importID client.ImportID) error `perm:"admin"` + ClientRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"` + ClientRetrieve func(ctx context.Context, params 
client.RetrievalOrder) (*client.RestrievalRes, error) `perm:"admin"` + ClientRetrieveTryRestartInsufficientFunds func(ctx context.Context, paymentChannel address.Address) error `perm:"write"` + ClientRetrieveWait func(ctx context.Context, deal retrievalmarket.DealID) error `perm:"admin"` + ClientStartDeal func(ctx context.Context, params *client.StartDealParams) (*cid.Cid, error) `perm:"admin"` + ClientStatelessDeal func(ctx context.Context, params *client.StartDealParams) (*cid.Cid, error) `perm:"write"` + DefaultAddress func(ctx context.Context) (address.Address, error) `perm:"read"` + MarketAddBalance func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"write"` + MarketGetReserved func(ctx context.Context, addr address.Address) (types.BigInt, error) `perm:"read"` + MarketReleaseFunds func(ctx context.Context, addr address.Address, amt types.BigInt) error `perm:"write"` + MarketReserveFunds func(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"write"` + MarketWithdraw func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"write"` + MessagerGetMessage func(ctx context.Context, mid cid.Cid) (*types.Message, error) `perm:"read"` + MessagerPushMessage func(ctx context.Context, msg *types.Message, meta *types.MessageSendSpec) (cid.Cid, error) `perm:"write"` + MessagerWaitMessage func(ctx context.Context, mid cid.Cid) (*types.MsgLookup, error) `perm:"read"` + Version func(ctx context.Context) (types.Version, error) `perm:"read"` + } +} + +func (s *IMarketClientStruct) ClientCalcCommP(p0 context.Context, p1 string) (*client.CommPRet, error) { + return s.Internal.ClientCalcCommP(p0, p1) +} +func (s *IMarketClientStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3) +} +func (s *IMarketClientStruct) 
ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { + return s.Internal.ClientCancelRetrievalDeal(p0, p1) +} +func (s *IMarketClientStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan market.DataTransferChannel, error) { + return s.Internal.ClientDataTransferUpdates(p0) +} +func (s *IMarketClientStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (client.DataCIDSize, error) { + return s.Internal.ClientDealPieceCID(p0, p1) +} +func (s *IMarketClientStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (client.DataSize, error) { + return s.Internal.ClientDealSize(p0, p1) +} +func (s *IMarketClientStruct) ClientExport(p0 context.Context, p1 client.ExportRef, p2 client.FileRef) error { + return s.Internal.ClientExport(p0, p1, p2) +} +func (s *IMarketClientStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]client.QueryOffer, error) { + return s.Internal.ClientFindData(p0, p1, p2) +} +func (s *IMarketClientStruct) ClientGenCar(p0 context.Context, p1 client.FileRef, p2 string) error { + return s.Internal.ClientGenCar(p0, p1, p2) +} +func (s *IMarketClientStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*client.DealInfo, error) { + return s.Internal.ClientGetDealInfo(p0, p1) +} +func (s *IMarketClientStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { + return s.Internal.ClientGetDealStatus(p0, p1) +} +func (s *IMarketClientStruct) ClientGetDealUpdates(p0 context.Context) (<-chan client.DealInfo, error) { + return s.Internal.ClientGetDealUpdates(p0) +} +func (s *IMarketClientStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan client.RetrievalInfo, error) { + return s.Internal.ClientGetRetrievalUpdates(p0) +} +func (s *IMarketClientStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ClientHasLocal(p0, p1) +} +func (s *IMarketClientStruct) ClientImport(p0 context.Context, p1 client.FileRef) (*client.ImportRes, error) { + 
return s.Internal.ClientImport(p0, p1) +} +func (s *IMarketClientStruct) ClientListDataTransfers(p0 context.Context) ([]market.DataTransferChannel, error) { + return s.Internal.ClientListDataTransfers(p0) +} +func (s *IMarketClientStruct) ClientListDeals(p0 context.Context) ([]client.DealInfo, error) { + return s.Internal.ClientListDeals(p0) +} +func (s *IMarketClientStruct) ClientListImports(p0 context.Context) ([]client.Import, error) { + return s.Internal.ClientListImports(p0) +} +func (s *IMarketClientStruct) ClientListRetrievals(p0 context.Context) ([]client.RetrievalInfo, error) { + return s.Internal.ClientListRetrievals(p0) +} +func (s *IMarketClientStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (client.QueryOffer, error) { + return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) +} +func (s *IMarketClientStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { + return s.Internal.ClientQueryAsk(p0, p1, p2) +} +func (s *IMarketClientStruct) ClientRemoveImport(p0 context.Context, p1 client.ImportID) error { + return s.Internal.ClientRemoveImport(p0, p1) +} +func (s *IMarketClientStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3) +} +func (s *IMarketClientStruct) ClientRetrieve(p0 context.Context, p1 client.RetrievalOrder) (*client.RestrievalRes, error) { + return s.Internal.ClientRetrieve(p0, p1) +} +func (s *IMarketClientStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { + return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1) +} +func (s *IMarketClientStruct) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error { + return s.Internal.ClientRetrieveWait(p0, p1) +} +func (s *IMarketClientStruct) ClientStartDeal(p0 context.Context, p1 *client.StartDealParams) 
(*cid.Cid, error) { + return s.Internal.ClientStartDeal(p0, p1) +} +func (s *IMarketClientStruct) ClientStatelessDeal(p0 context.Context, p1 *client.StartDealParams) (*cid.Cid, error) { + return s.Internal.ClientStatelessDeal(p0, p1) +} +func (s *IMarketClientStruct) DefaultAddress(p0 context.Context) (address.Address, error) { + return s.Internal.DefaultAddress(p0) +} +func (s *IMarketClientStruct) MarketAddBalance(p0 context.Context, p1, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketAddBalance(p0, p1, p2, p3) +} +func (s *IMarketClientStruct) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return s.Internal.MarketGetReserved(p0, p1) +} +func (s *IMarketClientStruct) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { + return s.Internal.MarketReleaseFunds(p0, p1, p2) +} +func (s *IMarketClientStruct) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketReserveFunds(p0, p1, p2, p3) +} +func (s *IMarketClientStruct) MarketWithdraw(p0 context.Context, p1, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketWithdraw(p0, p1, p2, p3) +} +func (s *IMarketClientStruct) MessagerGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return s.Internal.MessagerGetMessage(p0, p1) +} +func (s *IMarketClientStruct) MessagerPushMessage(p0 context.Context, p1 *types.Message, p2 *types.MessageSendSpec) (cid.Cid, error) { + return s.Internal.MessagerPushMessage(p0, p1, p2) +} +func (s *IMarketClientStruct) MessagerWaitMessage(p0 context.Context, p1 cid.Cid) (*types.MsgLookup, error) { + return s.Internal.MessagerWaitMessage(p0, p1) +} +func (s *IMarketClientStruct) Version(p0 context.Context) (types.Version, error) { + return s.Internal.Version(p0) +} diff --git a/venus-shared/api/market/client_gen.go b/venus-shared/api/market/client_gen.go new 
file mode 100644 index 0000000000..b60e3bc954 --- /dev/null +++ b/venus-shared/api/market/client_gen.go @@ -0,0 +1,54 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package market + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = 0 +const APINamespace = "market.IMarket" +const MethodNamespace = "VENUS_MARKET" + +// NewIMarketRPC creates a new httpparse jsonrpc remotecli. +func NewIMarketRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (IMarket, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res IMarketStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} + +// DialIMarketRPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. +func DialIMarketRPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) (IMarket, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res IMarketStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) 
+ + return &res, closer, err +} diff --git a/venus-shared/api/market/method.md b/venus-shared/api/market/method.md new file mode 100644 index 0000000000..c6d09a64aa --- /dev/null +++ b/venus-shared/api/market/method.md @@ -0,0 +1,2272 @@ +# Groups + +* [Market](#market) + * [ActorExist](#actorexist) + * [ActorList](#actorlist) + * [ActorSectorSize](#actorsectorsize) + * [AddFsPieceStorage](#addfspiecestorage) + * [AddS3PieceStorage](#adds3piecestorage) + * [AssignUnPackedDeals](#assignunpackeddeals) + * [DagstoreGC](#dagstoregc) + * [DagstoreInitializeAll](#dagstoreinitializeall) + * [DagstoreInitializeShard](#dagstoreinitializeshard) + * [DagstoreInitializeStorage](#dagstoreinitializestorage) + * [DagstoreListShards](#dagstorelistshards) + * [DagstoreRecoverShard](#dagstorerecovershard) + * [DealsConsiderOfflineRetrievalDeals](#dealsconsiderofflineretrievaldeals) + * [DealsConsiderOfflineStorageDeals](#dealsconsiderofflinestoragedeals) + * [DealsConsiderOnlineRetrievalDeals](#dealsconsideronlineretrievaldeals) + * [DealsConsiderOnlineStorageDeals](#dealsconsideronlinestoragedeals) + * [DealsConsiderUnverifiedStorageDeals](#dealsconsiderunverifiedstoragedeals) + * [DealsConsiderVerifiedStorageDeals](#dealsconsiderverifiedstoragedeals) + * [DealsImportData](#dealsimportdata) + * [DealsMaxProviderCollateralMultiplier](#dealsmaxprovidercollateralmultiplier) + * [DealsMaxPublishFee](#dealsmaxpublishfee) + * [DealsMaxStartDelay](#dealsmaxstartdelay) + * [DealsPieceCidBlocklist](#dealspiececidblocklist) + * [DealsPublishMsgPeriod](#dealspublishmsgperiod) + * [DealsSetConsiderOfflineRetrievalDeals](#dealssetconsiderofflineretrievaldeals) + * [DealsSetConsiderOfflineStorageDeals](#dealssetconsiderofflinestoragedeals) + * [DealsSetConsiderOnlineRetrievalDeals](#dealssetconsideronlineretrievaldeals) + * [DealsSetConsiderOnlineStorageDeals](#dealssetconsideronlinestoragedeals) + * [DealsSetConsiderUnverifiedStorageDeals](#dealssetconsiderunverifiedstoragedeals) + * 
[DealsSetConsiderVerifiedStorageDeals](#dealssetconsiderverifiedstoragedeals) + * [DealsSetMaxProviderCollateralMultiplier](#dealssetmaxprovidercollateralmultiplier) + * [DealsSetMaxPublishFee](#dealssetmaxpublishfee) + * [DealsSetMaxStartDelay](#dealssetmaxstartdelay) + * [DealsSetPieceCidBlocklist](#dealssetpiececidblocklist) + * [DealsSetPublishMsgPeriod](#dealssetpublishmsgperiod) + * [GetDeals](#getdeals) + * [GetRetrievalDealStatistic](#getretrievaldealstatistic) + * [GetStorageDealStatistic](#getstoragedealstatistic) + * [GetUnPackedDeals](#getunpackeddeals) + * [ID](#id) + * [ImportV1Data](#importv1data) + * [ListPieceStorageInfos](#listpiecestorageinfos) + * [ListenMarketEvent](#listenmarketevent) + * [MarkDealsAsPacking](#markdealsaspacking) + * [MarketAddBalance](#marketaddbalance) + * [MarketCancelDataTransfer](#marketcanceldatatransfer) + * [MarketDataTransferPath](#marketdatatransferpath) + * [MarketDataTransferUpdates](#marketdatatransferupdates) + * [MarketGetAsk](#marketgetask) + * [MarketGetDealUpdates](#marketgetdealupdates) + * [MarketGetReserved](#marketgetreserved) + * [MarketGetRetrievalAsk](#marketgetretrievalask) + * [MarketImportDealData](#marketimportdealdata) + * [MarketImportPublishedDeal](#marketimportpublisheddeal) + * [MarketListAsk](#marketlistask) + * [MarketListDataTransfers](#marketlistdatatransfers) + * [MarketListDeals](#marketlistdeals) + * [MarketListIncompleteDeals](#marketlistincompletedeals) + * [MarketListRetrievalAsk](#marketlistretrievalask) + * [MarketListRetrievalDeals](#marketlistretrievaldeals) + * [MarketMaxBalanceAddFee](#marketmaxbalanceaddfee) + * [MarketMaxDealsPerPublishMsg](#marketmaxdealsperpublishmsg) + * [MarketPendingDeals](#marketpendingdeals) + * [MarketPublishPendingDeals](#marketpublishpendingdeals) + * [MarketReleaseFunds](#marketreleasefunds) + * [MarketReserveFunds](#marketreservefunds) + * [MarketRestartDataTransfer](#marketrestartdatatransfer) + * [MarketSetAsk](#marketsetask) + * 
[MarketSetDataTransferPath](#marketsetdatatransferpath) + * [MarketSetMaxBalanceAddFee](#marketsetmaxbalanceaddfee) + * [MarketSetMaxDealsPerPublishMsg](#marketsetmaxdealsperpublishmsg) + * [MarketSetRetrievalAsk](#marketsetretrievalask) + * [MarketWithdraw](#marketwithdraw) + * [MessagerGetMessage](#messagergetmessage) + * [MessagerPushMessage](#messagerpushmessage) + * [MessagerWaitMessage](#messagerwaitmessage) + * [NetAddrsListen](#netaddrslisten) + * [OfflineDealImport](#offlinedealimport) + * [PaychVoucherList](#paychvoucherlist) + * [PiecesGetCIDInfo](#piecesgetcidinfo) + * [PiecesGetPieceInfo](#piecesgetpieceinfo) + * [PiecesListCidInfos](#pieceslistcidinfos) + * [PiecesListPieces](#pieceslistpieces) + * [RemovePieceStorage](#removepiecestorage) + * [ResponseMarketEvent](#responsemarketevent) + * [SectorGetExpectedSealDuration](#sectorgetexpectedsealduration) + * [SectorSetExpectedSealDuration](#sectorsetexpectedsealduration) + * [UpdateDealOnPacking](#updatedealonpacking) + * [UpdateDealStatus](#updatedealstatus) + * [UpdateStorageDealStatus](#updatestoragedealstatus) + * [Version](#version) + +## Market + +### ActorExist + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### ActorList + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "Addr": "f01234", + "Account": "string value" + } +] +``` + +### ActorSectorSize + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `34359738368` + +### AddFsPieceStorage + + +Perms: admin + +Inputs: +```json +[ + "string value", + "string value", + true +] +``` + +Response: `{}` + +### AddS3PieceStorage + + +Perms: admin + +Inputs: +```json +[ + "string value", + "string value", + "string value", + "string value", + "string value", + "string value", + "string value", + true +] +``` + +Response: `{}` + +### AssignUnPackedDeals + + +Perms: write + +Inputs: +```json +[ + { + "Miner": 1000, + "Number": 9 + }, + 34359738368, + { + "MaxPiece": 123, + "MaxPieceSize": 
42, + "MinPiece": 123, + "MinPieceSize": 42, + "MinUsedSpace": 42, + "StartEpoch": 10101, + "EndEpoch": 10101 + } +] +``` + +Response: +```json +[ + { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0", + "Offset": 1032, + "Length": 1032, + "PayloadSize": 42, + "DealID": 5432, + "TotalStorageFee": "0", + "FastRetrieval": true, + "PublishCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } +] +``` + +### DagstoreGC +DagstoreGC runs garbage collection on the DAG store. + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "Key": "string value", + "Success": true, + "Error": "string value" + } +] +``` + +### DagstoreInitializeAll +DagstoreInitializeAll initializes all uninitialized shards in bulk, +according to the policy passed in the parameters. + +It is recommended to set a maximum concurrency to avoid extreme +IO pressure if the storage subsystem has a large amount of deals. + +It returns a stream of events to report progress. + + +Perms: admin + +Inputs: +```json +[ + { + "MaxConcurrency": 123, + "IncludeSealed": true + } +] +``` + +Response: +```json +{ + "Key": "string value", + "Event": "string value", + "Success": true, + "Error": "string value", + "Total": 123, + "Current": 123 +} +``` + +### DagstoreInitializeShard +DagstoreInitializeShard initializes an uninitialized shard. + +Initialization consists of fetching the shard's data (deal payload) from +the storage subsystem, generating an index, and persisting the index +to facilitate later retrievals, and/or to publish to external sources. + +This operation is intended to complement the initial migration. 
The +migration registers a shard for every unique piece CID, with lazy +initialization. Thus, shards are not initialized immediately to avoid +IO activity competing with proving. Instead, shard are initialized +when first accessed. This method forces the initialization of a shard by +accessing it and immediately releasing it. This is useful to warm up the +cache to facilitate subsequent retrievals, and to generate the indexes +to publish them externally. + +This operation fails if the shard is not in ShardStateNew state. +It blocks until initialization finishes. + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### DagstoreInitializeStorage +DagstoreInitializeStorage initializes all pieces in specify storage + + +Perms: admin + +Inputs: +```json +[ + "string value", + { + "MaxConcurrency": 123, + "IncludeSealed": true + } +] +``` + +Response: +```json +{ + "Key": "string value", + "Event": "string value", + "Success": true, + "Error": "string value", + "Total": 123, + "Current": 123 +} +``` + +### DagstoreListShards +DagstoreListShards returns information about all shards known to the +DAG store. Only available on nodes running the markets subsystem. + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "Key": "string value", + "State": "string value", + "Error": "string value" + } +] +``` + +### DagstoreRecoverShard +DagstoreRecoverShard attempts to recover a failed shard. + +This operation fails if the shard is not in ShardStateErrored state. +It blocks until recovery finishes. If recovery failed, it returns the +error. 
+ + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### DealsConsiderOfflineRetrievalDeals + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### DealsConsiderOfflineStorageDeals + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### DealsConsiderOnlineRetrievalDeals + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### DealsConsiderOnlineStorageDeals + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### DealsConsiderUnverifiedStorageDeals + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### DealsConsiderVerifiedStorageDeals + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### DealsImportData + + +Perms: admin + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "string value" +] +``` + +Response: `{}` + +### DealsMaxProviderCollateralMultiplier + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `42` + +### DealsMaxPublishFee + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0 FIL"` + +### DealsMaxStartDelay + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `60000000000` + +### DealsPieceCidBlocklist + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +### DealsPublishMsgPeriod + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `60000000000` + +### DealsSetConsiderOfflineRetrievalDeals + + +Perms: write + +Inputs: +```json +[ + "f01234", + true +] +``` + +Response: `{}` + +### DealsSetConsiderOfflineStorageDeals + + +Perms: write + +Inputs: +```json +[ + "f01234", + true +] +``` + +Response: `{}` + +### DealsSetConsiderOnlineRetrievalDeals + + +Perms: write + +Inputs: +```json +[ + 
"f01234", + true +] +``` + +Response: `{}` + +### DealsSetConsiderOnlineStorageDeals + + +Perms: write + +Inputs: +```json +[ + "f01234", + true +] +``` + +Response: `{}` + +### DealsSetConsiderUnverifiedStorageDeals + + +Perms: write + +Inputs: +```json +[ + "f01234", + true +] +``` + +Response: `{}` + +### DealsSetConsiderVerifiedStorageDeals + + +Perms: write + +Inputs: +```json +[ + "f01234", + true +] +``` + +Response: `{}` + +### DealsSetMaxProviderCollateralMultiplier + + +Perms: write + +Inputs: +```json +[ + "f01234", + 42 +] +``` + +Response: `{}` + +### DealsSetMaxPublishFee + + +Perms: write + +Inputs: +```json +[ + "f01234", + "0 FIL" +] +``` + +Response: `{}` + +### DealsSetMaxStartDelay + + +Perms: write + +Inputs: +```json +[ + "f01234", + 60000000000 +] +``` + +Response: `{}` + +### DealsSetPieceCidBlocklist + + +Perms: write + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] +] +``` + +Response: `{}` + +### DealsSetPublishMsgPeriod + + +Perms: write + +Inputs: +```json +[ + "f01234", + 60000000000 +] +``` + +Response: `{}` + +### GetDeals + + +Perms: read + +Inputs: +```json +[ + "f01234", + 123, + 123 +] +``` + +Response: +```json +[ + { + "DealID": 5432, + "SectorID": 9, + "Offset": 1032, + "Length": 1032, + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PublishCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "FastRetrieval": true, + "Status": 
"Undefine" + } +] +``` + +### GetRetrievalDealStatistic +GetRetrievalDealStatistic get retrieval deal statistic information +todo address undefined is invalid, it is currently not possible to directly associate an order with a miner + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "DealsStatus": { + "0": 9 + } +} +``` + +### GetStorageDealStatistic +GetStorageDealStatistic get storage deal statistic information +if set miner address to address.Undef, return all storage deal info + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "DealsStatus": { + "42": 9 + } +} +``` + +### GetUnPackedDeals + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "MaxPiece": 123, + "MaxPieceSize": 42, + "MinPiece": 123, + "MinPieceSize": 42, + "MinUsedSpace": 42, + "StartEpoch": 10101, + "EndEpoch": 10101 + } +] +``` + +Response: +```json +[ + { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0", + "Offset": 1032, + "Length": 1032, + "PayloadSize": 42, + "DealID": 5432, + "TotalStorageFee": "0", + "FastRetrieval": true, + "PublishCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } +] +``` + +### ID + + +Perms: read + +Inputs: `[]` + +Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` + +### ImportV1Data + + +Perms: write + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### ListPieceStorageInfos + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "FsStorage": [ + { + "Path": "string value", + "Name": "string value", + "ReadOnly": true, + "Status": { + "Capacity": 9, + "Available": 9, + "Reserved": 9 + } + } + ], + "S3Storage": [ + { + "Name": "string value", + 
"ReadOnly": true, + "EndPoint": "string value", + "Bucket": "string value", + "SubDir": "string value", + "Status": { + "Capacity": 9, + "Available": 9, + "Reserved": 9 + } + } + ] +} +``` + +### ListenMarketEvent + + +Perms: read + +Inputs: +```json +[ + { + "Miner": "f01234" + } +] +``` + +Response: +```json +{ + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Method": "string value", + "Payload": "Ynl0ZSBhcnJheQ==" +} +``` + +### MarkDealsAsPacking + + +Perms: write + +Inputs: +```json +[ + "f01234", + [ + 5432 + ] +] +``` + +Response: `{}` + +### MarketAddBalance + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MarketCancelDataTransfer +MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + +### MarketDataTransferPath + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"string value"` + +### MarketDataTransferUpdates + + +Perms: write + +Inputs: `[]` + +Response: +```json +{ + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } +} +``` + +### MarketGetAsk + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Ask": { + "Price": "0", + "VerifiedPrice": "0", + 
"MinPieceSize": 1032, + "MaxPieceSize": 1032, + "Miner": "f01234", + "Timestamp": 10101, + "Expiry": 10101, + "SeqNo": 42 + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CreatedAt": 42, + "UpdatedAt": 42 +} +``` + +### MarketGetDealUpdates + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "AddFundsCid": null, + "PublishCid": null, + "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "State": 42, + "PiecePath": "/some/path", + "PayloadSize": 42, + "MetadataPath": "/some/path", + "SlashEpoch": 10101, + "FastRetrieval": true, + "Message": "string value", + "FundsReserved": "0", + "Ref": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "AvailableForRetrieval": true, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "TransferChannelId": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "SectorNumber": 9, + "Offset": 1032, + "PieceStatus": "Undefine", + "InboundCAR": "string value", + "CreatedAt": 42, + "UpdatedAt": 42 +} +``` + +### MarketGetReserved + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0"` + +### MarketGetRetrievalAsk + + +Perms: read + +Inputs: 
+```json +[ + "f01234" +] +``` + +Response: +```json +{ + "PricePerByte": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42 +} +``` + +### MarketImportDealData + + +Perms: write + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "string value" +] +``` + +Response: `{}` + +### MarketImportPublishedDeal + + +Perms: write + +Inputs: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "AddFundsCid": null, + "PublishCid": null, + "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "State": 42, + "PiecePath": "/some/path", + "PayloadSize": 42, + "MetadataPath": "/some/path", + "SlashEpoch": 10101, + "FastRetrieval": true, + "Message": "string value", + "FundsReserved": "0", + "Ref": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "AvailableForRetrieval": true, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "TransferChannelId": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "SectorNumber": 9, + "Offset": 1032, + "PieceStatus": "Undefine", + "InboundCAR": "string value", + "CreatedAt": 42, + "UpdatedAt": 42 + } +] +``` + +Response: `{}` + +### MarketListAsk + + 
+Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "Ask": { + "Price": "0", + "VerifiedPrice": "0", + "MinPieceSize": 1032, + "MaxPieceSize": 1032, + "Miner": "f01234", + "Timestamp": 10101, + "Expiry": 10101, + "SeqNo": 42 + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CreatedAt": 42, + "UpdatedAt": 42 + } +] +``` + +### MarketListDataTransfers + + +Perms: write + +Inputs: `[]` + +Response: +```json +[ + { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +] +``` + +### MarketListDeals + + +Perms: read + +Inputs: +```json +[ + [ + "f01234" + ] +] +``` + +Response: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101, + "VerifiedClaim": 0 + } + } +] +``` + +### MarketListIncompleteDeals + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + 
"StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "AddFundsCid": null, + "PublishCid": null, + "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "State": 42, + "PiecePath": "/some/path", + "PayloadSize": 42, + "MetadataPath": "/some/path", + "SlashEpoch": 10101, + "FastRetrieval": true, + "Message": "string value", + "FundsReserved": "0", + "Ref": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "AvailableForRetrieval": true, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "TransferChannelId": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "SectorNumber": 9, + "Offset": 1032, + "PieceStatus": "Undefine", + "InboundCAR": "string value", + "CreatedAt": 42, + "UpdatedAt": 42 + } +] +``` + +### MarketListRetrievalAsk + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "Miner": "f01234", + "PricePerByte": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "CreatedAt": 42, + "UpdatedAt": 42 + } +] +``` + +### MarketListRetrievalDeals + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "Selector": { + "Raw": "Ynl0ZSBhcnJheQ==" + }, + "PieceCID": null, + "PricePerByte": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "UnsealPrice": "0", + "StoreID": 42, + "SelStorageProposalCid": { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "Status": 0, + "Receiver": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "TotalSent": 42, + "FundsReceived": "0", + "Message": "string value", + "CurrentInterval": 42, + "LegacyProtocol": true, + "CreatedAt": 42, + "UpdatedAt": 42 + } +] +``` + +### MarketMaxBalanceAddFee + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0 FIL"` + +### MarketMaxDealsPerPublishMsg + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `42` + +### MarketPendingDeals + + +Perms: write + +Inputs: `[]` + +Response: +```json +[ + { + "Deals": [ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ], + "PublishPeriodStart": "0001-01-01T00:00:00Z", + "PublishPeriod": 60000000000 + } +] +``` + +### MarketPublishPendingDeals + + +Perms: admin + +Inputs: `[]` + +Response: `{}` + +### MarketReleaseFunds + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "0" +] +``` + +Response: `{}` + +### MarketReserveFunds + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MarketRestartDataTransfer +MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + 
true +] +``` + +Response: `{}` + +### MarketSetAsk + + +Perms: admin + +Inputs: +```json +[ + "f01234", + "0", + "0", + 10101, + 1032, + 1032 +] +``` + +Response: `{}` + +### MarketSetDataTransferPath + + +Perms: admin + +Inputs: +```json +[ + "f01234", + "string value" +] +``` + +Response: `{}` + +### MarketSetMaxBalanceAddFee + + +Perms: write + +Inputs: +```json +[ + "f01234", + "0 FIL" +] +``` + +Response: `{}` + +### MarketSetMaxDealsPerPublishMsg + + +Perms: write + +Inputs: +```json +[ + "f01234", + 42 +] +``` + +Response: `{}` + +### MarketSetRetrievalAsk + + +Perms: admin + +Inputs: +```json +[ + "f01234", + { + "PricePerByte": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42 + } +] +``` + +Response: `{}` + +### MarketWithdraw + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MessagerGetMessage + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" +} +``` + +### MessagerPushMessage + + +Perms: write + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "MaxFee": "0", + "GasOverEstimation": 12.3, + "GasOverPremium": 12.3 + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### 
MessagerWaitMessage +messager + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### NetAddrsListen + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### OfflineDealImport + + +Perms: admin + +Inputs: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "AddFundsCid": null, + "PublishCid": null, + "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "State": 42, + "PiecePath": "/some/path", + "PayloadSize": 42, + "MetadataPath": "/some/path", + "SlashEpoch": 10101, + "FastRetrieval": true, + "Message": "string value", + "FundsReserved": "0", + "Ref": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 
42 + }, + "AvailableForRetrieval": true, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "TransferChannelId": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "SectorNumber": 9, + "Offset": 1032, + "PieceStatus": "Undefine", + "InboundCAR": "string value", + "CreatedAt": 42, + "UpdatedAt": 42 + } +] +``` + +Response: `{}` + +### PaychVoucherList +Paych + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +[ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretHash": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +### PiecesGetCIDInfo + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceBlockLocations": [ + { + "RelOffset": 42, + "BlockSize": 42, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ] +} +``` + +### PiecesGetPieceInfo + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Deals": [ + { + "DealID": 5432, + "SectorID": 9, + "Offset": 1032, + "Length": 1032 + } + ] +} +``` + +### PiecesListCidInfos + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +### PiecesListPieces + + +Perms: read + 
+Inputs: `[]` + +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +### RemovePieceStorage + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### ResponseMarketEvent +market event + + +Perms: read + +Inputs: +```json +[ + { + "Id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Payload": "Ynl0ZSBhcnJheQ==", + "Error": "string value" + } +] +``` + +Response: `{}` + +### SectorGetExpectedSealDuration +SectorGetExpectedSealDuration gets the time that a newly-created sector +waits for more deals before it starts sealing + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `60000000000` + +### SectorSetExpectedSealDuration +SectorSetExpectedSealDuration sets the expected time for a sector to seal + + +Perms: write + +Inputs: +```json +[ + "f01234", + 60000000000 +] +``` + +Response: `{}` + +### UpdateDealOnPacking + + +Perms: write + +Inputs: +```json +[ + "f01234", + 5432, + 9, + 1032 +] +``` + +Response: `{}` + +### UpdateDealStatus + + +Perms: write + +Inputs: +```json +[ + "f01234", + 5432, + "Undefine" +] +``` + +Response: `{}` + +### UpdateStorageDealStatus + + +Perms: write + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 42, + "Undefine" +] +``` + +Response: `{}` + +### Version +Version provides information about API provider + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131840 +} +``` + diff --git a/venus-shared/api/market/mock/mock_imarket.go b/venus-shared/api/market/mock/mock_imarket.go new file mode 100644 index 0000000000..8258abd49e --- /dev/null +++ b/venus-shared/api/market/mock/mock_imarket.go @@ -0,0 +1,1377 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/venus/venus-shared/api/market (interfaces: IMarket) + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + time "time" + + address "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + piecestore "github.com/filecoin-project/go-fil-markets/piecestore" + retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" + internal "github.com/filecoin-project/venus/venus-shared/internal" + types "github.com/filecoin-project/venus/venus-shared/types" + gateway "github.com/filecoin-project/venus/venus-shared/types/gateway" + market "github.com/filecoin-project/venus/venus-shared/types/market" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// MockIMarket is a mock of IMarket interface. +type MockIMarket struct { + ctrl *gomock.Controller + recorder *MockIMarketMockRecorder +} + +// MockIMarketMockRecorder is the mock recorder for MockIMarket. +type MockIMarketMockRecorder struct { + mock *MockIMarket +} + +// NewMockIMarket creates a new mock instance. +func NewMockIMarket(ctrl *gomock.Controller) *MockIMarket { + mock := &MockIMarket{ctrl: ctrl} + mock.recorder = &MockIMarketMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIMarket) EXPECT() *MockIMarketMockRecorder { + return m.recorder +} + +// ActorExist mocks base method. +func (m *MockIMarket) ActorExist(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ActorExist", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ActorExist indicates an expected call of ActorExist. 
+func (mr *MockIMarketMockRecorder) ActorExist(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActorExist", reflect.TypeOf((*MockIMarket)(nil).ActorExist), arg0, arg1) +} + +// ActorList mocks base method. +func (m *MockIMarket) ActorList(arg0 context.Context) ([]market.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ActorList", arg0) + ret0, _ := ret[0].([]market.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ActorList indicates an expected call of ActorList. +func (mr *MockIMarketMockRecorder) ActorList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActorList", reflect.TypeOf((*MockIMarket)(nil).ActorList), arg0) +} + +// ActorSectorSize mocks base method. +func (m *MockIMarket) ActorSectorSize(arg0 context.Context, arg1 address.Address) (abi.SectorSize, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ActorSectorSize", arg0, arg1) + ret0, _ := ret[0].(abi.SectorSize) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ActorSectorSize indicates an expected call of ActorSectorSize. +func (mr *MockIMarketMockRecorder) ActorSectorSize(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActorSectorSize", reflect.TypeOf((*MockIMarket)(nil).ActorSectorSize), arg0, arg1) +} + +// AddFsPieceStorage mocks base method. +func (m *MockIMarket) AddFsPieceStorage(arg0 context.Context, arg1, arg2 string, arg3 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddFsPieceStorage", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddFsPieceStorage indicates an expected call of AddFsPieceStorage. 
+func (mr *MockIMarketMockRecorder) AddFsPieceStorage(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddFsPieceStorage", reflect.TypeOf((*MockIMarket)(nil).AddFsPieceStorage), arg0, arg1, arg2, arg3) +} + +// AddS3PieceStorage mocks base method. +func (m *MockIMarket) AddS3PieceStorage(arg0 context.Context, arg1, arg2, arg3, arg4, arg5, arg6, arg7 string, arg8 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddS3PieceStorage", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddS3PieceStorage indicates an expected call of AddS3PieceStorage. +func (mr *MockIMarketMockRecorder) AddS3PieceStorage(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddS3PieceStorage", reflect.TypeOf((*MockIMarket)(nil).AddS3PieceStorage), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) +} + +// AssignUnPackedDeals mocks base method. +func (m *MockIMarket) AssignUnPackedDeals(arg0 context.Context, arg1 abi.SectorID, arg2 abi.SectorSize, arg3 *market.GetDealSpec) ([]*market.DealInfoIncludePath, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AssignUnPackedDeals", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]*market.DealInfoIncludePath) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AssignUnPackedDeals indicates an expected call of AssignUnPackedDeals. +func (mr *MockIMarketMockRecorder) AssignUnPackedDeals(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssignUnPackedDeals", reflect.TypeOf((*MockIMarket)(nil).AssignUnPackedDeals), arg0, arg1, arg2, arg3) +} + +// DagstoreGC mocks base method. 
+func (m *MockIMarket) DagstoreGC(arg0 context.Context) ([]market.DagstoreShardResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DagstoreGC", arg0) + ret0, _ := ret[0].([]market.DagstoreShardResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DagstoreGC indicates an expected call of DagstoreGC. +func (mr *MockIMarketMockRecorder) DagstoreGC(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DagstoreGC", reflect.TypeOf((*MockIMarket)(nil).DagstoreGC), arg0) +} + +// DagstoreInitializeAll mocks base method. +func (m *MockIMarket) DagstoreInitializeAll(arg0 context.Context, arg1 market.DagstoreInitializeAllParams) (<-chan market.DagstoreInitializeAllEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DagstoreInitializeAll", arg0, arg1) + ret0, _ := ret[0].(<-chan market.DagstoreInitializeAllEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DagstoreInitializeAll indicates an expected call of DagstoreInitializeAll. +func (mr *MockIMarketMockRecorder) DagstoreInitializeAll(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DagstoreInitializeAll", reflect.TypeOf((*MockIMarket)(nil).DagstoreInitializeAll), arg0, arg1) +} + +// DagstoreInitializeShard mocks base method. +func (m *MockIMarket) DagstoreInitializeShard(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DagstoreInitializeShard", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DagstoreInitializeShard indicates an expected call of DagstoreInitializeShard. 
+func (mr *MockIMarketMockRecorder) DagstoreInitializeShard(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DagstoreInitializeShard", reflect.TypeOf((*MockIMarket)(nil).DagstoreInitializeShard), arg0, arg1) +} + +// DagstoreInitializeStorage mocks base method. +func (m *MockIMarket) DagstoreInitializeStorage(arg0 context.Context, arg1 string, arg2 market.DagstoreInitializeAllParams) (<-chan market.DagstoreInitializeAllEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DagstoreInitializeStorage", arg0, arg1, arg2) + ret0, _ := ret[0].(<-chan market.DagstoreInitializeAllEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DagstoreInitializeStorage indicates an expected call of DagstoreInitializeStorage. +func (mr *MockIMarketMockRecorder) DagstoreInitializeStorage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DagstoreInitializeStorage", reflect.TypeOf((*MockIMarket)(nil).DagstoreInitializeStorage), arg0, arg1, arg2) +} + +// DagstoreListShards mocks base method. +func (m *MockIMarket) DagstoreListShards(arg0 context.Context) ([]market.DagstoreShardInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DagstoreListShards", arg0) + ret0, _ := ret[0].([]market.DagstoreShardInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DagstoreListShards indicates an expected call of DagstoreListShards. +func (mr *MockIMarketMockRecorder) DagstoreListShards(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DagstoreListShards", reflect.TypeOf((*MockIMarket)(nil).DagstoreListShards), arg0) +} + +// DagstoreRecoverShard mocks base method. 
+func (m *MockIMarket) DagstoreRecoverShard(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DagstoreRecoverShard", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DagstoreRecoverShard indicates an expected call of DagstoreRecoverShard. +func (mr *MockIMarketMockRecorder) DagstoreRecoverShard(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DagstoreRecoverShard", reflect.TypeOf((*MockIMarket)(nil).DagstoreRecoverShard), arg0, arg1) +} + +// DealsConsiderOfflineRetrievalDeals mocks base method. +func (m *MockIMarket) DealsConsiderOfflineRetrievalDeals(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsConsiderOfflineRetrievalDeals", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsConsiderOfflineRetrievalDeals indicates an expected call of DealsConsiderOfflineRetrievalDeals. +func (mr *MockIMarketMockRecorder) DealsConsiderOfflineRetrievalDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsConsiderOfflineRetrievalDeals", reflect.TypeOf((*MockIMarket)(nil).DealsConsiderOfflineRetrievalDeals), arg0, arg1) +} + +// DealsConsiderOfflineStorageDeals mocks base method. +func (m *MockIMarket) DealsConsiderOfflineStorageDeals(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsConsiderOfflineStorageDeals", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsConsiderOfflineStorageDeals indicates an expected call of DealsConsiderOfflineStorageDeals. 
+func (mr *MockIMarketMockRecorder) DealsConsiderOfflineStorageDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsConsiderOfflineStorageDeals", reflect.TypeOf((*MockIMarket)(nil).DealsConsiderOfflineStorageDeals), arg0, arg1) +} + +// DealsConsiderOnlineRetrievalDeals mocks base method. +func (m *MockIMarket) DealsConsiderOnlineRetrievalDeals(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsConsiderOnlineRetrievalDeals", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsConsiderOnlineRetrievalDeals indicates an expected call of DealsConsiderOnlineRetrievalDeals. +func (mr *MockIMarketMockRecorder) DealsConsiderOnlineRetrievalDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsConsiderOnlineRetrievalDeals", reflect.TypeOf((*MockIMarket)(nil).DealsConsiderOnlineRetrievalDeals), arg0, arg1) +} + +// DealsConsiderOnlineStorageDeals mocks base method. +func (m *MockIMarket) DealsConsiderOnlineStorageDeals(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsConsiderOnlineStorageDeals", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsConsiderOnlineStorageDeals indicates an expected call of DealsConsiderOnlineStorageDeals. +func (mr *MockIMarketMockRecorder) DealsConsiderOnlineStorageDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsConsiderOnlineStorageDeals", reflect.TypeOf((*MockIMarket)(nil).DealsConsiderOnlineStorageDeals), arg0, arg1) +} + +// DealsConsiderUnverifiedStorageDeals mocks base method. 
+func (m *MockIMarket) DealsConsiderUnverifiedStorageDeals(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsConsiderUnverifiedStorageDeals", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsConsiderUnverifiedStorageDeals indicates an expected call of DealsConsiderUnverifiedStorageDeals. +func (mr *MockIMarketMockRecorder) DealsConsiderUnverifiedStorageDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsConsiderUnverifiedStorageDeals", reflect.TypeOf((*MockIMarket)(nil).DealsConsiderUnverifiedStorageDeals), arg0, arg1) +} + +// DealsConsiderVerifiedStorageDeals mocks base method. +func (m *MockIMarket) DealsConsiderVerifiedStorageDeals(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsConsiderVerifiedStorageDeals", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsConsiderVerifiedStorageDeals indicates an expected call of DealsConsiderVerifiedStorageDeals. +func (mr *MockIMarketMockRecorder) DealsConsiderVerifiedStorageDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsConsiderVerifiedStorageDeals", reflect.TypeOf((*MockIMarket)(nil).DealsConsiderVerifiedStorageDeals), arg0, arg1) +} + +// DealsImportData mocks base method. +func (m *MockIMarket) DealsImportData(arg0 context.Context, arg1 cid.Cid, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsImportData", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsImportData indicates an expected call of DealsImportData. 
+func (mr *MockIMarketMockRecorder) DealsImportData(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsImportData", reflect.TypeOf((*MockIMarket)(nil).DealsImportData), arg0, arg1, arg2) +} + +// DealsMaxProviderCollateralMultiplier mocks base method. +func (m *MockIMarket) DealsMaxProviderCollateralMultiplier(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsMaxProviderCollateralMultiplier", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsMaxProviderCollateralMultiplier indicates an expected call of DealsMaxProviderCollateralMultiplier. +func (mr *MockIMarketMockRecorder) DealsMaxProviderCollateralMultiplier(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsMaxProviderCollateralMultiplier", reflect.TypeOf((*MockIMarket)(nil).DealsMaxProviderCollateralMultiplier), arg0, arg1) +} + +// DealsMaxPublishFee mocks base method. +func (m *MockIMarket) DealsMaxPublishFee(arg0 context.Context, arg1 address.Address) (internal.FIL, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsMaxPublishFee", arg0, arg1) + ret0, _ := ret[0].(internal.FIL) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsMaxPublishFee indicates an expected call of DealsMaxPublishFee. +func (mr *MockIMarketMockRecorder) DealsMaxPublishFee(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsMaxPublishFee", reflect.TypeOf((*MockIMarket)(nil).DealsMaxPublishFee), arg0, arg1) +} + +// DealsMaxStartDelay mocks base method. 
+func (m *MockIMarket) DealsMaxStartDelay(arg0 context.Context, arg1 address.Address) (time.Duration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsMaxStartDelay", arg0, arg1) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsMaxStartDelay indicates an expected call of DealsMaxStartDelay. +func (mr *MockIMarketMockRecorder) DealsMaxStartDelay(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsMaxStartDelay", reflect.TypeOf((*MockIMarket)(nil).DealsMaxStartDelay), arg0, arg1) +} + +// DealsPieceCidBlocklist mocks base method. +func (m *MockIMarket) DealsPieceCidBlocklist(arg0 context.Context, arg1 address.Address) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsPieceCidBlocklist", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsPieceCidBlocklist indicates an expected call of DealsPieceCidBlocklist. +func (mr *MockIMarketMockRecorder) DealsPieceCidBlocklist(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsPieceCidBlocklist", reflect.TypeOf((*MockIMarket)(nil).DealsPieceCidBlocklist), arg0, arg1) +} + +// DealsPublishMsgPeriod mocks base method. +func (m *MockIMarket) DealsPublishMsgPeriod(arg0 context.Context, arg1 address.Address) (time.Duration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsPublishMsgPeriod", arg0, arg1) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DealsPublishMsgPeriod indicates an expected call of DealsPublishMsgPeriod. 
+func (mr *MockIMarketMockRecorder) DealsPublishMsgPeriod(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsPublishMsgPeriod", reflect.TypeOf((*MockIMarket)(nil).DealsPublishMsgPeriod), arg0, arg1) +} + +// DealsSetConsiderOfflineRetrievalDeals mocks base method. +func (m *MockIMarket) DealsSetConsiderOfflineRetrievalDeals(arg0 context.Context, arg1 address.Address, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetConsiderOfflineRetrievalDeals", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetConsiderOfflineRetrievalDeals indicates an expected call of DealsSetConsiderOfflineRetrievalDeals. +func (mr *MockIMarketMockRecorder) DealsSetConsiderOfflineRetrievalDeals(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetConsiderOfflineRetrievalDeals", reflect.TypeOf((*MockIMarket)(nil).DealsSetConsiderOfflineRetrievalDeals), arg0, arg1, arg2) +} + +// DealsSetConsiderOfflineStorageDeals mocks base method. +func (m *MockIMarket) DealsSetConsiderOfflineStorageDeals(arg0 context.Context, arg1 address.Address, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetConsiderOfflineStorageDeals", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetConsiderOfflineStorageDeals indicates an expected call of DealsSetConsiderOfflineStorageDeals. +func (mr *MockIMarketMockRecorder) DealsSetConsiderOfflineStorageDeals(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetConsiderOfflineStorageDeals", reflect.TypeOf((*MockIMarket)(nil).DealsSetConsiderOfflineStorageDeals), arg0, arg1, arg2) +} + +// DealsSetConsiderOnlineRetrievalDeals mocks base method. 
+func (m *MockIMarket) DealsSetConsiderOnlineRetrievalDeals(arg0 context.Context, arg1 address.Address, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetConsiderOnlineRetrievalDeals", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetConsiderOnlineRetrievalDeals indicates an expected call of DealsSetConsiderOnlineRetrievalDeals. +func (mr *MockIMarketMockRecorder) DealsSetConsiderOnlineRetrievalDeals(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetConsiderOnlineRetrievalDeals", reflect.TypeOf((*MockIMarket)(nil).DealsSetConsiderOnlineRetrievalDeals), arg0, arg1, arg2) +} + +// DealsSetConsiderOnlineStorageDeals mocks base method. +func (m *MockIMarket) DealsSetConsiderOnlineStorageDeals(arg0 context.Context, arg1 address.Address, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetConsiderOnlineStorageDeals", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetConsiderOnlineStorageDeals indicates an expected call of DealsSetConsiderOnlineStorageDeals. +func (mr *MockIMarketMockRecorder) DealsSetConsiderOnlineStorageDeals(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetConsiderOnlineStorageDeals", reflect.TypeOf((*MockIMarket)(nil).DealsSetConsiderOnlineStorageDeals), arg0, arg1, arg2) +} + +// DealsSetConsiderUnverifiedStorageDeals mocks base method. +func (m *MockIMarket) DealsSetConsiderUnverifiedStorageDeals(arg0 context.Context, arg1 address.Address, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetConsiderUnverifiedStorageDeals", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetConsiderUnverifiedStorageDeals indicates an expected call of DealsSetConsiderUnverifiedStorageDeals. 
+func (mr *MockIMarketMockRecorder) DealsSetConsiderUnverifiedStorageDeals(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetConsiderUnverifiedStorageDeals", reflect.TypeOf((*MockIMarket)(nil).DealsSetConsiderUnverifiedStorageDeals), arg0, arg1, arg2) +} + +// DealsSetConsiderVerifiedStorageDeals mocks base method. +func (m *MockIMarket) DealsSetConsiderVerifiedStorageDeals(arg0 context.Context, arg1 address.Address, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetConsiderVerifiedStorageDeals", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetConsiderVerifiedStorageDeals indicates an expected call of DealsSetConsiderVerifiedStorageDeals. +func (mr *MockIMarketMockRecorder) DealsSetConsiderVerifiedStorageDeals(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetConsiderVerifiedStorageDeals", reflect.TypeOf((*MockIMarket)(nil).DealsSetConsiderVerifiedStorageDeals), arg0, arg1, arg2) +} + +// DealsSetMaxProviderCollateralMultiplier mocks base method. +func (m *MockIMarket) DealsSetMaxProviderCollateralMultiplier(arg0 context.Context, arg1 address.Address, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetMaxProviderCollateralMultiplier", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetMaxProviderCollateralMultiplier indicates an expected call of DealsSetMaxProviderCollateralMultiplier. +func (mr *MockIMarketMockRecorder) DealsSetMaxProviderCollateralMultiplier(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetMaxProviderCollateralMultiplier", reflect.TypeOf((*MockIMarket)(nil).DealsSetMaxProviderCollateralMultiplier), arg0, arg1, arg2) +} + +// DealsSetMaxPublishFee mocks base method. 
+func (m *MockIMarket) DealsSetMaxPublishFee(arg0 context.Context, arg1 address.Address, arg2 internal.FIL) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetMaxPublishFee", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetMaxPublishFee indicates an expected call of DealsSetMaxPublishFee. +func (mr *MockIMarketMockRecorder) DealsSetMaxPublishFee(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetMaxPublishFee", reflect.TypeOf((*MockIMarket)(nil).DealsSetMaxPublishFee), arg0, arg1, arg2) +} + +// DealsSetMaxStartDelay mocks base method. +func (m *MockIMarket) DealsSetMaxStartDelay(arg0 context.Context, arg1 address.Address, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetMaxStartDelay", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetMaxStartDelay indicates an expected call of DealsSetMaxStartDelay. +func (mr *MockIMarketMockRecorder) DealsSetMaxStartDelay(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetMaxStartDelay", reflect.TypeOf((*MockIMarket)(nil).DealsSetMaxStartDelay), arg0, arg1, arg2) +} + +// DealsSetPieceCidBlocklist mocks base method. +func (m *MockIMarket) DealsSetPieceCidBlocklist(arg0 context.Context, arg1 address.Address, arg2 []cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetPieceCidBlocklist", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetPieceCidBlocklist indicates an expected call of DealsSetPieceCidBlocklist. 
+func (mr *MockIMarketMockRecorder) DealsSetPieceCidBlocklist(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetPieceCidBlocklist", reflect.TypeOf((*MockIMarket)(nil).DealsSetPieceCidBlocklist), arg0, arg1, arg2) +} + +// DealsSetPublishMsgPeriod mocks base method. +func (m *MockIMarket) DealsSetPublishMsgPeriod(arg0 context.Context, arg1 address.Address, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DealsSetPublishMsgPeriod", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DealsSetPublishMsgPeriod indicates an expected call of DealsSetPublishMsgPeriod. +func (mr *MockIMarketMockRecorder) DealsSetPublishMsgPeriod(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DealsSetPublishMsgPeriod", reflect.TypeOf((*MockIMarket)(nil).DealsSetPublishMsgPeriod), arg0, arg1, arg2) +} + +// GetDeals mocks base method. +func (m *MockIMarket) GetDeals(arg0 context.Context, arg1 address.Address, arg2, arg3 int) ([]*market.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeals", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]*market.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeals indicates an expected call of GetDeals. +func (mr *MockIMarketMockRecorder) GetDeals(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeals", reflect.TypeOf((*MockIMarket)(nil).GetDeals), arg0, arg1, arg2, arg3) +} + +// GetRetrievalDealStatistic mocks base method. 
+func (m *MockIMarket) GetRetrievalDealStatistic(arg0 context.Context, arg1 address.Address) (*market.RetrievalDealStatistic, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRetrievalDealStatistic", arg0, arg1) + ret0, _ := ret[0].(*market.RetrievalDealStatistic) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRetrievalDealStatistic indicates an expected call of GetRetrievalDealStatistic. +func (mr *MockIMarketMockRecorder) GetRetrievalDealStatistic(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRetrievalDealStatistic", reflect.TypeOf((*MockIMarket)(nil).GetRetrievalDealStatistic), arg0, arg1) +} + +// GetStorageDealStatistic mocks base method. +func (m *MockIMarket) GetStorageDealStatistic(arg0 context.Context, arg1 address.Address) (*market.StorageDealStatistic, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStorageDealStatistic", arg0, arg1) + ret0, _ := ret[0].(*market.StorageDealStatistic) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStorageDealStatistic indicates an expected call of GetStorageDealStatistic. +func (mr *MockIMarketMockRecorder) GetStorageDealStatistic(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStorageDealStatistic", reflect.TypeOf((*MockIMarket)(nil).GetStorageDealStatistic), arg0, arg1) +} + +// GetUnPackedDeals mocks base method. +func (m *MockIMarket) GetUnPackedDeals(arg0 context.Context, arg1 address.Address, arg2 *market.GetDealSpec) ([]*market.DealInfoIncludePath, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUnPackedDeals", arg0, arg1, arg2) + ret0, _ := ret[0].([]*market.DealInfoIncludePath) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUnPackedDeals indicates an expected call of GetUnPackedDeals. 
+func (mr *MockIMarketMockRecorder) GetUnPackedDeals(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnPackedDeals", reflect.TypeOf((*MockIMarket)(nil).GetUnPackedDeals), arg0, arg1, arg2) +} + +// ID mocks base method. +func (m *MockIMarket) ID(arg0 context.Context) (peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ID", arg0) + ret0, _ := ret[0].(peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ID indicates an expected call of ID. +func (mr *MockIMarketMockRecorder) ID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockIMarket)(nil).ID), arg0) +} + +// ImportV1Data mocks base method. +func (m *MockIMarket) ImportV1Data(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ImportV1Data", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ImportV1Data indicates an expected call of ImportV1Data. +func (mr *MockIMarketMockRecorder) ImportV1Data(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportV1Data", reflect.TypeOf((*MockIMarket)(nil).ImportV1Data), arg0, arg1) +} + +// ListPieceStorageInfos mocks base method. +func (m *MockIMarket) ListPieceStorageInfos(arg0 context.Context) market.PieceStorageInfos { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPieceStorageInfos", arg0) + ret0, _ := ret[0].(market.PieceStorageInfos) + return ret0 +} + +// ListPieceStorageInfos indicates an expected call of ListPieceStorageInfos. +func (mr *MockIMarketMockRecorder) ListPieceStorageInfos(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPieceStorageInfos", reflect.TypeOf((*MockIMarket)(nil).ListPieceStorageInfos), arg0) +} + +// ListenMarketEvent mocks base method. 
+func (m *MockIMarket) ListenMarketEvent(arg0 context.Context, arg1 *gateway.MarketRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListenMarketEvent", arg0, arg1) + ret0, _ := ret[0].(<-chan *gateway.RequestEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListenMarketEvent indicates an expected call of ListenMarketEvent. +func (mr *MockIMarketMockRecorder) ListenMarketEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenMarketEvent", reflect.TypeOf((*MockIMarket)(nil).ListenMarketEvent), arg0, arg1) +} + +// MarkDealsAsPacking mocks base method. +func (m *MockIMarket) MarkDealsAsPacking(arg0 context.Context, arg1 address.Address, arg2 []abi.DealID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkDealsAsPacking", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkDealsAsPacking indicates an expected call of MarkDealsAsPacking. +func (mr *MockIMarketMockRecorder) MarkDealsAsPacking(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkDealsAsPacking", reflect.TypeOf((*MockIMarket)(nil).MarkDealsAsPacking), arg0, arg1, arg2) +} + +// MarketAddBalance mocks base method. +func (m *MockIMarket) MarketAddBalance(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketAddBalance", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketAddBalance indicates an expected call of MarketAddBalance. 
+func (mr *MockIMarketMockRecorder) MarketAddBalance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketAddBalance", reflect.TypeOf((*MockIMarket)(nil).MarketAddBalance), arg0, arg1, arg2, arg3) +} + +// MarketCancelDataTransfer mocks base method. +func (m *MockIMarket) MarketCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketCancelDataTransfer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketCancelDataTransfer indicates an expected call of MarketCancelDataTransfer. +func (mr *MockIMarketMockRecorder) MarketCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketCancelDataTransfer", reflect.TypeOf((*MockIMarket)(nil).MarketCancelDataTransfer), arg0, arg1, arg2, arg3) +} + +// MarketDataTransferPath mocks base method. +func (m *MockIMarket) MarketDataTransferPath(arg0 context.Context, arg1 address.Address) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketDataTransferPath", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketDataTransferPath indicates an expected call of MarketDataTransferPath. +func (mr *MockIMarketMockRecorder) MarketDataTransferPath(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketDataTransferPath", reflect.TypeOf((*MockIMarket)(nil).MarketDataTransferPath), arg0, arg1) +} + +// MarketDataTransferUpdates mocks base method. 
+func (m *MockIMarket) MarketDataTransferUpdates(arg0 context.Context) (<-chan market.DataTransferChannel, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketDataTransferUpdates", arg0) + ret0, _ := ret[0].(<-chan market.DataTransferChannel) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketDataTransferUpdates indicates an expected call of MarketDataTransferUpdates. +func (mr *MockIMarketMockRecorder) MarketDataTransferUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketDataTransferUpdates", reflect.TypeOf((*MockIMarket)(nil).MarketDataTransferUpdates), arg0) +} + +// MarketGetAsk mocks base method. +func (m *MockIMarket) MarketGetAsk(arg0 context.Context, arg1 address.Address) (*market.SignedStorageAsk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketGetAsk", arg0, arg1) + ret0, _ := ret[0].(*market.SignedStorageAsk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketGetAsk indicates an expected call of MarketGetAsk. +func (mr *MockIMarketMockRecorder) MarketGetAsk(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetAsk", reflect.TypeOf((*MockIMarket)(nil).MarketGetAsk), arg0, arg1) +} + +// MarketGetDealUpdates mocks base method. +func (m *MockIMarket) MarketGetDealUpdates(arg0 context.Context) (<-chan market.MinerDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketGetDealUpdates", arg0) + ret0, _ := ret[0].(<-chan market.MinerDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketGetDealUpdates indicates an expected call of MarketGetDealUpdates. 
+func (mr *MockIMarketMockRecorder) MarketGetDealUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetDealUpdates", reflect.TypeOf((*MockIMarket)(nil).MarketGetDealUpdates), arg0) +} + +// MarketGetReserved mocks base method. +func (m *MockIMarket) MarketGetReserved(arg0 context.Context, arg1 address.Address) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketGetReserved", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketGetReserved indicates an expected call of MarketGetReserved. +func (mr *MockIMarketMockRecorder) MarketGetReserved(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetReserved", reflect.TypeOf((*MockIMarket)(nil).MarketGetReserved), arg0, arg1) +} + +// MarketGetRetrievalAsk mocks base method. +func (m *MockIMarket) MarketGetRetrievalAsk(arg0 context.Context, arg1 address.Address) (*retrievalmarket.Ask, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketGetRetrievalAsk", arg0, arg1) + ret0, _ := ret[0].(*retrievalmarket.Ask) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketGetRetrievalAsk indicates an expected call of MarketGetRetrievalAsk. +func (mr *MockIMarketMockRecorder) MarketGetRetrievalAsk(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetRetrievalAsk", reflect.TypeOf((*MockIMarket)(nil).MarketGetRetrievalAsk), arg0, arg1) +} + +// MarketImportDealData mocks base method. +func (m *MockIMarket) MarketImportDealData(arg0 context.Context, arg1 cid.Cid, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketImportDealData", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketImportDealData indicates an expected call of MarketImportDealData. 
+func (mr *MockIMarketMockRecorder) MarketImportDealData(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketImportDealData", reflect.TypeOf((*MockIMarket)(nil).MarketImportDealData), arg0, arg1, arg2) +} + +// MarketImportPublishedDeal mocks base method. +func (m *MockIMarket) MarketImportPublishedDeal(arg0 context.Context, arg1 market.MinerDeal) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketImportPublishedDeal", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketImportPublishedDeal indicates an expected call of MarketImportPublishedDeal. +func (mr *MockIMarketMockRecorder) MarketImportPublishedDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketImportPublishedDeal", reflect.TypeOf((*MockIMarket)(nil).MarketImportPublishedDeal), arg0, arg1) +} + +// MarketListAsk mocks base method. +func (m *MockIMarket) MarketListAsk(arg0 context.Context) ([]*market.SignedStorageAsk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketListAsk", arg0) + ret0, _ := ret[0].([]*market.SignedStorageAsk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketListAsk indicates an expected call of MarketListAsk. +func (mr *MockIMarketMockRecorder) MarketListAsk(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketListAsk", reflect.TypeOf((*MockIMarket)(nil).MarketListAsk), arg0) +} + +// MarketListDataTransfers mocks base method. +func (m *MockIMarket) MarketListDataTransfers(arg0 context.Context) ([]market.DataTransferChannel, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketListDataTransfers", arg0) + ret0, _ := ret[0].([]market.DataTransferChannel) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketListDataTransfers indicates an expected call of MarketListDataTransfers. 
+func (mr *MockIMarketMockRecorder) MarketListDataTransfers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketListDataTransfers", reflect.TypeOf((*MockIMarket)(nil).MarketListDataTransfers), arg0) +} + +// MarketListDeals mocks base method. +func (m *MockIMarket) MarketListDeals(arg0 context.Context, arg1 []address.Address) ([]*types.MarketDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketListDeals", arg0, arg1) + ret0, _ := ret[0].([]*types.MarketDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketListDeals indicates an expected call of MarketListDeals. +func (mr *MockIMarketMockRecorder) MarketListDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketListDeals", reflect.TypeOf((*MockIMarket)(nil).MarketListDeals), arg0, arg1) +} + +// MarketListIncompleteDeals mocks base method. +func (m *MockIMarket) MarketListIncompleteDeals(arg0 context.Context, arg1 address.Address) ([]market.MinerDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketListIncompleteDeals", arg0, arg1) + ret0, _ := ret[0].([]market.MinerDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketListIncompleteDeals indicates an expected call of MarketListIncompleteDeals. +func (mr *MockIMarketMockRecorder) MarketListIncompleteDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketListIncompleteDeals", reflect.TypeOf((*MockIMarket)(nil).MarketListIncompleteDeals), arg0, arg1) +} + +// MarketListRetrievalAsk mocks base method. 
+func (m *MockIMarket) MarketListRetrievalAsk(arg0 context.Context) ([]*market.RetrievalAsk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketListRetrievalAsk", arg0) + ret0, _ := ret[0].([]*market.RetrievalAsk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketListRetrievalAsk indicates an expected call of MarketListRetrievalAsk. +func (mr *MockIMarketMockRecorder) MarketListRetrievalAsk(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketListRetrievalAsk", reflect.TypeOf((*MockIMarket)(nil).MarketListRetrievalAsk), arg0) +} + +// MarketListRetrievalDeals mocks base method. +func (m *MockIMarket) MarketListRetrievalDeals(arg0 context.Context) ([]market.ProviderDealState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketListRetrievalDeals", arg0) + ret0, _ := ret[0].([]market.ProviderDealState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketListRetrievalDeals indicates an expected call of MarketListRetrievalDeals. +func (mr *MockIMarketMockRecorder) MarketListRetrievalDeals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketListRetrievalDeals", reflect.TypeOf((*MockIMarket)(nil).MarketListRetrievalDeals), arg0) +} + +// MarketMaxBalanceAddFee mocks base method. +func (m *MockIMarket) MarketMaxBalanceAddFee(arg0 context.Context, arg1 address.Address) (internal.FIL, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketMaxBalanceAddFee", arg0, arg1) + ret0, _ := ret[0].(internal.FIL) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketMaxBalanceAddFee indicates an expected call of MarketMaxBalanceAddFee. 
+func (mr *MockIMarketMockRecorder) MarketMaxBalanceAddFee(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketMaxBalanceAddFee", reflect.TypeOf((*MockIMarket)(nil).MarketMaxBalanceAddFee), arg0, arg1) +} + +// MarketMaxDealsPerPublishMsg mocks base method. +func (m *MockIMarket) MarketMaxDealsPerPublishMsg(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketMaxDealsPerPublishMsg", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketMaxDealsPerPublishMsg indicates an expected call of MarketMaxDealsPerPublishMsg. +func (mr *MockIMarketMockRecorder) MarketMaxDealsPerPublishMsg(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketMaxDealsPerPublishMsg", reflect.TypeOf((*MockIMarket)(nil).MarketMaxDealsPerPublishMsg), arg0, arg1) +} + +// MarketPendingDeals mocks base method. +func (m *MockIMarket) MarketPendingDeals(arg0 context.Context) ([]market.PendingDealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketPendingDeals", arg0) + ret0, _ := ret[0].([]market.PendingDealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketPendingDeals indicates an expected call of MarketPendingDeals. +func (mr *MockIMarketMockRecorder) MarketPendingDeals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketPendingDeals", reflect.TypeOf((*MockIMarket)(nil).MarketPendingDeals), arg0) +} + +// MarketPublishPendingDeals mocks base method. +func (m *MockIMarket) MarketPublishPendingDeals(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketPublishPendingDeals", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketPublishPendingDeals indicates an expected call of MarketPublishPendingDeals. 
+func (mr *MockIMarketMockRecorder) MarketPublishPendingDeals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketPublishPendingDeals", reflect.TypeOf((*MockIMarket)(nil).MarketPublishPendingDeals), arg0) +} + +// MarketReleaseFunds mocks base method. +func (m *MockIMarket) MarketReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 big.Int) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketReleaseFunds", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketReleaseFunds indicates an expected call of MarketReleaseFunds. +func (mr *MockIMarketMockRecorder) MarketReleaseFunds(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReleaseFunds", reflect.TypeOf((*MockIMarket)(nil).MarketReleaseFunds), arg0, arg1, arg2) +} + +// MarketReserveFunds mocks base method. +func (m *MockIMarket) MarketReserveFunds(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketReserveFunds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketReserveFunds indicates an expected call of MarketReserveFunds. +func (mr *MockIMarketMockRecorder) MarketReserveFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReserveFunds", reflect.TypeOf((*MockIMarket)(nil).MarketReserveFunds), arg0, arg1, arg2, arg3) +} + +// MarketRestartDataTransfer mocks base method. 
+func (m *MockIMarket) MarketRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketRestartDataTransfer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketRestartDataTransfer indicates an expected call of MarketRestartDataTransfer. +func (mr *MockIMarketMockRecorder) MarketRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketRestartDataTransfer", reflect.TypeOf((*MockIMarket)(nil).MarketRestartDataTransfer), arg0, arg1, arg2, arg3) +} + +// MarketSetAsk mocks base method. +func (m *MockIMarket) MarketSetAsk(arg0 context.Context, arg1 address.Address, arg2, arg3 big.Int, arg4 abi.ChainEpoch, arg5, arg6 abi.PaddedPieceSize) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketSetAsk", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketSetAsk indicates an expected call of MarketSetAsk. +func (mr *MockIMarketMockRecorder) MarketSetAsk(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketSetAsk", reflect.TypeOf((*MockIMarket)(nil).MarketSetAsk), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MarketSetDataTransferPath mocks base method. +func (m *MockIMarket) MarketSetDataTransferPath(arg0 context.Context, arg1 address.Address, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketSetDataTransferPath", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketSetDataTransferPath indicates an expected call of MarketSetDataTransferPath. 
+func (mr *MockIMarketMockRecorder) MarketSetDataTransferPath(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketSetDataTransferPath", reflect.TypeOf((*MockIMarket)(nil).MarketSetDataTransferPath), arg0, arg1, arg2) +} + +// MarketSetMaxBalanceAddFee mocks base method. +func (m *MockIMarket) MarketSetMaxBalanceAddFee(arg0 context.Context, arg1 address.Address, arg2 internal.FIL) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketSetMaxBalanceAddFee", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketSetMaxBalanceAddFee indicates an expected call of MarketSetMaxBalanceAddFee. +func (mr *MockIMarketMockRecorder) MarketSetMaxBalanceAddFee(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketSetMaxBalanceAddFee", reflect.TypeOf((*MockIMarket)(nil).MarketSetMaxBalanceAddFee), arg0, arg1, arg2) +} + +// MarketSetMaxDealsPerPublishMsg mocks base method. +func (m *MockIMarket) MarketSetMaxDealsPerPublishMsg(arg0 context.Context, arg1 address.Address, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketSetMaxDealsPerPublishMsg", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketSetMaxDealsPerPublishMsg indicates an expected call of MarketSetMaxDealsPerPublishMsg. +func (mr *MockIMarketMockRecorder) MarketSetMaxDealsPerPublishMsg(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketSetMaxDealsPerPublishMsg", reflect.TypeOf((*MockIMarket)(nil).MarketSetMaxDealsPerPublishMsg), arg0, arg1, arg2) +} + +// MarketSetRetrievalAsk mocks base method. 
+func (m *MockIMarket) MarketSetRetrievalAsk(arg0 context.Context, arg1 address.Address, arg2 *retrievalmarket.Ask) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketSetRetrievalAsk", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketSetRetrievalAsk indicates an expected call of MarketSetRetrievalAsk. +func (mr *MockIMarketMockRecorder) MarketSetRetrievalAsk(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketSetRetrievalAsk", reflect.TypeOf((*MockIMarket)(nil).MarketSetRetrievalAsk), arg0, arg1, arg2) +} + +// MarketWithdraw mocks base method. +func (m *MockIMarket) MarketWithdraw(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketWithdraw", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketWithdraw indicates an expected call of MarketWithdraw. +func (mr *MockIMarketMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketWithdraw", reflect.TypeOf((*MockIMarket)(nil).MarketWithdraw), arg0, arg1, arg2, arg3) +} + +// MessagerGetMessage mocks base method. +func (m *MockIMarket) MessagerGetMessage(arg0 context.Context, arg1 cid.Cid) (*internal.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessagerGetMessage", arg0, arg1) + ret0, _ := ret[0].(*internal.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessagerGetMessage indicates an expected call of MessagerGetMessage. 
+func (mr *MockIMarketMockRecorder) MessagerGetMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessagerGetMessage", reflect.TypeOf((*MockIMarket)(nil).MessagerGetMessage), arg0, arg1) +} + +// MessagerPushMessage mocks base method. +func (m *MockIMarket) MessagerPushMessage(arg0 context.Context, arg1 *internal.Message, arg2 *types.MessageSendSpec) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessagerPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessagerPushMessage indicates an expected call of MessagerPushMessage. +func (mr *MockIMarketMockRecorder) MessagerPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessagerPushMessage", reflect.TypeOf((*MockIMarket)(nil).MessagerPushMessage), arg0, arg1, arg2) +} + +// MessagerWaitMessage mocks base method. +func (m *MockIMarket) MessagerWaitMessage(arg0 context.Context, arg1 cid.Cid) (*types.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessagerWaitMessage", arg0, arg1) + ret0, _ := ret[0].(*types.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessagerWaitMessage indicates an expected call of MessagerWaitMessage. +func (mr *MockIMarketMockRecorder) MessagerWaitMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessagerWaitMessage", reflect.TypeOf((*MockIMarket)(nil).MessagerWaitMessage), arg0, arg1) +} + +// NetAddrsListen mocks base method. +func (m *MockIMarket) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAddrsListen", arg0) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAddrsListen indicates an expected call of NetAddrsListen. 
+func (mr *MockIMarketMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockIMarket)(nil).NetAddrsListen), arg0) +} + +// OfflineDealImport mocks base method. +func (m *MockIMarket) OfflineDealImport(arg0 context.Context, arg1 market.MinerDeal) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OfflineDealImport", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// OfflineDealImport indicates an expected call of OfflineDealImport. +func (mr *MockIMarketMockRecorder) OfflineDealImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OfflineDealImport", reflect.TypeOf((*MockIMarket)(nil).OfflineDealImport), arg0, arg1) +} + +// PaychVoucherList mocks base method. +func (m *MockIMarket) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1) + ret0, _ := ret[0].([]*paych.SignedVoucher) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherList indicates an expected call of PaychVoucherList. +func (mr *MockIMarketMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockIMarket)(nil).PaychVoucherList), arg0, arg1) +} + +// PiecesGetCIDInfo mocks base method. +func (m *MockIMarket) PiecesGetCIDInfo(arg0 context.Context, arg1 cid.Cid) (*piecestore.CIDInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PiecesGetCIDInfo", arg0, arg1) + ret0, _ := ret[0].(*piecestore.CIDInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PiecesGetCIDInfo indicates an expected call of PiecesGetCIDInfo. 
+func (mr *MockIMarketMockRecorder) PiecesGetCIDInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PiecesGetCIDInfo", reflect.TypeOf((*MockIMarket)(nil).PiecesGetCIDInfo), arg0, arg1) +} + +// PiecesGetPieceInfo mocks base method. +func (m *MockIMarket) PiecesGetPieceInfo(arg0 context.Context, arg1 cid.Cid) (*piecestore.PieceInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PiecesGetPieceInfo", arg0, arg1) + ret0, _ := ret[0].(*piecestore.PieceInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PiecesGetPieceInfo indicates an expected call of PiecesGetPieceInfo. +func (mr *MockIMarketMockRecorder) PiecesGetPieceInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PiecesGetPieceInfo", reflect.TypeOf((*MockIMarket)(nil).PiecesGetPieceInfo), arg0, arg1) +} + +// PiecesListCidInfos mocks base method. +func (m *MockIMarket) PiecesListCidInfos(arg0 context.Context) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PiecesListCidInfos", arg0) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PiecesListCidInfos indicates an expected call of PiecesListCidInfos. +func (mr *MockIMarketMockRecorder) PiecesListCidInfos(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PiecesListCidInfos", reflect.TypeOf((*MockIMarket)(nil).PiecesListCidInfos), arg0) +} + +// PiecesListPieces mocks base method. +func (m *MockIMarket) PiecesListPieces(arg0 context.Context) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PiecesListPieces", arg0) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PiecesListPieces indicates an expected call of PiecesListPieces. 
+func (mr *MockIMarketMockRecorder) PiecesListPieces(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PiecesListPieces", reflect.TypeOf((*MockIMarket)(nil).PiecesListPieces), arg0) +} + +// RemovePieceStorage mocks base method. +func (m *MockIMarket) RemovePieceStorage(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemovePieceStorage", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemovePieceStorage indicates an expected call of RemovePieceStorage. +func (mr *MockIMarketMockRecorder) RemovePieceStorage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePieceStorage", reflect.TypeOf((*MockIMarket)(nil).RemovePieceStorage), arg0, arg1) +} + +// ResponseMarketEvent mocks base method. +func (m *MockIMarket) ResponseMarketEvent(arg0 context.Context, arg1 *gateway.ResponseEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResponseMarketEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResponseMarketEvent indicates an expected call of ResponseMarketEvent. +func (mr *MockIMarketMockRecorder) ResponseMarketEvent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResponseMarketEvent", reflect.TypeOf((*MockIMarket)(nil).ResponseMarketEvent), arg0, arg1) +} + +// SectorGetExpectedSealDuration mocks base method. +func (m *MockIMarket) SectorGetExpectedSealDuration(arg0 context.Context, arg1 address.Address) (time.Duration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SectorGetExpectedSealDuration", arg0, arg1) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SectorGetExpectedSealDuration indicates an expected call of SectorGetExpectedSealDuration. 
+func (mr *MockIMarketMockRecorder) SectorGetExpectedSealDuration(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SectorGetExpectedSealDuration", reflect.TypeOf((*MockIMarket)(nil).SectorGetExpectedSealDuration), arg0, arg1) +} + +// SectorSetExpectedSealDuration mocks base method. +func (m *MockIMarket) SectorSetExpectedSealDuration(arg0 context.Context, arg1 address.Address, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SectorSetExpectedSealDuration", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SectorSetExpectedSealDuration indicates an expected call of SectorSetExpectedSealDuration. +func (mr *MockIMarketMockRecorder) SectorSetExpectedSealDuration(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SectorSetExpectedSealDuration", reflect.TypeOf((*MockIMarket)(nil).SectorSetExpectedSealDuration), arg0, arg1, arg2) +} + +// UpdateDealOnPacking mocks base method. +func (m *MockIMarket) UpdateDealOnPacking(arg0 context.Context, arg1 address.Address, arg2 abi.DealID, arg3 abi.SectorNumber, arg4 abi.PaddedPieceSize) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateDealOnPacking", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateDealOnPacking indicates an expected call of UpdateDealOnPacking. +func (mr *MockIMarketMockRecorder) UpdateDealOnPacking(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDealOnPacking", reflect.TypeOf((*MockIMarket)(nil).UpdateDealOnPacking), arg0, arg1, arg2, arg3, arg4) +} + +// UpdateDealStatus mocks base method. 
+func (m *MockIMarket) UpdateDealStatus(arg0 context.Context, arg1 address.Address, arg2 abi.DealID, arg3 market.PieceStatus) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateDealStatus", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateDealStatus indicates an expected call of UpdateDealStatus. +func (mr *MockIMarketMockRecorder) UpdateDealStatus(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDealStatus", reflect.TypeOf((*MockIMarket)(nil).UpdateDealStatus), arg0, arg1, arg2, arg3) +} + +// UpdateStorageDealStatus mocks base method. +func (m *MockIMarket) UpdateStorageDealStatus(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 market.PieceStatus) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateStorageDealStatus", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateStorageDealStatus indicates an expected call of UpdateStorageDealStatus. +func (mr *MockIMarketMockRecorder) UpdateStorageDealStatus(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateStorageDealStatus", reflect.TypeOf((*MockIMarket)(nil).UpdateStorageDealStatus), arg0, arg1, arg2, arg3) +} + +// Version mocks base method. +func (m *MockIMarket) Version(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. 
+func (mr *MockIMarketMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockIMarket)(nil).Version), arg0) +} diff --git a/venus-shared/api/market/proxy_gen.go b/venus-shared/api/market/proxy_gen.go new file mode 100644 index 0000000000..68285f839b --- /dev/null +++ b/venus-shared/api/market/proxy_gen.go @@ -0,0 +1,389 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package market + +import ( + "context" + "time" + + address "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/gateway" + "github.com/filecoin-project/venus/venus-shared/types/market" +) + +type IMarketStruct struct { + Internal struct { + ActorExist func(ctx context.Context, addr address.Address) (bool, error) `perm:"read"` + ActorList func(context.Context) ([]market.User, error) `perm:"read"` + ActorSectorSize func(context.Context, address.Address) (abi.SectorSize, error) `perm:"read"` + AddFsPieceStorage func(ctx context.Context, name string, path string, readonly bool) error `perm:"admin"` + AddS3PieceStorage func(ctx context.Context, name, endpoit, bucket, subdir, accessKey, secretKey, token string, readonly bool) error `perm:"admin"` + AssignUnPackedDeals func(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, spec *market.GetDealSpec) ([]*market.DealInfoIncludePath, error) `perm:"write"` + DagstoreGC 
func(ctx context.Context) ([]market.DagstoreShardResult, error) `perm:"admin"` + DagstoreInitializeAll func(ctx context.Context, params market.DagstoreInitializeAllParams) (<-chan market.DagstoreInitializeAllEvent, error) `perm:"admin"` + DagstoreInitializeShard func(ctx context.Context, key string) error `perm:"admin"` + DagstoreInitializeStorage func(context.Context, string, market.DagstoreInitializeAllParams) (<-chan market.DagstoreInitializeAllEvent, error) `perm:"admin"` + DagstoreListShards func(ctx context.Context) ([]market.DagstoreShardInfo, error) `perm:"admin"` + DagstoreRecoverShard func(ctx context.Context, key string) error `perm:"admin"` + DealsConsiderOfflineRetrievalDeals func(context.Context, address.Address) (bool, error) `perm:"read"` + DealsConsiderOfflineStorageDeals func(context.Context, address.Address) (bool, error) `perm:"read"` + DealsConsiderOnlineRetrievalDeals func(context.Context, address.Address) (bool, error) `perm:"read"` + DealsConsiderOnlineStorageDeals func(context.Context, address.Address) (bool, error) `perm:"read"` + DealsConsiderUnverifiedStorageDeals func(context.Context, address.Address) (bool, error) `perm:"read"` + DealsConsiderVerifiedStorageDeals func(context.Context, address.Address) (bool, error) `perm:"read"` + DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"admin"` + DealsMaxProviderCollateralMultiplier func(context.Context, address.Address) (uint64, error) `perm:"read"` + DealsMaxPublishFee func(context.Context, address.Address) (types.FIL, error) `perm:"read"` + DealsMaxStartDelay func(context.Context, address.Address) (time.Duration, error) `perm:"read"` + DealsPieceCidBlocklist func(context.Context, address.Address) ([]cid.Cid, error) `perm:"read"` + DealsPublishMsgPeriod func(context.Context, address.Address) (time.Duration, error) `perm:"read"` + DealsSetConsiderOfflineRetrievalDeals func(context.Context, address.Address, bool) error `perm:"write"` + 
DealsSetConsiderOfflineStorageDeals func(context.Context, address.Address, bool) error `perm:"write"` + DealsSetConsiderOnlineRetrievalDeals func(context.Context, address.Address, bool) error `perm:"write"` + DealsSetConsiderOnlineStorageDeals func(context.Context, address.Address, bool) error `perm:"write"` + DealsSetConsiderUnverifiedStorageDeals func(context.Context, address.Address, bool) error `perm:"write"` + DealsSetConsiderVerifiedStorageDeals func(context.Context, address.Address, bool) error `perm:"write"` + DealsSetMaxProviderCollateralMultiplier func(context.Context, address.Address, uint64) error `perm:"write"` + DealsSetMaxPublishFee func(context.Context, address.Address, types.FIL) error `perm:"write"` + DealsSetMaxStartDelay func(context.Context, address.Address, time.Duration) error `perm:"write"` + DealsSetPieceCidBlocklist func(context.Context, address.Address, []cid.Cid) error `perm:"write"` + DealsSetPublishMsgPeriod func(context.Context, address.Address, time.Duration) error `perm:"write"` + GetDeals func(ctx context.Context, miner address.Address, pageIndex, pageSize int) ([]*market.DealInfo, error) `perm:"read"` + GetRetrievalDealStatistic func(ctx context.Context, miner address.Address) (*market.RetrievalDealStatistic, error) `perm:"read"` + GetStorageDealStatistic func(ctx context.Context, miner address.Address) (*market.StorageDealStatistic, error) `perm:"read"` + GetUnPackedDeals func(ctx context.Context, miner address.Address, spec *market.GetDealSpec) ([]*market.DealInfoIncludePath, error) `perm:"read"` + ID func(context.Context) (peer.ID, error) `perm:"read"` + ImportV1Data func(ctx context.Context, src string) error `perm:"write"` + ListPieceStorageInfos func(ctx context.Context) market.PieceStorageInfos `perm:"read"` + ListenMarketEvent func(ctx context.Context, policy *gateway.MarketRegisterPolicy) (<-chan *gateway.RequestEvent, error) `perm:"read"` + MarkDealsAsPacking func(ctx context.Context, miner address.Address, deals 
[]abi.DealID) error `perm:"write"` + MarketAddBalance func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"` + MarketCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"` + MarketDataTransferPath func(context.Context, address.Address) (string, error) `perm:"admin"` + MarketDataTransferUpdates func(ctx context.Context) (<-chan market.DataTransferChannel, error) `perm:"write"` + MarketGetAsk func(ctx context.Context, mAddr address.Address) (*market.SignedStorageAsk, error) `perm:"read"` + MarketGetDealUpdates func(ctx context.Context) (<-chan market.MinerDeal, error) `perm:"read"` + MarketGetReserved func(ctx context.Context, addr address.Address) (types.BigInt, error) `perm:"sign"` + MarketGetRetrievalAsk func(ctx context.Context, mAddr address.Address) (*retrievalmarket.Ask, error) `perm:"read"` + MarketImportDealData func(ctx context.Context, propcid cid.Cid, path string) error `perm:"write"` + MarketImportPublishedDeal func(ctx context.Context, deal market.MinerDeal) error `perm:"write"` + MarketListAsk func(ctx context.Context) ([]*market.SignedStorageAsk, error) `perm:"read"` + MarketListDataTransfers func(ctx context.Context) ([]market.DataTransferChannel, error) `perm:"write"` + MarketListDeals func(ctx context.Context, addrs []address.Address) ([]*types.MarketDeal, error) `perm:"read"` + MarketListIncompleteDeals func(ctx context.Context, mAddr address.Address) ([]market.MinerDeal, error) `perm:"read"` + MarketListRetrievalAsk func(ctx context.Context) ([]*market.RetrievalAsk, error) `perm:"read"` + MarketListRetrievalDeals func(ctx context.Context) ([]market.ProviderDealState, error) `perm:"read"` + MarketMaxBalanceAddFee func(context.Context, address.Address) (types.FIL, error) `perm:"read"` + MarketMaxDealsPerPublishMsg func(context.Context, address.Address) (uint64, error) `perm:"read"` + MarketPendingDeals func(ctx 
context.Context) ([]market.PendingDealInfo, error) `perm:"write"` + MarketPublishPendingDeals func(ctx context.Context) error `perm:"admin"` + MarketReleaseFunds func(ctx context.Context, addr address.Address, amt types.BigInt) error `perm:"sign"` + MarketReserveFunds func(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"` + MarketRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"` + MarketSetAsk func(ctx context.Context, mAddr address.Address, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"` + MarketSetDataTransferPath func(context.Context, address.Address, string) error `perm:"admin"` + MarketSetMaxBalanceAddFee func(context.Context, address.Address, types.FIL) error `perm:"write"` + MarketSetMaxDealsPerPublishMsg func(context.Context, address.Address, uint64) error `perm:"write"` + MarketSetRetrievalAsk func(ctx context.Context, mAddr address.Address, rask *retrievalmarket.Ask) error `perm:"admin"` + MarketWithdraw func(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) `perm:"sign"` + MessagerGetMessage func(ctx context.Context, mid cid.Cid) (*types.Message, error) `perm:"read"` + MessagerPushMessage func(ctx context.Context, msg *types.Message, meta *types.MessageSendSpec) (cid.Cid, error) `perm:"write"` + MessagerWaitMessage func(ctx context.Context, mid cid.Cid) (*types.MsgLookup, error) `perm:"read"` + NetAddrsListen func(context.Context) (peer.AddrInfo, error) `perm:"read"` + OfflineDealImport func(ctx context.Context, deal market.MinerDeal) error `perm:"admin"` + PaychVoucherList func(ctx context.Context, pch address.Address) ([]*paych.SignedVoucher, error) `perm:"read"` + PiecesGetCIDInfo func(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, 
error) `perm:"read"` + PiecesGetPieceInfo func(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) `perm:"read"` + PiecesListCidInfos func(ctx context.Context) ([]cid.Cid, error) `perm:"read"` + PiecesListPieces func(ctx context.Context) ([]cid.Cid, error) `perm:"read"` + RemovePieceStorage func(ctx context.Context, name string) error `perm:"admin"` + ResponseMarketEvent func(ctx context.Context, resp *gateway.ResponseEvent) error `perm:"read"` + SectorGetExpectedSealDuration func(context.Context, address.Address) (time.Duration, error) `perm:"read"` + SectorSetExpectedSealDuration func(context.Context, address.Address, time.Duration) error `perm:"write"` + UpdateDealOnPacking func(ctx context.Context, miner address.Address, dealID abi.DealID, sectorid abi.SectorNumber, offset abi.PaddedPieceSize) error `perm:"write"` + UpdateDealStatus func(ctx context.Context, miner address.Address, dealID abi.DealID, pieceStatus market.PieceStatus) error `perm:"write"` + UpdateStorageDealStatus func(ctx context.Context, dealProposalCid cid.Cid, state storagemarket.StorageDealStatus, pieceState market.PieceStatus) error `perm:"write"` + Version func(ctx context.Context) (types.Version, error) `perm:"read"` + } +} + +func (s *IMarketStruct) ActorExist(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.ActorExist(p0, p1) +} +func (s *IMarketStruct) ActorList(p0 context.Context) ([]market.User, error) { + return s.Internal.ActorList(p0) +} +func (s *IMarketStruct) ActorSectorSize(p0 context.Context, p1 address.Address) (abi.SectorSize, error) { + return s.Internal.ActorSectorSize(p0, p1) +} +func (s *IMarketStruct) AddFsPieceStorage(p0 context.Context, p1 string, p2 string, p3 bool) error { + return s.Internal.AddFsPieceStorage(p0, p1, p2, p3) +} +func (s *IMarketStruct) AddS3PieceStorage(p0 context.Context, p1, p2, p3, p4, p5, p6, p7 string, p8 bool) error { + return s.Internal.AddS3PieceStorage(p0, p1, p2, p3, p4, p5, p6, p7, p8) +} +func 
(s *IMarketStruct) AssignUnPackedDeals(p0 context.Context, p1 abi.SectorID, p2 abi.SectorSize, p3 *market.GetDealSpec) ([]*market.DealInfoIncludePath, error) { + return s.Internal.AssignUnPackedDeals(p0, p1, p2, p3) +} +func (s *IMarketStruct) DagstoreGC(p0 context.Context) ([]market.DagstoreShardResult, error) { + return s.Internal.DagstoreGC(p0) +} +func (s *IMarketStruct) DagstoreInitializeAll(p0 context.Context, p1 market.DagstoreInitializeAllParams) (<-chan market.DagstoreInitializeAllEvent, error) { + return s.Internal.DagstoreInitializeAll(p0, p1) +} +func (s *IMarketStruct) DagstoreInitializeShard(p0 context.Context, p1 string) error { + return s.Internal.DagstoreInitializeShard(p0, p1) +} +func (s *IMarketStruct) DagstoreInitializeStorage(p0 context.Context, p1 string, p2 market.DagstoreInitializeAllParams) (<-chan market.DagstoreInitializeAllEvent, error) { + return s.Internal.DagstoreInitializeStorage(p0, p1, p2) +} +func (s *IMarketStruct) DagstoreListShards(p0 context.Context) ([]market.DagstoreShardInfo, error) { + return s.Internal.DagstoreListShards(p0) +} +func (s *IMarketStruct) DagstoreRecoverShard(p0 context.Context, p1 string) error { + return s.Internal.DagstoreRecoverShard(p0, p1) +} +func (s *IMarketStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.DealsConsiderOfflineRetrievalDeals(p0, p1) +} +func (s *IMarketStruct) DealsConsiderOfflineStorageDeals(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.DealsConsiderOfflineStorageDeals(p0, p1) +} +func (s *IMarketStruct) DealsConsiderOnlineRetrievalDeals(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.DealsConsiderOnlineRetrievalDeals(p0, p1) +} +func (s *IMarketStruct) DealsConsiderOnlineStorageDeals(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.DealsConsiderOnlineStorageDeals(p0, p1) +} +func (s *IMarketStruct) 
DealsConsiderUnverifiedStorageDeals(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.DealsConsiderUnverifiedStorageDeals(p0, p1) +} +func (s *IMarketStruct) DealsConsiderVerifiedStorageDeals(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.DealsConsiderVerifiedStorageDeals(p0, p1) +} +func (s *IMarketStruct) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error { + return s.Internal.DealsImportData(p0, p1, p2) +} +func (s *IMarketStruct) DealsMaxProviderCollateralMultiplier(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.DealsMaxProviderCollateralMultiplier(p0, p1) +} +func (s *IMarketStruct) DealsMaxPublishFee(p0 context.Context, p1 address.Address) (types.FIL, error) { + return s.Internal.DealsMaxPublishFee(p0, p1) +} +func (s *IMarketStruct) DealsMaxStartDelay(p0 context.Context, p1 address.Address) (time.Duration, error) { + return s.Internal.DealsMaxStartDelay(p0, p1) +} +func (s *IMarketStruct) DealsPieceCidBlocklist(p0 context.Context, p1 address.Address) ([]cid.Cid, error) { + return s.Internal.DealsPieceCidBlocklist(p0, p1) +} +func (s *IMarketStruct) DealsPublishMsgPeriod(p0 context.Context, p1 address.Address) (time.Duration, error) { + return s.Internal.DealsPublishMsgPeriod(p0, p1) +} +func (s *IMarketStruct) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 address.Address, p2 bool) error { + return s.Internal.DealsSetConsiderOfflineRetrievalDeals(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 address.Address, p2 bool) error { + return s.Internal.DealsSetConsiderOfflineStorageDeals(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 address.Address, p2 bool) error { + return s.Internal.DealsSetConsiderOnlineRetrievalDeals(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 address.Address, p2 bool) error 
{ + return s.Internal.DealsSetConsiderOnlineStorageDeals(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 address.Address, p2 bool) error { + return s.Internal.DealsSetConsiderUnverifiedStorageDeals(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 address.Address, p2 bool) error { + return s.Internal.DealsSetConsiderVerifiedStorageDeals(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetMaxProviderCollateralMultiplier(p0 context.Context, p1 address.Address, p2 uint64) error { + return s.Internal.DealsSetMaxProviderCollateralMultiplier(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetMaxPublishFee(p0 context.Context, p1 address.Address, p2 types.FIL) error { + return s.Internal.DealsSetMaxPublishFee(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetMaxStartDelay(p0 context.Context, p1 address.Address, p2 time.Duration) error { + return s.Internal.DealsSetMaxStartDelay(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetPieceCidBlocklist(p0 context.Context, p1 address.Address, p2 []cid.Cid) error { + return s.Internal.DealsSetPieceCidBlocklist(p0, p1, p2) +} +func (s *IMarketStruct) DealsSetPublishMsgPeriod(p0 context.Context, p1 address.Address, p2 time.Duration) error { + return s.Internal.DealsSetPublishMsgPeriod(p0, p1, p2) +} +func (s *IMarketStruct) GetDeals(p0 context.Context, p1 address.Address, p2, p3 int) ([]*market.DealInfo, error) { + return s.Internal.GetDeals(p0, p1, p2, p3) +} +func (s *IMarketStruct) GetRetrievalDealStatistic(p0 context.Context, p1 address.Address) (*market.RetrievalDealStatistic, error) { + return s.Internal.GetRetrievalDealStatistic(p0, p1) +} +func (s *IMarketStruct) GetStorageDealStatistic(p0 context.Context, p1 address.Address) (*market.StorageDealStatistic, error) { + return s.Internal.GetStorageDealStatistic(p0, p1) +} +func (s *IMarketStruct) GetUnPackedDeals(p0 context.Context, p1 address.Address, p2 *market.GetDealSpec) 
([]*market.DealInfoIncludePath, error) { + return s.Internal.GetUnPackedDeals(p0, p1, p2) +} +func (s *IMarketStruct) ID(p0 context.Context) (peer.ID, error) { return s.Internal.ID(p0) } +func (s *IMarketStruct) ImportV1Data(p0 context.Context, p1 string) error { + return s.Internal.ImportV1Data(p0, p1) +} +func (s *IMarketStruct) ListPieceStorageInfos(p0 context.Context) market.PieceStorageInfos { + return s.Internal.ListPieceStorageInfos(p0) +} +func (s *IMarketStruct) ListenMarketEvent(p0 context.Context, p1 *gateway.MarketRegisterPolicy) (<-chan *gateway.RequestEvent, error) { + return s.Internal.ListenMarketEvent(p0, p1) +} +func (s *IMarketStruct) MarkDealsAsPacking(p0 context.Context, p1 address.Address, p2 []abi.DealID) error { + return s.Internal.MarkDealsAsPacking(p0, p1, p2) +} +func (s *IMarketStruct) MarketAddBalance(p0 context.Context, p1, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketAddBalance(p0, p1, p2, p3) +} +func (s *IMarketStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.MarketCancelDataTransfer(p0, p1, p2, p3) +} +func (s *IMarketStruct) MarketDataTransferPath(p0 context.Context, p1 address.Address) (string, error) { + return s.Internal.MarketDataTransferPath(p0, p1) +} +func (s *IMarketStruct) MarketDataTransferUpdates(p0 context.Context) (<-chan market.DataTransferChannel, error) { + return s.Internal.MarketDataTransferUpdates(p0) +} +func (s *IMarketStruct) MarketGetAsk(p0 context.Context, p1 address.Address) (*market.SignedStorageAsk, error) { + return s.Internal.MarketGetAsk(p0, p1) +} +func (s *IMarketStruct) MarketGetDealUpdates(p0 context.Context) (<-chan market.MinerDeal, error) { + return s.Internal.MarketGetDealUpdates(p0) +} +func (s *IMarketStruct) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return s.Internal.MarketGetReserved(p0, p1) +} +func (s *IMarketStruct) 
MarketGetRetrievalAsk(p0 context.Context, p1 address.Address) (*retrievalmarket.Ask, error) { + return s.Internal.MarketGetRetrievalAsk(p0, p1) +} +func (s *IMarketStruct) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error { + return s.Internal.MarketImportDealData(p0, p1, p2) +} +func (s *IMarketStruct) MarketImportPublishedDeal(p0 context.Context, p1 market.MinerDeal) error { + return s.Internal.MarketImportPublishedDeal(p0, p1) +} +func (s *IMarketStruct) MarketListAsk(p0 context.Context) ([]*market.SignedStorageAsk, error) { + return s.Internal.MarketListAsk(p0) +} +func (s *IMarketStruct) MarketListDataTransfers(p0 context.Context) ([]market.DataTransferChannel, error) { + return s.Internal.MarketListDataTransfers(p0) +} +func (s *IMarketStruct) MarketListDeals(p0 context.Context, p1 []address.Address) ([]*types.MarketDeal, error) { + return s.Internal.MarketListDeals(p0, p1) +} +func (s *IMarketStruct) MarketListIncompleteDeals(p0 context.Context, p1 address.Address) ([]market.MinerDeal, error) { + return s.Internal.MarketListIncompleteDeals(p0, p1) +} +func (s *IMarketStruct) MarketListRetrievalAsk(p0 context.Context) ([]*market.RetrievalAsk, error) { + return s.Internal.MarketListRetrievalAsk(p0) +} +func (s *IMarketStruct) MarketListRetrievalDeals(p0 context.Context) ([]market.ProviderDealState, error) { + return s.Internal.MarketListRetrievalDeals(p0) +} +func (s *IMarketStruct) MarketMaxBalanceAddFee(p0 context.Context, p1 address.Address) (types.FIL, error) { + return s.Internal.MarketMaxBalanceAddFee(p0, p1) +} +func (s *IMarketStruct) MarketMaxDealsPerPublishMsg(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.MarketMaxDealsPerPublishMsg(p0, p1) +} +func (s *IMarketStruct) MarketPendingDeals(p0 context.Context) ([]market.PendingDealInfo, error) { + return s.Internal.MarketPendingDeals(p0) +} +func (s *IMarketStruct) MarketPublishPendingDeals(p0 context.Context) error { + return 
s.Internal.MarketPublishPendingDeals(p0) +} +func (s *IMarketStruct) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { + return s.Internal.MarketReleaseFunds(p0, p1, p2) +} +func (s *IMarketStruct) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketReserveFunds(p0, p1, p2, p3) +} +func (s *IMarketStruct) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.MarketRestartDataTransfer(p0, p1, p2, p3) +} +func (s *IMarketStruct) MarketSetAsk(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 types.BigInt, p4 abi.ChainEpoch, p5 abi.PaddedPieceSize, p6 abi.PaddedPieceSize) error { + return s.Internal.MarketSetAsk(p0, p1, p2, p3, p4, p5, p6) +} +func (s *IMarketStruct) MarketSetDataTransferPath(p0 context.Context, p1 address.Address, p2 string) error { + return s.Internal.MarketSetDataTransferPath(p0, p1, p2) +} +func (s *IMarketStruct) MarketSetMaxBalanceAddFee(p0 context.Context, p1 address.Address, p2 types.FIL) error { + return s.Internal.MarketSetMaxBalanceAddFee(p0, p1, p2) +} +func (s *IMarketStruct) MarketSetMaxDealsPerPublishMsg(p0 context.Context, p1 address.Address, p2 uint64) error { + return s.Internal.MarketSetMaxDealsPerPublishMsg(p0, p1, p2) +} +func (s *IMarketStruct) MarketSetRetrievalAsk(p0 context.Context, p1 address.Address, p2 *retrievalmarket.Ask) error { + return s.Internal.MarketSetRetrievalAsk(p0, p1, p2) +} +func (s *IMarketStruct) MarketWithdraw(p0 context.Context, p1, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketWithdraw(p0, p1, p2, p3) +} +func (s *IMarketStruct) MessagerGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return s.Internal.MessagerGetMessage(p0, p1) +} +func (s *IMarketStruct) MessagerPushMessage(p0 context.Context, p1 *types.Message, p2 *types.MessageSendSpec) (cid.Cid, 
error) { + return s.Internal.MessagerPushMessage(p0, p1, p2) +} +func (s *IMarketStruct) MessagerWaitMessage(p0 context.Context, p1 cid.Cid) (*types.MsgLookup, error) { + return s.Internal.MessagerWaitMessage(p0, p1) +} +func (s *IMarketStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { + return s.Internal.NetAddrsListen(p0) +} +func (s *IMarketStruct) OfflineDealImport(p0 context.Context, p1 market.MinerDeal) error { + return s.Internal.OfflineDealImport(p0, p1) +} +func (s *IMarketStruct) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) { + return s.Internal.PaychVoucherList(p0, p1) +} +func (s *IMarketStruct) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) { + return s.Internal.PiecesGetCIDInfo(p0, p1) +} +func (s *IMarketStruct) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) { + return s.Internal.PiecesGetPieceInfo(p0, p1) +} +func (s *IMarketStruct) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) { + return s.Internal.PiecesListCidInfos(p0) +} +func (s *IMarketStruct) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) { + return s.Internal.PiecesListPieces(p0) +} +func (s *IMarketStruct) RemovePieceStorage(p0 context.Context, p1 string) error { + return s.Internal.RemovePieceStorage(p0, p1) +} +func (s *IMarketStruct) ResponseMarketEvent(p0 context.Context, p1 *gateway.ResponseEvent) error { + return s.Internal.ResponseMarketEvent(p0, p1) +} +func (s *IMarketStruct) SectorGetExpectedSealDuration(p0 context.Context, p1 address.Address) (time.Duration, error) { + return s.Internal.SectorGetExpectedSealDuration(p0, p1) +} +func (s *IMarketStruct) SectorSetExpectedSealDuration(p0 context.Context, p1 address.Address, p2 time.Duration) error { + return s.Internal.SectorSetExpectedSealDuration(p0, p1, p2) +} +func (s *IMarketStruct) UpdateDealOnPacking(p0 context.Context, p1 address.Address, p2 abi.DealID, p3 abi.SectorNumber, p4 
abi.PaddedPieceSize) error { + return s.Internal.UpdateDealOnPacking(p0, p1, p2, p3, p4) +} +func (s *IMarketStruct) UpdateDealStatus(p0 context.Context, p1 address.Address, p2 abi.DealID, p3 market.PieceStatus) error { + return s.Internal.UpdateDealStatus(p0, p1, p2, p3) +} +func (s *IMarketStruct) UpdateStorageDealStatus(p0 context.Context, p1 cid.Cid, p2 storagemarket.StorageDealStatus, p3 market.PieceStatus) error { + return s.Internal.UpdateStorageDealStatus(p0, p1, p2, p3) +} +func (s *IMarketStruct) Version(p0 context.Context) (types.Version, error) { + return s.Internal.Version(p0) +} diff --git a/venus-shared/api/messager/api.go b/venus-shared/api/messager/api.go new file mode 100644 index 0000000000..42a38e82b1 --- /dev/null +++ b/venus-shared/api/messager/api.go @@ -0,0 +1,70 @@ +package messager + +import ( + "context" + "time" + + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/venus-shared/api" + "github.com/filecoin-project/venus/venus-shared/types" + mtypes "github.com/filecoin-project/venus/venus-shared/types/messager" +) + +type IMessager interface { + HasMessageByUid(ctx context.Context, id string) (bool, error) //perm:read + WaitMessage(ctx context.Context, id string, confidence uint64) (*mtypes.Message, error) //perm:read + PushMessage(ctx context.Context, msg *types.Message, meta *mtypes.SendSpec) (string, error) //perm:write + PushMessageWithId(ctx context.Context, id string, msg *types.Message, meta *mtypes.SendSpec) (string, error) //perm:write + GetMessageByUid(ctx context.Context, id string) (*mtypes.Message, error) //perm:read + GetMessageBySignedCid(ctx context.Context, cid cid.Cid) (*mtypes.Message, error) //perm:read + GetMessageByUnsignedCid(ctx context.Context, cid cid.Cid) (*mtypes.Message, error) //perm:read + GetMessageByFromAndNonce(ctx context.Context, from address.Address, nonce uint64) (*mtypes.Message, error) //perm:read + 
ListMessage(ctx context.Context) ([]*mtypes.Message, error) //perm:admin + ListMessageByFromState(ctx context.Context, from address.Address, state mtypes.MessageState, isAsc bool, pageIndex, pageSize int) ([]*mtypes.Message, error) //perm:admin + ListMessageByAddress(ctx context.Context, addr address.Address) ([]*mtypes.Message, error) //perm:admin + ListFailedMessage(ctx context.Context) ([]*mtypes.Message, error) //perm:admin + ListBlockedMessage(ctx context.Context, addr address.Address, d time.Duration) ([]*mtypes.Message, error) //perm:admin + UpdateMessageStateByID(ctx context.Context, id string, state mtypes.MessageState) error //perm:admin + UpdateAllFilledMessage(ctx context.Context) (int, error) //perm:admin + UpdateFilledMessageByID(ctx context.Context, id string) (string, error) //perm:admin + ReplaceMessage(ctx context.Context, params *mtypes.ReplacMessageParams) (cid.Cid, error) //perm:admin + RepublishMessage(ctx context.Context, id string) error //perm:admin + MarkBadMessage(ctx context.Context, id string) error //perm:admin + RecoverFailedMsg(ctx context.Context, addr address.Address) ([]string, error) //perm:admin + + GetAddress(ctx context.Context, addr address.Address) (*mtypes.Address, error) //perm:admin + HasAddress(ctx context.Context, addr address.Address) (bool, error) //perm:read + WalletHas(ctx context.Context, addr address.Address) (bool, error) //perm:read + ListAddress(ctx context.Context) ([]*mtypes.Address, error) //perm:admin + UpdateNonce(ctx context.Context, addr address.Address, nonce uint64) error //perm:admin + DeleteAddress(ctx context.Context, addr address.Address) error //perm:admin + ForbiddenAddress(ctx context.Context, addr address.Address) error //perm:admin + ActiveAddress(ctx context.Context, addr address.Address) error //perm:admin + SetSelectMsgNum(ctx context.Context, addr address.Address, num uint64) error //perm:admin + SetFeeParams(ctx context.Context, params *mtypes.AddressSpec) error //perm:admin + 
ClearUnFillMessage(ctx context.Context, addr address.Address) (int, error) //perm:admin + + GetSharedParams(ctx context.Context) (*mtypes.SharedSpec, error) //perm:admin + SetSharedParams(ctx context.Context, params *mtypes.SharedSpec) error //perm:admin + + SaveNode(ctx context.Context, node *mtypes.Node) error //perm:admin + GetNode(ctx context.Context, name string) (*mtypes.Node, error) //perm:admin + HasNode(ctx context.Context, name string) (bool, error) //perm:admin + ListNode(ctx context.Context) ([]*mtypes.Node, error) //perm:admin + DeleteNode(ctx context.Context, name string) error //perm:admin + + SetLogLevel(ctx context.Context, subsystem, level string) error //perm:admin + LogList(context.Context) ([]string, error) //perm:write + + Send(ctx context.Context, params mtypes.QuickSendParams) (string, error) //perm:admin + + NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) //perm:read + NetPeers(ctx context.Context) ([]peer.AddrInfo, error) //perm:read + NetConnect(ctx context.Context, pi peer.AddrInfo) error //perm:admin + NetAddrsListen(ctx context.Context) (peer.AddrInfo, error) //perm:read + + api.Version +} diff --git a/venus-shared/api/messager/client_gen.go b/venus-shared/api/messager/client_gen.go new file mode 100644 index 0000000000..f569dcd201 --- /dev/null +++ b/venus-shared/api/messager/client_gen.go @@ -0,0 +1,54 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package messager + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = 0 +const APINamespace = "messager.IMessager" +const MethodNamespace = "Message" + +// NewIMessagerRPC creates a new httpparse jsonrpc remotecli. 
+func NewIMessagerRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (IMessager, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res IMessagerStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} + +// DialIMessagerRPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. +func DialIMessagerRPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) (IMessager, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res IMessagerStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) 
+ + return &res, closer, err +} diff --git a/venus-shared/api/messager/method.md b/venus-shared/api/messager/method.md new file mode 100644 index 0000000000..5f288a46b4 --- /dev/null +++ b/venus-shared/api/messager/method.md @@ -0,0 +1,1384 @@ +# Groups + +* [Messager](#messager) + * [ActiveAddress](#activeaddress) + * [ClearUnFillMessage](#clearunfillmessage) + * [DeleteAddress](#deleteaddress) + * [DeleteNode](#deletenode) + * [ForbiddenAddress](#forbiddenaddress) + * [GetAddress](#getaddress) + * [GetMessageByFromAndNonce](#getmessagebyfromandnonce) + * [GetMessageBySignedCid](#getmessagebysignedcid) + * [GetMessageByUid](#getmessagebyuid) + * [GetMessageByUnsignedCid](#getmessagebyunsignedcid) + * [GetNode](#getnode) + * [GetSharedParams](#getsharedparams) + * [HasAddress](#hasaddress) + * [HasMessageByUid](#hasmessagebyuid) + * [HasNode](#hasnode) + * [ListAddress](#listaddress) + * [ListBlockedMessage](#listblockedmessage) + * [ListFailedMessage](#listfailedmessage) + * [ListMessage](#listmessage) + * [ListMessageByAddress](#listmessagebyaddress) + * [ListMessageByFromState](#listmessagebyfromstate) + * [ListNode](#listnode) + * [LogList](#loglist) + * [MarkBadMessage](#markbadmessage) + * [NetAddrsListen](#netaddrslisten) + * [NetConnect](#netconnect) + * [NetFindPeer](#netfindpeer) + * [NetPeers](#netpeers) + * [PushMessage](#pushmessage) + * [PushMessageWithId](#pushmessagewithid) + * [RecoverFailedMsg](#recoverfailedmsg) + * [ReplaceMessage](#replacemessage) + * [RepublishMessage](#republishmessage) + * [SaveNode](#savenode) + * [Send](#send) + * [SetFeeParams](#setfeeparams) + * [SetLogLevel](#setloglevel) + * [SetSelectMsgNum](#setselectmsgnum) + * [SetSharedParams](#setsharedparams) + * [UpdateAllFilledMessage](#updateallfilledmessage) + * [UpdateFilledMessageByID](#updatefilledmessagebyid) + * [UpdateMessageStateByID](#updatemessagestatebyid) + * [UpdateNonce](#updatenonce) + * [Version](#version) + * [WaitMessage](#waitmessage) + * 
[WalletHas](#wallethas) + +## Messager + +### ActiveAddress + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### ClearUnFillMessage + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `123` + +### DeleteAddress + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### DeleteNode + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### ForbiddenAddress + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### GetAddress + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "addr": "f01234", + "nonce": 42, + "weight": 9, + "selMsgNum": 42, + "state": 1, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasFeeCap": "0", + "gasOverPremium": 12.3, + "baseFee": "0", + "isDeleted": 123, + "createAt": "0001-01-01T00:00:00Z", + "updateAt": "0001-01-01T00:00:00Z" +} +``` + +### GetMessageByFromAndNonce + + +Perms: read + +Inputs: +```json +[ + "f01234", + 42 +] +``` + +Response: +```json +{ + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + 
"gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" +} +``` + +### GetMessageBySignedCid + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" +} +``` + +### GetMessageByUid + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": 
"Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" +} +``` + +### GetMessageByUnsignedCid + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" +} +``` + +### GetNode + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Name": "venus", + "URL": 
"/ip4/127.0.0.1/tcp/3453", + "Token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0._eHBJJAiBzQmfcbD_vVmtTrkgyJQ-LOgGOiHfb8rU1I", + "Type": 2 +} +``` + +### GetSharedParams + + +Perms: admin + +Inputs: `[]` + +Response: +```json +{ + "id": 42, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasFeeCap": "0", + "gasOverPremium": 12.3, + "baseFee": "0", + "selMsgNum": 42 +} +``` + +### HasAddress + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### HasMessageByUid + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: `true` + +### HasNode + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `true` + +### ListAddress + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "id": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "addr": "f01234", + "nonce": 42, + "weight": 9, + "selMsgNum": 42, + "state": 1, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasFeeCap": "0", + "gasOverPremium": 12.3, + "baseFee": "0", + "isDeleted": 123, + "createAt": "0001-01-01T00:00:00Z", + "updateAt": "0001-01-01T00:00:00Z" + } +] +``` + +### ListBlockedMessage + + +Perms: admin + +Inputs: +```json +[ + "f01234", + 60000000000 +] +``` + +Response: +```json +[ + { + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" + } +] +``` + +### ListFailedMessage + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" + } +] +``` + +### ListMessage + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + 
"GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" + } +] +``` + +### ListMessageByAddress + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +[ + { + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" + } +] +``` + +### ListMessageByFromState + + +Perms: admin + +Inputs: +```json +[ + "f01234", + 3, + true, + 123, + 123 +] +``` + +Response: 
+```json +[ + { + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" + } +] +``` + +### ListNode + + +Perms: admin + +Inputs: `[]` + +Response: +```json +[ + { + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Name": "venus", + "URL": "/ip4/127.0.0.1/tcp/3453", + "Token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0._eHBJJAiBzQmfcbD_vVmtTrkgyJQ-LOgGOiHfb8rU1I", + "Type": 2 + } +] +``` + +### LogList + + +Perms: write + +Inputs: `[]` + +Response: +```json +[ + "string value" +] +``` + +### MarkBadMessage + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### NetAddrsListen + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetConnect + + +Perms: admin + +Inputs: +```json +[ + { + "ID": 
"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` + +Response: `{}` + +### NetFindPeer + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetPeers + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` + +### PushMessage + + +Perms: write + +Inputs: +```json +[ + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + } +] +``` + +Response: `"string value"` + +### PushMessageWithId + + +Perms: write + +Inputs: +```json +[ + "string value", + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + } +] +``` + +Response: `"string value"` + +### RecoverFailedMsg + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +[ + "string value" +] +``` + +### ReplaceMessage + + +Perms: admin + +Inputs: +```json +[ + { + "ID": "string 
value", + "Auto": true, + "MaxFee": "0", + "GasLimit": 9, + "GasPremium": "0", + "GasFeecap": "0", + "GasOverPremium": 12.3 + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### RepublishMessage + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### SaveNode + + +Perms: admin + +Inputs: +```json +[ + { + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "Name": "venus", + "URL": "/ip4/127.0.0.1/tcp/3453", + "Token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0._eHBJJAiBzQmfcbD_vVmtTrkgyJQ-LOgGOiHfb8rU1I", + "Type": 2 + } +] +``` + +Response: `{}` + +### Send + + +Perms: admin + +Inputs: +```json +[ + { + "To": "f01234", + "From": "f01234", + "Val": "0", + "Account": "string value", + "GasPremium": "0", + "GasFeeCap": "0", + "GasLimit": 10000, + "Method": 1, + "Params": "string value", + "ParamsType": "json" + } +] +``` + +Response: `"string value"` + +### SetFeeParams + + +Perms: admin + +Inputs: +```json +[ + { + "address": "f01234", + "gasOverEstimation": 12.3, + "gasOverPremium": 12.3, + "maxFeeStr": "string value", + "gasFeeCapStr": "string value", + "baseFeeStr": "string value" + } +] +``` + +Response: `{}` + +### SetLogLevel + + +Perms: admin + +Inputs: +```json +[ + "string value", + "string value" +] +``` + +Response: `{}` + +### SetSelectMsgNum + + +Perms: admin + +Inputs: +```json +[ + "f01234", + 42 +] +``` + +Response: `{}` + +### SetSharedParams + + +Perms: admin + +Inputs: +```json +[ + { + "id": 42, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasFeeCap": "0", + "gasOverPremium": 12.3, + "baseFee": "0", + "selMsgNum": 42 + } +] +``` + +Response: `{}` + +### UpdateAllFilledMessage + + +Perms: admin + +Inputs: `[]` + +Response: `123` + +### UpdateFilledMessageByID + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `"string value"` + +### UpdateMessageStateByID + + 
+Perms: admin + +Inputs: +```json +[ + "string value", + 3 +] +``` + +Response: `{}` + +### UpdateNonce + + +Perms: admin + +Inputs: +```json +[ + "f01234", + 42 +] +``` + +Response: `{}` + +### Version +Version provides information about API provider + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131840 +} +``` + +### WaitMessage + + +Perms: read + +Inputs: +```json +[ + "string value", + 42 +] +``` + +Response: +```json +{ + "ID": "e26f1e5c-47f7-4561-a11d-18fab6e748af", + "UnsignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SignedCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Height": 100, + "Confidence": 10, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Meta": { + "expireEpoch": 10101, + "gasOverEstimation": 12.3, + "maxFee": "0", + "gasOverPremium": 12.3 + }, + "WalletName": "test", + "State": 1, + "ErrorMsg": "", + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z" +} +``` + +### WalletHas + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + diff --git a/venus-shared/api/messager/mock/mock_imessager.go b/venus-shared/api/messager/mock/mock_imessager.go new file mode 100644 index 0000000000..6051c8780c --- /dev/null +++ b/venus-shared/api/messager/mock/mock_imessager.go @@ -0,0 +1,718 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/filecoin-project/venus/venus-shared/api/messager (interfaces: IMessager) + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + time "time" + + address "github.com/filecoin-project/go-address" + internal "github.com/filecoin-project/venus/venus-shared/internal" + types "github.com/filecoin-project/venus/venus-shared/types" + messager "github.com/filecoin-project/venus/venus-shared/types/messager" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// MockIMessager is a mock of IMessager interface. +type MockIMessager struct { + ctrl *gomock.Controller + recorder *MockIMessagerMockRecorder +} + +// MockIMessagerMockRecorder is the mock recorder for MockIMessager. +type MockIMessagerMockRecorder struct { + mock *MockIMessager +} + +// NewMockIMessager creates a new mock instance. +func NewMockIMessager(ctrl *gomock.Controller) *MockIMessager { + mock := &MockIMessager{ctrl: ctrl} + mock.recorder = &MockIMessagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIMessager) EXPECT() *MockIMessagerMockRecorder { + return m.recorder +} + +// ActiveAddress mocks base method. +func (m *MockIMessager) ActiveAddress(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ActiveAddress", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ActiveAddress indicates an expected call of ActiveAddress. +func (mr *MockIMessagerMockRecorder) ActiveAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveAddress", reflect.TypeOf((*MockIMessager)(nil).ActiveAddress), arg0, arg1) +} + +// ClearUnFillMessage mocks base method. 
+func (m *MockIMessager) ClearUnFillMessage(arg0 context.Context, arg1 address.Address) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClearUnFillMessage", arg0, arg1) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClearUnFillMessage indicates an expected call of ClearUnFillMessage. +func (mr *MockIMessagerMockRecorder) ClearUnFillMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearUnFillMessage", reflect.TypeOf((*MockIMessager)(nil).ClearUnFillMessage), arg0, arg1) +} + +// DeleteAddress mocks base method. +func (m *MockIMessager) DeleteAddress(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAddress", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAddress indicates an expected call of DeleteAddress. +func (mr *MockIMessagerMockRecorder) DeleteAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAddress", reflect.TypeOf((*MockIMessager)(nil).DeleteAddress), arg0, arg1) +} + +// DeleteNode mocks base method. +func (m *MockIMessager) DeleteNode(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNode", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNode indicates an expected call of DeleteNode. +func (mr *MockIMessagerMockRecorder) DeleteNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNode", reflect.TypeOf((*MockIMessager)(nil).DeleteNode), arg0, arg1) +} + +// ForbiddenAddress mocks base method. 
+func (m *MockIMessager) ForbiddenAddress(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ForbiddenAddress", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ForbiddenAddress indicates an expected call of ForbiddenAddress. +func (mr *MockIMessagerMockRecorder) ForbiddenAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForbiddenAddress", reflect.TypeOf((*MockIMessager)(nil).ForbiddenAddress), arg0, arg1) +} + +// GetAddress mocks base method. +func (m *MockIMessager) GetAddress(arg0 context.Context, arg1 address.Address) (*messager.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAddress", arg0, arg1) + ret0, _ := ret[0].(*messager.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAddress indicates an expected call of GetAddress. +func (mr *MockIMessagerMockRecorder) GetAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAddress", reflect.TypeOf((*MockIMessager)(nil).GetAddress), arg0, arg1) +} + +// GetMessageByFromAndNonce mocks base method. +func (m *MockIMessager) GetMessageByFromAndNonce(arg0 context.Context, arg1 address.Address, arg2 uint64) (*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMessageByFromAndNonce", arg0, arg1, arg2) + ret0, _ := ret[0].(*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMessageByFromAndNonce indicates an expected call of GetMessageByFromAndNonce. +func (mr *MockIMessagerMockRecorder) GetMessageByFromAndNonce(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessageByFromAndNonce", reflect.TypeOf((*MockIMessager)(nil).GetMessageByFromAndNonce), arg0, arg1, arg2) +} + +// GetMessageBySignedCid mocks base method. 
+func (m *MockIMessager) GetMessageBySignedCid(arg0 context.Context, arg1 cid.Cid) (*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMessageBySignedCid", arg0, arg1) + ret0, _ := ret[0].(*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMessageBySignedCid indicates an expected call of GetMessageBySignedCid. +func (mr *MockIMessagerMockRecorder) GetMessageBySignedCid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessageBySignedCid", reflect.TypeOf((*MockIMessager)(nil).GetMessageBySignedCid), arg0, arg1) +} + +// GetMessageByUid mocks base method. +func (m *MockIMessager) GetMessageByUid(arg0 context.Context, arg1 string) (*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMessageByUid", arg0, arg1) + ret0, _ := ret[0].(*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMessageByUid indicates an expected call of GetMessageByUid. +func (mr *MockIMessagerMockRecorder) GetMessageByUid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessageByUid", reflect.TypeOf((*MockIMessager)(nil).GetMessageByUid), arg0, arg1) +} + +// GetMessageByUnsignedCid mocks base method. +func (m *MockIMessager) GetMessageByUnsignedCid(arg0 context.Context, arg1 cid.Cid) (*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMessageByUnsignedCid", arg0, arg1) + ret0, _ := ret[0].(*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMessageByUnsignedCid indicates an expected call of GetMessageByUnsignedCid. 
+func (mr *MockIMessagerMockRecorder) GetMessageByUnsignedCid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessageByUnsignedCid", reflect.TypeOf((*MockIMessager)(nil).GetMessageByUnsignedCid), arg0, arg1) +} + +// GetNode mocks base method. +func (m *MockIMessager) GetNode(arg0 context.Context, arg1 string) (*messager.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNode", arg0, arg1) + ret0, _ := ret[0].(*messager.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNode indicates an expected call of GetNode. +func (mr *MockIMessagerMockRecorder) GetNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNode", reflect.TypeOf((*MockIMessager)(nil).GetNode), arg0, arg1) +} + +// GetSharedParams mocks base method. +func (m *MockIMessager) GetSharedParams(arg0 context.Context) (*messager.SharedSpec, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSharedParams", arg0) + ret0, _ := ret[0].(*messager.SharedSpec) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSharedParams indicates an expected call of GetSharedParams. +func (mr *MockIMessagerMockRecorder) GetSharedParams(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSharedParams", reflect.TypeOf((*MockIMessager)(nil).GetSharedParams), arg0) +} + +// HasAddress mocks base method. +func (m *MockIMessager) HasAddress(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasAddress", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasAddress indicates an expected call of HasAddress. 
+func (mr *MockIMessagerMockRecorder) HasAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasAddress", reflect.TypeOf((*MockIMessager)(nil).HasAddress), arg0, arg1) +} + +// HasMessageByUid mocks base method. +func (m *MockIMessager) HasMessageByUid(arg0 context.Context, arg1 string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasMessageByUid", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasMessageByUid indicates an expected call of HasMessageByUid. +func (mr *MockIMessagerMockRecorder) HasMessageByUid(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasMessageByUid", reflect.TypeOf((*MockIMessager)(nil).HasMessageByUid), arg0, arg1) +} + +// HasNode mocks base method. +func (m *MockIMessager) HasNode(arg0 context.Context, arg1 string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasNode", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasNode indicates an expected call of HasNode. +func (mr *MockIMessagerMockRecorder) HasNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasNode", reflect.TypeOf((*MockIMessager)(nil).HasNode), arg0, arg1) +} + +// ListAddress mocks base method. +func (m *MockIMessager) ListAddress(arg0 context.Context) ([]*messager.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAddress", arg0) + ret0, _ := ret[0].([]*messager.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAddress indicates an expected call of ListAddress. 
+func (mr *MockIMessagerMockRecorder) ListAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAddress", reflect.TypeOf((*MockIMessager)(nil).ListAddress), arg0) +} + +// ListBlockedMessage mocks base method. +func (m *MockIMessager) ListBlockedMessage(arg0 context.Context, arg1 address.Address, arg2 time.Duration) ([]*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBlockedMessage", arg0, arg1, arg2) + ret0, _ := ret[0].([]*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBlockedMessage indicates an expected call of ListBlockedMessage. +func (mr *MockIMessagerMockRecorder) ListBlockedMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBlockedMessage", reflect.TypeOf((*MockIMessager)(nil).ListBlockedMessage), arg0, arg1, arg2) +} + +// ListFailedMessage mocks base method. +func (m *MockIMessager) ListFailedMessage(arg0 context.Context) ([]*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFailedMessage", arg0) + ret0, _ := ret[0].([]*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListFailedMessage indicates an expected call of ListFailedMessage. +func (mr *MockIMessagerMockRecorder) ListFailedMessage(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFailedMessage", reflect.TypeOf((*MockIMessager)(nil).ListFailedMessage), arg0) +} + +// ListMessage mocks base method. +func (m *MockIMessager) ListMessage(arg0 context.Context) ([]*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMessage", arg0) + ret0, _ := ret[0].([]*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMessage indicates an expected call of ListMessage. 
+func (mr *MockIMessagerMockRecorder) ListMessage(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMessage", reflect.TypeOf((*MockIMessager)(nil).ListMessage), arg0) +} + +// ListMessageByAddress mocks base method. +func (m *MockIMessager) ListMessageByAddress(arg0 context.Context, arg1 address.Address) ([]*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMessageByAddress", arg0, arg1) + ret0, _ := ret[0].([]*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMessageByAddress indicates an expected call of ListMessageByAddress. +func (mr *MockIMessagerMockRecorder) ListMessageByAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMessageByAddress", reflect.TypeOf((*MockIMessager)(nil).ListMessageByAddress), arg0, arg1) +} + +// ListMessageByFromState mocks base method. +func (m *MockIMessager) ListMessageByFromState(arg0 context.Context, arg1 address.Address, arg2 messager.MessageState, arg3 bool, arg4, arg5 int) ([]*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMessageByFromState", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].([]*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMessageByFromState indicates an expected call of ListMessageByFromState. +func (mr *MockIMessagerMockRecorder) ListMessageByFromState(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMessageByFromState", reflect.TypeOf((*MockIMessager)(nil).ListMessageByFromState), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// ListNode mocks base method. 
+func (m *MockIMessager) ListNode(arg0 context.Context) ([]*messager.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNode", arg0) + ret0, _ := ret[0].([]*messager.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNode indicates an expected call of ListNode. +func (mr *MockIMessagerMockRecorder) ListNode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNode", reflect.TypeOf((*MockIMessager)(nil).ListNode), arg0) +} + +// LogList mocks base method. +func (m *MockIMessager) LogList(arg0 context.Context) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogList", arg0) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LogList indicates an expected call of LogList. +func (mr *MockIMessagerMockRecorder) LogList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogList", reflect.TypeOf((*MockIMessager)(nil).LogList), arg0) +} + +// MarkBadMessage mocks base method. +func (m *MockIMessager) MarkBadMessage(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkBadMessage", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkBadMessage indicates an expected call of MarkBadMessage. +func (mr *MockIMessagerMockRecorder) MarkBadMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkBadMessage", reflect.TypeOf((*MockIMessager)(nil).MarkBadMessage), arg0, arg1) +} + +// NetAddrsListen mocks base method. +func (m *MockIMessager) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAddrsListen", arg0) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAddrsListen indicates an expected call of NetAddrsListen. 
+func (mr *MockIMessagerMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockIMessager)(nil).NetAddrsListen), arg0) +} + +// NetConnect mocks base method. +func (m *MockIMessager) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetConnect indicates an expected call of NetConnect. +func (mr *MockIMessagerMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockIMessager)(nil).NetConnect), arg0, arg1) +} + +// NetFindPeer mocks base method. +func (m *MockIMessager) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetFindPeer indicates an expected call of NetFindPeer. +func (mr *MockIMessagerMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockIMessager)(nil).NetFindPeer), arg0, arg1) +} + +// NetPeers mocks base method. +func (m *MockIMessager) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeers", arg0) + ret0, _ := ret[0].([]peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeers indicates an expected call of NetPeers. +func (mr *MockIMessagerMockRecorder) NetPeers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockIMessager)(nil).NetPeers), arg0) +} + +// PushMessage mocks base method. 
+func (m *MockIMessager) PushMessage(arg0 context.Context, arg1 *internal.Message, arg2 *messager.SendSpec) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PushMessage indicates an expected call of PushMessage. +func (mr *MockIMessagerMockRecorder) PushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushMessage", reflect.TypeOf((*MockIMessager)(nil).PushMessage), arg0, arg1, arg2) +} + +// PushMessageWithId mocks base method. +func (m *MockIMessager) PushMessageWithId(arg0 context.Context, arg1 string, arg2 *internal.Message, arg3 *messager.SendSpec) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PushMessageWithId", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PushMessageWithId indicates an expected call of PushMessageWithId. +func (mr *MockIMessagerMockRecorder) PushMessageWithId(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushMessageWithId", reflect.TypeOf((*MockIMessager)(nil).PushMessageWithId), arg0, arg1, arg2, arg3) +} + +// RecoverFailedMsg mocks base method. +func (m *MockIMessager) RecoverFailedMsg(arg0 context.Context, arg1 address.Address) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecoverFailedMsg", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RecoverFailedMsg indicates an expected call of RecoverFailedMsg. 
+func (mr *MockIMessagerMockRecorder) RecoverFailedMsg(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecoverFailedMsg", reflect.TypeOf((*MockIMessager)(nil).RecoverFailedMsg), arg0, arg1) +} + +// ReplaceMessage mocks base method. +func (m *MockIMessager) ReplaceMessage(arg0 context.Context, arg1 *messager.ReplacMessageParams) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReplaceMessage", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReplaceMessage indicates an expected call of ReplaceMessage. +func (mr *MockIMessagerMockRecorder) ReplaceMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplaceMessage", reflect.TypeOf((*MockIMessager)(nil).ReplaceMessage), arg0, arg1) +} + +// RepublishMessage mocks base method. +func (m *MockIMessager) RepublishMessage(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RepublishMessage", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RepublishMessage indicates an expected call of RepublishMessage. +func (mr *MockIMessagerMockRecorder) RepublishMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RepublishMessage", reflect.TypeOf((*MockIMessager)(nil).RepublishMessage), arg0, arg1) +} + +// SaveNode mocks base method. +func (m *MockIMessager) SaveNode(arg0 context.Context, arg1 *messager.Node) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveNode", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveNode indicates an expected call of SaveNode. 
+func (mr *MockIMessagerMockRecorder) SaveNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveNode", reflect.TypeOf((*MockIMessager)(nil).SaveNode), arg0, arg1) +} + +// Send mocks base method. +func (m *MockIMessager) Send(arg0 context.Context, arg1 messager.QuickSendParams) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Send indicates an expected call of Send. +func (mr *MockIMessagerMockRecorder) Send(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockIMessager)(nil).Send), arg0, arg1) +} + +// SetFeeParams mocks base method. +func (m *MockIMessager) SetFeeParams(arg0 context.Context, arg1 *messager.AddressSpec) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetFeeParams", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetFeeParams indicates an expected call of SetFeeParams. +func (mr *MockIMessagerMockRecorder) SetFeeParams(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeParams", reflect.TypeOf((*MockIMessager)(nil).SetFeeParams), arg0, arg1) +} + +// SetLogLevel mocks base method. +func (m *MockIMessager) SetLogLevel(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetLogLevel", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetLogLevel indicates an expected call of SetLogLevel. +func (mr *MockIMessagerMockRecorder) SetLogLevel(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLogLevel", reflect.TypeOf((*MockIMessager)(nil).SetLogLevel), arg0, arg1, arg2) +} + +// SetSelectMsgNum mocks base method. 
+func (m *MockIMessager) SetSelectMsgNum(arg0 context.Context, arg1 address.Address, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetSelectMsgNum", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetSelectMsgNum indicates an expected call of SetSelectMsgNum. +func (mr *MockIMessagerMockRecorder) SetSelectMsgNum(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSelectMsgNum", reflect.TypeOf((*MockIMessager)(nil).SetSelectMsgNum), arg0, arg1, arg2) +} + +// SetSharedParams mocks base method. +func (m *MockIMessager) SetSharedParams(arg0 context.Context, arg1 *messager.SharedSpec) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetSharedParams", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetSharedParams indicates an expected call of SetSharedParams. +func (mr *MockIMessagerMockRecorder) SetSharedParams(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSharedParams", reflect.TypeOf((*MockIMessager)(nil).SetSharedParams), arg0, arg1) +} + +// UpdateAllFilledMessage mocks base method. +func (m *MockIMessager) UpdateAllFilledMessage(arg0 context.Context) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAllFilledMessage", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateAllFilledMessage indicates an expected call of UpdateAllFilledMessage. +func (mr *MockIMessagerMockRecorder) UpdateAllFilledMessage(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAllFilledMessage", reflect.TypeOf((*MockIMessager)(nil).UpdateAllFilledMessage), arg0) +} + +// UpdateFilledMessageByID mocks base method. 
+func (m *MockIMessager) UpdateFilledMessageByID(arg0 context.Context, arg1 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateFilledMessageByID", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateFilledMessageByID indicates an expected call of UpdateFilledMessageByID. +func (mr *MockIMessagerMockRecorder) UpdateFilledMessageByID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFilledMessageByID", reflect.TypeOf((*MockIMessager)(nil).UpdateFilledMessageByID), arg0, arg1) +} + +// UpdateMessageStateByID mocks base method. +func (m *MockIMessager) UpdateMessageStateByID(arg0 context.Context, arg1 string, arg2 messager.MessageState) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateMessageStateByID", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateMessageStateByID indicates an expected call of UpdateMessageStateByID. +func (mr *MockIMessagerMockRecorder) UpdateMessageStateByID(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMessageStateByID", reflect.TypeOf((*MockIMessager)(nil).UpdateMessageStateByID), arg0, arg1, arg2) +} + +// UpdateNonce mocks base method. +func (m *MockIMessager) UpdateNonce(arg0 context.Context, arg1 address.Address, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateNonce", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateNonce indicates an expected call of UpdateNonce. +func (mr *MockIMessagerMockRecorder) UpdateNonce(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNonce", reflect.TypeOf((*MockIMessager)(nil).UpdateNonce), arg0, arg1, arg2) +} + +// Version mocks base method. 
+func (m *MockIMessager) Version(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockIMessagerMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockIMessager)(nil).Version), arg0) +} + +// WaitMessage mocks base method. +func (m *MockIMessager) WaitMessage(arg0 context.Context, arg1 string, arg2 uint64) (*messager.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*messager.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WaitMessage indicates an expected call of WaitMessage. +func (mr *MockIMessagerMockRecorder) WaitMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitMessage", reflect.TypeOf((*MockIMessager)(nil).WaitMessage), arg0, arg1, arg2) +} + +// WalletHas mocks base method. +func (m *MockIMessager) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas. 
+func (mr *MockIMessagerMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockIMessager)(nil).WalletHas), arg0, arg1) +} diff --git a/venus-shared/api/messager/proxy_gen.go b/venus-shared/api/messager/proxy_gen.go new file mode 100644 index 0000000000..113f62c3bd --- /dev/null +++ b/venus-shared/api/messager/proxy_gen.go @@ -0,0 +1,204 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package messager + +import ( + "context" + "time" + + address "github.com/filecoin-project/go-address" + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/venus-shared/types" + mtypes "github.com/filecoin-project/venus/venus-shared/types/messager" +) + +type IMessagerStruct struct { + Internal struct { + ActiveAddress func(ctx context.Context, addr address.Address) error `perm:"admin"` + ClearUnFillMessage func(ctx context.Context, addr address.Address) (int, error) `perm:"admin"` + DeleteAddress func(ctx context.Context, addr address.Address) error `perm:"admin"` + DeleteNode func(ctx context.Context, name string) error `perm:"admin"` + ForbiddenAddress func(ctx context.Context, addr address.Address) error `perm:"admin"` + GetAddress func(ctx context.Context, addr address.Address) (*mtypes.Address, error) `perm:"admin"` + GetMessageByFromAndNonce func(ctx context.Context, from address.Address, nonce uint64) (*mtypes.Message, error) `perm:"read"` + GetMessageBySignedCid func(ctx context.Context, cid cid.Cid) (*mtypes.Message, error) `perm:"read"` + GetMessageByUid func(ctx context.Context, id string) (*mtypes.Message, error) `perm:"read"` + GetMessageByUnsignedCid func(ctx context.Context, cid cid.Cid) (*mtypes.Message, error) `perm:"read"` + GetNode func(ctx context.Context, name string) (*mtypes.Node, error) `perm:"admin"` + GetSharedParams func(ctx 
context.Context) (*mtypes.SharedSpec, error) `perm:"admin"` + HasAddress func(ctx context.Context, addr address.Address) (bool, error) `perm:"read"` + HasMessageByUid func(ctx context.Context, id string) (bool, error) `perm:"read"` + HasNode func(ctx context.Context, name string) (bool, error) `perm:"admin"` + ListAddress func(ctx context.Context) ([]*mtypes.Address, error) `perm:"admin"` + ListBlockedMessage func(ctx context.Context, addr address.Address, d time.Duration) ([]*mtypes.Message, error) `perm:"admin"` + ListFailedMessage func(ctx context.Context) ([]*mtypes.Message, error) `perm:"admin"` + ListMessage func(ctx context.Context) ([]*mtypes.Message, error) `perm:"admin"` + ListMessageByAddress func(ctx context.Context, addr address.Address) ([]*mtypes.Message, error) `perm:"admin"` + ListMessageByFromState func(ctx context.Context, from address.Address, state mtypes.MessageState, isAsc bool, pageIndex, pageSize int) ([]*mtypes.Message, error) `perm:"admin"` + ListNode func(ctx context.Context) ([]*mtypes.Node, error) `perm:"admin"` + LogList func(context.Context) ([]string, error) `perm:"write"` + MarkBadMessage func(ctx context.Context, id string) error `perm:"admin"` + NetAddrsListen func(ctx context.Context) (peer.AddrInfo, error) `perm:"read"` + NetConnect func(ctx context.Context, pi peer.AddrInfo) error `perm:"admin"` + NetFindPeer func(ctx context.Context, p peer.ID) (peer.AddrInfo, error) `perm:"read"` + NetPeers func(ctx context.Context) ([]peer.AddrInfo, error) `perm:"read"` + PushMessage func(ctx context.Context, msg *types.Message, meta *mtypes.SendSpec) (string, error) `perm:"write"` + PushMessageWithId func(ctx context.Context, id string, msg *types.Message, meta *mtypes.SendSpec) (string, error) `perm:"write"` + RecoverFailedMsg func(ctx context.Context, addr address.Address) ([]string, error) `perm:"admin"` + ReplaceMessage func(ctx context.Context, params *mtypes.ReplacMessageParams) (cid.Cid, error) `perm:"admin"` + RepublishMessage 
func(ctx context.Context, id string) error `perm:"admin"` + SaveNode func(ctx context.Context, node *mtypes.Node) error `perm:"admin"` + Send func(ctx context.Context, params mtypes.QuickSendParams) (string, error) `perm:"admin"` + SetFeeParams func(ctx context.Context, params *mtypes.AddressSpec) error `perm:"admin"` + SetLogLevel func(ctx context.Context, subsystem, level string) error `perm:"admin"` + SetSelectMsgNum func(ctx context.Context, addr address.Address, num uint64) error `perm:"admin"` + SetSharedParams func(ctx context.Context, params *mtypes.SharedSpec) error `perm:"admin"` + UpdateAllFilledMessage func(ctx context.Context) (int, error) `perm:"admin"` + UpdateFilledMessageByID func(ctx context.Context, id string) (string, error) `perm:"admin"` + UpdateMessageStateByID func(ctx context.Context, id string, state mtypes.MessageState) error `perm:"admin"` + UpdateNonce func(ctx context.Context, addr address.Address, nonce uint64) error `perm:"admin"` + Version func(ctx context.Context) (types.Version, error) `perm:"read"` + WaitMessage func(ctx context.Context, id string, confidence uint64) (*mtypes.Message, error) `perm:"read"` + WalletHas func(ctx context.Context, addr address.Address) (bool, error) `perm:"read"` + } +} + +func (s *IMessagerStruct) ActiveAddress(p0 context.Context, p1 address.Address) error { + return s.Internal.ActiveAddress(p0, p1) +} +func (s *IMessagerStruct) ClearUnFillMessage(p0 context.Context, p1 address.Address) (int, error) { + return s.Internal.ClearUnFillMessage(p0, p1) +} +func (s *IMessagerStruct) DeleteAddress(p0 context.Context, p1 address.Address) error { + return s.Internal.DeleteAddress(p0, p1) +} +func (s *IMessagerStruct) DeleteNode(p0 context.Context, p1 string) error { + return s.Internal.DeleteNode(p0, p1) +} +func (s *IMessagerStruct) ForbiddenAddress(p0 context.Context, p1 address.Address) error { + return s.Internal.ForbiddenAddress(p0, p1) +} +func (s *IMessagerStruct) GetAddress(p0 context.Context, p1 
address.Address) (*mtypes.Address, error) { + return s.Internal.GetAddress(p0, p1) +} +func (s *IMessagerStruct) GetMessageByFromAndNonce(p0 context.Context, p1 address.Address, p2 uint64) (*mtypes.Message, error) { + return s.Internal.GetMessageByFromAndNonce(p0, p1, p2) +} +func (s *IMessagerStruct) GetMessageBySignedCid(p0 context.Context, p1 cid.Cid) (*mtypes.Message, error) { + return s.Internal.GetMessageBySignedCid(p0, p1) +} +func (s *IMessagerStruct) GetMessageByUid(p0 context.Context, p1 string) (*mtypes.Message, error) { + return s.Internal.GetMessageByUid(p0, p1) +} +func (s *IMessagerStruct) GetMessageByUnsignedCid(p0 context.Context, p1 cid.Cid) (*mtypes.Message, error) { + return s.Internal.GetMessageByUnsignedCid(p0, p1) +} +func (s *IMessagerStruct) GetNode(p0 context.Context, p1 string) (*mtypes.Node, error) { + return s.Internal.GetNode(p0, p1) +} +func (s *IMessagerStruct) GetSharedParams(p0 context.Context) (*mtypes.SharedSpec, error) { + return s.Internal.GetSharedParams(p0) +} +func (s *IMessagerStruct) HasAddress(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.HasAddress(p0, p1) +} +func (s *IMessagerStruct) HasMessageByUid(p0 context.Context, p1 string) (bool, error) { + return s.Internal.HasMessageByUid(p0, p1) +} +func (s *IMessagerStruct) HasNode(p0 context.Context, p1 string) (bool, error) { + return s.Internal.HasNode(p0, p1) +} +func (s *IMessagerStruct) ListAddress(p0 context.Context) ([]*mtypes.Address, error) { + return s.Internal.ListAddress(p0) +} +func (s *IMessagerStruct) ListBlockedMessage(p0 context.Context, p1 address.Address, p2 time.Duration) ([]*mtypes.Message, error) { + return s.Internal.ListBlockedMessage(p0, p1, p2) +} +func (s *IMessagerStruct) ListFailedMessage(p0 context.Context) ([]*mtypes.Message, error) { + return s.Internal.ListFailedMessage(p0) +} +func (s *IMessagerStruct) ListMessage(p0 context.Context) ([]*mtypes.Message, error) { + return s.Internal.ListMessage(p0) +} +func (s 
*IMessagerStruct) ListMessageByAddress(p0 context.Context, p1 address.Address) ([]*mtypes.Message, error) { + return s.Internal.ListMessageByAddress(p0, p1) +} +func (s *IMessagerStruct) ListMessageByFromState(p0 context.Context, p1 address.Address, p2 mtypes.MessageState, p3 bool, p4, p5 int) ([]*mtypes.Message, error) { + return s.Internal.ListMessageByFromState(p0, p1, p2, p3, p4, p5) +} +func (s *IMessagerStruct) ListNode(p0 context.Context) ([]*mtypes.Node, error) { + return s.Internal.ListNode(p0) +} +func (s *IMessagerStruct) LogList(p0 context.Context) ([]string, error) { + return s.Internal.LogList(p0) +} +func (s *IMessagerStruct) MarkBadMessage(p0 context.Context, p1 string) error { + return s.Internal.MarkBadMessage(p0, p1) +} +func (s *IMessagerStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { + return s.Internal.NetAddrsListen(p0) +} +func (s *IMessagerStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error { + return s.Internal.NetConnect(p0, p1) +} +func (s *IMessagerStruct) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) { + return s.Internal.NetFindPeer(p0, p1) +} +func (s *IMessagerStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) { + return s.Internal.NetPeers(p0) +} +func (s *IMessagerStruct) PushMessage(p0 context.Context, p1 *types.Message, p2 *mtypes.SendSpec) (string, error) { + return s.Internal.PushMessage(p0, p1, p2) +} +func (s *IMessagerStruct) PushMessageWithId(p0 context.Context, p1 string, p2 *types.Message, p3 *mtypes.SendSpec) (string, error) { + return s.Internal.PushMessageWithId(p0, p1, p2, p3) +} +func (s *IMessagerStruct) RecoverFailedMsg(p0 context.Context, p1 address.Address) ([]string, error) { + return s.Internal.RecoverFailedMsg(p0, p1) +} +func (s *IMessagerStruct) ReplaceMessage(p0 context.Context, p1 *mtypes.ReplacMessageParams) (cid.Cid, error) { + return s.Internal.ReplaceMessage(p0, p1) +} +func (s *IMessagerStruct) RepublishMessage(p0 context.Context, p1 
string) error { + return s.Internal.RepublishMessage(p0, p1) +} +func (s *IMessagerStruct) SaveNode(p0 context.Context, p1 *mtypes.Node) error { + return s.Internal.SaveNode(p0, p1) +} +func (s *IMessagerStruct) Send(p0 context.Context, p1 mtypes.QuickSendParams) (string, error) { + return s.Internal.Send(p0, p1) +} +func (s *IMessagerStruct) SetFeeParams(p0 context.Context, p1 *mtypes.AddressSpec) error { + return s.Internal.SetFeeParams(p0, p1) +} +func (s *IMessagerStruct) SetLogLevel(p0 context.Context, p1, p2 string) error { + return s.Internal.SetLogLevel(p0, p1, p2) +} +func (s *IMessagerStruct) SetSelectMsgNum(p0 context.Context, p1 address.Address, p2 uint64) error { + return s.Internal.SetSelectMsgNum(p0, p1, p2) +} +func (s *IMessagerStruct) SetSharedParams(p0 context.Context, p1 *mtypes.SharedSpec) error { + return s.Internal.SetSharedParams(p0, p1) +} +func (s *IMessagerStruct) UpdateAllFilledMessage(p0 context.Context) (int, error) { + return s.Internal.UpdateAllFilledMessage(p0) +} +func (s *IMessagerStruct) UpdateFilledMessageByID(p0 context.Context, p1 string) (string, error) { + return s.Internal.UpdateFilledMessageByID(p0, p1) +} +func (s *IMessagerStruct) UpdateMessageStateByID(p0 context.Context, p1 string, p2 mtypes.MessageState) error { + return s.Internal.UpdateMessageStateByID(p0, p1, p2) +} +func (s *IMessagerStruct) UpdateNonce(p0 context.Context, p1 address.Address, p2 uint64) error { + return s.Internal.UpdateNonce(p0, p1, p2) +} +func (s *IMessagerStruct) Version(p0 context.Context) (types.Version, error) { + return s.Internal.Version(p0) +} +func (s *IMessagerStruct) WaitMessage(p0 context.Context, p1 string, p2 uint64) (*mtypes.Message, error) { + return s.Internal.WaitMessage(p0, p1, p2) +} +func (s *IMessagerStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.WalletHas(p0, p1) +} diff --git a/venus-shared/api/permission/permission.go b/venus-shared/api/permission/permission.go new file mode 
100644 index 0000000000..4f928f79a2 --- /dev/null +++ b/venus-shared/api/permission/permission.go @@ -0,0 +1,84 @@ +package permission + +import ( + "context" + "fmt" + "reflect" + + "github.com/filecoin-project/venus/venus-shared/api" + + "github.com/filecoin-project/go-jsonrpc/auth" +) + +type permission = auth.Permission + +const ( + // When changing these, update docs/API.md too + + PermRead permission = "read" // default + PermWrite permission = "write" + PermSign permission = "sign" // Use wallet keys for signing + PermAdmin permission = "admin" // Manage permissions + +) + +var ( + AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin} + DefaultPerms = []auth.Permission{PermRead} +) + +// PermissionProxy the scheduler between API and internal business +// nolint +func PermissionProxy(in interface{}, out interface{}) { + ra := reflect.ValueOf(in) + outs := api.GetInternalStructs(out) + for _, out := range outs { + rint := reflect.ValueOf(out).Elem() + for i := 0; i < ra.NumMethod(); i++ { + methodName := ra.Type().Method(i).Name + field, exists := rint.Type().FieldByName(methodName) + if !exists { + continue + } + + requiredPerm := field.Tag.Get("perm") + if requiredPerm == "" { + panic("missing 'perm' tag on " + field.Name) // ok + } + + var found bool + for _, perm := range AllPermissions { + if perm == requiredPerm { + found = true + } + } + if !found { + panic("unknown 'perm' tag on " + field.Name) + } + + fn := ra.Method(i) + rint.FieldByName(methodName).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) { + ctx := args[0].Interface().(context.Context) + errNum := 0 + if !auth.HasPerm(ctx, DefaultPerms, requiredPerm) { + errNum++ + goto ABORT + } + return fn.Call(args) + ABORT: + err := fmt.Errorf("missing permission to invoke '%s'", methodName) + if errNum&1 == 1 { + err = fmt.Errorf("%s (need '%s')", err, requiredPerm) + } + rerr := reflect.ValueOf(&err).Elem() + if fn.Type().NumOut() == 2 { + 
return []reflect.Value{ + reflect.Zero(fn.Type().Out(0)), + rerr, + } + } + return []reflect.Value{rerr} + })) + } + } +} diff --git a/venus-shared/api/proxy_util.go b/venus-shared/api/proxy_util.go new file mode 100644 index 0000000000..9eb8e5ee25 --- /dev/null +++ b/venus-shared/api/proxy_util.go @@ -0,0 +1,30 @@ +package api + +import "reflect" + +var _internalField = "Internal" + +// GetInternalStructs extracts all pointers to 'Internal' sub-structs from the provided pointer to a proxy struct +func GetInternalStructs(in interface{}) []interface{} { + return getInternalStructs(reflect.ValueOf(in).Elem()) +} + +func getInternalStructs(rv reflect.Value) []interface{} { + var out []interface{} + + for i := 0; i < rv.NumField(); i++ { + filedValue := rv.Field(i) + filedType := rv.Type().Field(i) + if filedType.Name == _internalField { + ii := filedValue.Addr().Interface() + out = append(out, ii) + continue + } + + sub := getInternalStructs(rv.Field(i)) + + out = append(out, sub...) + } + + return out +} diff --git a/venus-shared/api/proxy_util_test.go b/venus-shared/api/proxy_util_test.go new file mode 100644 index 0000000000..945fe58f1c --- /dev/null +++ b/venus-shared/api/proxy_util_test.go @@ -0,0 +1,65 @@ +package api + +import ( + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/stretchr/testify/require" +) + +type StrA struct { + StrB + + Internal struct { + A int + } +} + +type StrB struct { + Internal struct { + B int + } +} + +type StrC struct { + Internal struct { + Internal struct { + C int + } + } +} + +func TestGetInternalStructs(t *testing.T) { + tf.UnitTest(t) + var proxy StrA + + sts := GetInternalStructs(&proxy) + require.Len(t, sts, 2) + + sa := sts[0].(*struct{ B int }) + sa.B = 3 + sb := sts[1].(*struct{ A int }) + sb.A = 4 + + require.Equal(t, 4, proxy.Internal.A) + require.Equal(t, 3, proxy.StrB.Internal.B) +} + +func TestNestedInternalStructs(t *testing.T) { + tf.UnitTest(t) + var proxy StrC + + // 
check that only the top-level internal struct gets picked up + + sts := GetInternalStructs(&proxy) + require.Len(t, sts, 1) + + sa := sts[0].(*struct { + Internal struct { + C int + } + }) + sa.Internal.C = 5 + + require.Equal(t, 5, proxy.Internal.Internal.C) +} diff --git a/venus-shared/api/request_header.go b/venus-shared/api/request_header.go new file mode 100644 index 0000000000..9f00480272 --- /dev/null +++ b/venus-shared/api/request_header.go @@ -0,0 +1,6 @@ +package api + +const ( + VenusAPINamespaceHeader = "X-VENUS-API-NAMESPACE" + AuthorizationHeader = "Authorization" +) diff --git a/venus-shared/api/wallet/client_gen.go b/venus-shared/api/wallet/client_gen.go new file mode 100644 index 0000000000..654b49e79f --- /dev/null +++ b/venus-shared/api/wallet/client_gen.go @@ -0,0 +1,54 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package wallet + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/venus/venus-shared/api" +) + +const MajorVersion = 0 +const APINamespace = "wallet.IFullAPI" +const MethodNamespace = "Filecoin" + +// NewIFullAPIRPC creates a new httpparse jsonrpc remotecli. +func NewIFullAPIRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (IFullAPI, jsonrpc.ClientCloser, error) { + endpoint, err := api.Endpoint(addr, MajorVersion) + if err != nil { + return nil, nil, fmt.Errorf("invalid addr %s: %w", addr, err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + + var res IFullAPIStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} + +// DialIFullAPIRPC is a more convinient way of building client, as it resolves any format (url, multiaddr) of addr string. 
+func DialIFullAPIRPC(ctx context.Context, addr string, token string, requestHeader http.Header, opts ...jsonrpc.Option) (IFullAPI, jsonrpc.ClientCloser, error) { + ainfo := api.NewAPIInfo(addr, token) + endpoint, err := ainfo.DialArgs(api.VerString(MajorVersion)) + if err != nil { + return nil, nil, fmt.Errorf("get dial args: %w", err) + } + + if requestHeader == nil { + requestHeader = http.Header{} + } + requestHeader.Set(api.VenusAPINamespaceHeader, APINamespace) + ainfo.SetAuthHeader(requestHeader) + + var res IFullAPIStruct + closer, err := jsonrpc.NewMergeClient(ctx, endpoint, MethodNamespace, api.GetInternalStructs(&res), requestHeader, opts...) + + return &res, closer, err +} diff --git a/venus-shared/api/wallet/common.go b/venus-shared/api/wallet/common.go new file mode 100644 index 0000000000..86b510a57c --- /dev/null +++ b/venus-shared/api/wallet/common.go @@ -0,0 +1,19 @@ +package wallet + +import ( + "context" + + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/venus/venus-shared/api" +) + +type ICommon interface { + // Auth + AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read + AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) //perm:admin + + LogList(context.Context) ([]string, error) //perm:read + LogSetLevel(context.Context, string, string) error //perm:write + + api.Version +} diff --git a/venus-shared/api/wallet/full.go b/venus-shared/api/wallet/full.go new file mode 100644 index 0000000000..27b2ee99ef --- /dev/null +++ b/venus-shared/api/wallet/full.go @@ -0,0 +1,8 @@ +package wallet + +type IFullAPI interface { + ILocalStrategy + ILocalWallet + ICommon + IWalletEvent +} diff --git a/venus-shared/api/wallet/method.md b/venus-shared/api/wallet/method.md new file mode 100644 index 0000000000..e9dd0c505e --- /dev/null +++ b/venus-shared/api/wallet/method.md @@ -0,0 +1,1049 @@ +# Groups + +* [Common](#common) + * [AuthNew](#authnew) + * [AuthVerify](#authverify) + * 
[LogList](#loglist) + * [LogSetLevel](#logsetlevel) + * [Version](#version) +* [Strategy](#strategy) + * [AddMethodIntoKeyBind](#addmethodintokeybind) + * [AddMsgTypeIntoKeyBind](#addmsgtypeintokeybind) + * [GetGroupByName](#getgroupbyname) + * [GetKeyBindByName](#getkeybindbyname) + * [GetKeyBinds](#getkeybinds) + * [GetMethodTemplateByName](#getmethodtemplatebyname) + * [GetMsgTypeTemplate](#getmsgtypetemplate) + * [GetWalletTokenInfo](#getwallettokeninfo) + * [GetWalletTokensByGroup](#getwallettokensbygroup) + * [ListGroups](#listgroups) + * [ListKeyBinds](#listkeybinds) + * [ListMethodTemplates](#listmethodtemplates) + * [ListMsgTypeTemplates](#listmsgtypetemplates) + * [NewGroup](#newgroup) + * [NewKeyBindCustom](#newkeybindcustom) + * [NewKeyBindFromTemplate](#newkeybindfromtemplate) + * [NewMethodTemplate](#newmethodtemplate) + * [NewMsgTypeTemplate](#newmsgtypetemplate) + * [NewStToken](#newsttoken) + * [RemoveGroup](#removegroup) + * [RemoveKeyBind](#removekeybind) + * [RemoveKeyBindByAddress](#removekeybindbyaddress) + * [RemoveMethodFromKeyBind](#removemethodfromkeybind) + * [RemoveMethodTemplate](#removemethodtemplate) + * [RemoveMsgTypeFromKeyBind](#removemsgtypefromkeybind) + * [RemoveMsgTypeTemplate](#removemsgtypetemplate) + * [RemoveStToken](#removesttoken) +* [StrategyVerify](#strategyverify) + * [ContainWallet](#containwallet) + * [ScopeWallet](#scopewallet) + * [Verify](#verify) +* [Wallet](#wallet) + * [WalletDelete](#walletdelete) + * [WalletExport](#walletexport) + * [WalletHas](#wallethas) + * [WalletImport](#walletimport) + * [WalletList](#walletlist) + * [WalletNew](#walletnew) + * [WalletSign](#walletsign) +* [WalletEvent](#walletevent) + * [AddNewAddress](#addnewaddress) + * [AddSupportAccount](#addsupportaccount) +* [WalletLock](#walletlock) + * [Lock](#lock) + * [LockState](#lockstate) + * [SetPassword](#setpassword) + * [Unlock](#unlock) + * [VerifyPassword](#verifypassword) + +## Common + +### AuthNew + + +Perms: admin + +Inputs: 
+```json +[ + [ + "write" + ] +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### AuthVerify +Auth + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +[ + "write" +] +``` + +### LogList + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + "string value" +] +``` + +### LogSetLevel + + +Perms: write + +Inputs: +```json +[ + "string value", + "string value" +] +``` + +Response: `{}` + +### Version +Version provides information about API provider + + +Perms: read + +Inputs: `[]` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131840 +} +``` + +## Strategy + +### AddMethodIntoKeyBind +AddMethodIntoKeyBind append methods into keyBind + + +Perms: admin + +Inputs: +```json +[ + "string value", + [ + "string value" + ] +] +``` + +Response: +```json +{ + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] +} +``` + +### AddMsgTypeIntoKeyBind +AddMsgTypeIntoKeyBind append msgTypes into keyBind + + +Perms: admin + +Inputs: +```json +[ + "string value", + [ + 123 + ] +] +``` + +Response: +```json +{ + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] +} +``` + +### GetGroupByName +GetGroupByName get a group by name + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "GroupID": 42, + "Name": "string value", + "KeyBinds": [ + { + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] + } + ] +} +``` + +### GetKeyBindByName +GetKeyBindByName get a keyBind by name + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] +} +``` + +### GetKeyBinds +GetKeyBinds list keyBinds by address + + +Perms: admin + +Inputs: 
+```json +[ + "f01234" +] +``` + +Response: +```json +[ + { + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] + } +] +``` + +### GetMethodTemplateByName +GetMethodTemplateByName get a method template by name + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "MTId": 42, + "Name": "string value", + "Methods": [ + "string value" + ] +} +``` + +### GetMsgTypeTemplate +GetMsgTypeTemplate get a msgType template by name + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "MTTId": 42, + "Name": "string value", + "MetaTypes": 2 +} +``` + +### GetWalletTokenInfo +GetWalletTokenInfo get group details by token + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Token": "string value", + "GroupID": 42, + "Name": "string value", + "KeyBinds": [ + { + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] + } + ] +} +``` + +### GetWalletTokensByGroup +GetWalletTokensByGroup list strategy tokens under the group + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +[ + "string value" +] +``` + +### ListGroups +ListGroups list groups' simple information + + +Perms: admin + +Inputs: +```json +[ + 123, + 123 +] +``` + +Response: +```json +[ + { + "GroupID": 42, + "Name": "string value", + "KeyBinds": [ + { + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] + } + ] + } +] +``` + +### ListKeyBinds +ListKeyBinds list keyBinds' details + + +Perms: admin + +Inputs: +```json +[ + 123, + 123 +] +``` + +Response: +```json +[ + { + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] + } +] +``` + +### ListMethodTemplates +ListMethodTemplates list method 
templates' details + + +Perms: admin + +Inputs: +```json +[ + 123, + 123 +] +``` + +Response: +```json +[ + { + "MTId": 42, + "Name": "string value", + "Methods": [ + "string value" + ] + } +] +``` + +### ListMsgTypeTemplates +ListMsgTypeTemplates list msgType templates' details + + +Perms: admin + +Inputs: +```json +[ + 123, + 123 +] +``` + +Response: +```json +[ + { + "MTTId": 42, + "Name": "string value", + "MetaTypes": 2 + } +] +``` + +### NewGroup +NewGroup create a group to group multiple keyBinds together + + +Perms: admin + +Inputs: +```json +[ + "string value", + [ + "string value" + ] +] +``` + +Response: `{}` + +### NewKeyBindCustom +NewKeyBindCustom create a keyBind with custom msyTypes and methods + + +Perms: admin + +Inputs: +```json +[ + "string value", + "f01234", + [ + 123 + ], + [ + "string value" + ] +] +``` + +Response: `{}` + +### NewKeyBindFromTemplate +NewKeyBindFromTemplate create a keyBind form msgType template and method template + + +Perms: admin + +Inputs: +```json +[ + "string value", + "f01234", + "string value", + "string value" +] +``` + +Response: `{}` + +### NewMethodTemplate +NewMethodTemplate create a method template + + +Perms: admin + +Inputs: +```json +[ + "string value", + [ + "string value" + ] +] +``` + +Response: `{}` + +### NewMsgTypeTemplate +NewMsgTypeTemplate create a msgType template + + +Perms: admin + +Inputs: +```json +[ + "string value", + [ + 123 + ] +] +``` + +Response: `{}` + +### NewStToken +NewStToken generate a random token from group + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `"string value"` + +### RemoveGroup +RemoveGroup delete group by name + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### RemoveKeyBind +RemoveKeyBind delete keyBind by name + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### RemoveKeyBindByAddress +RemoveKeyBindByAddress delete some keyBinds by address + + +Perms: admin + 
+Inputs: +```json +[ + "f01234" +] +``` + +Response: `9` + +### RemoveMethodFromKeyBind +RemoveMethodFromKeyBind remove methods from keyBind + + +Perms: admin + +Inputs: +```json +[ + "string value", + [ + "string value" + ] +] +``` + +Response: +```json +{ + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] +} +``` + +### RemoveMethodTemplate +RemoveMethodTemplate delete method template by name + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### RemoveMsgTypeFromKeyBind +RemoveMsgTypeFromKeyBind remove msgTypes form keyBind + + +Perms: admin + +Inputs: +```json +[ + "string value", + [ + 123 + ] +] +``` + +Response: +```json +{ + "BindID": 42, + "Name": "string value", + "Address": "string value", + "MetaTypes": 2, + "Methods": [ + "string value" + ] +} +``` + +### RemoveMsgTypeTemplate +RemoveMsgTypeTemplate delete msgType template by name + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### RemoveStToken +RemoveStToken delete strategy token + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +## StrategyVerify + +### ContainWallet +ContainWallet Check if it is visible to the wallet + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### ScopeWallet +ScopeWallet get the wallet scope + + +Perms: admin + +Inputs: `[]` + +Response: +```json +{ + "Root": true, + "Addresses": [ + "f01234" + ] +} +``` + +### Verify +Verify verify the address strategy permissions + + +Perms: admin + +Inputs: +```json +[ + "f01234", + "message", + { + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + }, + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: `{}` + +## Wallet + +### 
WalletDelete + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### WalletExport + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Type": "bls", + "PrivateKey": "Ynl0ZSBhcnJheQ==" +} +``` + +### WalletHas + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### WalletImport + + +Perms: admin + +Inputs: +```json +[ + { + "Type": "bls", + "PrivateKey": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: `"f01234"` + +### WalletList + + +Perms: read + +Inputs: `[]` + +Response: +```json +[ + "f01234" +] +``` + +### WalletNew + + +Perms: admin + +Inputs: +```json +[ + "bls" +] +``` + +Response: `"f01234"` + +### WalletSign + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "Ynl0ZSBhcnJheQ==", + { + "Type": "message", + "Extra": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +## WalletEvent + +### AddNewAddress + + +Perms: admin + +Inputs: +```json +[ + [ + "f01234" + ] +] +``` + +Response: `{}` + +### AddSupportAccount + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +## WalletLock + +### Lock +lock the wallet and disable IWallet logic + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### LockState +show lock state + + +Perms: admin + +Inputs: `[]` + +Response: `true` + +### SetPassword +SetPassword do it first after program setup + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### Unlock +unlock the wallet and enable IWallet logic + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### VerifyPassword +VerifyPassword verify that the passwords are consistent + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + diff --git a/venus-shared/api/wallet/mock/mock_ifullapi.go b/venus-shared/api/wallet/mock/mock_ifullapi.go new file mode 100644 index 
0000000000..9b9f4e7f7f --- /dev/null +++ b/venus-shared/api/wallet/mock/mock_ifullapi.go @@ -0,0 +1,755 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/venus/venus-shared/api/wallet (interfaces: IFullAPI) + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + auth "github.com/filecoin-project/go-jsonrpc/auth" + crypto "github.com/filecoin-project/go-state-types/crypto" + internal "github.com/filecoin-project/venus/venus-shared/internal" + types "github.com/filecoin-project/venus/venus-shared/types" + wallet "github.com/filecoin-project/venus/venus-shared/types/wallet" + gomock "github.com/golang/mock/gomock" +) + +// MockIFullAPI is a mock of IFullAPI interface. +type MockIFullAPI struct { + ctrl *gomock.Controller + recorder *MockIFullAPIMockRecorder +} + +// MockIFullAPIMockRecorder is the mock recorder for MockIFullAPI. +type MockIFullAPIMockRecorder struct { + mock *MockIFullAPI +} + +// NewMockIFullAPI creates a new mock instance. +func NewMockIFullAPI(ctrl *gomock.Controller) *MockIFullAPI { + mock := &MockIFullAPI{ctrl: ctrl} + mock.recorder = &MockIFullAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIFullAPI) EXPECT() *MockIFullAPIMockRecorder { + return m.recorder +} + +// AddMethodIntoKeyBind mocks base method. +func (m *MockIFullAPI) AddMethodIntoKeyBind(arg0 context.Context, arg1 string, arg2 []string) (*wallet.KeyBind, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddMethodIntoKeyBind", arg0, arg1, arg2) + ret0, _ := ret[0].(*wallet.KeyBind) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddMethodIntoKeyBind indicates an expected call of AddMethodIntoKeyBind. 
+func (mr *MockIFullAPIMockRecorder) AddMethodIntoKeyBind(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddMethodIntoKeyBind", reflect.TypeOf((*MockIFullAPI)(nil).AddMethodIntoKeyBind), arg0, arg1, arg2) +} + +// AddMsgTypeIntoKeyBind mocks base method. +func (m *MockIFullAPI) AddMsgTypeIntoKeyBind(arg0 context.Context, arg1 string, arg2 []int) (*wallet.KeyBind, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddMsgTypeIntoKeyBind", arg0, arg1, arg2) + ret0, _ := ret[0].(*wallet.KeyBind) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddMsgTypeIntoKeyBind indicates an expected call of AddMsgTypeIntoKeyBind. +func (mr *MockIFullAPIMockRecorder) AddMsgTypeIntoKeyBind(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddMsgTypeIntoKeyBind", reflect.TypeOf((*MockIFullAPI)(nil).AddMsgTypeIntoKeyBind), arg0, arg1, arg2) +} + +// AddNewAddress mocks base method. +func (m *MockIFullAPI) AddNewAddress(arg0 context.Context, arg1 []address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddNewAddress", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddNewAddress indicates an expected call of AddNewAddress. +func (mr *MockIFullAPIMockRecorder) AddNewAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNewAddress", reflect.TypeOf((*MockIFullAPI)(nil).AddNewAddress), arg0, arg1) +} + +// AddSupportAccount mocks base method. +func (m *MockIFullAPI) AddSupportAccount(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddSupportAccount", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddSupportAccount indicates an expected call of AddSupportAccount. 
+func (mr *MockIFullAPIMockRecorder) AddSupportAccount(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSupportAccount", reflect.TypeOf((*MockIFullAPI)(nil).AddSupportAccount), arg0, arg1) +} + +// AuthNew mocks base method. +func (m *MockIFullAPI) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthNew", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthNew indicates an expected call of AuthNew. +func (mr *MockIFullAPIMockRecorder) AuthNew(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthNew", reflect.TypeOf((*MockIFullAPI)(nil).AuthNew), arg0, arg1) +} + +// AuthVerify mocks base method. +func (m *MockIFullAPI) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Permission, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthVerify", arg0, arg1) + ret0, _ := ret[0].([]auth.Permission) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthVerify indicates an expected call of AuthVerify. +func (mr *MockIFullAPIMockRecorder) AuthVerify(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthVerify", reflect.TypeOf((*MockIFullAPI)(nil).AuthVerify), arg0, arg1) +} + +// ContainWallet mocks base method. +func (m *MockIFullAPI) ContainWallet(arg0 context.Context, arg1 address.Address) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainWallet", arg0, arg1) + ret0, _ := ret[0].(bool) + return ret0 +} + +// ContainWallet indicates an expected call of ContainWallet. 
+func (mr *MockIFullAPIMockRecorder) ContainWallet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainWallet", reflect.TypeOf((*MockIFullAPI)(nil).ContainWallet), arg0, arg1) +} + +// GetGroupByName mocks base method. +func (m *MockIFullAPI) GetGroupByName(arg0 context.Context, arg1 string) (*wallet.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGroupByName", arg0, arg1) + ret0, _ := ret[0].(*wallet.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGroupByName indicates an expected call of GetGroupByName. +func (mr *MockIFullAPIMockRecorder) GetGroupByName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockIFullAPI)(nil).GetGroupByName), arg0, arg1) +} + +// GetKeyBindByName mocks base method. +func (m *MockIFullAPI) GetKeyBindByName(arg0 context.Context, arg1 string) (*wallet.KeyBind, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetKeyBindByName", arg0, arg1) + ret0, _ := ret[0].(*wallet.KeyBind) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetKeyBindByName indicates an expected call of GetKeyBindByName. +func (mr *MockIFullAPIMockRecorder) GetKeyBindByName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKeyBindByName", reflect.TypeOf((*MockIFullAPI)(nil).GetKeyBindByName), arg0, arg1) +} + +// GetKeyBinds mocks base method. +func (m *MockIFullAPI) GetKeyBinds(arg0 context.Context, arg1 address.Address) ([]*wallet.KeyBind, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetKeyBinds", arg0, arg1) + ret0, _ := ret[0].([]*wallet.KeyBind) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetKeyBinds indicates an expected call of GetKeyBinds. 
+func (mr *MockIFullAPIMockRecorder) GetKeyBinds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKeyBinds", reflect.TypeOf((*MockIFullAPI)(nil).GetKeyBinds), arg0, arg1) +} + +// GetMethodTemplateByName mocks base method. +func (m *MockIFullAPI) GetMethodTemplateByName(arg0 context.Context, arg1 string) (*wallet.MethodTemplate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMethodTemplateByName", arg0, arg1) + ret0, _ := ret[0].(*wallet.MethodTemplate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMethodTemplateByName indicates an expected call of GetMethodTemplateByName. +func (mr *MockIFullAPIMockRecorder) GetMethodTemplateByName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMethodTemplateByName", reflect.TypeOf((*MockIFullAPI)(nil).GetMethodTemplateByName), arg0, arg1) +} + +// GetMsgTypeTemplate mocks base method. +func (m *MockIFullAPI) GetMsgTypeTemplate(arg0 context.Context, arg1 string) (*wallet.MsgTypeTemplate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMsgTypeTemplate", arg0, arg1) + ret0, _ := ret[0].(*wallet.MsgTypeTemplate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMsgTypeTemplate indicates an expected call of GetMsgTypeTemplate. +func (mr *MockIFullAPIMockRecorder) GetMsgTypeTemplate(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMsgTypeTemplate", reflect.TypeOf((*MockIFullAPI)(nil).GetMsgTypeTemplate), arg0, arg1) +} + +// GetWalletTokenInfo mocks base method. 
+func (m *MockIFullAPI) GetWalletTokenInfo(arg0 context.Context, arg1 string) (*wallet.GroupAuth, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWalletTokenInfo", arg0, arg1) + ret0, _ := ret[0].(*wallet.GroupAuth) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWalletTokenInfo indicates an expected call of GetWalletTokenInfo. +func (mr *MockIFullAPIMockRecorder) GetWalletTokenInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWalletTokenInfo", reflect.TypeOf((*MockIFullAPI)(nil).GetWalletTokenInfo), arg0, arg1) +} + +// GetWalletTokensByGroup mocks base method. +func (m *MockIFullAPI) GetWalletTokensByGroup(arg0 context.Context, arg1 string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWalletTokensByGroup", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWalletTokensByGroup indicates an expected call of GetWalletTokensByGroup. +func (mr *MockIFullAPIMockRecorder) GetWalletTokensByGroup(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWalletTokensByGroup", reflect.TypeOf((*MockIFullAPI)(nil).GetWalletTokensByGroup), arg0, arg1) +} + +// ListGroups mocks base method. +func (m *MockIFullAPI) ListGroups(arg0 context.Context, arg1, arg2 int) ([]*wallet.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListGroups", arg0, arg1, arg2) + ret0, _ := ret[0].([]*wallet.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListGroups indicates an expected call of ListGroups. +func (mr *MockIFullAPIMockRecorder) ListGroups(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroups", reflect.TypeOf((*MockIFullAPI)(nil).ListGroups), arg0, arg1, arg2) +} + +// ListKeyBinds mocks base method. 
+func (m *MockIFullAPI) ListKeyBinds(arg0 context.Context, arg1, arg2 int) ([]*wallet.KeyBind, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListKeyBinds", arg0, arg1, arg2) + ret0, _ := ret[0].([]*wallet.KeyBind) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListKeyBinds indicates an expected call of ListKeyBinds. +func (mr *MockIFullAPIMockRecorder) ListKeyBinds(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListKeyBinds", reflect.TypeOf((*MockIFullAPI)(nil).ListKeyBinds), arg0, arg1, arg2) +} + +// ListMethodTemplates mocks base method. +func (m *MockIFullAPI) ListMethodTemplates(arg0 context.Context, arg1, arg2 int) ([]*wallet.MethodTemplate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMethodTemplates", arg0, arg1, arg2) + ret0, _ := ret[0].([]*wallet.MethodTemplate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMethodTemplates indicates an expected call of ListMethodTemplates. +func (mr *MockIFullAPIMockRecorder) ListMethodTemplates(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMethodTemplates", reflect.TypeOf((*MockIFullAPI)(nil).ListMethodTemplates), arg0, arg1, arg2) +} + +// ListMsgTypeTemplates mocks base method. +func (m *MockIFullAPI) ListMsgTypeTemplates(arg0 context.Context, arg1, arg2 int) ([]*wallet.MsgTypeTemplate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMsgTypeTemplates", arg0, arg1, arg2) + ret0, _ := ret[0].([]*wallet.MsgTypeTemplate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMsgTypeTemplates indicates an expected call of ListMsgTypeTemplates. 
+func (mr *MockIFullAPIMockRecorder) ListMsgTypeTemplates(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMsgTypeTemplates", reflect.TypeOf((*MockIFullAPI)(nil).ListMsgTypeTemplates), arg0, arg1, arg2) +} + +// Lock mocks base method. +func (m *MockIFullAPI) Lock(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Lock", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Lock indicates an expected call of Lock. +func (mr *MockIFullAPIMockRecorder) Lock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockIFullAPI)(nil).Lock), arg0, arg1) +} + +// LockState mocks base method. +func (m *MockIFullAPI) LockState(arg0 context.Context) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LockState", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// LockState indicates an expected call of LockState. +func (mr *MockIFullAPIMockRecorder) LockState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockState", reflect.TypeOf((*MockIFullAPI)(nil).LockState), arg0) +} + +// LogList mocks base method. +func (m *MockIFullAPI) LogList(arg0 context.Context) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogList", arg0) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LogList indicates an expected call of LogList. +func (mr *MockIFullAPIMockRecorder) LogList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogList", reflect.TypeOf((*MockIFullAPI)(nil).LogList), arg0) +} + +// LogSetLevel mocks base method. 
+func (m *MockIFullAPI) LogSetLevel(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogSetLevel", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// LogSetLevel indicates an expected call of LogSetLevel. +func (mr *MockIFullAPIMockRecorder) LogSetLevel(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogSetLevel", reflect.TypeOf((*MockIFullAPI)(nil).LogSetLevel), arg0, arg1, arg2) +} + +// NewGroup mocks base method. +func (m *MockIFullAPI) NewGroup(arg0 context.Context, arg1 string, arg2 []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewGroup", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// NewGroup indicates an expected call of NewGroup. +func (mr *MockIFullAPIMockRecorder) NewGroup(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewGroup", reflect.TypeOf((*MockIFullAPI)(nil).NewGroup), arg0, arg1, arg2) +} + +// NewKeyBindCustom mocks base method. +func (m *MockIFullAPI) NewKeyBindCustom(arg0 context.Context, arg1 string, arg2 address.Address, arg3 []int, arg4 []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewKeyBindCustom", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// NewKeyBindCustom indicates an expected call of NewKeyBindCustom. +func (mr *MockIFullAPIMockRecorder) NewKeyBindCustom(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewKeyBindCustom", reflect.TypeOf((*MockIFullAPI)(nil).NewKeyBindCustom), arg0, arg1, arg2, arg3, arg4) +} + +// NewKeyBindFromTemplate mocks base method. 
+func (m *MockIFullAPI) NewKeyBindFromTemplate(arg0 context.Context, arg1 string, arg2 address.Address, arg3, arg4 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewKeyBindFromTemplate", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// NewKeyBindFromTemplate indicates an expected call of NewKeyBindFromTemplate. +func (mr *MockIFullAPIMockRecorder) NewKeyBindFromTemplate(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewKeyBindFromTemplate", reflect.TypeOf((*MockIFullAPI)(nil).NewKeyBindFromTemplate), arg0, arg1, arg2, arg3, arg4) +} + +// NewMethodTemplate mocks base method. +func (m *MockIFullAPI) NewMethodTemplate(arg0 context.Context, arg1 string, arg2 []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewMethodTemplate", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// NewMethodTemplate indicates an expected call of NewMethodTemplate. +func (mr *MockIFullAPIMockRecorder) NewMethodTemplate(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewMethodTemplate", reflect.TypeOf((*MockIFullAPI)(nil).NewMethodTemplate), arg0, arg1, arg2) +} + +// NewMsgTypeTemplate mocks base method. +func (m *MockIFullAPI) NewMsgTypeTemplate(arg0 context.Context, arg1 string, arg2 []int) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewMsgTypeTemplate", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// NewMsgTypeTemplate indicates an expected call of NewMsgTypeTemplate. +func (mr *MockIFullAPIMockRecorder) NewMsgTypeTemplate(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewMsgTypeTemplate", reflect.TypeOf((*MockIFullAPI)(nil).NewMsgTypeTemplate), arg0, arg1, arg2) +} + +// NewStToken mocks base method. 
+func (m *MockIFullAPI) NewStToken(arg0 context.Context, arg1 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewStToken", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewStToken indicates an expected call of NewStToken. +func (mr *MockIFullAPIMockRecorder) NewStToken(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewStToken", reflect.TypeOf((*MockIFullAPI)(nil).NewStToken), arg0, arg1) +} + +// RemoveGroup mocks base method. +func (m *MockIFullAPI) RemoveGroup(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveGroup", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveGroup indicates an expected call of RemoveGroup. +func (mr *MockIFullAPIMockRecorder) RemoveGroup(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveGroup", reflect.TypeOf((*MockIFullAPI)(nil).RemoveGroup), arg0, arg1) +} + +// RemoveKeyBind mocks base method. +func (m *MockIFullAPI) RemoveKeyBind(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveKeyBind", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveKeyBind indicates an expected call of RemoveKeyBind. +func (mr *MockIFullAPIMockRecorder) RemoveKeyBind(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveKeyBind", reflect.TypeOf((*MockIFullAPI)(nil).RemoveKeyBind), arg0, arg1) +} + +// RemoveKeyBindByAddress mocks base method. 
+func (m *MockIFullAPI) RemoveKeyBindByAddress(arg0 context.Context, arg1 address.Address) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveKeyBindByAddress", arg0, arg1) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RemoveKeyBindByAddress indicates an expected call of RemoveKeyBindByAddress. +func (mr *MockIFullAPIMockRecorder) RemoveKeyBindByAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveKeyBindByAddress", reflect.TypeOf((*MockIFullAPI)(nil).RemoveKeyBindByAddress), arg0, arg1) +} + +// RemoveMethodFromKeyBind mocks base method. +func (m *MockIFullAPI) RemoveMethodFromKeyBind(arg0 context.Context, arg1 string, arg2 []string) (*wallet.KeyBind, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveMethodFromKeyBind", arg0, arg1, arg2) + ret0, _ := ret[0].(*wallet.KeyBind) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RemoveMethodFromKeyBind indicates an expected call of RemoveMethodFromKeyBind. +func (mr *MockIFullAPIMockRecorder) RemoveMethodFromKeyBind(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveMethodFromKeyBind", reflect.TypeOf((*MockIFullAPI)(nil).RemoveMethodFromKeyBind), arg0, arg1, arg2) +} + +// RemoveMethodTemplate mocks base method. +func (m *MockIFullAPI) RemoveMethodTemplate(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveMethodTemplate", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveMethodTemplate indicates an expected call of RemoveMethodTemplate. 
+func (mr *MockIFullAPIMockRecorder) RemoveMethodTemplate(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveMethodTemplate", reflect.TypeOf((*MockIFullAPI)(nil).RemoveMethodTemplate), arg0, arg1) +} + +// RemoveMsgTypeFromKeyBind mocks base method. +func (m *MockIFullAPI) RemoveMsgTypeFromKeyBind(arg0 context.Context, arg1 string, arg2 []int) (*wallet.KeyBind, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveMsgTypeFromKeyBind", arg0, arg1, arg2) + ret0, _ := ret[0].(*wallet.KeyBind) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RemoveMsgTypeFromKeyBind indicates an expected call of RemoveMsgTypeFromKeyBind. +func (mr *MockIFullAPIMockRecorder) RemoveMsgTypeFromKeyBind(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveMsgTypeFromKeyBind", reflect.TypeOf((*MockIFullAPI)(nil).RemoveMsgTypeFromKeyBind), arg0, arg1, arg2) +} + +// RemoveMsgTypeTemplate mocks base method. +func (m *MockIFullAPI) RemoveMsgTypeTemplate(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveMsgTypeTemplate", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveMsgTypeTemplate indicates an expected call of RemoveMsgTypeTemplate. +func (mr *MockIFullAPIMockRecorder) RemoveMsgTypeTemplate(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveMsgTypeTemplate", reflect.TypeOf((*MockIFullAPI)(nil).RemoveMsgTypeTemplate), arg0, arg1) +} + +// RemoveStToken mocks base method. +func (m *MockIFullAPI) RemoveStToken(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveStToken", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveStToken indicates an expected call of RemoveStToken. 
+func (mr *MockIFullAPIMockRecorder) RemoveStToken(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveStToken", reflect.TypeOf((*MockIFullAPI)(nil).RemoveStToken), arg0, arg1) +} + +// ScopeWallet mocks base method. +func (m *MockIFullAPI) ScopeWallet(arg0 context.Context) (*wallet.AddressScope, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ScopeWallet", arg0) + ret0, _ := ret[0].(*wallet.AddressScope) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ScopeWallet indicates an expected call of ScopeWallet. +func (mr *MockIFullAPIMockRecorder) ScopeWallet(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScopeWallet", reflect.TypeOf((*MockIFullAPI)(nil).ScopeWallet), arg0) +} + +// SetPassword mocks base method. +func (m *MockIFullAPI) SetPassword(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPassword", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetPassword indicates an expected call of SetPassword. +func (mr *MockIFullAPIMockRecorder) SetPassword(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPassword", reflect.TypeOf((*MockIFullAPI)(nil).SetPassword), arg0, arg1) +} + +// Unlock mocks base method. +func (m *MockIFullAPI) Unlock(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Unlock", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Unlock indicates an expected call of Unlock. +func (mr *MockIFullAPIMockRecorder) Unlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockIFullAPI)(nil).Unlock), arg0, arg1) +} + +// Verify mocks base method. 
+func (m *MockIFullAPI) Verify(arg0 context.Context, arg1 address.Address, arg2 types.MsgType, arg3 *internal.Message) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Verify", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Verify indicates an expected call of Verify. +func (mr *MockIFullAPIMockRecorder) Verify(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockIFullAPI)(nil).Verify), arg0, arg1, arg2, arg3) +} + +// VerifyPassword mocks base method. +func (m *MockIFullAPI) VerifyPassword(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyPassword", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyPassword indicates an expected call of VerifyPassword. +func (mr *MockIFullAPIMockRecorder) VerifyPassword(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPassword", reflect.TypeOf((*MockIFullAPI)(nil).VerifyPassword), arg0, arg1) +} + +// Version mocks base method. +func (m *MockIFullAPI) Version(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockIFullAPIMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockIFullAPI)(nil).Version), arg0) +} + +// WalletDelete mocks base method. +func (m *MockIFullAPI) WalletDelete(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletDelete indicates an expected call of WalletDelete. 
+func (mr *MockIFullAPIMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockIFullAPI)(nil).WalletDelete), arg0, arg1) +} + +// WalletExport mocks base method. +func (m *MockIFullAPI) WalletExport(arg0 context.Context, arg1 address.Address) (*types.KeyInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletExport", arg0, arg1) + ret0, _ := ret[0].(*types.KeyInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletExport indicates an expected call of WalletExport. +func (mr *MockIFullAPIMockRecorder) WalletExport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockIFullAPI)(nil).WalletExport), arg0, arg1) +} + +// WalletHas mocks base method. +func (m *MockIFullAPI) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas. +func (mr *MockIFullAPIMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockIFullAPI)(nil).WalletHas), arg0, arg1) +} + +// WalletImport mocks base method. +func (m *MockIFullAPI) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletImport", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletImport indicates an expected call of WalletImport. 
+func (mr *MockIFullAPIMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockIFullAPI)(nil).WalletImport), arg0, arg1) +} + +// WalletList mocks base method. +func (m *MockIFullAPI) WalletList(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletList", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletList indicates an expected call of WalletList. +func (mr *MockIFullAPIMockRecorder) WalletList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletList", reflect.TypeOf((*MockIFullAPI)(nil).WalletList), arg0) +} + +// WalletNew mocks base method. +func (m *MockIFullAPI) WalletNew(arg0 context.Context, arg1 types.KeyType) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletNew", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletNew indicates an expected call of WalletNew. +func (mr *MockIFullAPIMockRecorder) WalletNew(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNew", reflect.TypeOf((*MockIFullAPI)(nil).WalletNew), arg0, arg1) +} + +// WalletSign mocks base method. +func (m *MockIFullAPI) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 types.MsgMeta) (*crypto.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*crypto.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSign indicates an expected call of WalletSign. 
+func (mr *MockIFullAPIMockRecorder) WalletSign(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockIFullAPI)(nil).WalletSign), arg0, arg1, arg2, arg3) +} diff --git a/venus-shared/api/wallet/proxy_gen.go b/venus-shared/api/wallet/proxy_gen.go new file mode 100644 index 0000000000..fad48f184c --- /dev/null +++ b/venus-shared/api/wallet/proxy_gen.go @@ -0,0 +1,256 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/api-gen. DO NOT EDIT. +package wallet + +import ( + "context" + + address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/wallet" +) + +type IStrategyVerifyStruct struct { + Internal struct { + ContainWallet func(ctx context.Context, address address.Address) bool `perm:"admin"` + ScopeWallet func(ctx context.Context) (*wallet.AddressScope, error) `perm:"admin"` + Verify func(ctx context.Context, address address.Address, msgType types.MsgType, msg *types.Message) error `perm:"admin"` + } +} + +func (s *IStrategyVerifyStruct) ContainWallet(p0 context.Context, p1 address.Address) bool { + return s.Internal.ContainWallet(p0, p1) +} +func (s *IStrategyVerifyStruct) ScopeWallet(p0 context.Context) (*wallet.AddressScope, error) { + return s.Internal.ScopeWallet(p0) +} +func (s *IStrategyVerifyStruct) Verify(p0 context.Context, p1 address.Address, p2 types.MsgType, p3 *types.Message) error { + return s.Internal.Verify(p0, p1, p2, p3) +} + +type IStrategyStruct struct { + Internal struct { + AddMethodIntoKeyBind func(ctx context.Context, name string, methods []string) (*wallet.KeyBind, error) `perm:"admin"` + AddMsgTypeIntoKeyBind func(ctx context.Context, name string, codes []int) (*wallet.KeyBind, error) `perm:"admin"` + 
GetGroupByName func(ctx context.Context, name string) (*wallet.Group, error) `perm:"admin"` + GetKeyBindByName func(ctx context.Context, name string) (*wallet.KeyBind, error) `perm:"admin"` + GetKeyBinds func(ctx context.Context, address address.Address) ([]*wallet.KeyBind, error) `perm:"admin"` + GetMethodTemplateByName func(ctx context.Context, name string) (*wallet.MethodTemplate, error) `perm:"admin"` + GetMsgTypeTemplate func(ctx context.Context, name string) (*wallet.MsgTypeTemplate, error) `perm:"admin"` + GetWalletTokenInfo func(ctx context.Context, token string) (*wallet.GroupAuth, error) `perm:"admin"` + GetWalletTokensByGroup func(ctx context.Context, groupName string) ([]string, error) `perm:"admin"` + ListGroups func(ctx context.Context, fromIndex, toIndex int) ([]*wallet.Group, error) `perm:"admin"` + ListKeyBinds func(ctx context.Context, fromIndex, toIndex int) ([]*wallet.KeyBind, error) `perm:"admin"` + ListMethodTemplates func(ctx context.Context, fromIndex, toIndex int) ([]*wallet.MethodTemplate, error) `perm:"admin"` + ListMsgTypeTemplates func(ctx context.Context, fromIndex, toIndex int) ([]*wallet.MsgTypeTemplate, error) `perm:"admin"` + NewGroup func(ctx context.Context, name string, keyBindNames []string) error `perm:"admin"` + NewKeyBindCustom func(ctx context.Context, name string, address address.Address, codes []int, methods []wallet.MethodName) error `perm:"admin"` + NewKeyBindFromTemplate func(ctx context.Context, name string, address address.Address, mttName, mtName string) error `perm:"admin"` + NewMethodTemplate func(ctx context.Context, name string, methods []string) error `perm:"admin"` + NewMsgTypeTemplate func(ctx context.Context, name string, codes []int) error `perm:"admin"` + NewStToken func(ctx context.Context, groupName string) (token string, err error) `perm:"admin"` + RemoveGroup func(ctx context.Context, name string) error `perm:"admin"` + RemoveKeyBind func(ctx context.Context, name string) error `perm:"admin"` + 
RemoveKeyBindByAddress func(ctx context.Context, address address.Address) (int64, error) `perm:"admin"` + RemoveMethodFromKeyBind func(ctx context.Context, name string, methods []string) (*wallet.KeyBind, error) `perm:"admin"` + RemoveMethodTemplate func(ctx context.Context, name string) error `perm:"admin"` + RemoveMsgTypeFromKeyBind func(ctx context.Context, name string, codes []int) (*wallet.KeyBind, error) `perm:"admin"` + RemoveMsgTypeTemplate func(ctx context.Context, name string) error `perm:"admin"` + RemoveStToken func(ctx context.Context, token string) error `perm:"admin"` + } +} + +func (s *IStrategyStruct) AddMethodIntoKeyBind(p0 context.Context, p1 string, p2 []string) (*wallet.KeyBind, error) { + return s.Internal.AddMethodIntoKeyBind(p0, p1, p2) +} +func (s *IStrategyStruct) AddMsgTypeIntoKeyBind(p0 context.Context, p1 string, p2 []int) (*wallet.KeyBind, error) { + return s.Internal.AddMsgTypeIntoKeyBind(p0, p1, p2) +} +func (s *IStrategyStruct) GetGroupByName(p0 context.Context, p1 string) (*wallet.Group, error) { + return s.Internal.GetGroupByName(p0, p1) +} +func (s *IStrategyStruct) GetKeyBindByName(p0 context.Context, p1 string) (*wallet.KeyBind, error) { + return s.Internal.GetKeyBindByName(p0, p1) +} +func (s *IStrategyStruct) GetKeyBinds(p0 context.Context, p1 address.Address) ([]*wallet.KeyBind, error) { + return s.Internal.GetKeyBinds(p0, p1) +} +func (s *IStrategyStruct) GetMethodTemplateByName(p0 context.Context, p1 string) (*wallet.MethodTemplate, error) { + return s.Internal.GetMethodTemplateByName(p0, p1) +} +func (s *IStrategyStruct) GetMsgTypeTemplate(p0 context.Context, p1 string) (*wallet.MsgTypeTemplate, error) { + return s.Internal.GetMsgTypeTemplate(p0, p1) +} +func (s *IStrategyStruct) GetWalletTokenInfo(p0 context.Context, p1 string) (*wallet.GroupAuth, error) { + return s.Internal.GetWalletTokenInfo(p0, p1) +} +func (s *IStrategyStruct) GetWalletTokensByGroup(p0 context.Context, p1 string) ([]string, error) { + return 
s.Internal.GetWalletTokensByGroup(p0, p1) +} +func (s *IStrategyStruct) ListGroups(p0 context.Context, p1, p2 int) ([]*wallet.Group, error) { + return s.Internal.ListGroups(p0, p1, p2) +} +func (s *IStrategyStruct) ListKeyBinds(p0 context.Context, p1, p2 int) ([]*wallet.KeyBind, error) { + return s.Internal.ListKeyBinds(p0, p1, p2) +} +func (s *IStrategyStruct) ListMethodTemplates(p0 context.Context, p1, p2 int) ([]*wallet.MethodTemplate, error) { + return s.Internal.ListMethodTemplates(p0, p1, p2) +} +func (s *IStrategyStruct) ListMsgTypeTemplates(p0 context.Context, p1, p2 int) ([]*wallet.MsgTypeTemplate, error) { + return s.Internal.ListMsgTypeTemplates(p0, p1, p2) +} +func (s *IStrategyStruct) NewGroup(p0 context.Context, p1 string, p2 []string) error { + return s.Internal.NewGroup(p0, p1, p2) +} +func (s *IStrategyStruct) NewKeyBindCustom(p0 context.Context, p1 string, p2 address.Address, p3 []int, p4 []wallet.MethodName) error { + return s.Internal.NewKeyBindCustom(p0, p1, p2, p3, p4) +} +func (s *IStrategyStruct) NewKeyBindFromTemplate(p0 context.Context, p1 string, p2 address.Address, p3, p4 string) error { + return s.Internal.NewKeyBindFromTemplate(p0, p1, p2, p3, p4) +} +func (s *IStrategyStruct) NewMethodTemplate(p0 context.Context, p1 string, p2 []string) error { + return s.Internal.NewMethodTemplate(p0, p1, p2) +} +func (s *IStrategyStruct) NewMsgTypeTemplate(p0 context.Context, p1 string, p2 []int) error { + return s.Internal.NewMsgTypeTemplate(p0, p1, p2) +} +func (s *IStrategyStruct) NewStToken(p0 context.Context, p1 string) (string, error) { + return s.Internal.NewStToken(p0, p1) +} +func (s *IStrategyStruct) RemoveGroup(p0 context.Context, p1 string) error { + return s.Internal.RemoveGroup(p0, p1) +} +func (s *IStrategyStruct) RemoveKeyBind(p0 context.Context, p1 string) error { + return s.Internal.RemoveKeyBind(p0, p1) +} +func (s *IStrategyStruct) RemoveKeyBindByAddress(p0 context.Context, p1 address.Address) (int64, error) { + return 
s.Internal.RemoveKeyBindByAddress(p0, p1) +} +func (s *IStrategyStruct) RemoveMethodFromKeyBind(p0 context.Context, p1 string, p2 []string) (*wallet.KeyBind, error) { + return s.Internal.RemoveMethodFromKeyBind(p0, p1, p2) +} +func (s *IStrategyStruct) RemoveMethodTemplate(p0 context.Context, p1 string) error { + return s.Internal.RemoveMethodTemplate(p0, p1) +} +func (s *IStrategyStruct) RemoveMsgTypeFromKeyBind(p0 context.Context, p1 string, p2 []int) (*wallet.KeyBind, error) { + return s.Internal.RemoveMsgTypeFromKeyBind(p0, p1, p2) +} +func (s *IStrategyStruct) RemoveMsgTypeTemplate(p0 context.Context, p1 string) error { + return s.Internal.RemoveMsgTypeTemplate(p0, p1) +} +func (s *IStrategyStruct) RemoveStToken(p0 context.Context, p1 string) error { + return s.Internal.RemoveStToken(p0, p1) +} + +type ILocalStrategyStruct struct { + IStrategyVerifyStruct + IStrategyStruct +} + +type IWalletStruct struct { + Internal struct { + WalletDelete func(ctx context.Context, addr address.Address) error `perm:"admin"` + WalletExport func(ctx context.Context, addr address.Address) (*types.KeyInfo, error) `perm:"admin"` + WalletHas func(ctx context.Context, address address.Address) (bool, error) `perm:"read"` + WalletImport func(ctx context.Context, ki *types.KeyInfo) (address.Address, error) `perm:"admin"` + WalletList func(ctx context.Context) ([]address.Address, error) `perm:"read"` + WalletNew func(ctx context.Context, kt types.KeyType) (address.Address, error) `perm:"admin"` + WalletSign func(ctx context.Context, signer address.Address, toSign []byte, meta types.MsgMeta) (*crypto.Signature, error) `perm:"sign"` + } +} + +func (s *IWalletStruct) WalletDelete(p0 context.Context, p1 address.Address) error { + return s.Internal.WalletDelete(p0, p1) +} +func (s *IWalletStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + return s.Internal.WalletExport(p0, p1) +} +func (s *IWalletStruct) WalletHas(p0 context.Context, p1 address.Address) 
(bool, error) { + return s.Internal.WalletHas(p0, p1) +} +func (s *IWalletStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + return s.Internal.WalletImport(p0, p1) +} +func (s *IWalletStruct) WalletList(p0 context.Context) ([]address.Address, error) { + return s.Internal.WalletList(p0) +} +func (s *IWalletStruct) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { + return s.Internal.WalletNew(p0, p1) +} +func (s *IWalletStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte, p3 types.MsgMeta) (*crypto.Signature, error) { + return s.Internal.WalletSign(p0, p1, p2, p3) +} + +type IWalletLockStruct struct { + Internal struct { + Lock func(ctx context.Context, password string) error `perm:"admin"` + LockState func(ctx context.Context) bool `perm:"admin"` + SetPassword func(ctx context.Context, password string) error `perm:"admin"` + Unlock func(ctx context.Context, password string) error `perm:"admin"` + VerifyPassword func(ctx context.Context, password string) error `perm:"admin"` + } +} + +func (s *IWalletLockStruct) Lock(p0 context.Context, p1 string) error { return s.Internal.Lock(p0, p1) } +func (s *IWalletLockStruct) LockState(p0 context.Context) bool { return s.Internal.LockState(p0) } +func (s *IWalletLockStruct) SetPassword(p0 context.Context, p1 string) error { + return s.Internal.SetPassword(p0, p1) +} +func (s *IWalletLockStruct) Unlock(p0 context.Context, p1 string) error { + return s.Internal.Unlock(p0, p1) +} +func (s *IWalletLockStruct) VerifyPassword(p0 context.Context, p1 string) error { + return s.Internal.VerifyPassword(p0, p1) +} + +type ILocalWalletStruct struct { + IWalletStruct + IWalletLockStruct +} + +type ICommonStruct struct { + Internal struct { + AuthNew func(ctx context.Context, perms []auth.Permission) ([]byte, error) `perm:"admin"` + AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"read"` + LogList func(context.Context) 
([]string, error) `perm:"read"` + LogSetLevel func(context.Context, string, string) error `perm:"write"` + Version func(ctx context.Context) (types.Version, error) `perm:"read"` + } +} + +func (s *ICommonStruct) AuthNew(p0 context.Context, p1 []auth.Permission) ([]byte, error) { + return s.Internal.AuthNew(p0, p1) +} +func (s *ICommonStruct) AuthVerify(p0 context.Context, p1 string) ([]auth.Permission, error) { + return s.Internal.AuthVerify(p0, p1) +} +func (s *ICommonStruct) LogList(p0 context.Context) ([]string, error) { return s.Internal.LogList(p0) } +func (s *ICommonStruct) LogSetLevel(p0 context.Context, p1 string, p2 string) error { + return s.Internal.LogSetLevel(p0, p1, p2) +} +func (s *ICommonStruct) Version(p0 context.Context) (types.Version, error) { + return s.Internal.Version(p0) +} + +type IWalletEventStruct struct { + Internal struct { + AddNewAddress func(ctx context.Context, newAddrs []address.Address) error `perm:"admin"` + AddSupportAccount func(ctx context.Context, supportAccount string) error `perm:"admin"` + } +} + +func (s *IWalletEventStruct) AddNewAddress(p0 context.Context, p1 []address.Address) error { + return s.Internal.AddNewAddress(p0, p1) +} +func (s *IWalletEventStruct) AddSupportAccount(p0 context.Context, p1 string) error { + return s.Internal.AddSupportAccount(p0, p1) +} + +type IFullAPIStruct struct { + ILocalStrategyStruct + ILocalWalletStruct + ICommonStruct + IWalletEventStruct +} diff --git a/venus-shared/api/wallet/strategy.go b/venus-shared/api/wallet/strategy.go new file mode 100644 index 0000000000..b19f643fdb --- /dev/null +++ b/venus-shared/api/wallet/strategy.go @@ -0,0 +1,82 @@ +package wallet + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/wallet" +) + +type ILocalStrategy interface { + IStrategyVerify + IStrategy +} + +type IStrategyVerify interface { + // Verify verify the address 
strategy permissions + Verify(ctx context.Context, address address.Address, msgType types.MsgType, msg *types.Message) error //perm:admin + // ScopeWallet get the wallet scope + ScopeWallet(ctx context.Context) (*wallet.AddressScope, error) //perm:admin + // ContainWallet Check if it is visible to the wallet + ContainWallet(ctx context.Context, address address.Address) bool //perm:admin +} + +type IStrategy interface { + // NewMsgTypeTemplate create a msgType template + NewMsgTypeTemplate(ctx context.Context, name string, codes []int) error //perm:admin + // NewMethodTemplate create a method template + NewMethodTemplate(ctx context.Context, name string, methods []string) error //perm:admin + // NewKeyBindCustom create a keyBind with custom msyTypes and methods + NewKeyBindCustom(ctx context.Context, name string, address address.Address, codes []int, methods []wallet.MethodName) error //perm:admin + // NewKeyBindFromTemplate create a keyBind form msgType template and method template + NewKeyBindFromTemplate(ctx context.Context, name string, address address.Address, mttName, mtName string) error //perm:admin + // NewGroup create a group to group multiple keyBinds together + NewGroup(ctx context.Context, name string, keyBindNames []string) error //perm:admin + // NewStToken generate a random token from group + NewStToken(ctx context.Context, groupName string) (token string, err error) //perm:admin + // GetMsgTypeTemplate get a msgType template by name + GetMsgTypeTemplate(ctx context.Context, name string) (*wallet.MsgTypeTemplate, error) //perm:admin + // GetMethodTemplateByName get a method template by name + GetMethodTemplateByName(ctx context.Context, name string) (*wallet.MethodTemplate, error) //perm:admin + // GetKeyBindByName get a keyBind by name + GetKeyBindByName(ctx context.Context, name string) (*wallet.KeyBind, error) //perm:admin + // GetKeyBinds list keyBinds by address + GetKeyBinds(ctx context.Context, address address.Address) ([]*wallet.KeyBind, 
error) //perm:admin + // GetGroupByName get a group by name + GetGroupByName(ctx context.Context, name string) (*wallet.Group, error) //perm:admin + // GetWalletTokensByGroup list strategy tokens under the group + GetWalletTokensByGroup(ctx context.Context, groupName string) ([]string, error) //perm:admin + // GetWalletTokenInfo get group details by token + GetWalletTokenInfo(ctx context.Context, token string) (*wallet.GroupAuth, error) //perm:admin + // ListGroups list groups' simple information + ListGroups(ctx context.Context, fromIndex, toIndex int) ([]*wallet.Group, error) //perm:admin + // ListKeyBinds list keyBinds' details + ListKeyBinds(ctx context.Context, fromIndex, toIndex int) ([]*wallet.KeyBind, error) //perm:admin + // ListMethodTemplates list method templates' details + ListMethodTemplates(ctx context.Context, fromIndex, toIndex int) ([]*wallet.MethodTemplate, error) //perm:admin + // ListMsgTypeTemplates list msgType templates' details + ListMsgTypeTemplates(ctx context.Context, fromIndex, toIndex int) ([]*wallet.MsgTypeTemplate, error) //perm:admin + + // AddMsgTypeIntoKeyBind append msgTypes into keyBind + AddMsgTypeIntoKeyBind(ctx context.Context, name string, codes []int) (*wallet.KeyBind, error) //perm:admin + // AddMethodIntoKeyBind append methods into keyBind + AddMethodIntoKeyBind(ctx context.Context, name string, methods []string) (*wallet.KeyBind, error) //perm:admin + // RemoveMsgTypeFromKeyBind remove msgTypes form keyBind + RemoveMsgTypeFromKeyBind(ctx context.Context, name string, codes []int) (*wallet.KeyBind, error) //perm:admin + // RemoveMethodFromKeyBind remove methods from keyBind + RemoveMethodFromKeyBind(ctx context.Context, name string, methods []string) (*wallet.KeyBind, error) //perm:admin + + // RemoveMsgTypeTemplate delete msgType template by name + RemoveMsgTypeTemplate(ctx context.Context, name string) error //perm:admin + // RemoveGroup delete group by name + RemoveGroup(ctx context.Context, name string) error 
//perm:admin + // RemoveMethodTemplate delete method template by name + RemoveMethodTemplate(ctx context.Context, name string) error //perm:admin + // RemoveKeyBind delete keyBind by name + RemoveKeyBind(ctx context.Context, name string) error //perm:admin + // RemoveKeyBindByAddress delete some keyBinds by address + RemoveKeyBindByAddress(ctx context.Context, address address.Address) (int64, error) //perm:admin + // RemoveStToken delete strategy token + RemoveStToken(ctx context.Context, token string) error //perm:admin +} diff --git a/venus-shared/api/wallet/wallet.go b/venus-shared/api/wallet/wallet.go new file mode 100644 index 0000000000..120f9036f2 --- /dev/null +++ b/venus-shared/api/wallet/wallet.go @@ -0,0 +1,42 @@ +package wallet + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type ILocalWallet interface { + IWallet + IWalletLock +} + +type IWalletLock interface { + // SetPassword do it first after program setup + SetPassword(ctx context.Context, password string) error //perm:admin + // unlock the wallet and enable IWallet logic + Unlock(ctx context.Context, password string) error //perm:admin + // lock the wallet and disable IWallet logic + Lock(ctx context.Context, password string) error //perm:admin + // show lock state + LockState(ctx context.Context) bool //perm:admin + // VerifyPassword verify that the passwords are consistent + VerifyPassword(ctx context.Context, password string) error //perm:admin +} + +type IWallet interface { + WalletNew(ctx context.Context, kt types.KeyType) (address.Address, error) //perm:admin + WalletHas(ctx context.Context, address address.Address) (bool, error) //perm:read + WalletList(ctx context.Context) ([]address.Address, error) //perm:read + WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta types.MsgMeta) (*crypto.Signature, error) //perm:sign + 
WalletExport(ctx context.Context, addr address.Address) (*types.KeyInfo, error) //perm:admin + WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) //perm:admin + WalletDelete(ctx context.Context, addr address.Address) error //perm:admin +} + +type IWalletEvent interface { + AddSupportAccount(ctx context.Context, supportAccount string) error //perm:admin + AddNewAddress(ctx context.Context, newAddrs []address.Address) error //perm:admin +} diff --git a/venus-shared/bazaar/bazaar.go b/venus-shared/bazaar/bazaar.go new file mode 100644 index 0000000000..f81843bdeb --- /dev/null +++ b/venus-shared/bazaar/bazaar.go @@ -0,0 +1,12 @@ +package bazaar + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/peer" +) + +type Host interface { + RegisterHandler(typ Event, hdl EventHandler) error + Emit(ctx context.Context, evt Event, to ...peer.ID) error +} diff --git a/venus-shared/bazaar/event.go b/venus-shared/bazaar/event.go new file mode 100644 index 0000000000..db2d2c44b9 --- /dev/null +++ b/venus-shared/bazaar/event.go @@ -0,0 +1,16 @@ +package bazaar + +import "github.com/libp2p/go-libp2p/core/peer" + +// developers can define the types except Ping +const EventPing EventType = 0 + +type EventType uint64 + +type Event struct { + Type EventType + Data []byte + Time int64 +} + +type EventHandler func(peer.ID, *Event) error diff --git a/venus-shared/blockstore/apibstore.go b/venus-shared/blockstore/apibstore.go new file mode 100644 index 0000000000..cdc803d130 --- /dev/null +++ b/venus-shared/blockstore/apibstore.go @@ -0,0 +1,65 @@ +package blockstore + +import ( + "context" + "errors" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +type ChainIO interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) +} + +type apiBlockstore struct { + api ChainIO +} + +// This blockstore is adapted in the constructor. 
+var _ BasicBlockstore = (*apiBlockstore)(nil) + +func NewAPIBlockstore(cio ChainIO) Blockstore { + bs := &apiBlockstore{api: cio} + return Adapt(bs) // return an adapted blockstore. +} + +func (a *apiBlockstore) DeleteBlock(context.Context, cid.Cid) error { + return errors.New("not supported") +} + +func (a *apiBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + return a.api.ChainHasObj(ctx, c) +} + +func (a *apiBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + bb, err := a.api.ChainReadObj(ctx, c) + if err != nil { + return nil, err + } + return blocks.NewBlockWithCid(bb, c) +} + +func (a *apiBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + bb, err := a.api.ChainReadObj(ctx, c) + if err != nil { + return 0, err + } + return len(bb), nil +} + +func (a *apiBlockstore) Put(context.Context, blocks.Block) error { + return errors.New("not supported") +} + +func (a *apiBlockstore) PutMany(context.Context, []blocks.Block) error { + return errors.New("not supported") +} + +func (a *apiBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, errors.New("not supported") +} + +func (a *apiBlockstore) HashOnRead(enabled bool) { +} diff --git a/venus-shared/blockstore/autobatch.go b/venus-shared/blockstore/autobatch.go new file mode 100644 index 0000000000..be06ee8a69 --- /dev/null +++ b/venus-shared/blockstore/autobatch.go @@ -0,0 +1,266 @@ +package blockstore + +import ( + "context" + "errors" + "sync" + "time" + + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" +) + +// autolog is a logger for the autobatching blockstore. It is subscoped from the +// blockstore logger. 
+var autolog = log.Named("auto") + +// contains the same set of blocks twice, once as an ordered list for flushing, and as a map for fast access +type blockBatch struct { + blockList []block.Block + blockMap map[cid.Cid]block.Block +} + +type AutobatchBlockstore struct { + // TODO: drop if memory consumption is too high + addedCids map[cid.Cid]struct{} + + stateLock sync.Mutex + bufferedBatch blockBatch + + flushingBatch blockBatch + flushErr error + + flushCh chan struct{} + + doFlushLock sync.Mutex + flushRetryDelay time.Duration + doneCh chan struct{} + shutdown context.CancelFunc + + backingBs Blockstore + + bufferCapacity int + bufferSize int +} + +func NewAutobatch(ctx context.Context, backingBs Blockstore, bufferCapacity int) *AutobatchBlockstore { + ctx, cancel := context.WithCancel(ctx) + bs := &AutobatchBlockstore{ + addedCids: make(map[cid.Cid]struct{}), + backingBs: backingBs, + bufferCapacity: bufferCapacity, + flushCh: make(chan struct{}, 1), + doneCh: make(chan struct{}), + // could be made configable + flushRetryDelay: time.Millisecond * 100, + shutdown: cancel, + } + + bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block) + + go bs.flushWorker(ctx) + + return bs +} + +func (bs *AutobatchBlockstore) Put(ctx context.Context, blk block.Block) error { + bs.stateLock.Lock() + defer bs.stateLock.Unlock() + + _, ok := bs.addedCids[blk.Cid()] + if !ok { + bs.addedCids[blk.Cid()] = struct{}{} + bs.bufferedBatch.blockList = append(bs.bufferedBatch.blockList, blk) + bs.bufferedBatch.blockMap[blk.Cid()] = blk + bs.bufferSize += len(blk.RawData()) + if bs.bufferSize >= bs.bufferCapacity { + // signal that a flush is appropriate, may be ignored + select { + case bs.flushCh <- struct{}{}: + default: + // do nothing + } + } + } + + return nil +} + +func (bs *AutobatchBlockstore) flushWorker(ctx context.Context) { + defer close(bs.doneCh) + for { + select { + case <-bs.flushCh: + // TODO: check if we _should_ actually flush. 
We could get a spurious wakeup + // here. + putErr := bs.doFlush(ctx, false) + for putErr != nil { + select { + case <-ctx.Done(): + return + case <-time.After(bs.flushRetryDelay): + autolog.Errorf("FLUSH ERRORED: %w, retrying after %v", putErr, bs.flushRetryDelay) + putErr = bs.doFlush(ctx, true) + } + } + case <-ctx.Done(): + // Do one last flush. + _ = bs.doFlush(ctx, false) + return + } + } +} + +// caller must NOT hold stateLock +// set retryOnly to true to only retry a failed flush and not flush anything new. +func (bs *AutobatchBlockstore) doFlush(ctx context.Context, retryOnly bool) error { + bs.doFlushLock.Lock() + defer bs.doFlushLock.Unlock() + + // If we failed to flush last time, try flushing again. + if bs.flushErr != nil { + bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList) + } + + // If we failed, or we're _only_ retrying, bail. + if retryOnly || bs.flushErr != nil { + return bs.flushErr + } + + // Then take the current batch... + bs.stateLock.Lock() + // We do NOT clear addedCids here, because its purpose is to expedite Puts + bs.flushingBatch = bs.bufferedBatch + bs.bufferedBatch.blockList = make([]block.Block, 0, len(bs.flushingBatch.blockList)) + bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block, len(bs.flushingBatch.blockMap)) + bs.stateLock.Unlock() + + // And try to flush it. + bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList) + + // If we succeeded, reset the batch. Otherwise, we'll try again next time. + if bs.flushErr == nil { + bs.stateLock.Lock() + bs.flushingBatch = blockBatch{} + bs.stateLock.Unlock() + } + + return bs.flushErr +} + +// caller must NOT hold stateLock +func (bs *AutobatchBlockstore) Flush(ctx context.Context) error { + return bs.doFlush(ctx, false) +} + +func (bs *AutobatchBlockstore) Shutdown(ctx context.Context) error { + // TODO: Prevent puts after we call this to avoid losing data. 
+ bs.shutdown() + select { + case <-bs.doneCh: + case <-ctx.Done(): + return ctx.Err() + } + + bs.doFlushLock.Lock() + defer bs.doFlushLock.Unlock() + + return bs.flushErr +} + +func (bs *AutobatchBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) { + // may seem backward to check the backingBs first, but that is the likeliest case + blk, err := bs.backingBs.Get(ctx, c) + if err == nil { + return blk, nil + } + + if !ipld.IsNotFound(err) { + return blk, err + } + + bs.stateLock.Lock() + v, ok := bs.flushingBatch.blockMap[c] + if ok { + bs.stateLock.Unlock() + return v, nil + } + + v, ok = bs.bufferedBatch.blockMap[c] + if ok { + bs.stateLock.Unlock() + return v, nil + } + bs.stateLock.Unlock() + + // We have to check the backing store one more time because it may have been flushed by the + // time we were able to take the lock above. + return bs.backingBs.Get(ctx, c) +} + +func (bs *AutobatchBlockstore) DeleteBlock(context.Context, cid.Cid) error { + // if we wanted to support this, we would have to: + // - flush + // - delete from the backingBs (if present) + // - remove from addedCids (if present) + // - if present in addedCids, also walk the ordered lists and remove if present + return errors.New("deletion is unsupported") +} + +func (bs *AutobatchBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + // see note in DeleteBlock() + return errors.New("deletion is unsupported") +} + +func (bs *AutobatchBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + _, err := bs.Get(ctx, c) + if err == nil { + return true, nil + } + if ipld.IsNotFound(err) { + return false, nil + } + + return false, err +} + +func (bs *AutobatchBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + blk, err := bs.Get(ctx, c) + if err != nil { + return 0, err + } + + return len(blk.RawData()), nil +} + +func (bs *AutobatchBlockstore) PutMany(ctx context.Context, blks []block.Block) error { + for _, blk := range blks { + if err := 
bs.Put(ctx, blk); err != nil { + return err + } + } + + return nil +} + +func (bs *AutobatchBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + if err := bs.Flush(ctx); err != nil { + return nil, err + } + + return bs.backingBs.AllKeysChan(ctx) +} + +func (bs *AutobatchBlockstore) HashOnRead(enabled bool) { + bs.backingBs.HashOnRead(enabled) +} + +func (bs *AutobatchBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error { + blk, err := bs.Get(ctx, cid) + if err != nil { + return err + } + + return callback(blk.RawData()) +} diff --git a/venus-shared/blockstore/autobatch_test.go b/venus-shared/blockstore/autobatch_test.go new file mode 100644 index 0000000000..d82ecf2268 --- /dev/null +++ b/venus-shared/blockstore/autobatch_test.go @@ -0,0 +1,50 @@ +package blockstore + +import ( + "context" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + blocks "github.com/ipfs/go-block-format" + ipld "github.com/ipfs/go-ipld-format" + "github.com/stretchr/testify/require" +) + +var ( + b0 = blocks.NewBlock([]byte("abc")) + b1 = blocks.NewBlock([]byte("foo")) + b2 = blocks.NewBlock([]byte("bar")) + b3 = blocks.NewBlock([]byte("baz")) +) + +func TestAutobatchBlockstore(t *testing.T) { + tf.UnitTest(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ab := NewAutobatch(ctx, NewMemory(), len(b0.RawData())+len(b1.RawData())-1) + + require.NoError(t, ab.Put(ctx, b0)) + require.NoError(t, ab.Put(ctx, b1)) + require.NoError(t, ab.Put(ctx, b2)) + + v0, err := ab.Get(ctx, b0.Cid()) + require.NoError(t, err) + require.Equal(t, b0.RawData(), v0.RawData()) + + v1, err := ab.Get(ctx, b1.Cid()) + require.NoError(t, err) + require.Equal(t, b1.RawData(), v1.RawData()) + + v2, err := ab.Get(ctx, b2.Cid()) + require.NoError(t, err) + require.Equal(t, b2.RawData(), v2.RawData()) + + // Regression test for a deadlock. 
+ _, err = ab.Get(ctx, b3.Cid()) + require.True(t, ipld.IsNotFound(err)) + + require.NoError(t, ab.Flush(ctx)) + require.NoError(t, ab.Shutdown(ctx)) +} diff --git a/venus-shared/blockstore/badger.go b/venus-shared/blockstore/badger.go new file mode 100644 index 0000000000..5211108a71 --- /dev/null +++ b/venus-shared/blockstore/badger.go @@ -0,0 +1,448 @@ +package blockstore + +import ( + "context" + "fmt" + "io" + "sync/atomic" + + "github.com/dgraph-io/badger/v2" + "github.com/dgraph-io/badger/v2/options" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/keytransform" + blockstore "github.com/ipfs/go-ipfs-blockstore" + dshelp "github.com/ipfs/go-ipfs-ds-help" + ipld "github.com/ipfs/go-ipld-format" + "go.uber.org/zap" +) + +// ErrBlockstoreClosed is returned from blockstore operations after +// the blockstore has been closed. +var ErrBlockstoreClosed = fmt.Errorf("badger blockstore closed") + +// aliases to mask badger dependencies. +const ( + // FileIO is equivalent to badger/options.FileIO. + FileIO = options.FileIO + // MemoryMap is equivalent to badger/options.MemoryMap. + MemoryMap = options.MemoryMap + // LoadToRAM is equivalent to badger/options.LoadToRAM. + LoadToRAM = options.LoadToRAM +) + +// Options embeds the badger options themselves, and augments them with +// blockstore-specific options. +type Options struct { + badger.Options + + // Prefix is an optional prefix to prepend to keys. Default: "". + Prefix string +} + +func DefaultOptions(path string) Options { + return Options{ + Options: badger.DefaultOptions(path), + Prefix: "", + } +} + +// BadgerBlockstoreOptions returns the badger options to apply for the provided +// domain. +func BadgerBlockstoreOptions(path string, readonly bool) (Options, error) { + opts := DefaultOptions(path) + + // Due to legacy usage of blockstore.blockstore, over a datastore, all + // blocks are prefixed with this namespace. 
In the future, this can go away, + // in order to shorten keys, but it'll require a migration. + opts.Prefix = "" + + // blockstore values are immutable; therefore we do not expect any + // conflicts to emerge. + opts.DetectConflicts = false + + // This is to optimize the database on close so it can be opened + // read-only and efficiently queried. We don't do that and hanging on + // stop isn't nice. + opts.CompactL0OnClose = false + + // The alternative is "crash on start and tell the user to fix it". This + // will truncate corrupt and unsynced data, which we don't guarantee to + // persist anyways. + opts.Truncate = true + + // We mmap the index and the value logs; this is important to enable + // zero-copy value access. + opts.ValueLogLoadingMode = FileIO + opts.TableLoadingMode = FileIO + + // Embed only values < 128 bytes in the LSM tree; larger values are stored + // in value logs. + opts.ValueThreshold = 128 + + // Default table size is already 64MiB. This is here to make it explicit. + opts.MaxTableSize = 64 << 20 + + // NOTE: The chain blockstore doesn't require any GC (blocks are never + // deleted). This will change if we move to a tiered blockstore. + + opts.ReadOnly = readonly + + return opts, nil +} + +// badgerLogger is a local wrapper for go-log to make the interface +// compatible with badger.Logger (namely, aliasing Warnf to Warningf) +type badgerLogger struct { + *zap.SugaredLogger // skips 1 caller to get useful line info, skipping over badger.Options. + + skip2 *zap.SugaredLogger // skips 2 callers, just like above + this logger. +} + +// Warningf is required by the badger logger APIs. +func (b *badgerLogger) Warningf(format string, args ...interface{}) { + b.skip2.Warnf(format, args...) +} + +const ( + stateOpen int64 = iota + stateClosing + stateClosed +) + +// blockstore is a badger-backed IPLD blockstore. +// +// NOTE: once Close() is called, methods will try their best to return +// ErrBlockstoreClosed. 
This will guaranteed to happen for all subsequent +// operation calls after Close() has returned, but it may not happen for +// operations in progress. Those are likely to fail with a different error. +type BadgerBlockstore struct { + DB *badger.DB + + // state is guarded by atomic. + state int64 + + keyTransform *keytransform.PrefixTransform + + cache IBlockCache +} + +var ( + _ blockstore.Blockstore = (*BadgerBlockstore)(nil) + _ blockstore.Viewer = (*BadgerBlockstore)(nil) + _ io.Closer = (*BadgerBlockstore)(nil) +) + +// Open creates a new badger-backed blockstore, with the supplied options. +func Open(opts Options) (*BadgerBlockstore, error) { + opts.Logger = &badgerLogger{ + SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(), + skip2: log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(), + } + keyTransform := &keytransform.PrefixTransform{ + Prefix: datastore.NewKey(opts.Prefix), + } + db, err := badger.Open(opts.Options) + if err != nil { + return nil, fmt.Errorf("failed to open badger blockstore: %w", err) + } + + cache := NewLruCache(10 * 10000) + bs := &BadgerBlockstore{ + DB: db, + keyTransform: keyTransform, + cache: cache, + } + return bs, nil +} + +// Close closes the store. If the store has already been closed, this noops and +// returns an error, even if the first closure resulted in error. +func (b *BadgerBlockstore) Close() error { + if !atomic.CompareAndSwapInt64(&b.state, stateOpen, stateClosing) { + return nil + } + + defer atomic.StoreInt64(&b.state, stateClosed) + return b.DB.Close() +} + +func (b *BadgerBlockstore) ReadonlyDatastore() *TxBlockstore { + return &TxBlockstore{ + cache: b.cache, + tx: b.DB.NewTransaction(false), + keyTransform: b.keyTransform, + } +} + +// View implements blockstore.Viewer, which leverages zero-copy read-only +// access to values. 
+func (b *BadgerBlockstore) View(ctx context.Context, cid cid.Cid, fn func([]byte) error) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + key := b.ConvertKey(cid) + return b.DB.View(func(txn *badger.Txn) error { + switch item, err := txn.Get(key.Bytes()); err { + case nil: + return item.Value(fn) + case badger.ErrKeyNotFound: + return ipld.ErrNotFound{Cid: cid} + default: + return fmt.Errorf("failed to view block from badger blockstore: %w", err) + } + }) +} + +// Has implements blockstore.Has. +func (b *BadgerBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + if atomic.LoadInt64(&b.state) != stateOpen { + return false, ErrBlockstoreClosed + } + + key := b.ConvertKey(cid) + if b.cache != nil { + if _, has := b.cache.Get(key.String()); has { + return true, nil + } + } + + err := b.DB.View(func(txn *badger.Txn) error { + _, err := txn.Get(key.Bytes()) + return err + }) + + switch err { + case badger.ErrKeyNotFound: + return false, nil + case nil: + return true, nil + default: + return false, fmt.Errorf("failed to check if block exists in badger blockstore: %w", err) + } +} + +// Get implements blockstore.Get. 
+func (b *BadgerBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + if !cid.Defined() { + return nil, ipld.ErrNotFound{Cid: cid} + } + + if atomic.LoadInt64(&b.state) != stateOpen { + return nil, ErrBlockstoreClosed + } + + key := b.ConvertKey(cid) + if b.cache != nil { + if val, has := b.cache.Get(key.String()); has { + return val.(blocks.Block), nil + } + } + + // migrate + // todo just for test + var val []byte + err := b.DB.View(func(txn *badger.Txn) error { + switch item, err := txn.Get(key.Bytes()); err { + case nil: + val, err = item.ValueCopy(nil) + return err + case badger.ErrKeyNotFound: + return ipld.ErrNotFound{Cid: cid} + default: + return fmt.Errorf("failed to get block from badger blockstore: %w", err) + } + }) + if err != nil { + return nil, err + } + blk, err := blocks.NewBlockWithCid(val, cid) + if err != nil { + return nil, err + } + + b.cache.Add(key.String(), blk) + return blk, nil +} + +// GetSize implements blockstore.GetSize. +func (b *BadgerBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + if atomic.LoadInt64(&b.state) != stateOpen { + return -1, ErrBlockstoreClosed + } + + key := b.ConvertKey(cid) + if b.cache != nil { + if val, has := b.cache.Get(key.String()); has { + return len(val.(blocks.Block).RawData()), nil + } + } + + var size int + err := b.DB.View(func(txn *badger.Txn) error { + switch item, err := txn.Get(key.Bytes()); err { + case nil: + size = int(item.ValueSize()) + case badger.ErrKeyNotFound: + return ipld.ErrNotFound{Cid: cid} + default: + return fmt.Errorf("failed to get block size from badger blockstore: %w", err) + } + return nil + }) + if err != nil { + size = -1 + } + return size, err +} + +// Put implements blockstore.Put. 
+func (b *BadgerBlockstore) Put(ctx context.Context, block blocks.Block) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + key := b.ConvertKey(block.Cid()) + if _, ok := b.cache.Get(key.String()); ok { + return nil + } + + err := b.DB.Update(func(txn *badger.Txn) error { + err := txn.Set(key.Bytes(), block.RawData()) + if err == nil { + b.cache.Add(key.String(), block) + } + return err + }) + if err != nil { + err = fmt.Errorf("failed to put block in badger blockstore: %w", err) + } + return err +} + +// PutMany implements blockstore.PutMany. +func (b *BadgerBlockstore) PutMany(ctx context.Context, blks []blocks.Block) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + batch := b.DB.NewWriteBatch() + defer batch.Cancel() + + flushToCache := map[string]blocks.Block{} + for _, block := range blks { + key := b.ConvertKey(block.Cid()) + if _, ok := b.cache.Get(key.String()); ok { + continue + } + + if err := batch.Set(key.Bytes(), block.RawData()); err != nil { + return err + } + + flushToCache[key.String()] = block + } + + err := batch.Flush() + if err != nil { + err = fmt.Errorf("failed to put blocks in badger blockstore: %w", err) + } + // flush to cache + for k, v := range flushToCache { + b.cache.Add(k, v) + } + return err +} + +// DeleteBlock implements blockstore.DeleteBlock. 
+func (b *BadgerBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + key := b.ConvertKey(cid) + return b.DB.Update(func(txn *badger.Txn) error { + err := txn.Delete(key.Bytes()) + if err == nil { + b.cache.Remove(key.String()) + } + return err + }) +} + +func (b *BadgerBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + if atomic.LoadInt64(&b.state) != stateOpen { + return ErrBlockstoreClosed + } + + for _, cid := range cids { + key := b.ConvertKey(cid) + if err := b.DB.Update(func(txn *badger.Txn) error { + err := txn.Delete(key.Bytes()) + if err == nil { + b.cache.Remove(key.String()) + } + return err + }); err != nil { + return err + } + } + return nil +} + +// AllKeysChan implements blockstore.AllKeysChan. +func (b *BadgerBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + if atomic.LoadInt64(&b.state) != stateOpen { + return nil, ErrBlockstoreClosed + } + + txn := b.DB.NewTransaction(false) + opts := badger.IteratorOptions{PrefetchSize: 100} + iter := txn.NewIterator(opts) + + ch := make(chan cid.Cid) + go func() { + defer close(ch) + defer iter.Close() + + // NewCidV1 makes a copy of the multihash buffer, so we can reuse it to + // contain allocs. + for iter.Rewind(); iter.Valid(); iter.Next() { + if ctx.Err() != nil { + return // context has fired. + } + if atomic.LoadInt64(&b.state) != stateOpen { + // open iterators will run even after the database is closed... + return // closing, yield. + } + k := iter.Item().Key() + // need to convert to key.Key using key.KeyFromDsKey. + bk, err := dshelp.BinaryFromDsKey(datastore.RawKey(string(k))) + if err != nil { + log.Warnf("error parsing key from binary: %s", err) + continue + } + cidKey := cid.NewCidV1(cid.Raw, bk) + select { + case <-ctx.Done(): + return + case ch <- cidKey: + } + } + }() + + return ch, nil +} + +// HashOnRead implements blockstore.HashOnRead. 
It is not supported by this +// blockstore. +func (b *BadgerBlockstore) HashOnRead(_ bool) { + log.Warnf("called HashOnRead on badger blockstore; function not supported; ignoring") +} + +func (b *BadgerBlockstore) ConvertKey(cid cid.Cid) datastore.Key { + key := dshelp.MultihashToDsKey(cid.Hash()) + return b.keyTransform.ConvertKey(key) +} diff --git a/venus-shared/blockstore/blockstore.go b/venus-shared/blockstore/blockstore.go new file mode 100644 index 0000000000..b48c8960ef --- /dev/null +++ b/venus-shared/blockstore/blockstore.go @@ -0,0 +1,151 @@ +// blockstore contains all the basic blockstore constructors used by lotus. Any +// blockstore not ultimately constructed out of the building blocks in this +// package may not work properly. +// +// - This package correctly wraps blockstores with the IdBlockstore. This blockstore: +// - Filters out all puts for blocks with CIDs using the "identity" hash function. +// - Extracts inlined blocks from CIDs using the identity hash function and +// returns them on get/has, ignoring the contents of the blockstore. +// - In the future, this package may enforce additional restrictions on block +// sizes, CID validity, etc. +// +// To make auditing for misuse of blockstores tractable, this package re-exports +// parts of the go-ipfs-blockstore package such that no other package needs to +// import it directly. +package blockstore + +import ( + "context" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + blockstore "github.com/ipfs/go-ipfs-blockstore" +) + +type BasicBlockstore = blockstore.Blockstore + +// NewTemporary returns a temporary blockstore. +func NewTemporary() MemBlockstore { + return NewMemory() +} + +// NewTemporarySync returns a thread-safe temporary blockstore. +func NewTemporarySync() *SyncStore { + return &SyncStore{bs: NewMemory()} +} + +// WrapIDStore wraps the underlying blockstore in an "identity" blockstore. 
+func WrapIDStore(bstore blockstore.Blockstore) Blockstore { + return Adapt(blockstore.NewIdStore(bstore)) +} + +// NewBlockstore creates a new blockstore wrapped by the given datastore. +func NewBlockstore(dstore ds.Batching) Blockstore { + return WrapIDStore(blockstore.NewBlockstore(dstore)) +} + +// Blockstore is the blockstore interface used by Lotus. It is the union +// of the basic go-ipfs blockstore, with other capabilities required by Lotus, +// e.g. View or Sync. +type Blockstore interface { + blockstore.Blockstore + blockstore.Viewer + BatchDeleter +} + +// Alias so other packages don't have to import go-ipfs-blockstore +// type Blockstore = blockstore.Blockstore +type ( + Viewer = blockstore.Viewer + GCBlockstore = blockstore.GCBlockstore + CacheOpts = blockstore.CacheOpts + GCLocker = blockstore.GCLocker +) + +type BatchDeleter interface { + DeleteMany(ctx context.Context, cids []cid.Cid) error +} + +var ( + NewGCLocker = blockstore.NewGCLocker + NewGCBlockstore = blockstore.NewGCBlockstore +) + +func DefaultCacheOpts() CacheOpts { + return CacheOpts{ + HasBloomFilterSize: 0, + HasBloomFilterHashes: 0, + HasARCCacheSize: 512 << 10, + } +} + +func CachedBlockstore(ctx context.Context, bs blockstore.Blockstore, opts CacheOpts) (Blockstore, error) { + bsTmp, err := blockstore.CachedBlockstore(ctx, bs, opts) + if err != nil { + return nil, err + } + return WrapIDStore(bsTmp), nil +} + +type adaptedBlockstore struct { + blockstore.Blockstore +} + +var _ Blockstore = (*adaptedBlockstore)(nil) + +func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error { + blk, err := a.Get(ctx, cid) + if err != nil { + return err + } + return callback(blk.RawData()) +} + +func (a *adaptedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + for _, cid := range cids { + err := a.DeleteBlock(ctx, cid) + if err != nil { + return err + } + } + + return nil +} + +// Adapt adapts a standard blockstore to a Lotus 
blockstore by +// enriching it with the extra methods that Lotus requires (e.g. View, Sync). +// +// View proxies over to Get and calls the callback with the value supplied by Get. +// Sync noops. +func Adapt(bs blockstore.Blockstore) Blockstore { + if ret, ok := bs.(Blockstore); ok { + return ret + } + return &adaptedBlockstore{bs} +} + +// FromDatastore creates a new blockstore backed by the given datastore. +func FromDatastore(dstore ds.Batching) Blockstore { + return WrapIDStore(blockstore.NewBlockstore(dstore)) +} + +// BlockstoreGC is a trait for blockstores that support online garbage collection +// consider +type BlockstoreGC interface { // nolint + CollectGarbage(options ...BlockstoreGCOption) error +} + +// BlockstoreGCOption is a functional interface for controlling blockstore GC options +type BlockstoreGCOption = func(*BlockstoreGCOptions) error // nolint + +// BlockstoreGCOptions is a struct with GC options +type BlockstoreGCOptions struct { // nolint + FullGC bool +} + +func WithFullGC(fullgc bool) BlockstoreGCOption { + return func(opts *BlockstoreGCOptions) error { + opts.FullGC = fullgc + return nil + } +} diff --git a/venus-shared/blockstore/buf_bstore.go b/venus-shared/blockstore/buf_bstore.go new file mode 100644 index 0000000000..7959267652 --- /dev/null +++ b/venus-shared/blockstore/buf_bstore.go @@ -0,0 +1,199 @@ +package blockstore + +import ( + "context" + "os" + + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("bufbs") + +type BufferedBS struct { + read Blockstore + write Blockstore + + readviewer Viewer + writeviewer Viewer +} + +func NewBufferedBstore(base Blockstore) *BufferedBS { + var buf Blockstore + if os.Getenv("LOTUS_DISABLE_VM_BUF") == "iknowitsabadidea" { + log.Warn("VM BLOCKSTORE BUFFERING IS DISABLED") + buf = base + } else { + buf = NewTemporary() + } + + bs := &BufferedBS{ + read: base, + write: 
buf, + } + if v, ok := base.(Viewer); ok { + bs.readviewer = v + } + if v, ok := buf.(Viewer); ok { + bs.writeviewer = v + } + if (bs.writeviewer == nil) != (bs.readviewer == nil) { + log.Warnf("one of the stores is not viewable; running less efficiently") + } + return bs +} + +func NewTieredBstore(r Blockstore, w Blockstore) *BufferedBS { + return &BufferedBS{ + read: r, + write: w, + } +} + +var ( + _ Blockstore = (*BufferedBS)(nil) + _ Viewer = (*BufferedBS)(nil) +) + +func (bs *BufferedBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + a, err := bs.read.AllKeysChan(ctx) + if err != nil { + return nil, err + } + + b, err := bs.write.AllKeysChan(ctx) + if err != nil { + return nil, err + } + + out := make(chan cid.Cid) + go func() { + defer close(out) + for a != nil || b != nil { + select { + case val, ok := <-a: + if !ok { + a = nil + } else { + select { + case out <- val: + case <-ctx.Done(): + return + } + } + case val, ok := <-b: + if !ok { + b = nil + } else { + select { + case out <- val: + case <-ctx.Done(): + return + } + } + } + } + }() + + return out, nil +} + +func (bs *BufferedBS) DeleteBlock(ctx context.Context, c cid.Cid) error { + if err := bs.read.DeleteBlock(ctx, c); err != nil { + return err + } + + return bs.write.DeleteBlock(ctx, c) +} + +func (bs *BufferedBS) DeleteMany(ctx context.Context, cids []cid.Cid) error { + if err := bs.read.DeleteMany(ctx, cids); err != nil { + return err + } + + return bs.write.DeleteMany(ctx, cids) +} + +func (bs *BufferedBS) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error { + if bs.writeviewer == nil || bs.readviewer == nil { + // one of the stores isn't Viewer; fall back to pure Get behaviour. + blk, err := bs.Get(ctx, c) + if err != nil { + return err + } + return callback(blk.RawData()) + } + + // both stores are viewable. + if err := bs.writeviewer.View(ctx, c, callback); ipld.IsNotFound(err) { + // not found in write blockstore; fall through. 
+ } else { + return err // propagate errors, or nil, i.e. found. + } + return bs.readviewer.View(ctx, c, callback) +} + +func (bs *BufferedBS) Get(ctx context.Context, c cid.Cid) (block.Block, error) { + if out, err := bs.write.Get(ctx, c); err != nil { + if !ipld.IsNotFound(err) { + return nil, err + } + } else { + return out, nil + } + + return bs.read.Get(ctx, c) +} + +func (bs *BufferedBS) GetSize(ctx context.Context, c cid.Cid) (int, error) { + s, err := bs.read.GetSize(ctx, c) + if ipld.IsNotFound(err) || s == 0 { + return bs.write.GetSize(ctx, c) + } + + return s, err +} + +func (bs *BufferedBS) Put(ctx context.Context, blk block.Block) error { + has, err := bs.read.Has(ctx, blk.Cid()) // TODO: consider dropping this check + if err != nil { + return err + } + + if has { + return nil + } + + return bs.write.Put(ctx, blk) +} + +func (bs *BufferedBS) Has(ctx context.Context, c cid.Cid) (bool, error) { + has, err := bs.write.Has(ctx, c) + if err != nil { + return false, err + } + if has { + return true, nil + } + + return bs.read.Has(ctx, c) +} + +func (bs *BufferedBS) HashOnRead(hor bool) { + bs.read.HashOnRead(hor) + bs.write.HashOnRead(hor) +} + +func (bs *BufferedBS) PutMany(ctx context.Context, blks []block.Block) error { + return bs.write.PutMany(ctx, blks) +} + +func (bs *BufferedBS) Read() Blockstore { + return bs.read +} + +func (bs *BufferedBS) Write() Blockstore { + return bs.write +} diff --git a/venus-shared/blockstore/cache_store.go b/venus-shared/blockstore/cache_store.go new file mode 100644 index 0000000000..5551487773 --- /dev/null +++ b/venus-shared/blockstore/cache_store.go @@ -0,0 +1,78 @@ +package blockstore + +import ( + "time" + + "github.com/bluele/gcache" + tcache "github.com/patrickmn/go-cache" +) + +type IBlockCache interface { + Get(key string) (value interface{}, ok bool) + Remove(key string) + Add(key string, value interface{}) + AddWithExpire(key string, value interface{}, dur time.Duration) +} + +var _ IBlockCache = 
(*TimeCache)(nil) + +type TimeCache struct { + cache *tcache.Cache +} + +func NewTimeCache(expireTime, cleanTime time.Duration) *TimeCache { + tCache := tcache.New(expireTime, cleanTime) + return &TimeCache{cache: tCache} +} + +func (timeCache TimeCache) Get(key string) (interface{}, bool) { + return timeCache.cache.Get(key) +} + +func (timeCache TimeCache) Remove(key string) { + timeCache.cache.Delete(key) +} + +func (timeCache TimeCache) Add(key string, value interface{}) { + timeCache.cache.Set(key, value, 0) +} + +func (timeCache TimeCache) AddWithExpire(key string, value interface{}, dur time.Duration) { + timeCache.cache.Set(key, value, dur) +} + +var _ IBlockCache = (*LruCache)(nil) + +type LruCache struct { + cache gcache.Cache +} + +func NewLruCache(size int) *LruCache { + cache := gcache.New(size).LRU().Build() + go printRate(cache) + return &LruCache{cache: cache} +} + +func (l LruCache) Get(key string) (interface{}, bool) { + val, err := l.cache.Get(key) + return val, err == nil +} + +func (l LruCache) Remove(key string) { + l.cache.Remove(key) +} + +func (l LruCache) Add(key string, value interface{}) { + _ = l.cache.Set(key, value) +} + +func (l LruCache) AddWithExpire(key string, value interface{}, dur time.Duration) { + _ = l.cache.SetWithExpire(key, value, dur) +} + +func printRate(cache gcache.Cache) { + tm := time.NewTicker(time.Minute) + for range tm.C { + log.Infof("lru database cache hitrate:%f", cache.HitRate()) + } +} diff --git a/venus-shared/blockstore/cbor_gen.go b/venus-shared/blockstore/cbor_gen.go new file mode 100644 index 0000000000..49997aedc1 --- /dev/null +++ b/venus-shared/blockstore/cbor_gen.go @@ -0,0 +1,441 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package blockstore + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufNetRPCReq = []byte{132} + +func (t *NetRPCReq) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufNetRPCReq); err != nil { + return err + } + + // t.Type (blockstore.NetRPCReqType) (uint8) + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil { + return err + } + + // t.ID (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.Cid ([]cid.Cid) (slice) + if len(t.Cid) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Cid was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Cid))); err != nil { + return err + } + for _, v := range t.Cid { + if err := cbg.WriteCid(w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Cid: %w", err) + } + } + + // t.Data ([][]uint8) (slice) + if len(t.Data) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Data was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Data))); err != nil { + return err + } + for _, v := range t.Data { + if len(v) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field v was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(v))); err != nil { + return err + } + + if _, err := cw.Write(v[:]); err != nil { + return err + } + } + return nil +} + +func (t *NetRPCReq) UnmarshalCBOR(r io.Reader) (err error) { + *t = NetRPCReq{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = 
io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Type (blockstore.NetRPCReqType) (uint8) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint8 field") + } + if extra > math.MaxUint8 { + return fmt.Errorf("integer in input was too large for uint8 field") + } + t.Type = NetRPCReqType(extra) + // t.ID (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = uint64(extra) + + } + // t.Cid ([]cid.Cid) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Cid: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Cid = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("reading cid field t.Cid failed: %w", err) + } + t.Cid[i] = c + } + + // t.Data ([][]uint8) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Data: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Data = make([][]uint8, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Data[i]: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Data[i] = 
make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.Data[i][:]); err != nil { + return err + } + } + } + + return nil +} + +var lengthBufNetRPCResp = []byte{131} + +func (t *NetRPCResp) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufNetRPCResp); err != nil { + return err + } + + // t.Type (blockstore.NetRPCRespType) (uint8) + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil { + return err + } + + // t.ID (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.Data ([]uint8) (slice) + if len(t.Data) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Data was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Data))); err != nil { + return err + } + + if _, err := cw.Write(t.Data[:]); err != nil { + return err + } + return nil +} + +func (t *NetRPCResp) UnmarshalCBOR(r io.Reader) (err error) { + *t = NetRPCResp{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Type (blockstore.NetRPCRespType) (uint8) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint8 field") + } + if extra > math.MaxUint8 { + return fmt.Errorf("integer in input was too large for uint8 field") + } + t.Type = NetRPCRespType(extra) + // t.ID (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 
field") + } + t.ID = uint64(extra) + + } + // t.Data ([]uint8) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Data: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Data = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.Data[:]); err != nil { + return err + } + return nil +} + +var lengthBufNetRPCErr = []byte{131} + +func (t *NetRPCErr) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufNetRPCErr); err != nil { + return err + } + + // t.Type (blockstore.NetRPCErrType) (uint8) + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil { + return err + } + + // t.Msg (string) (string) + if len(t.Msg) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Msg was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Msg))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Msg)); err != nil { + return err + } + + // t.Cid (cid.Cid) (struct) + + if t.Cid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.Cid); err != nil { + return xerrors.Errorf("failed to write cid field t.Cid: %w", err) + } + } + + return nil +} + +func (t *NetRPCErr) UnmarshalCBOR(r io.Reader) (err error) { + *t = NetRPCErr{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Type (blockstore.NetRPCErrType) (uint8) + + maj, extra, 
err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint8 field") + } + if extra > math.MaxUint8 { + return fmt.Errorf("integer in input was too large for uint8 field") + } + t.Type = NetRPCErrType(extra) + // t.Msg (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Msg = string(sval) + } + // t.Cid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Cid: %w", err) + } + + t.Cid = &c + } + + } + return nil +} diff --git a/venus-shared/blockstore/copy.go b/venus-shared/blockstore/copy.go new file mode 100644 index 0000000000..b773078cdd --- /dev/null +++ b/venus-shared/blockstore/copy.go @@ -0,0 +1,178 @@ +package blockstore + +import ( + "bytes" + "context" + "fmt" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/trace" +) + +func CopyBlockstore(ctx context.Context, from, to Blockstore) error { + ctx, span := trace.StartSpan(ctx, "copyBlockstore") + defer span.End() + cids, err := from.AllKeysChan(ctx) + if err != nil { + return err + } + + // TODO: should probably expose better methods on the blockstore for this operation + var blks []blocks.Block + for c := range cids { + b, err := from.Get(ctx, c) + if err != nil { + return err + } + + blks = append(blks, b) + } + + return to.PutMany(ctx, blks) +} + +func linksForObj(blk blocks.Block, cb func(cid.Cid)) error { + switch blk.Cid().Prefix().Codec { + case cid.DagCBOR: + err := cbg.ScanForLinks(bytes.NewReader(blk.RawData()), cb) + if err != nil { + return fmt.Errorf("cbg.ScanForLinks: %v", err) + } + return nil + case cid.Raw: + // We implicitly have 
all children of raw blocks. + return nil + default: + return fmt.Errorf("vm flush copy method only supports dag cbor") + } +} + +func CopyParticial(ctx context.Context, from, to Blockstore, root cid.Cid) error { + ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint + defer span.End() + + var numBlocks int + var totalCopySize int + + const batchSize = 128 + const bufCount = 3 + freeBufs := make(chan []blocks.Block, bufCount) + toFlush := make(chan []blocks.Block, bufCount) + for i := 0; i < bufCount; i++ { + freeBufs <- make([]blocks.Block, 0, batchSize) + } + + errFlushChan := make(chan error) + + go func() { + for b := range toFlush { + if err := to.PutMany(ctx, b); err != nil { + close(freeBufs) + errFlushChan <- fmt.Errorf("batch put in copy: %v", err) + return + } + freeBufs <- b[:0] + } + close(errFlushChan) + close(freeBufs) + }() + + batch := <-freeBufs + batchCp := func(blk blocks.Block) error { + numBlocks++ + totalCopySize += len(blk.RawData()) + + batch = append(batch, blk) + + if len(batch) >= batchSize { + toFlush <- batch + var ok bool + batch, ok = <-freeBufs + if !ok { + return <-errFlushChan + } + } + return nil + } + + if err := copyRec(ctx, from, to, root, batchCp); err != nil { + return fmt.Errorf("copyRec: %v", err) + } + + if len(batch) > 0 { + toFlush <- batch + } + close(toFlush) // close the toFlush triggering the loop to end + err := <-errFlushChan // get error out or get nil if it was closed + if err != nil { + return err + } + + span.AddAttributes( + trace.Int64Attribute("numBlocks", int64(numBlocks)), + trace.Int64Attribute("copySize", int64(totalCopySize)), + ) + return nil +} + +func copyRec(ctx context.Context, from, to Blockstore, root cid.Cid, cp func(blocks.Block) error) error { + if root.Prefix().MhType == 0 { + // identity cid, skip + return nil + } + + blk, err := from.Get(ctx, root) + if err != nil { + return fmt.Errorf("get %s failed: %v", root, err) + } + + var lerr error + err = linksForObj(blk, func(link cid.Cid) { + if 
lerr != nil { + // Theres no erorr return on linksForObj callback :( + return + } + + prefix := link.Prefix() + if prefix.Codec == cid.FilCommitmentSealed || prefix.Codec == cid.FilCommitmentUnsealed { + return + } + + // We always have blocks inlined into CIDs, but we may not have their children. + if prefix.MhType == mh.IDENTITY { + // Unless the inlined block has no children. + if prefix.Codec == cid.Raw { + return + } + } else { + // If we have an object, we already have its children, skip the object. + has, err := to.Has(ctx, link) + if err != nil { + lerr = fmt.Errorf("has: %v", err) + return + } + if has { + return + } + } + + if err := copyRec(ctx, from, to, link, cp); err != nil { + lerr = err + return + } + }) + if err != nil { + return fmt.Errorf("linksForObj (%x): %v", blk.RawData(), err) + } + if lerr != nil { + return lerr + } + + if err := cp(blk); err != nil { + return fmt.Errorf("copy: %v", err) + } + return nil +} diff --git a/venus-shared/blockstore/mem.go b/venus-shared/blockstore/mem.go new file mode 100644 index 0000000000..d3bfcae2dd --- /dev/null +++ b/venus-shared/blockstore/mem.go @@ -0,0 +1,110 @@ +package blockstore + +import ( + "context" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" +) + +// NewMemory returns a temporary memory-backed blockstore. +func NewMemory() MemBlockstore { + return make(MemBlockstore) +} + +// MemBlockstore is a terminal blockstore that keeps blocks in memory. 
+type MemBlockstore map[string]blocks.Block + +func (m MemBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error { + delete(m, genKey(c)) + return nil +} + +func (m MemBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { + for _, k := range ks { + delete(m, genKey(k)) + } + return nil +} + +func (m MemBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { + _, ok := m[genKey(k)] + return ok, nil +} + +func (m MemBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { + b, ok := m[genKey(k)] + if !ok { + return ipld.ErrNotFound{Cid: k} + } + return callback(b.RawData()) +} + +func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { + b, ok := m[genKey(k)] + if !ok { + return nil, ipld.ErrNotFound{Cid: k} + } + if b.Cid().Prefix().Codec != k.Prefix().Codec { + return blocks.NewBlockWithCid(b.RawData(), k) + } + return b, nil +} + +// GetSize returns the CIDs mapped BlockSize +func (m MemBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { + b, ok := m[genKey(k)] + if !ok { + return 0, ipld.ErrNotFound{Cid: k} + } + return len(b.RawData()), nil +} + +// Put puts a given block to the underlying datastore +func (m MemBlockstore) Put(ctx context.Context, b blocks.Block) error { + // Convert to a basic block for safety, but try to reuse the existing + // block if it's already a basic block. + k := b.Cid() + if _, ok := b.(*blocks.BasicBlock); !ok { + // If we already have the block, abort. + if _, ok := m[genKey(k)]; ok { + return nil + } + // the error is only for debugging. + b, _ = blocks.NewBlockWithCid(b.RawData(), b.Cid()) + } + m[genKey(b.Cid())] = b + return nil +} + +// PutMany puts a slice of blocks at the same time using batching +// capabilities of the underlying datastore whenever possible. 
+func (m MemBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { + for _, b := range bs { + _ = m.Put(ctx, b) // can't fail + } + return nil +} + +// AllKeysChan returns a channel from which +// the CIDs in the Blockstore can be read. It should respect +// the given context, closing the channel if it becomes Done. +func (m MemBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + ch := make(chan cid.Cid, len(m)) + for _, b := range m { + ch <- b.Cid() + } + close(ch) + return ch, nil +} + +// HashOnRead specifies if every read block should be +// rehashed to make sure it matches its CID. +func (m MemBlockstore) HashOnRead(enabled bool) { + // no-op +} + +func genKey(cid cid.Cid) string { + return string(cid.Hash()) +} diff --git a/venus-shared/blockstore/mem_test.go b/venus-shared/blockstore/mem_test.go new file mode 100644 index 0000000000..4d4a776245 --- /dev/null +++ b/venus-shared/blockstore/mem_test.go @@ -0,0 +1,45 @@ +package blockstore + +import ( + "context" + "testing" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" +) + +func TestMemGetCodec(t *testing.T) { + ctx := context.Background() + bs := NewMemory() + + cborArr := []byte{0x82, 1, 2} + + h, err := mh.Sum(cborArr, mh.SHA2_256, -1) + require.NoError(t, err) + + rawCid := cid.NewCidV1(cid.Raw, h) + rawBlk, err := blocks.NewBlockWithCid(cborArr, rawCid) + require.NoError(t, err) + + err = bs.Put(ctx, rawBlk) + require.NoError(t, err) + + cborCid := cid.NewCidV1(cid.DagCBOR, h) + + cborBlk, err := bs.Get(ctx, cborCid) + require.NoError(t, err) + + require.Equal(t, cborCid.Prefix(), cborBlk.Cid().Prefix()) + require.EqualValues(t, cborArr, cborBlk.RawData()) + + // was allocated + require.NotEqual(t, cborBlk, rawBlk) + + gotRawBlk, err := bs.Get(ctx, rawCid) + require.NoError(t, err) + + // not allocated + require.Equal(t, rawBlk, gotRawBlk) +} diff --git 
a/venus-shared/blockstore/net.go b/venus-shared/blockstore/net.go new file mode 100644 index 0000000000..9eee66c3a1 --- /dev/null +++ b/venus-shared/blockstore/net.go @@ -0,0 +1,423 @@ +package blockstore + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "sync" + "sync/atomic" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + "github.com/libp2p/go-msgio" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +type NetRPCReqType byte + +const ( + NRpcHas NetRPCReqType = iota + NRpcGet + NRpcGetSize + NRpcPut + NRpcDelete + + // todo cancel req +) + +type NetRPCRespType byte + +const ( + NRpcOK NetRPCRespType = iota + NRpcErr + NRpcMore +) + +type NetRPCErrType byte + +const ( + NRpcErrGeneric NetRPCErrType = iota + NRpcErrNotFound +) + +type NetRPCReq struct { + Type NetRPCReqType + ID uint64 + + Cid []cid.Cid // todo maxsize? + Data [][]byte // todo maxsize? +} + +type NetRPCResp struct { + Type NetRPCRespType + ID uint64 + + // error or cids in allkeys + Data []byte // todo maxsize? 
+ + next <-chan NetRPCResp +} + +type NetRPCErr struct { + Type NetRPCErrType + + Msg string + + // in case of NRpcErrNotFound + Cid *cid.Cid +} + +type NetworkStore struct { + // note: writer is thread-safe + msgStream msgio.ReadWriteCloser + + // atomic + reqCount uint64 + + respLk sync.Mutex + + // respMap is nil after store closes + respMap map[uint64]chan<- NetRPCResp + + closing chan struct{} + closed chan struct{} + + closeLk sync.Mutex + onClose []func() +} + +func NewNetworkStore(mss msgio.ReadWriteCloser) *NetworkStore { + ns := &NetworkStore{ + msgStream: mss, + + respMap: map[uint64]chan<- NetRPCResp{}, + + closing: make(chan struct{}), + closed: make(chan struct{}), + } + + go ns.receive() + + return ns +} + +func (n *NetworkStore) shutdown(msg string) { + if err := n.msgStream.Close(); err != nil { + log.Errorw("closing netstore msg stream", "error", err) + } + + nerr := NetRPCErr{ + Type: NRpcErrGeneric, + Msg: msg, + Cid: nil, + } + + var errb bytes.Buffer + if err := nerr.MarshalCBOR(&errb); err != nil { + log.Errorw("netstore shutdown: error marshaling error", "err", err) + } + + n.respLk.Lock() + for id, resps := range n.respMap { + resps <- NetRPCResp{ + Type: NRpcErr, + ID: id, + Data: errb.Bytes(), + } + } + + n.respMap = nil + + n.respLk.Unlock() +} + +func (n *NetworkStore) OnClose(cb func()) { + n.closeLk.Lock() + defer n.closeLk.Unlock() + + select { + case <-n.closed: + cb() + default: + n.onClose = append(n.onClose, cb) + } +} + +func (n *NetworkStore) receive() { + defer func() { + n.closeLk.Lock() + defer n.closeLk.Unlock() + + close(n.closed) + if n.onClose != nil { + for _, f := range n.onClose { + f() + } + } + }() + + for { + select { + case <-n.closing: + n.shutdown("netstore stopping") + return + default: + } + + msg, err := n.msgStream.ReadMsg() + if err != nil { + n.shutdown(fmt.Sprintf("netstore ReadMsg: %s", err)) + return + } + + var resp NetRPCResp + if err := resp.UnmarshalCBOR(bytes.NewReader(msg)); err != nil { + 
n.shutdown(fmt.Sprintf("unmarshaling netstore response: %s", err)) + return + } + + n.msgStream.ReleaseMsg(msg) + + n.respLk.Lock() + if ch, ok := n.respMap[resp.ID]; ok { + if resp.Type == NRpcMore { + nch := make(chan NetRPCResp, 1) + resp.next = nch + n.respMap[resp.ID] = nch + } else { + delete(n.respMap, resp.ID) + } + + ch <- resp + } + n.respLk.Unlock() + } +} + +func (n *NetworkStore) sendRpc(rt NetRPCReqType, cids []cid.Cid, data [][]byte) (uint64, <-chan NetRPCResp, error) { + rid := atomic.AddUint64(&n.reqCount, 1) + + respCh := make(chan NetRPCResp, 1) // todo pool? + + n.respLk.Lock() + if n.respMap == nil { + n.respLk.Unlock() + return 0, nil, xerrors.Errorf("netstore closed") + } + n.respMap[rid] = respCh + n.respLk.Unlock() + + req := NetRPCReq{ + Type: rt, + ID: rid, + Cid: cids, + Data: data, + } + + var rbuf bytes.Buffer // todo buffer pool + if err := req.MarshalCBOR(&rbuf); err != nil { + n.respLk.Lock() + defer n.respLk.Unlock() + + if n.respMap == nil { + return 0, nil, xerrors.Errorf("netstore closed") + } + delete(n.respMap, rid) + + return 0, nil, err + } + + if err := n.msgStream.WriteMsg(rbuf.Bytes()); err != nil { + n.respLk.Lock() + defer n.respLk.Unlock() + + if n.respMap == nil { + return 0, nil, xerrors.Errorf("netstore closed") + } + delete(n.respMap, rid) + + return 0, nil, err + } + + return rid, respCh, nil +} + +func (n *NetworkStore) waitResp(ctx context.Context, rch <-chan NetRPCResp, rid uint64) (NetRPCResp, error) { + select { + case resp := <-rch: + if resp.Type == NRpcErr { + var e NetRPCErr + if err := e.UnmarshalCBOR(bytes.NewReader(resp.Data)); err != nil { + return NetRPCResp{}, xerrors.Errorf("unmarshaling error data: %w", err) + } + + var err error + switch e.Type { + case NRpcErrNotFound: + if e.Cid != nil { + err = ipld.ErrNotFound{ + Cid: *e.Cid, + } + } else { + err = xerrors.Errorf("block not found, but cid was null") + } + case NRpcErrGeneric: + err = xerrors.Errorf("generic error") + default: + err = 
xerrors.Errorf("unknown error type") + } + + return NetRPCResp{}, xerrors.Errorf("netstore error response: %s (%w)", e.Msg, err) + } + + return resp, nil + case <-ctx.Done(): + // todo send cancel req + + n.respLk.Lock() + if n.respMap != nil { + delete(n.respMap, rid) + } + n.respLk.Unlock() + + return NetRPCResp{}, ctx.Err() + } +} + +func (n *NetworkStore) Has(ctx context.Context, c cid.Cid) (bool, error) { + req, rch, err := n.sendRpc(NRpcHas, []cid.Cid{c}, nil) + if err != nil { + return false, err + } + + resp, err := n.waitResp(ctx, rch, req) + if err != nil { + return false, err + } + + if len(resp.Data) != 1 { + return false, xerrors.Errorf("expected reposnse length to be 1 byte") + } + switch resp.Data[0] { + case cbg.CborBoolTrue[0]: + return true, nil + case cbg.CborBoolFalse[0]: + return false, nil + default: + return false, xerrors.Errorf("has: bad response: %x", resp.Data[0]) + } +} + +func (n *NetworkStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + req, rch, err := n.sendRpc(NRpcGet, []cid.Cid{c}, nil) + if err != nil { + return nil, err + } + + resp, err := n.waitResp(ctx, rch, req) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(resp.Data, c) +} + +func (n *NetworkStore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error { + req, rch, err := n.sendRpc(NRpcGet, []cid.Cid{c}, nil) + if err != nil { + return err + } + + resp, err := n.waitResp(ctx, rch, req) + if err != nil { + return err + } + + return callback(resp.Data) // todo return buf to pool +} + +func (n *NetworkStore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + req, rch, err := n.sendRpc(NRpcGetSize, []cid.Cid{c}, nil) + if err != nil { + return 0, err + } + + resp, err := n.waitResp(ctx, rch, req) + if err != nil { + return 0, err + } + + if len(resp.Data) != 4 { + return 0, xerrors.Errorf("expected getsize response to be 4 bytes, was %d", resp.Data) + } + + return int(binary.LittleEndian.Uint32(resp.Data)), 
nil +} + +func (n *NetworkStore) Put(ctx context.Context, block blocks.Block) error { + return n.PutMany(ctx, []blocks.Block{block}) +} + +func (n *NetworkStore) PutMany(ctx context.Context, blocks []blocks.Block) error { + // todo pool + cids := make([]cid.Cid, len(blocks)) + blkDatas := make([][]byte, len(blocks)) + for i, block := range blocks { + cids[i] = block.Cid() + blkDatas[i] = block.RawData() + } + + req, rch, err := n.sendRpc(NRpcPut, cids, blkDatas) + if err != nil { + return err + } + + _, err = n.waitResp(ctx, rch, req) + if err != nil { + return err + } + + return nil +} + +func (n *NetworkStore) DeleteBlock(ctx context.Context, c cid.Cid) error { + return n.DeleteMany(ctx, []cid.Cid{c}) +} + +func (n *NetworkStore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + req, rch, err := n.sendRpc(NRpcDelete, cids, nil) + if err != nil { + return err + } + + _, err = n.waitResp(ctx, rch, req) + if err != nil { + return err + } + + return nil +} + +func (n *NetworkStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, xerrors.Errorf("not supported") +} + +func (n *NetworkStore) HashOnRead(enabled bool) { + // todo +} + +func (n *NetworkStore) Stop(ctx context.Context) error { + close(n.closing) + + select { + case <-n.closed: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +var _ Blockstore = &NetworkStore{} diff --git a/venus-shared/blockstore/net_serve.go b/venus-shared/blockstore/net_serve.go new file mode 100644 index 0000000000..0a2310743f --- /dev/null +++ b/venus-shared/blockstore/net_serve.go @@ -0,0 +1,237 @@ +package blockstore + +import ( + "bytes" + "context" + "encoding/binary" + + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + "github.com/libp2p/go-msgio" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +type NetworkStoreHandler struct { + msgStream msgio.ReadWriteCloser + + bs Blockstore +} + +// NOTE: This code 
isn't yet hardened to accept untrusted input. See TODOs here and in net.go +func HandleNetBstoreStream(ctx context.Context, bs Blockstore, mss msgio.ReadWriteCloser) *NetworkStoreHandler { + ns := &NetworkStoreHandler{ + msgStream: mss, + bs: bs, + } + + go ns.handle(ctx) + + return ns +} + +func (h *NetworkStoreHandler) handle(ctx context.Context) { + defer func() { + if err := h.msgStream.Close(); err != nil { + log.Errorw("error closing blockstore stream", "error", err) + } + }() + + for { + var req NetRPCReq + + ms, err := h.msgStream.ReadMsg() + if err != nil { + log.Warnw("bstore stream err", "error", err) + return + } + + if err := req.UnmarshalCBOR(bytes.NewReader(ms)); err != nil { + return + } + + h.msgStream.ReleaseMsg(ms) + + switch req.Type { + case NRpcHas: + if len(req.Cid) != 1 { + if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil { + log.Warnw("writing error response", "error", err) + return + } + continue + } + + res, err := h.bs.Has(ctx, req.Cid[0]) + if err != nil { + if err := h.respondError(req.ID, err, req.Cid[0]); err != nil { + log.Warnw("writing error response", "error", err) + return + } + continue + } + + var resData [1]byte + if res { + resData[0] = cbg.CborBoolTrue[0] + } else { + resData[0] = cbg.CborBoolFalse[0] + } + + if err := h.respond(req.ID, NRpcOK, resData[:]); err != nil { + log.Warnw("writing response", "error", err) + return + } + + case NRpcGet: + if len(req.Cid) != 1 { + if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil { + log.Warnw("writing error response", "error", err) + return + } + continue + } + + err := h.bs.View(ctx, req.Cid[0], func(bdata []byte) error { + return h.respond(req.ID, NRpcOK, bdata) + }) + if err != nil { + if err := h.respondError(req.ID, err, req.Cid[0]); err != nil { + log.Warnw("writing error response", "error", err) + return + } + continue + } + + case NRpcGetSize: + if len(req.Cid) != 1 { + if err 
:= h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil { + log.Warnw("writing error response", "error", err) + return + } + continue + } + + sz, err := h.bs.GetSize(ctx, req.Cid[0]) + if err != nil { + if err := h.respondError(req.ID, err, req.Cid[0]); err != nil { + log.Warnw("writing error response", "error", err) + return + } + continue + } + + var resData [4]byte + binary.LittleEndian.PutUint32(resData[:], uint32(sz)) + + if err := h.respond(req.ID, NRpcOK, resData[:]); err != nil { + log.Warnw("writing response", "error", err) + return + } + + case NRpcPut: + blocks := make([]block.Block, len(req.Cid)) + + if len(req.Cid) != len(req.Data) { + if err := h.respondError(req.ID, xerrors.New("cid count didn't match data count"), cid.Undef); err != nil { + log.Warnw("writing error response", "error", err) + } + return + } + + for i := range req.Cid { + blocks[i], err = block.NewBlockWithCid(req.Data[i], req.Cid[i]) + if err != nil { + log.Warnw("make block", "error", err) + return + } + } + + err := h.bs.PutMany(ctx, blocks) + if err != nil { + if err := h.respondError(req.ID, err, cid.Undef); err != nil { + log.Warnw("writing error response", "error", err) + return + } + continue + } + + if err := h.respond(req.ID, NRpcOK, []byte{}); err != nil { + log.Warnw("writing response", "error", err) + return + } + case NRpcDelete: + err := h.bs.DeleteMany(ctx, req.Cid) + if err != nil { + if err := h.respondError(req.ID, err, cid.Undef); err != nil { + log.Warnw("writing error response", "error", err) + return + } + continue + } + + if err := h.respond(req.ID, NRpcOK, []byte{}); err != nil { + log.Warnw("writing response", "error", err) + return + } + default: + if err := h.respondError(req.ID, xerrors.New("unsupported request type"), cid.Undef); err != nil { + log.Warnw("writing error response", "error", err) + return + } + continue + } + } +} + +func (h *NetworkStoreHandler) respondError(req uint64, uerr error, c cid.Cid) error { + 
var resp NetRPCResp + resp.ID = req + resp.Type = NRpcErr + + nerr := NetRPCErr{ + Type: NRpcErrGeneric, + Msg: uerr.Error(), + } + if ipld.IsNotFound(uerr) { + nerr.Type = NRpcErrNotFound + nerr.Cid = &c + } + + var edata bytes.Buffer + if err := nerr.MarshalCBOR(&edata); err != nil { + return xerrors.Errorf("marshaling error data: %w", err) + } + + resp.Data = edata.Bytes() + + var msg bytes.Buffer + if err := resp.MarshalCBOR(&msg); err != nil { + return xerrors.Errorf("marshaling error response: %w", err) + } + + if err := h.msgStream.WriteMsg(msg.Bytes()); err != nil { + return xerrors.Errorf("write error response: %w", err) + } + + return nil +} + +func (h *NetworkStoreHandler) respond(req uint64, rt NetRPCRespType, data []byte) error { + var resp NetRPCResp + resp.ID = req + resp.Type = rt + resp.Data = data + + var msg bytes.Buffer + if err := resp.MarshalCBOR(&msg); err != nil { + return xerrors.Errorf("marshaling response: %w", err) + } + + if err := h.msgStream.WriteMsg(msg.Bytes()); err != nil { + return xerrors.Errorf("write response: %w", err) + } + + return nil +} diff --git a/venus-shared/blockstore/net_test.go b/venus-shared/blockstore/net_test.go new file mode 100644 index 0000000000..9b91514631 --- /dev/null +++ b/venus-shared/blockstore/net_test.go @@ -0,0 +1,63 @@ +package blockstore + +import ( + "context" + "io" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + block "github.com/ipfs/go-block-format" + ipld "github.com/ipfs/go-ipld-format" + "github.com/libp2p/go-msgio" + "github.com/stretchr/testify/require" +) + +func TestNetBstore(t *testing.T) { + tf.UnitTest(t) + ctx := context.Background() + + cr, sw := io.Pipe() + sr, cw := io.Pipe() + + cm := msgio.Combine(msgio.NewWriter(cw), msgio.NewReader(cr)) + sm := msgio.Combine(msgio.NewWriter(sw), msgio.NewReader(sr)) + + bbs := NewTemporarySync() + _ = HandleNetBstoreStream(ctx, bbs, sm) + + nbs := NewNetworkStore(cm) + + tb1 := block.NewBlock([]byte("aoeu")) 
+ + h, err := nbs.Has(ctx, tb1.Cid()) + require.NoError(t, err) + require.False(t, h) + + err = nbs.Put(ctx, tb1) + require.NoError(t, err) + + h, err = nbs.Has(ctx, tb1.Cid()) + require.NoError(t, err) + require.True(t, h) + + sz, err := nbs.GetSize(ctx, tb1.Cid()) + require.NoError(t, err) + require.Equal(t, 4, sz) + + err = nbs.DeleteBlock(ctx, tb1.Cid()) + require.NoError(t, err) + + h, err = nbs.Has(ctx, tb1.Cid()) + require.NoError(t, err) + require.False(t, h) + + _, err = nbs.Get(ctx, tb1.Cid()) + require.True(t, ipld.IsNotFound(err)) + + err = nbs.Put(ctx, tb1) + require.NoError(t, err) + + b, err := nbs.Get(ctx, tb1.Cid()) + require.NoError(t, err) + require.Equal(t, "aoeu", string(b.RawData())) +} diff --git a/venus-shared/blockstore/net_ws.go b/venus-shared/blockstore/net_ws.go new file mode 100644 index 0000000000..5c9a70d843 --- /dev/null +++ b/venus-shared/blockstore/net_ws.go @@ -0,0 +1,100 @@ +package blockstore + +import ( + "bytes" + "context" + + "github.com/gorilla/websocket" + "github.com/libp2p/go-msgio" + "golang.org/x/xerrors" +) + +type wsWrapper struct { + wc *websocket.Conn + + nextMsg []byte +} + +func (w *wsWrapper) Read(b []byte) (int, error) { + return 0, xerrors.New("read unsupported") +} + +func (w *wsWrapper) ReadMsg() ([]byte, error) { + if w.nextMsg != nil { + nm := w.nextMsg + w.nextMsg = nil + return nm, nil + } + + mt, r, err := w.wc.NextReader() + if err != nil { + return nil, err + } + + switch mt { + case websocket.BinaryMessage, websocket.TextMessage: + default: + return nil, xerrors.Errorf("unexpected message type") + } + + // todo pool + // todo limit sizes + var mbuf bytes.Buffer + if _, err := mbuf.ReadFrom(r); err != nil { + return nil, err + } + + return mbuf.Bytes(), nil +} + +func (w *wsWrapper) ReleaseMsg(bytes []byte) { + // todo use a pool +} + +func (w *wsWrapper) NextMsgLen() (int, error) { + if w.nextMsg != nil { + return len(w.nextMsg), nil + } + + mt, msg, err := w.wc.ReadMessage() + if err != nil { + 
return 0, err + } + + switch mt { + case websocket.BinaryMessage, websocket.TextMessage: + default: + return 0, xerrors.Errorf("unexpected message type") + } + + w.nextMsg = msg + return len(w.nextMsg), nil +} + +func (w *wsWrapper) Write(bytes []byte) (int, error) { + return 0, xerrors.New("write unsupported") +} + +func (w *wsWrapper) WriteMsg(bytes []byte) error { + return w.wc.WriteMessage(websocket.BinaryMessage, bytes) +} + +func (w *wsWrapper) Close() error { + return w.wc.Close() +} + +var _ msgio.ReadWriteCloser = &wsWrapper{} + +func wsConnToMio(wc *websocket.Conn) msgio.ReadWriteCloser { + return &wsWrapper{ + wc: wc, + } +} + +func HandleNetBstoreWS(ctx context.Context, bs Blockstore, wc *websocket.Conn) *NetworkStoreHandler { + return HandleNetBstoreStream(ctx, bs, wsConnToMio(wc)) +} + +func NewNetworkStoreWS(wc *websocket.Conn) *NetworkStore { + return NewNetworkStore(wsConnToMio(wc)) +} diff --git a/venus-shared/blockstore/safe_cid_set.go b/venus-shared/blockstore/safe_cid_set.go new file mode 100644 index 0000000000..051800074d --- /dev/null +++ b/venus-shared/blockstore/safe_cid_set.go @@ -0,0 +1,97 @@ +package blockstore + +import ( + "sync" + + "github.com/ipfs/go-cid" +) + +// Set is a implementation of a set of Cids, that is, a structure +// to which holds a single copy of every Cids that is added to it. +type Set struct { + set map[cid.Cid]struct{} + lk sync.Mutex +} + +// NewSet initializes and returns a new Set. +func NewSet() *Set { + return &Set{set: make(map[cid.Cid]struct{}), lk: sync.Mutex{}} +} + +// Add puts a Cid in the Set. +func (s *Set) Add(c cid.Cid) { + s.lk.Lock() + defer s.lk.Unlock() + s.set[c] = struct{}{} +} + +// Add puts a Cid in the Set. +func (s *Set) add(c cid.Cid) { + s.set[c] = struct{}{} +} + +// Has returns if the Set contains a given Cid. +func (s *Set) Has(c cid.Cid) bool { + s.lk.Lock() + defer s.lk.Unlock() + _, ok := s.set[c] + return ok +} + +// Has returns if the Set contains a given Cid. 
+func (s *Set) has(c cid.Cid) bool { + _, ok := s.set[c] + return ok +} + +// Remove deletes a Cid from the Set. +func (s *Set) Remove(c cid.Cid) { + s.lk.Lock() + defer s.lk.Unlock() + delete(s.set, c) +} + +// Len returns how many elements the Set has. +func (s *Set) Len() int { + s.lk.Lock() + defer s.lk.Unlock() + return len(s.set) +} + +// Keys returns the Cids in the set. +func (s *Set) Keys() []cid.Cid { + s.lk.Lock() + defer s.lk.Unlock() + out := make([]cid.Cid, 0, len(s.set)) + for k := range s.set { + out = append(out, k) + } + return out +} + +// Visit adds a Cid to the set only if it is +// not in it already. +func (s *Set) Visit(c cid.Cid) bool { + s.lk.Lock() + defer s.lk.Unlock() + if !s.has(c) { + s.add(c) + return true + } + + return false +} + +// ForEach allows to run a custom function on each +// Cid in the set. +func (s *Set) ForEach(f func(c cid.Cid) error) error { + s.lk.Lock() + defer s.lk.Unlock() + for c := range s.set { + err := f(c) + if err != nil { + return err + } + } + return nil +} diff --git a/venus-shared/blockstore/syncstore.go b/venus-shared/blockstore/syncstore.go new file mode 100644 index 0000000000..122dcd4a92 --- /dev/null +++ b/venus-shared/blockstore/syncstore.go @@ -0,0 +1,84 @@ +package blockstore + +import ( + "context" + "sync" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +var _ Blockstore = (*SyncStore)(nil) + +type SyncStore struct { + mu sync.RWMutex + bs MemBlockstore // specifically use a memStore to save indirection overhead. 
+} + +func (m *SyncStore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.View(ctx, cid, callback) +} + +func (m *SyncStore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.DeleteMany(ctx, cids) +} + +func (m *SyncStore) DeleteBlock(ctx context.Context, k cid.Cid) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.DeleteBlock(ctx, k) +} + +func (m *SyncStore) Has(ctx context.Context, k cid.Cid) (bool, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.Has(ctx, k) +} + +func (m *SyncStore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.Get(ctx, k) +} + +// GetSize returns the CIDs mapped BlockSize +func (m *SyncStore) GetSize(ctx context.Context, k cid.Cid) (int, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.GetSize(ctx, k) +} + +// Put puts a given block to the underlying datastore +func (m *SyncStore) Put(ctx context.Context, b blocks.Block) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.Put(ctx, b) +} + +// PutMany puts a slice of blocks at the same time using batching +// capabilities of the underlying datastore whenever possible. +func (m *SyncStore) PutMany(ctx context.Context, bs []blocks.Block) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.PutMany(ctx, bs) +} + +// AllKeysChan returns a channel from which +// the CIDs in the blockstore can be read. It should respect +// the given context, closing the channel if it becomes Done. +func (m *SyncStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + m.mu.RLock() + defer m.mu.RUnlock() + // this blockstore implementation doesn't do any async work. + return m.bs.AllKeysChan(ctx) +} + +// HashOnRead specifies if every read block should be +// rehashed to make sure it matches its CID. 
+func (m *SyncStore) HashOnRead(enabled bool) { + // noop +} diff --git a/venus-shared/blockstore/view_blockstore.go b/venus-shared/blockstore/view_blockstore.go new file mode 100644 index 0000000000..2d6ad629fa --- /dev/null +++ b/venus-shared/blockstore/view_blockstore.go @@ -0,0 +1,197 @@ +package blockstore + +import ( + "context" + "errors" + "fmt" + + "github.com/dgraph-io/badger/v2" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/keytransform" + dshelp "github.com/ipfs/go-ipfs-ds-help" + ipld "github.com/ipfs/go-ipld-format" +) + +var _ Blockstore = (*TxBlockstore)(nil) + +type TxBlockstore struct { + tx *badger.Txn + cache IBlockCache + keyTransform *keytransform.PrefixTransform +} + +func (txBlockstore *TxBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { + return errors.New("readonly blocksgtore") +} + +func (txBlockstore *TxBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + return errors.New("readonly blocksgtore") +} + +func (txBlockstore *TxBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + key := txBlockstore.ConvertKey(cid) + if txBlockstore.cache != nil { + if _, has := txBlockstore.cache.Get(key.String()); has { + return true, nil + } + } + + _, err := txBlockstore.tx.Get(key.Bytes()) + switch err { + case badger.ErrKeyNotFound: + return false, nil + case nil: + return true, nil + default: + return false, fmt.Errorf("failed to check if block exists in badger blockstore: %w", err) + } +} + +func (txBlockstore *TxBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + if !cid.Defined() { + return nil, ipld.ErrNotFound{Cid: cid} + } + + key := txBlockstore.ConvertKey(cid) + if txBlockstore.cache != nil { + if val, has := txBlockstore.cache.Get(key.String()); has { + return val.(blocks.Block), nil + } + } + + var val []byte + var err error + var item *badger.Item + switch item, err = 
txBlockstore.tx.Get(key.Bytes()); err { + case nil: + val, err = item.ValueCopy(nil) + case badger.ErrKeyNotFound: + return nil, ipld.ErrNotFound{Cid: cid} + default: + return nil, fmt.Errorf("failed to get block from badger blockstore: %w", err) + } + if err != nil { + return nil, err + } + + blk, err := blocks.NewBlockWithCid(val, cid) + if err != nil { + return nil, err + } + + txBlockstore.cache.Add(key.String(), blk) + return blk, nil +} + +func (txBlockstore *TxBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error { + if !cid.Defined() { + return ipld.ErrNotFound{Cid: cid} + } + + key := txBlockstore.ConvertKey(cid) + if txBlockstore.cache != nil { + if val, has := txBlockstore.cache.Get(key.String()); has { + return callback(val.(blocks.Block).RawData()) + } + } + + var val []byte + var err error + var item *badger.Item + switch item, err = txBlockstore.tx.Get(key.Bytes()); err { + case nil: + val, err = item.ValueCopy(nil) + case badger.ErrKeyNotFound: + return ipld.ErrNotFound{Cid: cid} + default: + return fmt.Errorf("failed to get block from badger blockstore: %w", err) + } + if err != nil { + return err + } + + blk, err := blocks.NewBlockWithCid(val, cid) + if err != nil { + return err + } + + txBlockstore.cache.Add(key.String(), blk) + return callback(blk.RawData()) +} + +func (txBlockstore *TxBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + key := txBlockstore.ConvertKey(cid) + if txBlockstore.cache != nil { + if val, has := txBlockstore.cache.Get(key.String()); has { + return len(val.(blocks.Block).RawData()), nil + } + } + + var size int + var err error + var item *badger.Item + switch item, err = txBlockstore.tx.Get(key.Bytes()); err { + case nil: + size = int(item.ValueSize()) + case badger.ErrKeyNotFound: + return -1, ipld.ErrNotFound{Cid: cid} + default: + return -1, fmt.Errorf("failed to get block size from badger blockstore: %w", err) + } + return size, err +} + +func (txBlockstore 
*TxBlockstore) Put(ctx context.Context, block blocks.Block) error { + return errors.New("readonly blocksgtore") +} + +func (txBlockstore *TxBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { + return errors.New("readonly blocksgtore") +} + +func (txBlockstore *TxBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + opts := badger.IteratorOptions{PrefetchSize: 100} + iter := txBlockstore.tx.NewIterator(opts) + + ch := make(chan cid.Cid) + go func() { + defer close(ch) + defer iter.Close() + + // NewCidV1 makes a copy of the multihash buffer, so we can reuse it to + // contain allocs. + for iter.Rewind(); iter.Valid(); iter.Next() { + if ctx.Err() != nil { + return // context has fired. + } + + k := iter.Item().Key() + // need to convert to key.Key using key.KeyFromDsKey. + + dsKey := txBlockstore.keyTransform.InvertKey(datastore.RawKey(string(k))) + bk, err := dshelp.BinaryFromDsKey(dsKey) + if err != nil { + log.Warnf("error parsing key from binary: %s", err) + continue + } + cidKey := cid.NewCidV1(cid.Raw, bk) + select { + case <-ctx.Done(): + return + case ch <- cidKey: + } + } + }() + return ch, nil +} + +func (txBlockstore *TxBlockstore) ConvertKey(cid cid.Cid) datastore.Key { + key := dshelp.MultihashToDsKey(cid.Hash()) + return txBlockstore.keyTransform.ConvertKey(key) +} + +func (txBlockstore *TxBlockstore) HashOnRead(enabled bool) { + log.Warnf("called HashOnRead on badger blockstore; function not supported; ignoring") +} diff --git a/venus-shared/compatible-checks/actor-sources.txt b/venus-shared/compatible-checks/actor-sources.txt new file mode 100644 index 0000000000..c16eddcabd --- /dev/null +++ b/venus-shared/compatible-checks/actor-sources.txt @@ -0,0 +1,157 @@ +SOURCES IN chain/actors: + actor_cids.go + adt/adt.go + adt/diff_adt.go + adt/diff_adt_test.go + adt/store.go + aerrors/error.go + aerrors/error_test.go + aerrors/wrap.go + agen/main.go + builtin/account/account.go + builtin/account/v0.go + 
builtin/account/v2.go + builtin/account/v3.go + builtin/account/v4.go + builtin/account/v5.go + builtin/account/v6.go + builtin/account/v7.go + builtin/account/v8.go + builtin/account/v9.go + builtin/builtin.go + builtin/cron/cron.go + builtin/cron/v0.go + builtin/cron/v2.go + builtin/cron/v3.go + builtin/cron/v4.go + builtin/cron/v5.go + builtin/cron/v6.go + builtin/cron/v7.go + builtin/cron/v8.go + builtin/cron/v9.go + builtin/datacap/datacap.go + builtin/datacap/util.go + builtin/datacap/v9.go + builtin/init/diff.go + builtin/init/init.go + builtin/init/v0.go + builtin/init/v2.go + builtin/init/v3.go + builtin/init/v4.go + builtin/init/v5.go + builtin/init/v6.go + builtin/init/v7.go + builtin/init/v8.go + builtin/init/v9.go + builtin/market/diff.go + builtin/market/market.go + builtin/market/v0.go + builtin/market/v2.go + builtin/market/v3.go + builtin/market/v4.go + builtin/market/v5.go + builtin/market/v6.go + builtin/market/v7.go + builtin/market/v8.go + builtin/market/v9.go + builtin/miner/diff.go + builtin/miner/diff_deadlines.go + builtin/miner/miner.go + builtin/miner/utils.go + builtin/miner/v0.go + builtin/miner/v2.go + builtin/miner/v3.go + builtin/miner/v4.go + builtin/miner/v5.go + builtin/miner/v6.go + builtin/miner/v7.go + builtin/miner/v8.go + builtin/miner/v9.go + builtin/multisig/diff.go + builtin/multisig/message0.go + builtin/multisig/message2.go + builtin/multisig/message3.go + builtin/multisig/message4.go + builtin/multisig/message5.go + builtin/multisig/message6.go + builtin/multisig/message7.go + builtin/multisig/message8.go + builtin/multisig/message9.go + builtin/multisig/multisig.go + builtin/multisig/v0.go + builtin/multisig/v2.go + builtin/multisig/v3.go + builtin/multisig/v4.go + builtin/multisig/v5.go + builtin/multisig/v6.go + builtin/multisig/v7.go + builtin/multisig/v8.go + builtin/multisig/v9.go + builtin/paych/message0.go + builtin/paych/message2.go + builtin/paych/message3.go + builtin/paych/message4.go + 
builtin/paych/message5.go + builtin/paych/message6.go + builtin/paych/message7.go + builtin/paych/message8.go + builtin/paych/message9.go + builtin/paych/mock/mock.go + builtin/paych/paych.go + builtin/paych/v0.go + builtin/paych/v2.go + builtin/paych/v3.go + builtin/paych/v4.go + builtin/paych/v5.go + builtin/paych/v6.go + builtin/paych/v7.go + builtin/paych/v8.go + builtin/paych/v9.go + builtin/power/diff.go + builtin/power/power.go + builtin/power/v0.go + builtin/power/v2.go + builtin/power/v3.go + builtin/power/v4.go + builtin/power/v5.go + builtin/power/v6.go + builtin/power/v7.go + builtin/power/v8.go + builtin/power/v9.go + builtin/registry.go + builtin/reward/reward.go + builtin/reward/v0.go + builtin/reward/v2.go + builtin/reward/v3.go + builtin/reward/v4.go + builtin/reward/v5.go + builtin/reward/v6.go + builtin/reward/v7.go + builtin/reward/v8.go + builtin/reward/v9.go + builtin/system/system.go + builtin/system/v0.go + builtin/system/v2.go + builtin/system/v3.go + builtin/system/v4.go + builtin/system/v5.go + builtin/system/v6.go + builtin/system/v7.go + builtin/system/v8.go + builtin/system/v9.go + builtin/verifreg/util.go + builtin/verifreg/v0.go + builtin/verifreg/v2.go + builtin/verifreg/v3.go + builtin/verifreg/v4.go + builtin/verifreg/v5.go + builtin/verifreg/v6.go + builtin/verifreg/v7.go + builtin/verifreg/v8.go + builtin/verifreg/v9.go + builtin/verifreg/verifreg.go + manifest.go + params.go + policy/policy.go + policy/policy_test.go + version.go diff --git a/venus-shared/compatible-checks/actor-templates.txt b/venus-shared/compatible-checks/actor-templates.txt new file mode 100644 index 0000000000..6b7ef4c1eb --- /dev/null +++ b/venus-shared/compatible-checks/actor-templates.txt @@ -0,0 +1,30 @@ +TEMPLATES IN chain/actors: + builtin/account/actor.go.template + builtin/account/state.go.template + builtin/builtin.go.template + builtin/cron/actor.go.template + builtin/cron/state.go.template + builtin/datacap/actor.go.template + 
builtin/datacap/state.go.template + builtin/init/actor.go.template + builtin/init/state.go.template + builtin/market/actor.go.template + builtin/market/state.go.template + builtin/miner/actor.go.template + builtin/miner/state.go.template + builtin/multisig/actor.go.template + builtin/multisig/message.go.template + builtin/multisig/state.go.template + builtin/paych/actor.go.template + builtin/paych/message.go.template + builtin/paych/state.go.template + builtin/power/actor.go.template + builtin/power/state.go.template + builtin/registry.go.template + builtin/reward/actor.go.template + builtin/reward/state.go.template + builtin/system/actor.go.template + builtin/system/state.go.template + builtin/verifreg/actor.go.template + builtin/verifreg/state.go.template + policy/policy.go.template diff --git a/venus-shared/compatible-checks/api-checksum.txt b/venus-shared/compatible-checks/api-checksum.txt new file mode 100644 index 0000000000..338b334da0 --- /dev/null +++ b/venus-shared/compatible-checks/api-checksum.txt @@ -0,0 +1,458 @@ +v0api.FullNode: + AuthNew: In=2, Out=2, CheckSum=faeef9dc68d6f2533bdf7d8f22ef902d + AuthVerify: In=2, Out=2, CheckSum=3cb63db9d6f8869fd4e9da953cc9cdcb + BeaconGetEntry: In=2, Out=2, CheckSum=c74f5c0f4039207ea45c11bfe3319c38 + ChainDeleteObj: In=2, Out=1, CheckSum=50d40a0afa31dabb8a61693aabea61b7 + ChainExport: In=4, Out=2, CheckSum=aaf98926e0ba36ec808a96de76a3cd98 + ChainGetBlock: In=2, Out=2, CheckSum=0d2bba23d84a51413bbbebe218dcbcf9 + ChainGetBlockMessages: In=2, Out=2, CheckSum=944437b34bb952bf3d50aa6415c9e0d3 + ChainGetGenesis: In=1, Out=2, CheckSum=6d1e4c4d8184dc8d645a56278f14cfad + ChainGetMessage: In=2, Out=2, CheckSum=d1c0ae931458a2a7b07de152ca3de558 + ChainGetMessagesInTipset: In=2, Out=2, CheckSum=6c0bfa48ffcacdfa9dbed1b9e23ad88e + ChainGetNode: In=2, Out=2, CheckSum=66a718f5dbb8b0fcddcf761b58744e38 + ChainGetParentMessages: In=2, Out=2, CheckSum=e659caaae75cd643a7100b1c8cc3125a + ChainGetParentReceipts: In=2, Out=2, 
CheckSum=de820c5da3d8d036d854ce42ea9d85a3 + ChainGetPath: In=3, Out=2, CheckSum=a745af1a69b379493504c432b15839e7 + ChainGetRandomnessFromBeacon: In=5, Out=2, CheckSum=555559ae8c2c861ff4a73cd5f5955b25 + ChainGetRandomnessFromTickets: In=5, Out=2, CheckSum=555559ae8c2c861ff4a73cd5f5955b25 + ChainGetTipSet: In=2, Out=2, CheckSum=854d79b3a35822ab54e7459ad95816ad + ChainGetTipSetByHeight: In=3, Out=2, CheckSum=9831731a08357a0247d802268b57a497 + ChainHasObj: In=2, Out=2, CheckSum=7fe71bcffa1b110db106e0104e98a32f + ChainHead: In=1, Out=2, CheckSum=6d1e4c4d8184dc8d645a56278f14cfad + ChainNotify: In=1, Out=2, CheckSum=9525148e93a5b83600ebfbde4d24f3e9 + ChainPutObj: In=2, Out=1, CheckSum=8f14a26d66dd2a48d50d58af2ff7d722 + ChainReadObj: In=2, Out=2, CheckSum=6fd9244d87bf5d14fb5e79b0dbc0940d + ChainSetHead: In=2, Out=1, CheckSum=cdfe593ac791e823186abb77bfad49a0 + ChainStatObj: In=3, Out=2, CheckSum=9db2a0d97998daaf9c15e7c3d6ffe82d + ChainTipSetWeight: In=2, Out=2, CheckSum=22ac180fa0c583b4036666bfc155fd63 + ClientCalcCommP: In=2, Out=2, CheckSum=f83c6f3b9d823d22dfcd13e5aee9fb6b + ClientCancelDataTransfer: In=4, Out=1, CheckSum=36cccd49df4625759a10cc5494fb6250 + ClientCancelRetrievalDeal: In=2, Out=1, CheckSum=d289f4c3758ed7d6ab991b2cf569c969 + ClientDataTransferUpdates: In=1, Out=2, CheckSum=2d33b419e4b215c06ab0e8b66b5f8430 + ClientDealPieceCID: In=2, Out=2, CheckSum=93537a844ba6b74ec1ce82c887359a75 + ClientDealSize: In=2, Out=2, CheckSum=cca1dd5f58136af28546bdbdef2f948a + ClientFindData: In=3, Out=2, CheckSum=232da9eb987d92a76b37572c8f44bf53 + ClientGenCar: In=3, Out=1, CheckSum=b8a0c89fc2c7c16464ac968865e9c156 + ClientGetDealInfo: In=2, Out=2, CheckSum=e67f0e7b88aff099976b0f275b601c9c + ClientGetDealStatus: In=2, Out=2, CheckSum=1297eb9ae9a9ed209cfcb5899910f415 + ClientGetDealUpdates: In=1, Out=2, CheckSum=459cb2faa373d8dcf4cf8e33a2854366 + ClientGetRetrievalUpdates: In=1, Out=2, CheckSum=9390e4328ba9fe6c034341d574fd6f3d + ClientHasLocal: In=2, Out=2, 
CheckSum=7fe71bcffa1b110db106e0104e98a32f + ClientImport: In=2, Out=2, CheckSum=8c5886f6f2eb7b5db886d639c62e5808 + ClientListDataTransfers: In=1, Out=2, CheckSum=aa113d3aaf47f5b34fc947d26d58d001 + ClientListDeals: In=1, Out=2, CheckSum=fddf5a2bf3dd94a5aec97248ff7db918 + ClientListImports: In=1, Out=2, CheckSum=1966c3c721d57e0a728d3b51ba3a61d4 + ClientListRetrievals: In=1, Out=2, CheckSum=d5485e62b97cf31e1880d54808968895 + ClientMinerQueryOffer: In=4, Out=2, CheckSum=f57fabb5f04c1086302b43bd5087b9e2 + ClientQueryAsk: In=3, Out=2, CheckSum=929f732f0ba36128dc6e0250bee150e8 + ClientRemoveImport: In=2, Out=1, CheckSum=a3fcb78bf5bac553baac584941c1771f + ClientRestartDataTransfer: In=4, Out=1, CheckSum=36cccd49df4625759a10cc5494fb6250 + ClientRetrieve: In=3, Out=1, CheckSum=3b17e80e3a84f40959f87bff6359a49d + ClientRetrieveTryRestartInsufficientFunds: In=2, Out=1, CheckSum=98613e8d41d3a2e52b37f97908560bd9 + ClientRetrieveWithEvents: In=3, Out=2, CheckSum=56e4fa49192938527c1f089b446f0860 + ClientStartDeal: In=2, Out=2, CheckSum=85256e9ef86dbb8ebeb76097c3eea078 + ClientStatelessDeal: In=2, Out=2, CheckSum=85256e9ef86dbb8ebeb76097c3eea078 + Closing: In=1, Out=2, CheckSum=3e0dae65a2378c12be14cee8a60a3bfe + CreateBackup: In=2, Out=1, CheckSum=7b0679c2c73ab9606a7da21c5251ad19 + Discover: In=1, Out=2, CheckSum=09640e20c6d71c3e96c7a56c8698acc9 + GasEstimateFeeCap: In=4, Out=2, CheckSum=0fcac02de1d21c9ac6e10696a6499eba + GasEstimateGasLimit: In=3, Out=2, CheckSum=4d1bd57eef0ee90d4c2e89f097d0604d + GasEstimateGasPremium: In=5, Out=2, CheckSum=550724ed37e2fdaa64e55147e82214b1 + GasEstimateMessageGas: In=4, Out=2, CheckSum=6ff6179b579feed33897d96429504624 + ID: In=1, Out=2, CheckSum=1635810444d2b13b381cbefece853ba7 + LogAlerts: In=1, Out=2, CheckSum=c9262fa7c93e891ec80868e0b83a2222 + LogList: In=1, Out=2, CheckSum=c6d763b6ec7190283b7c648e735725c0 + LogSetLevel: In=3, Out=1, CheckSum=ffdc3b95db02b9026a12c29d899e0059 + MarketAddBalance: In=4, Out=2, 
CheckSum=a8810156a1b234f90924c1f966f44538 + MarketGetReserved: In=2, Out=2, CheckSum=f1426bf8d7a1afdeaae0cd561fbbfbb0 + MarketReleaseFunds: In=3, Out=1, CheckSum=c20c4060f72c1a1ab8176a4936292a1e + MarketReserveFunds: In=4, Out=2, CheckSum=a8810156a1b234f90924c1f966f44538 + MarketWithdraw: In=4, Out=2, CheckSum=a8810156a1b234f90924c1f966f44538 + MinerCreateBlock: In=2, Out=2, CheckSum=c8749e2acc2143226c34fdf3965fd4b9 + MinerGetBaseInfo: In=4, Out=2, CheckSum=a9ead3443999aba71e9444c0cd9730a2 + MpoolBatchPush: In=2, Out=2, CheckSum=4f34d25c5ddbe15922545c0c28c40830 + MpoolBatchPushMessage: In=3, Out=2, CheckSum=66b4628bc63c8ca4bc31db0393ade01b + MpoolBatchPushUntrusted: In=2, Out=2, CheckSum=4f34d25c5ddbe15922545c0c28c40830 + MpoolClear: In=2, Out=1, CheckSum=790632b52ba8d2fb863afad93556528c + MpoolGetConfig: In=1, Out=2, CheckSum=b28b0828ec4c43c705feb84536e66aa8 + MpoolGetNonce: In=2, Out=2, CheckSum=2f0992ed1f0be8ff9bee0c3f0a39e1e1 + MpoolPending: In=2, Out=2, CheckSum=867ae9cbeb080683492ec76cc0c1ea7f + MpoolPush: In=2, Out=2, CheckSum=3b29294f39a940d77be58a07e30e8d4b + MpoolPushMessage: In=3, Out=2, CheckSum=97e029b4d408862c42035318e3c21d05 + MpoolPushUntrusted: In=2, Out=2, CheckSum=3b29294f39a940d77be58a07e30e8d4b + MpoolSelect: In=3, Out=2, CheckSum=a524ad1b1c0d7d27b21c69c62e926f9c + MpoolSetConfig: In=2, Out=1, CheckSum=f1ed0cdb9842cfb5176188c281577fba + MpoolSub: In=1, Out=2, CheckSum=337ebc027637bda66141bad6af2f6cc4 + MsigAddApprove: In=7, Out=2, CheckSum=4dbd86148df51d0975f2f5c6adac7c00 + MsigAddCancel: In=6, Out=2, CheckSum=afb744caa58c307fffa99d052c97e9c7 + MsigAddPropose: In=5, Out=2, CheckSum=6d40bd8a8f2563f75402a23915a23000 + MsigApprove: In=4, Out=2, CheckSum=09bc2481f1717ddc5147c1e8eb76bbbc + MsigApproveTxnHash: In=9, Out=2, CheckSum=7715b69bd67ece799d3da978457a2915 + MsigCancel: In=8, Out=2, CheckSum=6a9360010be0849bd6c588dd6759af41 + MsigCreate: In=7, Out=2, CheckSum=ef93a3c107e73884b4073f01da738bce + MsigGetAvailableBalance: In=3, Out=2, 
CheckSum=a05010da3c73edfba49c3b5d28a216cd + MsigGetPending: In=3, Out=2, CheckSum=895d889866adf330c83b8dffdbd27fac + MsigGetVested: In=4, Out=2, CheckSum=33d14f7f35b833b5b9dd6b2188c84324 + MsigGetVestingSchedule: In=3, Out=2, CheckSum=b9caf183f11d2a8e611b1daea98ad69e + MsigPropose: In=7, Out=2, CheckSum=f297212379eac659948e303e5be88c0b + MsigRemoveSigner: In=5, Out=2, CheckSum=6d40bd8a8f2563f75402a23915a23000 + MsigSwapApprove: In=7, Out=2, CheckSum=17c75be766c59016bc4a4d583b297d2f + MsigSwapCancel: In=6, Out=2, CheckSum=e2222a10bb1cf80d1fdb56be22b07800 + MsigSwapPropose: In=5, Out=2, CheckSum=f207e1bd168bfd4258481aacf03b4ed4 + NetAddrsListen: In=1, Out=2, CheckSum=f0ad0033f727ad97d0aa84b2dfee9bab + NetAgentVersion: In=2, Out=2, CheckSum=67e7eff9de9e4548c2cb4bc0f8c84a8f + NetAutoNatStatus: In=1, Out=2, CheckSum=05d1ab4460587ed623f3ce1e1f193a83 + NetBandwidthStats: In=1, Out=2, CheckSum=62e1b527d4bd35cb8181ae920b3f99f3 + NetBandwidthStatsByPeer: In=1, Out=2, CheckSum=7e4b993e8865f45cf23de7f8530ffe1e + NetBandwidthStatsByProtocol: In=1, Out=2, CheckSum=059d41fef438a2f075d26d0ea72be672 + NetBlockAdd: In=2, Out=1, CheckSum=bb826402a0a2406da30862a2c08af435 + NetBlockList: In=1, Out=2, CheckSum=174b57557c7d7487b5b204f961e33abc + NetBlockRemove: In=2, Out=1, CheckSum=bb826402a0a2406da30862a2c08af435 + NetConnect: In=2, Out=1, CheckSum=4e114a54b8340f10912a0411fa428967 + NetConnectedness: In=2, Out=2, CheckSum=b6120671f89af497075d3a3e6211dee8 + NetDisconnect: In=2, Out=1, CheckSum=5ac3aa2dccdc990c45ca8ef35b1bdb9f + NetFindPeer: In=2, Out=2, CheckSum=9f9de93a69acd82e4195b253d17a9e8b + NetLimit: In=2, Out=2, CheckSum=9a35d6e94508e633a1cc5c5dc3f983eb + NetPeerInfo: In=2, Out=2, CheckSum=2f530e3029ddebf897c3d3029ad763bf + NetPeers: In=1, Out=2, CheckSum=165dfc6f0c55818a2e2be3fcacea8045 + NetPing: In=2, Out=2, CheckSum=31c80ab2bc55adad54a85e38b45430b7 + NetProtectAdd: In=2, Out=1, CheckSum=774c426db2e5936d7575ddb774b4c2a8 + NetProtectList: In=1, Out=2, 
CheckSum=0e08860c7745c6ee6c790e36e3d7f073 + NetProtectRemove: In=2, Out=1, CheckSum=774c426db2e5936d7575ddb774b4c2a8 + NetPubsubScores: In=1, Out=2, CheckSum=66c7a1061de3d6e11d39ce90692aa885 + NetSetLimit: In=3, Out=1, CheckSum=f591c226feead6f0c1d9442a2320cd1e + NetStat: In=2, Out=2, CheckSum=82ea34376644e023a4314cde3fb1e4fc + PaychAllocateLane: In=2, Out=2, CheckSum=2f0992ed1f0be8ff9bee0c3f0a39e1e1 + PaychAvailableFunds: In=2, Out=2, CheckSum=f40942f65ce181601e7bc49e6378a89c + PaychAvailableFundsByFromTo: In=3, Out=2, CheckSum=829ff29d78db755f3c13ab3259adb32b + PaychCollect: In=2, Out=2, CheckSum=fdb746bab5f269b63dc0256d50570d81 + PaychGet: In=4, Out=2, CheckSum=f7fb571ad0461c404a0789f6b8c38501 + PaychGetWaitReady: In=2, Out=2, CheckSum=59546a25118c9e0dbb3ad569d3ec0c6f + PaychList: In=1, Out=2, CheckSum=566d37a33889023f9f73c632fa4e8279 + PaychNewPayment: In=4, Out=2, CheckSum=8bb33e38d75917df1357fcf071335ba9 + PaychSettle: In=2, Out=2, CheckSum=fdb746bab5f269b63dc0256d50570d81 + PaychStatus: In=2, Out=2, CheckSum=ae911e02219f1412acb95e95a4065478 + PaychVoucherAdd: In=5, Out=2, CheckSum=4bd3099043e7f90e37c1f30476cd7ff5 + PaychVoucherCheckSpendable: In=5, Out=2, CheckSum=b9d2610776265f877521a9a9521b3a43 + PaychVoucherCheckValid: In=3, Out=1, CheckSum=e991badd914254dda0d6c41b7eb36958 + PaychVoucherCreate: In=4, Out=2, CheckSum=e5113a58d351abf5cadd77b46f690c2d + PaychVoucherList: In=2, Out=2, CheckSum=c4a81e1915ceee53e3ecde9f1ae30b4c + PaychVoucherSubmit: In=5, Out=2, CheckSum=03f7a7f3a90849ea5da8eedd01643a1c + Session: In=1, Out=2, CheckSum=cdb04ef6a97114c8f24f456a2e70f1cd + Shutdown: In=1, Out=1, CheckSum=c39be30cc5a8826024fcf4d23e7017d6 + StateAccountKey: In=3, Out=2, CheckSum=9b6f9fdaea5bb22c60772266c418d98f + StateActorCodeCIDs: In=2, Out=2, CheckSum=d52881195cc100121739e6c85c45dd9f + StateActorManifestCID: In=2, Out=2, CheckSum=e04ec685bb3d32a892d45c5215586589 + StateAllMinerFaults: In=3, Out=2, CheckSum=a17e05b21e1ecc8da867e2f76df6c46c + StateCall: In=3, Out=2, 
CheckSum=b33ab6c7df31d805c256c8ab6691b085 + StateChangedActors: In=3, Out=2, CheckSum=cbc0cd36e495552a6672caab9f839468 + StateCirculatingSupply: In=2, Out=2, CheckSum=22ac180fa0c583b4036666bfc155fd63 + StateCompute: In=4, Out=2, CheckSum=1d429c389354255c7f865f00a59e3888 + StateDealProviderCollateralBounds: In=4, Out=2, CheckSum=5231e44843c0b74a04371e1e7170d4b2 + StateDecodeParams: In=5, Out=2, CheckSum=8c6311be4dc064a657368516c33e1307 + StateGetActor: In=3, Out=2, CheckSum=adcd0bbd36e3ab94f777c7cb3df1fb34 + StateGetAllocation: In=4, Out=2, CheckSum=306a8d2eb65b1bd436f2230bb96666b5 + StateGetAllocationForPendingDeal: In=3, Out=2, CheckSum=3d2912f0c80606576bb697499140ae2f + StateGetAllocations: In=3, Out=2, CheckSum=570266653b3643cf22e6b47609db7e55 + StateGetClaim: In=4, Out=2, CheckSum=79bc50cf65a4d6b102267fd020b97510 + StateGetClaims: In=3, Out=2, CheckSum=06f189feb545746b5e123702405416fb + StateGetNetworkParams: In=1, Out=2, CheckSum=7bc6ff254ba803762ffb166c6d96a921 + StateGetRandomnessFromBeacon: In=5, Out=2, CheckSum=b98225c36bf011979dbcad0fb938e659 + StateGetRandomnessFromTickets: In=5, Out=2, CheckSum=b98225c36bf011979dbcad0fb938e659 + StateGetReceipt: In=3, Out=2, CheckSum=03f4f567eaa55a6ccad8b4a4c95b590a + StateListActors: In=2, Out=2, CheckSum=57bcc4526adaf4b0582c0b117d39b042 + StateListMessages: In=4, Out=2, CheckSum=f12ce9e8a127bf320fb0b289a19b4ea6 + StateListMiners: In=2, Out=2, CheckSum=57bcc4526adaf4b0582c0b117d39b042 + StateLookupID: In=3, Out=2, CheckSum=9b6f9fdaea5bb22c60772266c418d98f + StateMarketBalance: In=3, Out=2, CheckSum=bbab976bd25166200f737c94fc970bc7 + StateMarketDeals: In=2, Out=2, CheckSum=46b5eef922e8252939bf00e1e80ae89e + StateMarketParticipants: In=2, Out=2, CheckSum=acc9019d0b6e389743c6f992350f407c + StateMarketStorageDeal: In=3, Out=2, CheckSum=94af302d39beba9f0ce7648f4118b6aa + StateMinerActiveSectors: In=3, Out=2, CheckSum=a6cc03c30ff0302d5dcd3002e55585b7 + StateMinerAvailableBalance: In=3, Out=2, 
CheckSum=a05010da3c73edfba49c3b5d28a216cd + StateMinerDeadlines: In=3, Out=2, CheckSum=5b128c75ba4953740906520cca96b962 + StateMinerFaults: In=3, Out=2, CheckSum=9d26d848f93597964f751b43edd3d476 + StateMinerInfo: In=3, Out=2, CheckSum=4ba684a8519aa97d4df405cee3496e7c + StateMinerInitialPledgeCollateral: In=4, Out=2, CheckSum=b456dc2029fe2ac176ade895bda96dd5 + StateMinerPartitions: In=4, Out=2, CheckSum=b3cba1da3bd87c433cb8f9df7f7edc09 + StateMinerPower: In=3, Out=2, CheckSum=0e70a6360616c25624118181f764d7df + StateMinerPreCommitDepositForPower: In=4, Out=2, CheckSum=b456dc2029fe2ac176ade895bda96dd5 + StateMinerProvingDeadline: In=3, Out=2, CheckSum=8c6037a054ad720ecfb3d0c3f4f90fe6 + StateMinerRecoveries: In=3, Out=2, CheckSum=9d26d848f93597964f751b43edd3d476 + StateMinerSectorAllocated: In=4, Out=2, CheckSum=2accd56a9e9196fa1ca85a1d60e19fc2 + StateMinerSectorCount: In=3, Out=2, CheckSum=573483cbcf3ccdd38063b9ce16453dc2 + StateMinerSectors: In=4, Out=2, CheckSum=532dd2620e4430f0ae9113b75104e12f + StateNetworkName: In=1, Out=2, CheckSum=afb82130640a26dcd9d7010f31c69572 + StateNetworkVersion: In=2, Out=2, CheckSum=47de92e3b59793ade53abd96c347bace + StateReadState: In=3, Out=2, CheckSum=f53f39943eea93d5f3dd64f1389e4c64 + StateReplay: In=3, Out=2, CheckSum=ec89143eb01290212c3ce0032f62c5fc + StateSearchMsg: In=2, Out=2, CheckSum=e4186ab56a9d2e3c99174110e3f697dc + StateSearchMsgLimited: In=3, Out=2, CheckSum=8990ed018d033c44ebb38c0d077bc6d0 + StateSectorExpiration: In=4, Out=2, CheckSum=bb669c8b66b6f74c681b4345c9e4cc8f + StateSectorGetInfo: In=4, Out=2, CheckSum=8d176fd8a2054079b0017f795f9a4d56 + StateSectorPartition: In=4, Out=2, CheckSum=e3743df55a360243a32ac64ce1282c53 + StateSectorPreCommitInfo: In=4, Out=2, CheckSum=f0961d63ab3679dafcf6563a83126d56 + StateVMCirculatingSupplyInternal: In=2, Out=2, CheckSum=05c2114e08be095cece55db19e214d2c + StateVerifiedClientStatus: In=3, Out=2, CheckSum=e33ae4cd2315832f2d6f2aa74b68c34e + StateVerifiedRegistryRootKey: In=2, Out=2, 
CheckSum=5ad3a497ee24e321c780a69b8d2f0936 + StateVerifierStatus: In=3, Out=2, CheckSum=e33ae4cd2315832f2d6f2aa74b68c34e + StateWaitMsg: In=3, Out=2, CheckSum=f997714e2214b7122462163c5e7bc9a2 + StateWaitMsgLimited: In=4, Out=2, CheckSum=0b59c44082d62b85343ca111441315a1 + SyncCheckBad: In=2, Out=2, CheckSum=ba06470da0ca1d6cc2f9ada7f0288a6c + SyncCheckpoint: In=2, Out=1, CheckSum=cdfe593ac791e823186abb77bfad49a0 + SyncIncomingBlocks: In=1, Out=2, CheckSum=f6ad051ba2ce73511f74f9c08032acc3 + SyncMarkBad: In=2, Out=1, CheckSum=50d40a0afa31dabb8a61693aabea61b7 + SyncState: In=1, Out=2, CheckSum=7004fd3e7bf60990cb1695fa5883d08f + SyncSubmitBlock: In=2, Out=1, CheckSum=04cdc7641df5628e2ef8c90584936142 + SyncUnmarkAllBad: In=1, Out=1, CheckSum=c39be30cc5a8826024fcf4d23e7017d6 + SyncUnmarkBad: In=2, Out=1, CheckSum=50d40a0afa31dabb8a61693aabea61b7 + SyncValidateTipset: In=2, Out=2, CheckSum=a244b60fe32e540879ec4eeded71136b + Version: In=1, Out=2, CheckSum=0e78a1023c652297e66079f521e11624 + WalletBalance: In=2, Out=2, CheckSum=f1426bf8d7a1afdeaae0cd561fbbfbb0 + WalletDefaultAddress: In=1, Out=2, CheckSum=5591bbacb9b8345a4a07a149c963df55 + WalletDelete: In=2, Out=1, CheckSum=98613e8d41d3a2e52b37f97908560bd9 + WalletExport: In=2, Out=2, CheckSum=5db217de8a3ec9ecbed4cc583e473991 + WalletHas: In=2, Out=2, CheckSum=9ad76c8c583af5b5a5d0202c0a3fe7f4 + WalletImport: In=2, Out=2, CheckSum=617245aaab0381dc634a2ad50dd440b3 + WalletList: In=1, Out=2, CheckSum=566d37a33889023f9f73c632fa4e8279 + WalletNew: In=2, Out=2, CheckSum=266c59d3ee6f0b3a66e8dbdec099ef06 + WalletSetDefault: In=2, Out=1, CheckSum=98613e8d41d3a2e52b37f97908560bd9 + WalletSign: In=3, Out=2, CheckSum=b5a8800f8b60b92c4d99e9cb0fcff086 + WalletSignMessage: In=3, Out=2, CheckSum=c9f0ea6123a189cfeab06ce6bdeda7b3 + WalletValidateAddress: In=2, Out=2, CheckSum=610b35f7aa87b32463e5c2a9b4bd3f55 + WalletVerify: In=4, Out=2, CheckSum=9b22231e95efdfb94d9d55038f500636 + +api.FullNode: + AuthNew: In=2, Out=2, 
CheckSum=faeef9dc68d6f2533bdf7d8f22ef902d + AuthVerify: In=2, Out=2, CheckSum=3cb63db9d6f8869fd4e9da953cc9cdcb + ChainBlockstoreInfo: In=1, Out=2, CheckSum=09640e20c6d71c3e96c7a56c8698acc9 + ChainCheckBlockstore: In=1, Out=1, CheckSum=c39be30cc5a8826024fcf4d23e7017d6 + ChainDeleteObj: In=2, Out=1, CheckSum=50d40a0afa31dabb8a61693aabea61b7 + ChainExport: In=4, Out=2, CheckSum=aaf98926e0ba36ec808a96de76a3cd98 + ChainGetBlock: In=2, Out=2, CheckSum=0d2bba23d84a51413bbbebe218dcbcf9 + ChainGetBlockMessages: In=2, Out=2, CheckSum=944437b34bb952bf3d50aa6415c9e0d3 + ChainGetGenesis: In=1, Out=2, CheckSum=6d1e4c4d8184dc8d645a56278f14cfad + ChainGetMessage: In=2, Out=2, CheckSum=d1c0ae931458a2a7b07de152ca3de558 + ChainGetMessagesInTipset: In=2, Out=2, CheckSum=6c0bfa48ffcacdfa9dbed1b9e23ad88e + ChainGetNode: In=2, Out=2, CheckSum=66a718f5dbb8b0fcddcf761b58744e38 + ChainGetParentMessages: In=2, Out=2, CheckSum=e659caaae75cd643a7100b1c8cc3125a + ChainGetParentReceipts: In=2, Out=2, CheckSum=de820c5da3d8d036d854ce42ea9d85a3 + ChainGetPath: In=3, Out=2, CheckSum=a745af1a69b379493504c432b15839e7 + ChainGetTipSet: In=2, Out=2, CheckSum=854d79b3a35822ab54e7459ad95816ad + ChainGetTipSetAfterHeight: In=3, Out=2, CheckSum=9831731a08357a0247d802268b57a497 + ChainGetTipSetByHeight: In=3, Out=2, CheckSum=9831731a08357a0247d802268b57a497 + ChainHasObj: In=2, Out=2, CheckSum=7fe71bcffa1b110db106e0104e98a32f + ChainHead: In=1, Out=2, CheckSum=6d1e4c4d8184dc8d645a56278f14cfad + ChainNotify: In=1, Out=2, CheckSum=9525148e93a5b83600ebfbde4d24f3e9 + ChainPrune: In=2, Out=1, CheckSum=af574df2ee0daa338f8d54b5f5da15a3 + ChainPutObj: In=2, Out=1, CheckSum=8f14a26d66dd2a48d50d58af2ff7d722 + ChainReadObj: In=2, Out=2, CheckSum=6fd9244d87bf5d14fb5e79b0dbc0940d + ChainSetHead: In=2, Out=1, CheckSum=cdfe593ac791e823186abb77bfad49a0 + ChainStatObj: In=3, Out=2, CheckSum=9db2a0d97998daaf9c15e7c3d6ffe82d + ChainTipSetWeight: In=2, Out=2, CheckSum=22ac180fa0c583b4036666bfc155fd63 + ClientCalcCommP: In=2, 
Out=2, CheckSum=f83c6f3b9d823d22dfcd13e5aee9fb6b + ClientCancelDataTransfer: In=4, Out=1, CheckSum=36cccd49df4625759a10cc5494fb6250 + ClientCancelRetrievalDeal: In=2, Out=1, CheckSum=d289f4c3758ed7d6ab991b2cf569c969 + ClientDataTransferUpdates: In=1, Out=2, CheckSum=2d33b419e4b215c06ab0e8b66b5f8430 + ClientDealPieceCID: In=2, Out=2, CheckSum=93537a844ba6b74ec1ce82c887359a75 + ClientDealSize: In=2, Out=2, CheckSum=cca1dd5f58136af28546bdbdef2f948a + ClientExport: In=3, Out=1, CheckSum=77c3a3632e06654aee2dade7aa467433 + ClientFindData: In=3, Out=2, CheckSum=232da9eb987d92a76b37572c8f44bf53 + ClientGenCar: In=3, Out=1, CheckSum=b8a0c89fc2c7c16464ac968865e9c156 + ClientGetDealInfo: In=2, Out=2, CheckSum=e67f0e7b88aff099976b0f275b601c9c + ClientGetDealStatus: In=2, Out=2, CheckSum=1297eb9ae9a9ed209cfcb5899910f415 + ClientGetDealUpdates: In=1, Out=2, CheckSum=459cb2faa373d8dcf4cf8e33a2854366 + ClientGetRetrievalUpdates: In=1, Out=2, CheckSum=9390e4328ba9fe6c034341d574fd6f3d + ClientHasLocal: In=2, Out=2, CheckSum=7fe71bcffa1b110db106e0104e98a32f + ClientImport: In=2, Out=2, CheckSum=8c5886f6f2eb7b5db886d639c62e5808 + ClientListDataTransfers: In=1, Out=2, CheckSum=aa113d3aaf47f5b34fc947d26d58d001 + ClientListDeals: In=1, Out=2, CheckSum=fddf5a2bf3dd94a5aec97248ff7db918 + ClientListImports: In=1, Out=2, CheckSum=1966c3c721d57e0a728d3b51ba3a61d4 + ClientListRetrievals: In=1, Out=2, CheckSum=d5485e62b97cf31e1880d54808968895 + ClientMinerQueryOffer: In=4, Out=2, CheckSum=f57fabb5f04c1086302b43bd5087b9e2 + ClientQueryAsk: In=3, Out=2, CheckSum=c6a0b229078419aa2fbb69f747bd6e89 + ClientRemoveImport: In=2, Out=1, CheckSum=a3fcb78bf5bac553baac584941c1771f + ClientRestartDataTransfer: In=4, Out=1, CheckSum=36cccd49df4625759a10cc5494fb6250 + ClientRetrieve: In=2, Out=2, CheckSum=b941d6f05cbec209e2f347b6826717e1 + ClientRetrieveTryRestartInsufficientFunds: In=2, Out=1, CheckSum=98613e8d41d3a2e52b37f97908560bd9 + ClientRetrieveWait: In=2, Out=1, 
CheckSum=d289f4c3758ed7d6ab991b2cf569c969 + ClientStartDeal: In=2, Out=2, CheckSum=85256e9ef86dbb8ebeb76097c3eea078 + ClientStatelessDeal: In=2, Out=2, CheckSum=85256e9ef86dbb8ebeb76097c3eea078 + Closing: In=1, Out=2, CheckSum=3e0dae65a2378c12be14cee8a60a3bfe + CreateBackup: In=2, Out=1, CheckSum=7b0679c2c73ab9606a7da21c5251ad19 + Discover: In=1, Out=2, CheckSum=09640e20c6d71c3e96c7a56c8698acc9 + GasEstimateFeeCap: In=4, Out=2, CheckSum=0fcac02de1d21c9ac6e10696a6499eba + GasEstimateGasLimit: In=3, Out=2, CheckSum=4d1bd57eef0ee90d4c2e89f097d0604d + GasEstimateGasPremium: In=5, Out=2, CheckSum=550724ed37e2fdaa64e55147e82214b1 + GasEstimateMessageGas: In=4, Out=2, CheckSum=6ff6179b579feed33897d96429504624 + ID: In=1, Out=2, CheckSum=1635810444d2b13b381cbefece853ba7 + LogAlerts: In=1, Out=2, CheckSum=c9262fa7c93e891ec80868e0b83a2222 + LogList: In=1, Out=2, CheckSum=c6d763b6ec7190283b7c648e735725c0 + LogSetLevel: In=3, Out=1, CheckSum=ffdc3b95db02b9026a12c29d899e0059 + MarketAddBalance: In=4, Out=2, CheckSum=a8810156a1b234f90924c1f966f44538 + MarketGetReserved: In=2, Out=2, CheckSum=f1426bf8d7a1afdeaae0cd561fbbfbb0 + MarketReleaseFunds: In=3, Out=1, CheckSum=c20c4060f72c1a1ab8176a4936292a1e + MarketReserveFunds: In=4, Out=2, CheckSum=a8810156a1b234f90924c1f966f44538 + MarketWithdraw: In=4, Out=2, CheckSum=a8810156a1b234f90924c1f966f44538 + MinerCreateBlock: In=2, Out=2, CheckSum=c8749e2acc2143226c34fdf3965fd4b9 + MinerGetBaseInfo: In=4, Out=2, CheckSum=a9ead3443999aba71e9444c0cd9730a2 + MpoolBatchPush: In=2, Out=2, CheckSum=4f34d25c5ddbe15922545c0c28c40830 + MpoolBatchPushMessage: In=3, Out=2, CheckSum=66b4628bc63c8ca4bc31db0393ade01b + MpoolBatchPushUntrusted: In=2, Out=2, CheckSum=4f34d25c5ddbe15922545c0c28c40830 + MpoolCheckMessages: In=2, Out=2, CheckSum=4bf8491c98028f49b05ff0f1d6e3eaf7 + MpoolCheckPendingMessages: In=2, Out=2, CheckSum=f4a139f9278ea6fb2158a05c24c769fb + MpoolCheckReplaceMessages: In=2, Out=2, CheckSum=d8312bb585f0696c77b4d02c84293cdd + MpoolClear: 
In=2, Out=1, CheckSum=790632b52ba8d2fb863afad93556528c + MpoolGetConfig: In=1, Out=2, CheckSum=b28b0828ec4c43c705feb84536e66aa8 + MpoolGetNonce: In=2, Out=2, CheckSum=2f0992ed1f0be8ff9bee0c3f0a39e1e1 + MpoolPending: In=2, Out=2, CheckSum=867ae9cbeb080683492ec76cc0c1ea7f + MpoolPush: In=2, Out=2, CheckSum=3b29294f39a940d77be58a07e30e8d4b + MpoolPushMessage: In=3, Out=2, CheckSum=97e029b4d408862c42035318e3c21d05 + MpoolPushUntrusted: In=2, Out=2, CheckSum=3b29294f39a940d77be58a07e30e8d4b + MpoolSelect: In=3, Out=2, CheckSum=a524ad1b1c0d7d27b21c69c62e926f9c + MpoolSetConfig: In=2, Out=1, CheckSum=f1ed0cdb9842cfb5176188c281577fba + MpoolSub: In=1, Out=2, CheckSum=337ebc027637bda66141bad6af2f6cc4 + MsigAddApprove: In=7, Out=2, CheckSum=f622ab6c302078342d7834f0c32ddbdb + MsigAddCancel: In=6, Out=2, CheckSum=31b5b364ddd3e12c9d310a9868c6fda2 + MsigAddPropose: In=5, Out=2, CheckSum=801a2539853f3e7206fd87fc4171c40b + MsigApprove: In=4, Out=2, CheckSum=3384d4133b5559461f919c3bdb1dac41 + MsigApproveTxnHash: In=9, Out=2, CheckSum=9a7460af699e6aa82452157078046018 + MsigCancel: In=4, Out=2, CheckSum=3384d4133b5559461f919c3bdb1dac41 + MsigCancelTxnHash: In=8, Out=2, CheckSum=912010a9da971230c20e5503503e33e2 + MsigCreate: In=7, Out=2, CheckSum=3b8196718c58238e8579cd1e2b8368d9 + MsigGetAvailableBalance: In=3, Out=2, CheckSum=a05010da3c73edfba49c3b5d28a216cd + MsigGetPending: In=3, Out=2, CheckSum=895d889866adf330c83b8dffdbd27fac + MsigGetVested: In=4, Out=2, CheckSum=33d14f7f35b833b5b9dd6b2188c84324 + MsigGetVestingSchedule: In=3, Out=2, CheckSum=b9caf183f11d2a8e611b1daea98ad69e + MsigPropose: In=7, Out=2, CheckSum=22d693fea428a547510ddd48222c5f7a + MsigRemoveSigner: In=5, Out=2, CheckSum=801a2539853f3e7206fd87fc4171c40b + MsigSwapApprove: In=7, Out=2, CheckSum=15b73f7c86aa2009ab368b43d96b5485 + MsigSwapCancel: In=6, Out=2, CheckSum=47441127af7eaa6580d48b2daa13fd1d + MsigSwapPropose: In=5, Out=2, CheckSum=2766f47b61843d276e20ab487ac9849a + NetAddrsListen: In=1, Out=2, 
CheckSum=f0ad0033f727ad97d0aa84b2dfee9bab + NetAgentVersion: In=2, Out=2, CheckSum=67e7eff9de9e4548c2cb4bc0f8c84a8f + NetAutoNatStatus: In=1, Out=2, CheckSum=05d1ab4460587ed623f3ce1e1f193a83 + NetBandwidthStats: In=1, Out=2, CheckSum=62e1b527d4bd35cb8181ae920b3f99f3 + NetBandwidthStatsByPeer: In=1, Out=2, CheckSum=7e4b993e8865f45cf23de7f8530ffe1e + NetBandwidthStatsByProtocol: In=1, Out=2, CheckSum=059d41fef438a2f075d26d0ea72be672 + NetBlockAdd: In=2, Out=1, CheckSum=bb826402a0a2406da30862a2c08af435 + NetBlockList: In=1, Out=2, CheckSum=174b57557c7d7487b5b204f961e33abc + NetBlockRemove: In=2, Out=1, CheckSum=bb826402a0a2406da30862a2c08af435 + NetConnect: In=2, Out=1, CheckSum=4e114a54b8340f10912a0411fa428967 + NetConnectedness: In=2, Out=2, CheckSum=b6120671f89af497075d3a3e6211dee8 + NetDisconnect: In=2, Out=1, CheckSum=5ac3aa2dccdc990c45ca8ef35b1bdb9f + NetFindPeer: In=2, Out=2, CheckSum=9f9de93a69acd82e4195b253d17a9e8b + NetLimit: In=2, Out=2, CheckSum=9a35d6e94508e633a1cc5c5dc3f983eb + NetPeerInfo: In=2, Out=2, CheckSum=2f530e3029ddebf897c3d3029ad763bf + NetPeers: In=1, Out=2, CheckSum=165dfc6f0c55818a2e2be3fcacea8045 + NetPing: In=2, Out=2, CheckSum=31c80ab2bc55adad54a85e38b45430b7 + NetProtectAdd: In=2, Out=1, CheckSum=774c426db2e5936d7575ddb774b4c2a8 + NetProtectList: In=1, Out=2, CheckSum=0e08860c7745c6ee6c790e36e3d7f073 + NetProtectRemove: In=2, Out=1, CheckSum=774c426db2e5936d7575ddb774b4c2a8 + NetPubsubScores: In=1, Out=2, CheckSum=66c7a1061de3d6e11d39ce90692aa885 + NetSetLimit: In=3, Out=1, CheckSum=f591c226feead6f0c1d9442a2320cd1e + NetStat: In=2, Out=2, CheckSum=82ea34376644e023a4314cde3fb1e4fc + NodeStatus: In=2, Out=2, CheckSum=b46530b68171c85301db86c1b7c19824 + PaychAllocateLane: In=2, Out=2, CheckSum=2f0992ed1f0be8ff9bee0c3f0a39e1e1 + PaychAvailableFunds: In=2, Out=2, CheckSum=f40942f65ce181601e7bc49e6378a89c + PaychAvailableFundsByFromTo: In=3, Out=2, CheckSum=829ff29d78db755f3c13ab3259adb32b + PaychCollect: In=2, Out=2, 
CheckSum=fdb746bab5f269b63dc0256d50570d81 + PaychFund: In=4, Out=2, CheckSum=f7fb571ad0461c404a0789f6b8c38501 + PaychGet: In=5, Out=2, CheckSum=f0c77308814c6aefa820f1801e8eef8e + PaychGetWaitReady: In=2, Out=2, CheckSum=59546a25118c9e0dbb3ad569d3ec0c6f + PaychList: In=1, Out=2, CheckSum=566d37a33889023f9f73c632fa4e8279 + PaychNewPayment: In=4, Out=2, CheckSum=8bb33e38d75917df1357fcf071335ba9 + PaychSettle: In=2, Out=2, CheckSum=fdb746bab5f269b63dc0256d50570d81 + PaychStatus: In=2, Out=2, CheckSum=ae911e02219f1412acb95e95a4065478 + PaychVoucherAdd: In=5, Out=2, CheckSum=4bd3099043e7f90e37c1f30476cd7ff5 + PaychVoucherCheckSpendable: In=5, Out=2, CheckSum=b9d2610776265f877521a9a9521b3a43 + PaychVoucherCheckValid: In=3, Out=1, CheckSum=e991badd914254dda0d6c41b7eb36958 + PaychVoucherCreate: In=4, Out=2, CheckSum=e5113a58d351abf5cadd77b46f690c2d + PaychVoucherList: In=2, Out=2, CheckSum=c4a81e1915ceee53e3ecde9f1ae30b4c + PaychVoucherSubmit: In=5, Out=2, CheckSum=03f7a7f3a90849ea5da8eedd01643a1c + Session: In=1, Out=2, CheckSum=cdb04ef6a97114c8f24f456a2e70f1cd + Shutdown: In=1, Out=1, CheckSum=c39be30cc5a8826024fcf4d23e7017d6 + StateAccountKey: In=3, Out=2, CheckSum=9b6f9fdaea5bb22c60772266c418d98f + StateActorCodeCIDs: In=2, Out=2, CheckSum=d52881195cc100121739e6c85c45dd9f + StateActorManifestCID: In=2, Out=2, CheckSum=e04ec685bb3d32a892d45c5215586589 + StateAllMinerFaults: In=3, Out=2, CheckSum=a17e05b21e1ecc8da867e2f76df6c46c + StateCall: In=3, Out=2, CheckSum=b33ab6c7df31d805c256c8ab6691b085 + StateChangedActors: In=3, Out=2, CheckSum=cbc0cd36e495552a6672caab9f839468 + StateCirculatingSupply: In=2, Out=2, CheckSum=22ac180fa0c583b4036666bfc155fd63 + StateCompute: In=4, Out=2, CheckSum=1d429c389354255c7f865f00a59e3888 + StateComputeDataCID: In=5, Out=2, CheckSum=83d963e02e49a6747b0f83daf1d249be + StateDealProviderCollateralBounds: In=4, Out=2, CheckSum=5231e44843c0b74a04371e1e7170d4b2 + StateDecodeParams: In=5, Out=2, CheckSum=8c6311be4dc064a657368516c33e1307 + 
StateEncodeParams: In=4, Out=2, CheckSum=3e1a5390b92b1b69f9be038cd7400e38 + StateGetActor: In=3, Out=2, CheckSum=adcd0bbd36e3ab94f777c7cb3df1fb34 + StateGetAllocation: In=4, Out=2, CheckSum=306a8d2eb65b1bd436f2230bb96666b5 + StateGetAllocationForPendingDeal: In=3, Out=2, CheckSum=3d2912f0c80606576bb697499140ae2f + StateGetAllocations: In=3, Out=2, CheckSum=570266653b3643cf22e6b47609db7e55 + StateGetBeaconEntry: In=2, Out=2, CheckSum=c74f5c0f4039207ea45c11bfe3319c38 + StateGetClaim: In=4, Out=2, CheckSum=79bc50cf65a4d6b102267fd020b97510 + StateGetClaims: In=3, Out=2, CheckSum=06f189feb545746b5e123702405416fb + StateGetNetworkParams: In=1, Out=2, CheckSum=7bc6ff254ba803762ffb166c6d96a921 + StateGetRandomnessFromBeacon: In=5, Out=2, CheckSum=b98225c36bf011979dbcad0fb938e659 + StateGetRandomnessFromTickets: In=5, Out=2, CheckSum=b98225c36bf011979dbcad0fb938e659 + StateListActors: In=2, Out=2, CheckSum=57bcc4526adaf4b0582c0b117d39b042 + StateListMessages: In=4, Out=2, CheckSum=f12ce9e8a127bf320fb0b289a19b4ea6 + StateListMiners: In=2, Out=2, CheckSum=57bcc4526adaf4b0582c0b117d39b042 + StateLookupID: In=3, Out=2, CheckSum=9b6f9fdaea5bb22c60772266c418d98f + StateLookupRobustAddress: In=3, Out=2, CheckSum=9b6f9fdaea5bb22c60772266c418d98f + StateMarketBalance: In=3, Out=2, CheckSum=bbab976bd25166200f737c94fc970bc7 + StateMarketDeals: In=2, Out=2, CheckSum=46b5eef922e8252939bf00e1e80ae89e + StateMarketParticipants: In=2, Out=2, CheckSum=acc9019d0b6e389743c6f992350f407c + StateMarketStorageDeal: In=3, Out=2, CheckSum=94af302d39beba9f0ce7648f4118b6aa + StateMinerActiveSectors: In=3, Out=2, CheckSum=a6cc03c30ff0302d5dcd3002e55585b7 + StateMinerAllocated: In=3, Out=2, CheckSum=808e4357d56c3d08769a9d7a249ea7c0 + StateMinerAvailableBalance: In=3, Out=2, CheckSum=a05010da3c73edfba49c3b5d28a216cd + StateMinerDeadlines: In=3, Out=2, CheckSum=5b128c75ba4953740906520cca96b962 + StateMinerFaults: In=3, Out=2, CheckSum=9d26d848f93597964f751b43edd3d476 + StateMinerInfo: In=3, Out=2, 
CheckSum=4ba684a8519aa97d4df405cee3496e7c + StateMinerInitialPledgeCollateral: In=4, Out=2, CheckSum=b456dc2029fe2ac176ade895bda96dd5 + StateMinerPartitions: In=4, Out=2, CheckSum=b3cba1da3bd87c433cb8f9df7f7edc09 + StateMinerPower: In=3, Out=2, CheckSum=0e70a6360616c25624118181f764d7df + StateMinerPreCommitDepositForPower: In=4, Out=2, CheckSum=b456dc2029fe2ac176ade895bda96dd5 + StateMinerProvingDeadline: In=3, Out=2, CheckSum=8c6037a054ad720ecfb3d0c3f4f90fe6 + StateMinerRecoveries: In=3, Out=2, CheckSum=9d26d848f93597964f751b43edd3d476 + StateMinerSectorAllocated: In=4, Out=2, CheckSum=2accd56a9e9196fa1ca85a1d60e19fc2 + StateMinerSectorCount: In=3, Out=2, CheckSum=573483cbcf3ccdd38063b9ce16453dc2 + StateMinerSectors: In=4, Out=2, CheckSum=532dd2620e4430f0ae9113b75104e12f + StateNetworkName: In=1, Out=2, CheckSum=afb82130640a26dcd9d7010f31c69572 + StateNetworkVersion: In=2, Out=2, CheckSum=47de92e3b59793ade53abd96c347bace + StateReadState: In=3, Out=2, CheckSum=f53f39943eea93d5f3dd64f1389e4c64 + StateReplay: In=3, Out=2, CheckSum=ec89143eb01290212c3ce0032f62c5fc + StateSearchMsg: In=5, Out=2, CheckSum=05ad20588933ea6ffa941f47b826708a + StateSectorExpiration: In=4, Out=2, CheckSum=bb669c8b66b6f74c681b4345c9e4cc8f + StateSectorGetInfo: In=4, Out=2, CheckSum=8d176fd8a2054079b0017f795f9a4d56 + StateSectorPartition: In=4, Out=2, CheckSum=e3743df55a360243a32ac64ce1282c53 + StateSectorPreCommitInfo: In=4, Out=2, CheckSum=80eabb64c62e90a2cf0e497107b774ff + StateVMCirculatingSupplyInternal: In=2, Out=2, CheckSum=05c2114e08be095cece55db19e214d2c + StateVerifiedClientStatus: In=3, Out=2, CheckSum=e33ae4cd2315832f2d6f2aa74b68c34e + StateVerifiedRegistryRootKey: In=2, Out=2, CheckSum=5ad3a497ee24e321c780a69b8d2f0936 + StateVerifierStatus: In=3, Out=2, CheckSum=e33ae4cd2315832f2d6f2aa74b68c34e + StateWaitMsg: In=5, Out=2, CheckSum=561c18d1417310b5cd35cfffb0b75a00 + SyncCheckBad: In=2, Out=2, CheckSum=ba06470da0ca1d6cc2f9ada7f0288a6c + SyncCheckpoint: In=2, Out=1, 
CheckSum=cdfe593ac791e823186abb77bfad49a0 + SyncIncomingBlocks: In=1, Out=2, CheckSum=f6ad051ba2ce73511f74f9c08032acc3 + SyncMarkBad: In=2, Out=1, CheckSum=50d40a0afa31dabb8a61693aabea61b7 + SyncState: In=1, Out=2, CheckSum=7004fd3e7bf60990cb1695fa5883d08f + SyncSubmitBlock: In=2, Out=1, CheckSum=04cdc7641df5628e2ef8c90584936142 + SyncUnmarkAllBad: In=1, Out=1, CheckSum=c39be30cc5a8826024fcf4d23e7017d6 + SyncUnmarkBad: In=2, Out=1, CheckSum=50d40a0afa31dabb8a61693aabea61b7 + SyncValidateTipset: In=2, Out=2, CheckSum=a244b60fe32e540879ec4eeded71136b + Version: In=1, Out=2, CheckSum=0e78a1023c652297e66079f521e11624 + WalletBalance: In=2, Out=2, CheckSum=f1426bf8d7a1afdeaae0cd561fbbfbb0 + WalletDefaultAddress: In=1, Out=2, CheckSum=5591bbacb9b8345a4a07a149c963df55 + WalletDelete: In=2, Out=1, CheckSum=98613e8d41d3a2e52b37f97908560bd9 + WalletExport: In=2, Out=2, CheckSum=5db217de8a3ec9ecbed4cc583e473991 + WalletHas: In=2, Out=2, CheckSum=9ad76c8c583af5b5a5d0202c0a3fe7f4 + WalletImport: In=2, Out=2, CheckSum=617245aaab0381dc634a2ad50dd440b3 + WalletList: In=1, Out=2, CheckSum=566d37a33889023f9f73c632fa4e8279 + WalletNew: In=2, Out=2, CheckSum=266c59d3ee6f0b3a66e8dbdec099ef06 + WalletSetDefault: In=2, Out=1, CheckSum=98613e8d41d3a2e52b37f97908560bd9 + WalletSign: In=3, Out=2, CheckSum=b5a8800f8b60b92c4d99e9cb0fcff086 + WalletSignMessage: In=3, Out=2, CheckSum=c9f0ea6123a189cfeab06ce6bdeda7b3 + WalletValidateAddress: In=2, Out=2, CheckSum=610b35f7aa87b32463e5c2a9b4bd3f55 + WalletVerify: In=4, Out=2, CheckSum=9b22231e95efdfb94d9d55038f500636 + diff --git a/venus-shared/compatible-checks/api-diff.txt b/venus-shared/compatible-checks/api-diff.txt new file mode 100644 index 0000000000..097ce4e202 --- /dev/null +++ b/venus-shared/compatible-checks/api-diff.txt @@ -0,0 +1,227 @@ +github.com/filecoin-project/venus/venus-shared/api/chain/v0.FullNode <> github.com/filecoin-project/lotus/api/v0api.FullNode: + - AuthNew + - AuthVerify + + BlockTime + - ChainGetNode + + 
ChainGetReceipts + + ChainList + + ChainSyncHandleNewTipSet + - ClientCalcCommP + - ClientCancelDataTransfer + - ClientCancelRetrievalDeal + - ClientDataTransferUpdates + - ClientDealPieceCID + - ClientDealSize + - ClientFindData + - ClientGenCar + - ClientGetDealInfo + - ClientGetDealStatus + - ClientGetDealUpdates + - ClientGetRetrievalUpdates + - ClientHasLocal + - ClientImport + - ClientListDataTransfers + - ClientListDeals + - ClientListImports + - ClientListRetrievals + - ClientMinerQueryOffer + - ClientQueryAsk + - ClientRemoveImport + - ClientRestartDataTransfer + - ClientRetrieve + - ClientRetrieveTryRestartInsufficientFunds + - ClientRetrieveWithEvents + - ClientStartDeal + - ClientStatelessDeal + - Closing + + Concurrent + - CreateBackup + - Discover + + GasBatchEstimateMessageGas + > GasEstimateMessageGas {[func(context.Context, *internal.Message, *types.MessageSendSpec, types.TipSetKey) (*internal.Message, error) <> func(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error)] base=func in type: #2 input; nested={[*types.MessageSendSpec <> *api.MessageSendSpec] base=pointed type; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=struct field; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=exported fields count: 3 != 2; nested=nil}}}} + + GetActor + + GetEntry + + GetFullBlock + + GetParentStateRootActor + + HasPassword + + ListActor + + LockWallet + - LogAlerts + - LogList + - LogSetLevel + - MarketAddBalance + - MarketGetReserved + - MarketReleaseFunds + - MarketReserveFunds + - MarketWithdraw + + MessageWait + > MpoolBatchPushMessage {[func(context.Context, []*internal.Message, *types.MessageSendSpec) ([]*types.SignedMessage, error) <> func(context.Context, []*types.Message, *api.MessageSendSpec) ([]*types.SignedMessage, error)] base=func in type: #2 input; nested={[*types.MessageSendSpec <> *api.MessageSendSpec] base=pointed type; nested={[types.MessageSendSpec <> api.MessageSendSpec] 
base=struct field; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=exported fields count: 3 != 2; nested=nil}}}} + + MpoolDeleteByAdress + + MpoolPublishByAddr + + MpoolPublishMessage + > MpoolPushMessage {[func(context.Context, *internal.Message, *types.MessageSendSpec) (*types.SignedMessage, error) <> func(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)] base=func in type: #2 input; nested={[*types.MessageSendSpec <> *api.MessageSendSpec] base=pointed type; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=struct field; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=exported fields count: 3 != 2; nested=nil}}}} + + MpoolSelects + > MsigCancel {[func(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) <> func(context.Context, address.Address, uint64, address.Address, big.Int, address.Address, uint64, []uint8) (cid.Cid, error)] base=func in num: 4 != 8; nested=nil} + + MsigCancelTxnHash + - MsigGetAvailableBalance + - MsigGetPending + - MsigGetVestingSchedule + - NetBlockAdd + - NetBlockList + - NetBlockRemove + + NetFindProvidersAsync + + NetGetClosestPeers + - NetLimit + - NetSetLimit + - NetStat + + ProtocolParameters + + ResolveToKeyAddr + - Session + + SetConcurrent + + SetPassword + - Shutdown + + StartTime + - StateAllMinerFaults + - StateChangedActors + - StateCompute + - StateDecodeParams + > StateGetNetworkParams {[func(context.Context) (*types.NetworkParams, error) <> func(context.Context) (*api.NetworkParams, error)] base=func out type: #0 input; nested={[*types.NetworkParams <> *api.NetworkParams] base=pointed type; nested={[types.NetworkParams <> api.NetworkParams] base=struct field; nested={[types.NetworkParams <> api.NetworkParams] base=exported field type: #5 field named ForkUpgradeParams; nested={[types.ForkUpgradeParams <> api.ForkUpgradeParams] base=struct field; nested={[types.ForkUpgradeParams <> api.ForkUpgradeParams] base=exported fields count: 21 
!= 22; nested=nil}}}}}} + - StateGetRandomnessFromBeacon + - StateGetRandomnessFromTickets + - StateListMessages + + StateMinerSectorSize + + StateMinerWorkerAddress + - StateReadState + - StateReplay + - SyncCheckBad + - SyncCheckpoint + - SyncIncomingBlocks + - SyncMarkBad + - SyncUnmarkAllBad + - SyncUnmarkBad + - SyncValidateTipset + + SyncerTracker + + UnLockWallet + + VerifyEntry + > Version {[func(context.Context) (types.Version, error) <> func(context.Context) (api.APIVersion, error)] base=func out type: #0 input; nested={[types.Version <> api.APIVersion] base=struct field; nested={[types.Version <> api.APIVersion] base=exported fields count: 2 != 3; nested=nil}}} + + WalletAddresses + > WalletExport {[func(context.Context, address.Address, string) (*types.KeyInfo, error) <> func(context.Context, address.Address) (*types.KeyInfo, error)] base=func in num: 3 != 2; nested=nil} + - WalletList + - WalletNew + + WalletNewAddress + > WalletSign {[func(context.Context, address.Address, []uint8, types.MsgMeta) (*crypto.Signature, error) <> func(context.Context, address.Address, []uint8) (*crypto.Signature, error)] base=func in num: 4 != 3; nested=nil} + + WalletState + - WalletValidateAddress + - WalletVerify + +github.com/filecoin-project/venus/venus-shared/api/chain/v1.FullNode <> github.com/filecoin-project/lotus/api.FullNode: + - AuthNew + - AuthVerify + + BlockTime + - ChainBlockstoreInfo + - ChainCheckBlockstore + - ChainGetNode + + ChainGetReceipts + + ChainList + - ChainPrune + + ChainSyncHandleNewTipSet + - ClientCalcCommP + - ClientCancelDataTransfer + - ClientCancelRetrievalDeal + - ClientDataTransferUpdates + - ClientDealPieceCID + - ClientDealSize + - ClientExport + - ClientFindData + - ClientGenCar + - ClientGetDealInfo + - ClientGetDealStatus + - ClientGetDealUpdates + - ClientGetRetrievalUpdates + - ClientHasLocal + - ClientImport + - ClientListDataTransfers + - ClientListDeals + - ClientListImports + - ClientListRetrievals + - ClientMinerQueryOffer 
+ - ClientQueryAsk + - ClientRemoveImport + - ClientRestartDataTransfer + - ClientRetrieve + - ClientRetrieveTryRestartInsufficientFunds + - ClientRetrieveWait + - ClientStartDeal + - ClientStatelessDeal + - Closing + + Concurrent + - CreateBackup + - Discover + + GasBatchEstimateMessageGas + > GasEstimateMessageGas {[func(context.Context, *internal.Message, *types.MessageSendSpec, types.TipSetKey) (*internal.Message, error) <> func(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error)] base=func in type: #2 input; nested={[*types.MessageSendSpec <> *api.MessageSendSpec] base=pointed type; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=struct field; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=exported fields count: 3 != 2; nested=nil}}}} + + GetActor + + GetEntry + + GetFullBlock + + GetParentStateRootActor + + HasPassword + + ListActor + + LockWallet + - LogAlerts + - LogList + - LogSetLevel + - MarketAddBalance + - MarketGetReserved + - MarketReleaseFunds + - MarketReserveFunds + - MarketWithdraw + + MessageWait + > MpoolBatchPushMessage {[func(context.Context, []*internal.Message, *types.MessageSendSpec) ([]*types.SignedMessage, error) <> func(context.Context, []*types.Message, *api.MessageSendSpec) ([]*types.SignedMessage, error)] base=func in type: #2 input; nested={[*types.MessageSendSpec <> *api.MessageSendSpec] base=pointed type; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=struct field; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=exported fields count: 3 != 2; nested=nil}}}} + + MpoolDeleteByAdress + + MpoolPublishByAddr + + MpoolPublishMessage + > MpoolPushMessage {[func(context.Context, *internal.Message, *types.MessageSendSpec) (*types.SignedMessage, error) <> func(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)] base=func in type: #2 input; nested={[*types.MessageSendSpec <> *api.MessageSendSpec] base=pointed type; 
nested={[types.MessageSendSpec <> api.MessageSendSpec] base=struct field; nested={[types.MessageSendSpec <> api.MessageSendSpec] base=exported fields count: 3 != 2; nested=nil}}}} + + MpoolSelects + - MsigGetAvailableBalance + - MsigGetPending + - MsigGetVestingSchedule + - NetBlockAdd + - NetBlockList + - NetBlockRemove + + NetFindProvidersAsync + + NetGetClosestPeers + - NetLimit + - NetSetLimit + - NetStat + + ProtocolParameters + + ResolveToKeyAddr + - Session + + SetConcurrent + + SetPassword + - Shutdown + + StartTime + - StateCompute + > StateGetNetworkParams {[func(context.Context) (*types.NetworkParams, error) <> func(context.Context) (*api.NetworkParams, error)] base=func out type: #0 input; nested={[*types.NetworkParams <> *api.NetworkParams] base=pointed type; nested={[types.NetworkParams <> api.NetworkParams] base=struct field; nested={[types.NetworkParams <> api.NetworkParams] base=exported field type: #5 field named ForkUpgradeParams; nested={[types.ForkUpgradeParams <> api.ForkUpgradeParams] base=struct field; nested={[types.ForkUpgradeParams <> api.ForkUpgradeParams] base=exported fields count: 21 != 22; nested=nil}}}}}} + + StateMinerSectorSize + + StateMinerWorkerAddress + - StateReplay + - SyncCheckBad + - SyncCheckpoint + - SyncIncomingBlocks + - SyncMarkBad + - SyncUnmarkAllBad + - SyncUnmarkBad + - SyncValidateTipset + + SyncerTracker + + UnLockWallet + + VerifyEntry + > Version {[func(context.Context) (types.Version, error) <> func(context.Context) (api.APIVersion, error)] base=func out type: #0 input; nested={[types.Version <> api.APIVersion] base=struct field; nested={[types.Version <> api.APIVersion] base=exported fields count: 2 != 3; nested=nil}}} + + WalletAddresses + > WalletExport {[func(context.Context, address.Address, string) (*types.KeyInfo, error) <> func(context.Context, address.Address) (*types.KeyInfo, error)] base=func in num: 3 != 2; nested=nil} + - WalletList + - WalletNew + + WalletNewAddress + > WalletSign 
{[func(context.Context, address.Address, []uint8, types.MsgMeta) (*crypto.Signature, error) <> func(context.Context, address.Address, []uint8) (*crypto.Signature, error)] base=func in num: 4 != 3; nested=nil} + + WalletState + - WalletValidateAddress + - WalletVerify + diff --git a/venus-shared/compatible-checks/api-perm.txt b/venus-shared/compatible-checks/api-perm.txt new file mode 100644 index 0000000000..4fd6502b65 --- /dev/null +++ b/venus-shared/compatible-checks/api-perm.txt @@ -0,0 +1,93 @@ +v0: github.com/filecoin-project/venus/venus-shared/api/chain/v0 <> github.com/filecoin-project/lotus/api/v0api + - IBlockStore.ChainPutObj + - IActor.ListActor + - IChainInfo.BlockTime + - IChainInfo.ChainGetReceipts + - IChainInfo.ChainList + - IChainInfo.GetActor + - IChainInfo.GetEntry + - IChainInfo.GetFullBlock + - IChainInfo.GetParentStateRootActor + - IChainInfo.MessageWait + - IChainInfo.ProtocolParameters + - IChainInfo.ResolveToKeyAddr + - IChainInfo.VerifyEntry + - IMinerState.StateMinerSectorSize + - IMinerState.StateMinerWorkerAddress + - ICommon.StartTime + - ICommon.Version + - IMessagePool.GasBatchEstimateMessageGas + - IMessagePool.MpoolDeleteByAdress + - IMessagePool.MpoolPublishByAddr + - IMessagePool.MpoolPublishMessage + - IMessagePool.MpoolSelects + - IMultiSig.MsigCancelTxnHash + - INetwork.ID + - INetwork.NetAddrsListen + - INetwork.NetAgentVersion + - INetwork.NetAutoNatStatus + - INetwork.NetBandwidthStats + - INetwork.NetBandwidthStatsByPeer + - INetwork.NetBandwidthStatsByProtocol + - INetwork.NetConnect + - INetwork.NetConnectedness + - INetwork.NetDisconnect + - INetwork.NetFindPeer + - INetwork.NetFindProvidersAsync + - INetwork.NetGetClosestPeers + - INetwork.NetPeerInfo + - INetwork.NetPeers + - INetwork.NetPing + - INetwork.NetProtectAdd + - INetwork.NetProtectList + - INetwork.NetProtectRemove + - INetwork.NetPubsubScores + - ISyncer.ChainSyncHandleNewTipSet + - ISyncer.Concurrent + - ISyncer.SetConcurrent + - ISyncer.SyncerTracker + - 
IWallet.HasPassword + - IWallet.LockWallet + - IWallet.SetPassword + - IWallet.UnLockWallet + - IWallet.WalletAddresses + - IWallet.WalletNewAddress + - IWallet.WalletState + +v1: github.com/filecoin-project/venus/venus-shared/api/chain/v1 <> github.com/filecoin-project/lotus/api + - IActor.ListActor + - IChainInfo.BlockTime + - IChainInfo.ChainGetReceipts + - IChainInfo.ChainList + - IChainInfo.GetActor + - IChainInfo.GetEntry + - IChainInfo.GetFullBlock + - IChainInfo.GetParentStateRootActor + - IChainInfo.MessageWait + - IChainInfo.ProtocolParameters + - IChainInfo.ResolveToKeyAddr + - IChainInfo.VerifyEntry + - IMinerState.StateMinerSectorSize + - IMinerState.StateMinerWorkerAddress + - ICommon.StartTime + - IMessagePool.GasBatchEstimateMessageGas + - IMessagePool.MpoolDeleteByAdress + - IMessagePool.MpoolPublishByAddr + - IMessagePool.MpoolPublishMessage + - IMessagePool.MpoolSelects + > INetwork.NetConnect: admin <> Net.NetConnect: write + > INetwork.NetDisconnect: admin <> Net.NetDisconnect: write + - INetwork.NetFindProvidersAsync + - INetwork.NetGetClosestPeers + - ISyncer.ChainSyncHandleNewTipSet + - ISyncer.Concurrent + - ISyncer.SetConcurrent + - ISyncer.SyncerTracker + - IWallet.HasPassword + - IWallet.LockWallet + - IWallet.SetPassword + - IWallet.UnLockWallet + - IWallet.WalletAddresses + - IWallet.WalletNewAddress + - IWallet.WalletState + diff --git a/venus-shared/internal/actor.go b/venus-shared/internal/actor.go new file mode 100644 index 0000000000..12e53c521c --- /dev/null +++ b/venus-shared/internal/actor.go @@ -0,0 +1,59 @@ +package internal + +import ( + "errors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" +) + +var ErrActorNotFound = errors.New("actor not found") + +// Actor is the central abstraction of entities in the system. +// +// Both individual accounts, as well as contracts (user & system level) are +// represented as actors. 
An actor has the following core functionality implemented on a system level: +// - track a Filecoin balance, using the `Balance` field +// - execute code stored in the `Code` field +// - read & write memory +// - replay protection, using the `Nonce` field +// +// Value sent to a non-existent address will be tracked as an empty actor that has a Balance but +// nil Code and Memory. You must nil check Code cids before comparing them. +// +// More specific capabilities for individual accounts or contract specific must be implemented +// inside the code. +// +// Not safe for concurrent access. +type Actor struct { + // Code is a CID of the VM code for this actor's implementation (or a constant for actors implemented in Go code). + // Code may be nil for an uninitialized actor (which exists because it has received a balance). + Code cid.Cid + // Head is the CID of the root of the actor's state tree. + Head cid.Cid + // Nonce is the number expected on the next message from this actor. + // Messages are processed in strict, contiguous order. + Nonce uint64 + // Balance is the amount of attoFIL in the actor's account. + Balance abi.TokenAmount +} + +// NewActor constructs a new actor. +func NewActor(code cid.Cid, balance abi.TokenAmount, head cid.Cid) *Actor { + return &Actor{ + Code: code, + Nonce: 0, + Balance: balance, + Head: head, + } +} + +// Empty tests whether the actor's code is defined. +func (t *Actor) Empty() bool { + return !t.Code.Defined() +} + +// IncrementSeqNum increments the seq number. 
+func (t *Actor) IncrementSeqNum() { + t.Nonce = t.Nonce + 1 +} diff --git a/venus-shared/internal/bigint.go b/venus-shared/internal/bigint.go new file mode 100644 index 0000000000..5b1a467ad5 --- /dev/null +++ b/venus-shared/internal/bigint.go @@ -0,0 +1,59 @@ +package internal + +import ( + "fmt" + "math/big" + + big2 "github.com/filecoin-project/go-state-types/big" +) + +var EmptyInt = BigInt{} + +type BigInt = big2.Int + +func NewInt(i uint64) BigInt { + return BigInt{Int: big.NewInt(0).SetUint64(i)} +} + +func BigFromBytes(b []byte) BigInt { + i := big.NewInt(0).SetBytes(b) + return BigInt{Int: i} +} + +func BigFromString(s string) (BigInt, error) { + v, ok := big.NewInt(0).SetString(s, 10) + if !ok { + return BigInt{}, fmt.Errorf("failed to parse string as a big int") + } + + return BigInt{Int: v}, nil +} + +func BigMul(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Mul(a.Int, b.Int)} +} + +func BigDiv(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Div(a.Int, b.Int)} +} + +func BigDivFloat(num, den BigInt) float64 { + res, _ := new(big.Rat).SetFrac(num.Int, den.Int).Float64() + return res +} + +func BigMod(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Mod(a.Int, b.Int)} +} + +func BigAdd(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Add(a.Int, b.Int)} +} + +func BigSub(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Sub(a.Int, b.Int)} +} + +func BigCmp(a, b BigInt) int { + return a.Int.Cmp(b.Int) +} diff --git a/venus-shared/internal/bigint_fil.go b/venus-shared/internal/bigint_fil.go new file mode 100644 index 0000000000..466661f908 --- /dev/null +++ b/venus-shared/internal/bigint_fil.go @@ -0,0 +1,175 @@ +package internal + +import ( + "encoding" + "encoding/json" + "fmt" + "math/big" + "strings" + + fbig "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/types/params" +) + +var ZeroFIL = fbig.NewInt(0) + +type FIL BigInt + +func (f FIL) String() string { + 
return f.Unitless() + " FIL" +} + +var ( + AttoFil = NewInt(1) + FemtoFil = BigMul(AttoFil, NewInt(1000)) + PicoFil = BigMul(FemtoFil, NewInt(1000)) + NanoFil = BigMul(PicoFil, NewInt(1000)) +) + +func (f FIL) Unitless() string { + r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(params.FilecoinPrecision))) + if r.Sign() == 0 { + return "0" + } + return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".") +} + +var unitPrefixes = []string{"a", "f", "p", "n", "μ", "m"} + +func (f FIL) Short() string { + n := BigInt(f).Abs() + + dn := uint64(1) + var prefix string + for _, p := range unitPrefixes { + if n.LessThan(NewInt(dn * 1000)) { + prefix = p + break + } + dn *= 1000 + } + + r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(dn))) + if r.Sign() == 0 { + return "0" + } + + return strings.TrimRight(strings.TrimRight(r.FloatString(3), "0"), ".") + " " + prefix + "FIL" +} + +func (f FIL) Nano() string { + r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(1e9))) + if r.Sign() == 0 { + return "0" + } + + return strings.TrimRight(strings.TrimRight(r.FloatString(9), "0"), ".") + " nFIL" +} + +func (f FIL) Format(s fmt.State, ch rune) { + switch ch { + case 's', 'v': + fmt.Fprint(s, f.String()) + default: + f.Int.Format(s, ch) + } +} + +func (f FIL) MarshalText() (text []byte, err error) { + return []byte(f.String()), nil +} + +func (f FIL) UnmarshalText(text []byte) error { + p, err := ParseFIL(string(text)) + if err != nil { + return err + } + + if f.Int == nil { + f.Int = big.NewInt(0) + } + + f.Int.Set(p.Int) + return nil +} + +func (f FIL) MarshalJSON() ([]byte, error) { + return []byte("\"" + f.String() + "\""), nil +} + +func (f *FIL) UnmarshalJSON(by []byte) error { + p, err := ParseFIL(strings.Trim(string(by), "\"")) + if err != nil { + return err + } + if f.Int != nil { + f.Int.Set(p.Int) + } else { + f.Int = p.Int + } + + return nil +} + +func ParseFIL(s string) (FIL, error) { + suffix := strings.TrimLeft(s, "-.1234567890") + s = 
s[:len(s)-len(suffix)] + var attofil bool + if suffix != "" { + norm := strings.ToLower(strings.TrimSpace(suffix)) + switch norm { + case "", "fil": + case "attofil", "afil": + attofil = true + default: + return FIL{}, fmt.Errorf("unrecognized suffix: %q", suffix) + } + } + + if len(s) > 50 { + return FIL{}, fmt.Errorf("string length too large: %d", len(s)) + } + + r, ok := new(big.Rat).SetString(s) + if !ok { + return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s) + } + + if !attofil { + r = r.Mul(r, big.NewRat(int64(params.FilecoinPrecision), 1)) + } + + if !r.IsInt() { + var pref string + if attofil { + pref = "atto" + } + return FIL{}, fmt.Errorf("invalid %sFIL value: %q", pref, s) + } + + return FIL{r.Num()}, nil +} + +func MustParseFIL(s string) FIL { + n, err := ParseFIL(s) + if err != nil { + panic(err) + } + + return n +} + +func FromFil(i uint64) BigInt { + return BigMul(NewInt(i), NewInt(params.FilecoinPrecision)) +} + +var ( + _ encoding.TextMarshaler = (*FIL)(nil) + _ encoding.TextUnmarshaler = (*FIL)(nil) +) + +var ( + _ json.Marshaler = (*FIL)(nil) + _ json.Unmarshaler = (*FIL)(nil) +) diff --git a/venus-shared/internal/cbor_gen.go b/venus-shared/internal/cbor_gen.go new file mode 100644 index 0000000000..707e903244 --- /dev/null +++ b/venus-shared/internal/cbor_gen.go @@ -0,0 +1,374 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package internal + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufActor = []byte{132} + +func (t *Actor) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufActor); err != nil { + return err + } + + // t.Code (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Code); err != nil { + return xerrors.Errorf("failed to write cid field t.Code: %w", err) + } + + // t.Head (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Head); err != nil { + return xerrors.Errorf("failed to write cid field t.Head: %w", err) + } + + // t.Nonce (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { + return err + } + + // t.Balance (big.Int) (struct) + if err := t.Balance.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Actor) UnmarshalCBOR(r io.Reader) (err error) { + *t = Actor{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Code (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Code: %w", err) + } + + t.Code = c + + } + // t.Head (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Head: %w", err) + } + + t.Head = c + + } + // t.Nonce (uint64) (uint64) + + { + + maj, extra, err = 
cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + + } + // t.Balance (big.Int) (struct) + + { + + if err := t.Balance.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Balance: %w", err) + } + + } + return nil +} + +var lengthBufMessage = []byte{138} + +func (t *Message) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufMessage); err != nil { + return err + } + + // t.Version (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Version)); err != nil { + return err + } + + // t.To (address.Address) (struct) + if err := t.To.MarshalCBOR(cw); err != nil { + return err + } + + // t.From (address.Address) (struct) + if err := t.From.MarshalCBOR(cw); err != nil { + return err + } + + // t.Nonce (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { + return err + } + + // t.Value (big.Int) (struct) + if err := t.Value.MarshalCBOR(cw); err != nil { + return err + } + + // t.GasLimit (int64) (int64) + if t.GasLimit >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.GasLimit)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.GasLimit-1)); err != nil { + return err + } + } + + // t.GasFeeCap (big.Int) (struct) + if err := t.GasFeeCap.MarshalCBOR(cw); err != nil { + return err + } + + // t.GasPremium (big.Int) (struct) + if err := t.GasPremium.MarshalCBOR(cw); err != nil { + return err + } + + // t.Method (abi.MethodNum) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Method)); err != nil { + return err + } + + // t.Params ([]uint8) (slice) + if len(t.Params) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Params 
was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Params))); err != nil { + return err + } + + if _, err := cw.Write(t.Params[:]); err != nil { + return err + } + return nil +} + +func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { + *t = Message{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 10 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Version (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Version = uint64(extra) + + } + // t.To (address.Address) (struct) + + { + + if err := t.To.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.To: %w", err) + } + + } + // t.From (address.Address) (struct) + + { + + if err := t.From.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.From: %w", err) + } + + } + // t.Nonce (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + + } + // t.Value (big.Int) (struct) + + { + + if err := t.Value.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Value: %w", err) + } + + } + // t.GasLimit (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - 
extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.GasLimit = int64(extraI) + } + // t.GasFeeCap (big.Int) (struct) + + { + + if err := t.GasFeeCap.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.GasFeeCap: %w", err) + } + + } + // t.GasPremium (big.Int) (struct) + + { + + if err := t.GasPremium.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.GasPremium: %w", err) + } + + } + // t.Method (abi.MethodNum) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Method = abi.MethodNum(extra) + + } + // t.Params ([]uint8) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Params: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Params = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + return err + } + return nil +} diff --git a/venus-shared/internal/message.go b/venus-shared/internal/message.go new file mode 100644 index 0000000000..76edb48e8a --- /dev/null +++ b/venus-shared/internal/message.go @@ -0,0 +1,211 @@ +package internal + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/venus/venus-shared/types/params" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +const MessageVersion = 0 + +type ChainMsg interface { + Cid() cid.Cid + VMMessage() *Message + ToStorageBlock() (blocks.Block, error) + // FIXME: This is the *message* length, this name is misleading. 
+ ChainLength() int + cbor.Marshaler + cbor.Unmarshaler +} + +func DecodeMessage(b []byte) (*Message, error) { + var msg Message + if err := msg.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, err + } + + if msg.Version != MessageVersion { + return nil, fmt.Errorf("decoded message had incorrect version (%d)", msg.Version) + } + + return &msg, nil +} + +type Message struct { + Version uint64 + + To address.Address + From address.Address + // When receiving a message from a user account the nonce in + // the message must match the expected nonce in the from actor. + // This prevents replay attacks. + Nonce uint64 + + Value abi.TokenAmount + + GasLimit int64 + GasFeeCap abi.TokenAmount + GasPremium abi.TokenAmount + + Method abi.MethodNum + Params []byte +} + +func (m *Message) Serialize() ([]byte, error) { + buf := new(bytes.Buffer) + if err := m.MarshalCBOR(buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (m *Message) SerializeWithCid() (cid.Cid, []byte, error) { + data, err := m.Serialize() + if err != nil { + return cid.Undef, nil, err + } + + c, err := abi.CidBuilder.Sum(data) + if err != nil { + return cid.Undef, nil, err + } + + return c, data, nil +} + +func (m *Message) ToStorageBlock() (blocks.Block, error) { + c, data, err := m.SerializeWithCid() + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, c) +} + +func (m *Message) Cid() cid.Cid { + c, _, err := m.SerializeWithCid() + if err != nil { + panic(err) + } + + return c +} + +func (m *Message) String() string { + errStr := "(error encoding Message)" + c, _, err := m.SerializeWithCid() + if err != nil { + return errStr + } + + js, err := json.MarshalIndent(m, "", " ") + if err != nil { + return errStr + } + + return fmt.Sprintf("Message cid=[%v]: %s", c, string(js)) +} + +func (m *Message) ChainLength() int { + ser, err := m.Serialize() + if err != nil { + panic(err) + } + + return len(ser) +} + +func (m *Message) Equals(o *Message) 
bool { + return m.Cid() == o.Cid() +} + +func (m *Message) EqualCall(o *Message) bool { + m1 := *m + m2 := *o + + m1.GasLimit, m2.GasLimit = 0, 0 + m1.GasFeeCap, m2.GasFeeCap = bigZero, bigZero + m1.GasPremium, m2.GasPremium = bigZero, bigZero + + return (&m1).Equals(&m2) +} + +func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) error { + if m.Version != 0 { + return fmt.Errorf("'Version' unsupported") + } + + if m.To == address.Undef { + return fmt.Errorf("'To' address cannot be empty") + } + + if m.To == ZeroAddress && version >= network.Version7 { + return fmt.Errorf("invalid 'To' address") + } + + if m.From == address.Undef { + return fmt.Errorf("'From' address cannot be empty") + } + + if m.Value.Int == nil { + return fmt.Errorf("'Value' cannot be nil") + } + + if m.Value.LessThan(bigZero) { + return fmt.Errorf("'Value' field cannot be negative") + } + + if m.Value.GreaterThan(TotalFilecoinInt) { + return fmt.Errorf("'Value' field cannot be greater than total filecoin supply") + } + + if m.GasFeeCap.Int == nil { + return fmt.Errorf("'GasFeeCap' cannot be nil") + } + + if m.GasFeeCap.LessThan(bigZero) { + return fmt.Errorf("'GasFeeCap' field cannot be negative") + } + + if m.GasPremium.Int == nil { + return fmt.Errorf("'GasPremium' cannot be nil") + } + + if m.GasPremium.LessThan(bigZero) { + return fmt.Errorf("'GasPremium' field cannot be negative") + } + + if m.GasPremium.GreaterThan(m.GasFeeCap) { + return fmt.Errorf("'GasFeeCap' less than 'GasPremium'") + } + + if m.GasLimit > params.BlockGasLimit { + return fmt.Errorf("'GasLimit' field cannot be greater than a block's gas limit") + } + + // since prices might vary with time, this is technically semantic validation + if m.GasLimit < minGas { + return fmt.Errorf("'GasLimit' field cannot be less than the cost of storing a message on chain %d < %d", m.GasLimit, minGas) + } + + return nil +} + +func (m *Message) VMMessage() *Message { + return m +} + +func (m *Message) 
RequiredFunds() abi.TokenAmount { + return abi.TokenAmount{Int: BigMul(BigInt{Int: m.GasFeeCap.Int}, NewInt(uint64(m.GasLimit))).Int} +} + +var _ ChainMsg = (*Message)(nil) diff --git a/venus-shared/internal/message_marshal.go b/venus-shared/internal/message_marshal.go new file mode 100644 index 0000000000..8ea050f404 --- /dev/null +++ b/venus-shared/internal/message_marshal.go @@ -0,0 +1,21 @@ +package internal + +import ( + "encoding/json" + + "github.com/ipfs/go-cid" +) + +type RawMessage Message + +type mCid struct { + CID cid.Cid + *RawMessage +} + +func (m *Message) MarshalJSON() ([]byte, error) { + return json.Marshal(&mCid{ + RawMessage: (*RawMessage)(m), + CID: m.Cid(), + }) +} diff --git a/venus-shared/internal/param.go b/venus-shared/internal/param.go new file mode 100644 index 0000000000..dd9d92c92f --- /dev/null +++ b/venus-shared/internal/param.go @@ -0,0 +1,22 @@ +package internal + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/venus-shared/types/params" +) + +var bigZero = big.Zero() + +var TotalFilecoinInt = FromFil(params.FilBase) + +var ZeroAddress = func() address.Address { + addr := "f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a" + + ret, err := address.NewFromString(addr) + if err != nil { + panic(err) + } + + return ret +}() diff --git a/venus-shared/libp2p/exchange/cbor_gen.go b/venus-shared/libp2p/exchange/cbor_gen.go new file mode 100644 index 0000000000..ea19b5e83c --- /dev/null +++ b/venus-shared/libp2p/exchange/cbor_gen.go @@ -0,0 +1,674 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package exchange + +import ( + "fmt" + "io" + "math" + "sort" + + internal "github.com/filecoin-project/venus/venus-shared/internal" + types "github.com/filecoin-project/venus/venus-shared/types" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufRequest = []byte{131} + +func (t *Request) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufRequest); err != nil { + return err + } + + // t.Head ([]cid.Cid) (slice) + if len(t.Head) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Head was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Head))); err != nil { + return err + } + for _, v := range t.Head { + if err := cbg.WriteCid(w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Head: %w", err) + } + } + + // t.Length (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Length)); err != nil { + return err + } + + // t.Options (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Options)); err != nil { + return err + } + + return nil +} + +func (t *Request) UnmarshalCBOR(r io.Reader) (err error) { + *t = Request{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Head ([]cid.Cid) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Head: array too large (%d)", extra) + } + + if maj 
!= cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Head = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("reading cid field t.Head failed: %w", err) + } + t.Head[i] = c + } + + // t.Length (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Length = uint64(extra) + + } + // t.Options (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Options = uint64(extra) + + } + return nil +} + +var lengthBufResponse = []byte{131} + +func (t *Response) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufResponse); err != nil { + return err + } + + // t.Status (exchange.status) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.ErrorMessage (string) (string) + if len(t.ErrorMessage) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ErrorMessage was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ErrorMessage))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ErrorMessage)); err != nil { + return err + } + + // t.Chain ([]*exchange.BSTipSet) (slice) + if len(t.Chain) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Chain was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Chain))); err != nil { + return err + } + for _, v := range t.Chain { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { + 
*t = Response{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Status (exchange.status) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = status(extra) + + } + // t.ErrorMessage (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.ErrorMessage = string(sval) + } + // t.Chain ([]*exchange.BSTipSet) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Chain: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Chain = make([]*BSTipSet, extra) + } + + for i := 0; i < int(extra); i++ { + + var v BSTipSet + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Chain[i] = &v + } + + return nil +} + +var lengthBufCompactedMessages = []byte{132} + +func (t *CompactedMessages) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufCompactedMessages); err != nil { + return err + } + + // t.Bls ([]*internal.Message) (slice) + if len(t.Bls) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Bls was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Bls))); err != nil { + return err + } + for _, v := range t.Bls { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.BlsIncludes ([][]uint64) (slice) + if len(t.BlsIncludes) > cbg.MaxLength { 
+ return xerrors.Errorf("Slice value in field t.BlsIncludes was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.BlsIncludes))); err != nil { + return err + } + for _, v := range t.BlsIncludes { + if len(v) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field v was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(v))); err != nil { + return err + } + for _, v := range v { + if err := cw.CborWriteHeader(cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + } + + // t.Secpk ([]*types.SignedMessage) (slice) + if len(t.Secpk) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Secpk was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Secpk))); err != nil { + return err + } + for _, v := range t.Secpk { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.SecpkIncludes ([][]uint64) (slice) + if len(t.SecpkIncludes) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.SecpkIncludes was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.SecpkIncludes))); err != nil { + return err + } + for _, v := range t.SecpkIncludes { + if len(v) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field v was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(v))); err != nil { + return err + } + for _, v := range v { + if err := cw.CborWriteHeader(cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + } + return nil +} + +func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) { + *t = CompactedMessages{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong 
number of fields") + } + + // t.Bls ([]*internal.Message) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Bls: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Bls = make([]*internal.Message, extra) + } + + for i := 0; i < int(extra); i++ { + + var v internal.Message + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Bls[i] = &v + } + + // t.BlsIncludes ([][]uint64) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.BlsIncludes: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.BlsIncludes = make([][]uint64, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.BlsIncludes[i]: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.BlsIncludes[i] = make([]uint64, extra) + } + + for j := 0; j < int(extra); j++ { + + maj, val, err := cr.ReadHeader() + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.BlsIncludes[i] slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.BlsIncludes[i] was not a uint, instead got %d", maj) + } + + t.BlsIncludes[i][j] = uint64(val) + } + + } + } + + // t.Secpk ([]*types.SignedMessage) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Secpk: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Secpk = 
make([]*types.SignedMessage, extra) + } + + for i := 0; i < int(extra); i++ { + + var v types.SignedMessage + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Secpk[i] = &v + } + + // t.SecpkIncludes ([][]uint64) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.SecpkIncludes: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.SecpkIncludes = make([][]uint64, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.SecpkIncludes[i]: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.SecpkIncludes[i] = make([]uint64, extra) + } + + for j := 0; j < int(extra); j++ { + + maj, val, err := cr.ReadHeader() + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.SecpkIncludes[i] slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.SecpkIncludes[i] was not a uint, instead got %d", maj) + } + + t.SecpkIncludes[i][j] = uint64(val) + } + + } + } + + return nil +} + +var lengthBufBSTipSet = []byte{130} + +func (t *BSTipSet) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufBSTipSet); err != nil { + return err + } + + // t.Blocks ([]*types.BlockHeader) (slice) + if len(t.Blocks) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Blocks was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Blocks))); err != nil { + return err + } + for _, v := range t.Blocks { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } 
+ + // t.Messages (exchange.CompactedMessages) (struct) + if err := t.Messages.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *BSTipSet) UnmarshalCBOR(r io.Reader) (err error) { + *t = BSTipSet{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Blocks ([]*types.BlockHeader) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Blocks: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Blocks = make([]*types.BlockHeader, extra) + } + + for i := 0; i < int(extra); i++ { + + var v types.BlockHeader + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Blocks[i] = &v + } + + // t.Messages (exchange.CompactedMessages) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Messages = new(CompactedMessages) + if err := t.Messages.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Messages pointer: %w", err) + } + } + + } + return nil +} diff --git a/venus-shared/libp2p/exchange/client.go b/venus-shared/libp2p/exchange/client.go new file mode 100644 index 0000000000..3f84ec791d --- /dev/null +++ b/venus-shared/libp2p/exchange/client.go @@ -0,0 +1,35 @@ +package exchange + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +// Client is the requesting side of the ChainExchange protocol. 
It acts as +// a proxy for other components to request chain data from peers. It is chiefly +// used by the Syncer. +type Client interface { + // GetBlocks fetches block headers from the network, from the provided + // tipset *backwards*, returning as many tipsets as the count parameter, + // or less. + GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) + + // GetChainMessages fetches messages from the network, starting from the first provided tipset + // and returning messages from as many tipsets as requested or less. + GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*CompactedMessages, error) + + // GetFullTipSet fetches a full tipset from a given peer. If successful, + // the fetched object contains block headers and all messages in full form. + GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipSetKey) (*types.FullTipSet, error) + + // AddPeer adds a peer to the pool of peers that the Client requests + // data from. + AddPeer(ctx context.Context, peer peer.ID) + + // RemovePeer removes a peer from the pool of peers that the Client + // requests data from. + RemovePeer(ctx context.Context, peer peer.ID) +} diff --git a/venus-shared/libp2p/exchange/exchange.go b/venus-shared/libp2p/exchange/exchange.go new file mode 100644 index 0000000000..af2e8e8711 --- /dev/null +++ b/venus-shared/libp2p/exchange/exchange.go @@ -0,0 +1,150 @@ +package exchange + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/actors/policy" + "github.com/filecoin-project/venus/venus-shared/types" +) + +const ( + // ChainExchangeProtocolID is the protocol ID of the chain exchange + // protocol. + ChainExchangeProtocolID = "/fil/chain/xchg/0.0.1" +) + +// FIXME: Bumped from original 800 to this to accommodate `syncFork()` +// use of `GetBlocks()`. 
It seems the expectation of that API is to +// fetch any amount of blocks leaving it to the internal logic here +// to partition and reassemble the requests if they go above the maximum. +// (Also as a consequence of this temporarily removing the `const` +// qualifier to avoid "const initializer [...] is not a constant" error.) +var MaxRequestLength = uint64(policy.ChainFinality) + +// FIXME: Rename. Make private. +type Request struct { + // List of ordered CIDs comprising a `TipSetKey` from where to start + // fetching backwards. + // FIXME: Consider using `TipSetKey` now (introduced after the creation + // of this protocol) instead of converting back and forth. + Head []cid.Cid + // Number of block sets to fetch from `Head` (inclusive, should always + // be in the range `[1, MaxRequestLength]`). + Length uint64 + // Request options, see `Options` type for more details. Compressed + // in a single `uint64` to save space. + Options uint64 +} + +// Request options. When fetching the chain segment we can fetch +// either block headers, messages, or both. +const ( + Headers = 1 << iota + Messages +) + +// Decompressed options into separate struct members for easy access +// during internal processing.. +type Options struct { + IncludeHeaders bool + IncludeMessages bool +} + +func (opt *Options) IsEmpty() bool { + return !opt.IncludeHeaders && !opt.IncludeMessages +} + +func (opt *Options) ToBits() uint64 { + var bits uint64 + if opt.IncludeHeaders { + bits |= Headers + } + + if opt.IncludeMessages { + bits |= Messages + } + return bits +} + +func ParseOptions(optfield uint64) *Options { + return &Options{ + IncludeHeaders: optfield&(uint64(Headers)) != 0, + IncludeMessages: optfield&(uint64(Messages)) != 0, + } +} + +// FIXME: Rename. Make private. +type Response struct { + Status status + // String that complements the error status when converting to an + // internal error (see `statusToError()`). 
+ ErrorMessage string + + Chain []*BSTipSet +} + +type status uint64 + +const ( + Ok status = 0 + // We could not fetch all blocks requested (but at least we returned + // the `Head` requested). Not considered an error. + Partial = 101 + + // Errors + NotFound = 201 + GoAway = 202 + InternalError = 203 + BadRequest = 204 +) + +// Convert status to internal error. +func (res *Response) StatusToError() error { + switch res.Status { + case Ok, Partial: + return nil + // FIXME: Consider if we want to not process `Partial` responses + // and return an error instead. + case NotFound: + return fmt.Errorf("not found") + case GoAway: + return fmt.Errorf("not handling 'go away' chainxchg responses yet") + case InternalError: + return fmt.Errorf("block sync peer errored: %s", res.ErrorMessage) + case BadRequest: + return fmt.Errorf("block sync request invalid: %s", res.ErrorMessage) + default: + return fmt.Errorf("unrecognized response code: %d", res.Status) + } +} + +// FIXME: Rename. +type BSTipSet struct { + // List of blocks belonging to a single tipset to which the + // `CompactedMessages` are linked. + Blocks []*types.BlockHeader + Messages *CompactedMessages +} + +// All messages of a single tipset compacted together instead +// of grouped by block to save space, since there are normally +// many repeated messages per tipset in different blocks. +// +// `BlsIncludes`/`SecpkIncludes` matches `Bls`/`Secpk` messages +// to blocks in the tipsets with the format: +// `BlsIncludes[BI][MI]` +// - BI: block index in the tipset. +// - MI: message index in `Bls` list +// +// FIXME: The logic to decompress this structure should belong +// +// to itself, not to the consumer. 
+type CompactedMessages struct { + Bls []*types.Message + BlsIncludes [][]uint64 + + Secpk []*types.SignedMessage + SecpkIncludes [][]uint64 +} diff --git a/venus-shared/libp2p/hello/cbor_gen.go b/venus-shared/libp2p/hello/cbor_gen.go new file mode 100644 index 0000000000..8a05f11425 --- /dev/null +++ b/venus-shared/libp2p/hello/cbor_gen.go @@ -0,0 +1,287 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package hello + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufGreetingMessage = []byte{132} + +func (t *GreetingMessage) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufGreetingMessage); err != nil { + return err + } + + // t.HeaviestTipSet ([]cid.Cid) (slice) + if len(t.HeaviestTipSet) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.HeaviestTipSet was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.HeaviestTipSet))); err != nil { + return err + } + for _, v := range t.HeaviestTipSet { + if err := cbg.WriteCid(w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.HeaviestTipSet: %w", err) + } + } + + // t.HeaviestTipSetHeight (abi.ChainEpoch) (int64) + if t.HeaviestTipSetHeight >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.HeaviestTipSetHeight)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.HeaviestTipSetHeight-1)); err != nil { + return err + } + } + + // t.HeaviestTipSetWeight (big.Int) (struct) + if err := t.HeaviestTipSetWeight.MarshalCBOR(cw); err != nil { + return err + } + + // t.GenesisHash (cid.Cid) (struct) 
+ + if err := cbg.WriteCid(cw, t.GenesisHash); err != nil { + return xerrors.Errorf("failed to write cid field t.GenesisHash: %w", err) + } + + return nil +} + +func (t *GreetingMessage) UnmarshalCBOR(r io.Reader) (err error) { + *t = GreetingMessage{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.HeaviestTipSet ([]cid.Cid) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.HeaviestTipSet: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.HeaviestTipSet = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("reading cid field t.HeaviestTipSet failed: %w", err) + } + t.HeaviestTipSet[i] = c + } + + // t.HeaviestTipSetHeight (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.HeaviestTipSetHeight = abi.ChainEpoch(extraI) + } + // t.HeaviestTipSetWeight (big.Int) (struct) + + { + + if err := t.HeaviestTipSetWeight.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.HeaviestTipSetWeight: %w", err) + } + + } + // t.GenesisHash (cid.Cid) (struct) + + { + + c, err := 
cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.GenesisHash: %w", err) + } + + t.GenesisHash = c + + } + return nil +} + +var lengthBufLatencyMessage = []byte{130} + +func (t *LatencyMessage) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufLatencyMessage); err != nil { + return err + } + + // t.TArrival (int64) (int64) + if t.TArrival >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TArrival)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TArrival-1)); err != nil { + return err + } + } + + // t.TSent (int64) (int64) + if t.TSent >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TSent)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TSent-1)); err != nil { + return err + } + } + return nil +} + +func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) (err error) { + *t = LatencyMessage{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.TArrival (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.TArrival = int64(extraI) + } + // 
t.TSent (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.TSent = int64(extraI) + } + return nil +} diff --git a/venus-shared/libp2p/hello/hello.go b/venus-shared/libp2p/hello/hello.go new file mode 100644 index 0000000000..e62af590f5 --- /dev/null +++ b/venus-shared/libp2p/hello/hello.go @@ -0,0 +1,28 @@ +package hello + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" +) + +var ErrBadGenesis = fmt.Errorf("bad genesis block") + +const ProtocolID = "/fil/hello/1.0.0" + +// GreetingMessage is the data structure of a single message in the hello protocol. +type GreetingMessage struct { + HeaviestTipSet []cid.Cid + HeaviestTipSetHeight abi.ChainEpoch + HeaviestTipSetWeight big.Int + GenesisHash cid.Cid +} + +// LatencyMessage is written in response to a hello message for measuring peer +// latency. 
+type LatencyMessage struct { + TArrival int64 + TSent int64 +} diff --git a/venus-shared/libp2p/hello/hello_test.go b/venus-shared/libp2p/hello/hello_test.go new file mode 100644 index 0000000000..844048fc64 --- /dev/null +++ b/venus-shared/libp2p/hello/hello_test.go @@ -0,0 +1,81 @@ +package hello + +import ( + "bytes" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/big" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestGreetingMessage(t *testing.T) { + tf.UnitTest(t) + var buf bytes.Buffer + sliceLen := 5 + + for i := 0; i < 32; i++ { + var src, dst GreetingMessage + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + }, + + ProvideOpts: []interface{}{ + testutil.WithSliceLen(sliceLen), + testutil.PositiveBigProvider(), + }, + + Provided: func() { + require.Len(t, src.HeaviestTipSet, sliceLen, "HeaviestTipSet length") + require.True(t, src.HeaviestTipSetWeight.GreaterThan(big.Zero()), "positive HeaviestTipSetWeight") + require.NotEqual(t, src.GenesisHash, cid.Undef, "GenesisHash") + }, + + Finished: func() { + require.Equal(t, src, dst, "from src to dst through cbor") + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} + +func TestLatencyMessage(t *testing.T) { + tf.UnitTest(t) + var buf bytes.Buffer + + for i := 0; i < 32; i++ { + var src, dst LatencyMessage + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + }, + + ProvideOpts: []interface{}{ + testutil.IntRangedProvider(100, 200), + }, + + Provided: func() { + require.GreaterOrEqual(t, src.TArrival, int64(100), "LatencyMessage.TArrival min") + require.Less(t, src.TArrival, int64(200), "LatencyMessage.TArrival max") + + require.GreaterOrEqual(t, src.TSent, int64(100), 
"LatencyMessage.TSent min") + require.Less(t, src.TSent, int64(200), "LatencyMessage.TSent max") + }, + + Finished: func() { + require.Equal(t, src, dst, "from src to dst through cbor") + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/libp2p/peer_mgr.go b/venus-shared/libp2p/peer_mgr.go new file mode 100644 index 0000000000..41d4c3fb90 --- /dev/null +++ b/venus-shared/libp2p/peer_mgr.go @@ -0,0 +1,27 @@ +package libp2p + +import ( + "context" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +type FilPeerEvtType int + +const ( + AddFilPeerEvt FilPeerEvtType = iota + RemoveFilPeerEvt +) + +type FilPeerEvent struct { + Type FilPeerEvtType + ID peer.ID +} + +type PeerManager interface { + AddFilecoinPeer(ctx context.Context, p peer.ID) + GetPeerLatency(ctx context.Context, p peer.ID) (time.Duration, bool) + SetPeerLatency(ctx context.Context, p peer.ID, latency time.Duration) + Disconnect(ctx context.Context, p peer.ID) +} diff --git a/venus-shared/localstore/chain.go b/venus-shared/localstore/chain.go new file mode 100644 index 0000000000..2c5b118138 --- /dev/null +++ b/venus-shared/localstore/chain.go @@ -0,0 +1,32 @@ +package localstore + +import ( + "context" + + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +type TipSetLoader interface { + GetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +type MessageLoader interface { + ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) + + LoadMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.Message, error) + LoadSignedMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.SignedMessage, error) +} + +type ChainLoader interface { + TipSetLoader + MessageLoader +} + +type FullTipSetLoader interface { + LoadFullTipSet(ctx context.Context, tsk types.TipSetKey) (*types.FullTipSet, error) +} + +type FullTipSetStorer interface { + StoreFullTipSet(ctx context.Context, fb 
*types.FullTipSet) error +} diff --git a/venus-shared/logging/logger.go b/venus-shared/logging/logger.go new file mode 100644 index 0000000000..38fccea19c --- /dev/null +++ b/venus-shared/logging/logger.go @@ -0,0 +1,35 @@ +package logging + +import ( + "context" + + logging "github.com/ipfs/go-log" + "go.uber.org/zap" +) + +type contextKey string + +var ctxKey contextKey = "logger" + +type ( + EventLogger = logging.ZapEventLogger + TaggedLogger = zap.SugaredLogger +) + +var New = logging.Logger + +func ContextWithLogger(parent context.Context, l *TaggedLogger) context.Context { + return context.WithValue(parent, ctxKey, l) +} + +func LoggerFromContext(ctx context.Context, fallback *EventLogger) *TaggedLogger { + val := ctx.Value(ctxKey) + if val != nil { + l, ok := val.(*TaggedLogger) + if ok && l != nil { + return l + } + } + + return &fallback.SugaredLogger +} diff --git a/venus-shared/testutil/cbor_basic.go b/venus-shared/testutil/cbor_basic.go new file mode 100644 index 0000000000..4686fd5b1f --- /dev/null +++ b/venus-shared/testutil/cbor_basic.go @@ -0,0 +1,45 @@ +package testutil + +import ( + "bytes" + "testing" + + "github.com/filecoin-project/go-state-types/cbor" + "github.com/stretchr/testify/require" +) + +type CborErBasicTestOptions struct { + Buf *bytes.Buffer + Prepare func() + ProvideOpts []interface{} + Provided func() + Marshaled func(data []byte) + Finished func() +} + +func CborErBasicTest(t *testing.T, src, dst cbor.Er, opts CborErBasicTestOptions) { + if opts.Prepare != nil { + opts.Prepare() + } + + Provide(t, src, opts.ProvideOpts...) 
+ if opts.Provided != nil { + opts.Provided() + } + + opts.Buf.Reset() + + err := src.MarshalCBOR(opts.Buf) + require.NoErrorf(t, err, "marshal from src of %T", src) + + if opts.Marshaled != nil { + opts.Marshaled(opts.Buf.Bytes()) + } + + err = dst.UnmarshalCBOR(opts.Buf) + require.NoErrorf(t, err, "unmarshal to dst of %T", dst) + + if opts.Finished != nil { + opts.Finished() + } +} diff --git a/venus-shared/testutil/cbor_basic_test.go b/venus-shared/testutil/cbor_basic_test.go new file mode 100644 index 0000000000..37972d83fb --- /dev/null +++ b/venus-shared/testutil/cbor_basic_test.go @@ -0,0 +1,72 @@ +package testutil + +import ( + "bytes" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestCborBasicForAddress(t *testing.T) { + tf.UnitTest(t) + var buf bytes.Buffer + for i := 0; i < 16; i++ { + var src, dst address.Address + opt := CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, address.Undef, "empty address") + require.Equal(t, src, dst, "empty cid") + }, + + Provided: func() { + require.NotEqual(t, src, dst, "address value provided") + }, + + Marshaled: func(b []byte) { + t.Logf("marshaled callback called with %d bytes", len(b)) + }, + + Finished: func() { + require.Equal(t, src, dst) + require.NotEqual(t, src, address.Undef, "must not be address.Undef") + }, + } + + CborErBasicTest(t, &src, &dst, opt) + } +} + +func TestCborBasicForIDAddress(t *testing.T) { + tf.UnitTest(t) + var buf bytes.Buffer + for i := 0; i < 16; i++ { + var src, dst address.Address + opt := CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, address.Undef, "empty address") + require.Equal(t, src, dst, "empty cid") + }, + + ProvideOpts: []interface{}{ + IDAddressProvider(), + }, + + Provided: func() { + require.NotEqual(t, src, dst, "address value provided") + require.Equal(t, src.Protocol(), 
address.ID, "must be id address") + }, + + Finished: func() { + require.Equal(t, src, dst) + require.NotEqual(t, src, address.Undef, "must not be address.Undef") + }, + } + + CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/testutil/value_provdier_chain_test.go b/venus-shared/testutil/value_provdier_chain_test.go new file mode 100644 index 0000000000..6f709edfb5 --- /dev/null +++ b/venus-shared/testutil/value_provdier_chain_test.go @@ -0,0 +1,135 @@ +package testutil + +import ( + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +func TestDefaultCid(t *testing.T) { + tf.UnitTest(t) + var c cid.Cid + Provide(t, &c) + require.NotEqual(t, cid.Undef, c) +} + +func TestDefaultCidSlice(t *testing.T) { + tf.UnitTest(t) + cs := make([]cid.Cid, 16) + Provide(t, &cs) + for ci := range cs { + require.NotEqual(t, cid.Undef, cs[ci]) + } +} + +func TestDefaultAddresses(t *testing.T) { + tf.UnitTest(t) + addrs := make([]address.Address, 256) + protos := map[address.Protocol]struct{}{} + Provide(t, &addrs) + for i := range addrs { + protos[addrs[i].Protocol()] = struct{}{} + } + + require.True(t, len(protos) == 4) +} + +func TestDefaultIDAddresses(t *testing.T) { + tf.UnitTest(t) + addrs := make([]address.Address, 256) + protos := map[address.Protocol]struct{}{} + Provide(t, &addrs, IDAddressProvider()) + for i := range addrs { + protos[addrs[i].Protocol()] = struct{}{} + } + + require.True(t, len(protos) == 1) +} + +func TestDefaultBigs(t *testing.T) { + tf.UnitTest(t) + bigs := make([]big.Int, 256) + Provide(t, &bigs) + hasPositive := false + hasNegative := false + for bi := range bigs { + require.NotNil(t, bigs[bi].Int) + hasPositive = hasPositive || 
bigs[bi].GreaterThan(bigZero) + hasNegative = hasNegative || bigs[bi].LessThan(bigZero) + } + + require.True(t, hasPositive) + require.True(t, hasNegative) +} + +func TestPositiveBigs(t *testing.T) { + tf.UnitTest(t) + bigs := make([]big.Int, 256) + Provide(t, &bigs, PositiveBigProvider()) + for bi := range bigs { + require.NotNil(t, bigs[bi].Int) + require.True(t, bigs[bi].GreaterThan(bigZero)) + } +} + +func TestNegativeBigs(t *testing.T) { + tf.UnitTest(t) + + bigs := make([]big.Int, 256) + Provide(t, &bigs, NegativeBigProvider()) + for bi := range bigs { + require.NotNil(t, bigs[bi].Int) + require.True(t, bigs[bi].LessThan(bigZero)) + } +} + +func TestDefaultSigTypes(t *testing.T) { + tf.UnitTest(t) + + sigtyps := make([]crypto.SigType, 256) + Provide(t, &sigtyps) + typs := map[crypto.SigType]struct{}{} + for i := range sigtyps { + typs[sigtyps[i]] = struct{}{} + } + + require.True(t, len(typs) == 2) +} + +func TestDefaultPaddedSize(t *testing.T) { + tf.UnitTest(t) + + psizes := make([]abi.PaddedPieceSize, 32) + Provide(t, &psizes) + for i := range psizes { + require.NoErrorf(t, psizes[i].Validate(), "invalid padded size %d", psizes[i]) + } +} + +func TestFixedPaddedSize(t *testing.T) { + tf.UnitTest(t) + + shifts := make([]int, 32) + Provide(t, &shifts, IntRangedProvider(1, 50)) + for si := range shifts { + var ps abi.PaddedPieceSize + Provide(t, &ps, PaddedSizeFixedProvider(128<= 1, got %d", size) + } + + r.opt.sliceLen = &size + } +} + +func Provide(t *testing.T, dst interface{}, options ...interface{}) { + rval := reflect.ValueOf(dst) + if kind := rval.Kind(); kind != reflect.Ptr { + t.Fatalf("value provider can only be applied on to poniters, got %T", dst) + } + + reg := defaultValueProviderRegistry + if len(options) > 0 { + reg = defaultValueProviderRegistry.clone() + for fni := range options { + fn := options[fni] + if opt, ok := fn.(OptionFunc); ok { + opt(t, reg) + continue + } + + if err := reg.register(fn); err != nil { + t.Fatalf("register specified 
provider %T for %T: %s", fn, dst, err) + } + } + } + + reg.provide(t, rval.Elem()) +} + +func MustRegisterDefaultValueProvier(fn interface{}) { + if err := RegisterDefaultValueProvier(fn); err != nil { + panic(fmt.Errorf("register default value provider %T: %w", fn, err)) + } +} + +func RegisterDefaultValueProvier(fn interface{}) error { + return defaultValueProviderRegistry.register(fn) +} + +var defaultValueProviderRegistry = &valueProviderRegistry{ + providers: map[reflect.Type]reflect.Value{}, +} + +type valueProviderRegistry struct { + sync.RWMutex + providers map[reflect.Type]reflect.Value + + opt struct { + sliceLen *int + } +} + +func (r *valueProviderRegistry) clone() *valueProviderRegistry { + cloned := &valueProviderRegistry{ + providers: map[reflect.Type]reflect.Value{}, + opt: r.opt, + } + + r.Lock() + for rt, rv := range r.providers { + cloned.providers[rt] = rv + } + r.Unlock() + + return cloned +} + +func (r *valueProviderRegistry) register(fn interface{}) error { + rval := reflect.ValueOf(fn) + rtyp := rval.Type() + + if rkind := rtyp.Kind(); rkind != reflect.Func { + return fmt.Errorf("expected provider func, got %s", rkind) + } + + if numIn := rtyp.NumIn(); numIn != 1 { + return fmt.Errorf("expected provider func with 1 in, got %d", numIn) + } + + if numOut := rtyp.NumOut(); numOut != 1 { + return fmt.Errorf("expected provider func with 1 out, got %d", numOut) + } + + if inTyp := rtyp.In(0); inTyp != typeT { + return fmt.Errorf("expected provider's in type to be *testing.T, got %s", inTyp) + } + + outTyp := rtyp.Out(0) + r.Lock() + r.providers[outTyp] = rval + r.Unlock() + + return nil +} + +func (r *valueProviderRegistry) has(want reflect.Type) bool { + r.RLock() + _, has := r.providers[want] + r.RUnlock() + + return has +} + +func (r *valueProviderRegistry) provide(t *testing.T, rval reflect.Value) { + rtyp := rval.Type() + if !rval.CanSet() { + return + } + + r.RLock() + provider, ok := r.providers[rtyp] + r.RUnlock() + if ok { + ret := 
provider.Call([]reflect.Value{reflect.ValueOf(t)}) + rval.Set(ret[0]) + return + } + + rkind := rtyp.Kind() + switch rkind { + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64, + reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64, + reflect.Float32, + reflect.Float64: + + r.RLock() + provider, ok = r.providers[typeInt] + r.RUnlock() + } + + if ok { + ret := provider.Call([]reflect.Value{reflect.ValueOf(t)}) + rval.Set(ret[0].Convert(rtyp)) + return + } + + r.RLock() + var convertor reflect.Value + for pt := range r.providers { + if pt.ConvertibleTo(rtyp) { + convertor = r.providers[pt] + break + } + } + r.RUnlock() + + if convertor.IsValid() { + ret := convertor.Call([]reflect.Value{reflect.ValueOf(t)}) + rval.Set(ret[0].Convert(rtyp)) + return + } + + switch rkind { + case reflect.Slice: + if rval.IsNil() || rval.Len() == 0 { + size := 1 + if r.opt.sliceLen != nil { + size = *r.opt.sliceLen + } + + rval.Set(reflect.MakeSlice(rtyp, size, size)) + } + + for i := 0; i < rval.Len(); i++ { + r.provide(t, rval.Index(i)) + } + + return + + case reflect.Array: + for i := 0; i < rval.Len(); i++ { + r.provide(t, rval.Index(i)) + } + + return + + case reflect.Ptr: + if rval.IsNil() { + rval.Set(reflect.New(rtyp.Elem())) + } + + r.provide(t, rval.Elem()) + return + + case reflect.Struct: + for i := 0; i < rval.NumField(); i++ { + fieldVal := rval.Field(i) + r.provide(t, fieldVal) + } + + return + } +} diff --git a/venus-shared/testutil/value_provider_registry_test.go b/venus-shared/testutil/value_provider_registry_test.go new file mode 100644 index 0000000000..3a8b69b1c3 --- /dev/null +++ b/venus-shared/testutil/value_provider_registry_test.go @@ -0,0 +1,26 @@ +package testutil + +import ( + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/stretchr/testify/require" +) + +func TestInvalidProviders(t *testing.T) { + tf.UnitTest(t) + + vals := []interface{}{ + int(0), 
+ float32(0), + func() {}, + func(t *testing.T) {}, + func() int { return 1 }, + func(int) int { return 1 }, + } + + for ri := range vals { + err := defaultValueProviderRegistry.register(vals[ri]) + require.Errorf(t, err, "value #%d", ri) + } +} diff --git a/venus-shared/testutil/value_set_n_reset.go b/venus-shared/testutil/value_set_n_reset.go new file mode 100644 index 0000000000..b9a82047d3 --- /dev/null +++ b/venus-shared/testutil/value_set_n_reset.go @@ -0,0 +1,41 @@ +package testutil + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/require" +) + +func ValueSetNReset(t *testing.T, name string, onSet func(), onReset func(), vsets ...interface{}) { + psize := len(vsets) + require.Greaterf(t, psize, 0, "value sets should not be empty for case %s", name) + require.Truef(t, psize%2 == 0, "params count should be odd for case %s", name) + + ptrs := make([]reflect.Value, psize/2) + originalVals := make([]reflect.Value, psize/2) + for i := 0; i < psize/2; i++ { + pi := i * 2 + ptr := reflect.ValueOf(vsets[pi]) + require.Equalf(t, ptr.Type().Kind(), reflect.Ptr, "#%d param should be pointer to the target value for case %s", i, name) + ptrs[i] = ptr + + originVal := reflect.New(ptr.Elem().Type()) + originVal.Elem().Set(ptr.Elem()) + + originalVals[i] = originVal + ptr.Elem().Set(reflect.ValueOf(vsets[pi+1])) + } + + if onSet != nil { + onSet() + } + + for i := range ptrs { + ptrs[i].Elem().Set(originalVals[i].Elem()) + } + + if onReset != nil { + onReset() + } +} diff --git a/venus-shared/testutil/value_set_n_reset_test.go b/venus-shared/testutil/value_set_n_reset_test.go new file mode 100644 index 0000000000..71f2618d9b --- /dev/null +++ b/venus-shared/testutil/value_set_n_reset_test.go @@ -0,0 +1,22 @@ +package testutil + +import ( + "fmt" + "math/rand" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/stretchr/testify/require" +) + +func TestValueSetNReset(t *testing.T) { + tf.UnitTest(t) + + for i := 0; 
i < 32; i++ { + originVal := rand.Int() + newVal := originVal + 1 + + target := originVal + ValueSetNReset(t, fmt.Sprintf("set %d to %d", originVal, newVal), func() { require.Equal(t, target, newVal, "after set") }, func() { require.Equal(t, target, originVal, "after reset") }, &target, newVal) + } +} diff --git a/venus-shared/types/actor.go b/venus-shared/types/actor.go new file mode 100644 index 0000000000..5bda98e101 --- /dev/null +++ b/venus-shared/types/actor.go @@ -0,0 +1,21 @@ +package types + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/internal" + "github.com/ipfs/go-cid" +) + +var ErrActorNotFound = internal.ErrActorNotFound + +type Actor = internal.Actor + +// NewActor constructs a new actor. +func NewActor(code cid.Cid, balance abi.TokenAmount, head cid.Cid) *Actor { + return &Actor{ + Code: code, + Nonce: 0, + Balance: balance, + Head: head, + } +} diff --git a/venus-shared/types/actor_test.go b/venus-shared/types/actor_test.go new file mode 100644 index 0000000000..4a2fccadfb --- /dev/null +++ b/venus-shared/types/actor_test.go @@ -0,0 +1,38 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestActorBasic(t *testing.T) { + tf.UnitTest(t) + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst Actor + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + }, + + Provided: func() { + require.NotEqual(t, src.Code, cid.Undef) + require.NotEqual(t, src.Head, cid.Undef) + }, + + Finished: func() { + require.Equal(t, src, dst, "from src to dst through cbor") + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/types/api_types.go b/venus-shared/types/api_types.go new file 
mode 100644 index 0000000000..63b96576e9 --- /dev/null +++ b/venus-shared/types/api_types.go @@ -0,0 +1,409 @@ +package types + +import ( + "fmt" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/actors/builtin/power" +) + +type ComputeStateOutput struct { + Root cid.Cid + Trace []*InvocResult +} + +type HeadChangeType string + +// HeadChangeTopic is the topic used to publish new heads. +const HeadChangeTopic = "headchange" + +const ( + HCRevert HeadChangeType = "revert" + HCApply HeadChangeType = "apply" + HCCurrent HeadChangeType = "current" +) + +type HeadChange struct { + Type HeadChangeType + Val *TipSet +} + +type ObjStat struct { + Size uint64 + Links uint64 +} + +// ChainMessage is an on-chain message with its block and receipt. 
+type ChainMessage struct { //nolint + TS *TipSet + Message *Message + Block *BlockHeader + Receipt *MessageReceipt +} + +// BlsMessages[x].cid = Cids[x] +// SecpkMessages[y].cid = Cids[BlsMessages.length + y] +type BlockMessages struct { + BlsMessages []*Message + SecpkMessages []*SignedMessage + Cids []cid.Cid +} + +type MessageCID struct { + Cid cid.Cid + Message *Message +} + +type ActorState struct { + Balance BigInt + Code cid.Cid + State interface{} +} + +type NetworkName string + +const ( + NetworkNameMain NetworkName = "mainnet" + NetworkNameCalibration NetworkName = "calibrationnet" + NetworkNameButterfly NetworkName = "butterflynet" + NetworkNameInterop NetworkName = "interopnet" + NetworkNameIntegration NetworkName = "integrationnet" + NetworkNameForce NetworkName = "forcenet" +) + +type NetworkType int + +const ( + NetworkDefault NetworkType = 0 + NetworkMainnet NetworkType = 0x1 + Network2k NetworkType = 0x2 + NetworkDebug NetworkType = 0x3 + NetworkCalibnet NetworkType = 0x4 + NetworkNerpa NetworkType = 0x5 + NetworkInterop NetworkType = 0x6 + NetworkForce NetworkType = 0x7 + NetworkButterfly NetworkType = 0x8 + + Integrationnet NetworkType = 0x30 +) + +type PubsubScore struct { + ID peer.ID + Score *pubsub.PeerScoreSnapshot +} + +type Partition struct { + AllSectors bitfield.BitField + FaultySectors bitfield.BitField + RecoveringSectors bitfield.BitField + LiveSectors bitfield.BitField + ActiveSectors bitfield.BitField +} + +type Fault struct { + Miner address.Address + Epoch abi.ChainEpoch +} + +type MessageMatch struct { + To address.Address + From address.Address +} + +// SectorInfo provides information about a sector construction +type SectorInfo struct { + Size abi.SectorSize + MaxPieceSize abi.UnpaddedPieceSize +} + +type ProtocolParams struct { + Network string + BlockTime time.Duration + SupportedSectors []SectorInfo +} + +type Deadline struct { + PostSubmissions bitfield.BitField + DisputableProofCount uint64 +} + +var MarketBalanceNil = 
MarketBalance{} + +type MarketDeal struct { + Proposal DealProposal + State DealState +} + +type MinerPower struct { + MinerPower power.Claim + TotalPower power.Claim + HasMinPower bool +} + +type MinerSectors struct { + // Live sectors that should be proven. + Live uint64 + // Sectors actively contributing to power. + Active uint64 + // Sectors with failed proofs. + Faulty uint64 +} + +type MarketBalance struct { + Escrow big.Int + Locked big.Int +} + +type DealCollateralBounds struct { + Min abi.TokenAmount + Max abi.TokenAmount +} + +type MsgLookup struct { + Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed + Receipt MessageReceipt + ReturnDec interface{} + TipSet TipSetKey + Height abi.ChainEpoch +} + +type MiningBaseInfo struct { //nolint + MinerPower abi.StoragePower + NetworkPower abi.StoragePower + Sectors []builtin.ExtendedSectorInfo + WorkerKey address.Address + SectorSize abi.SectorSize + PrevBeaconEntry BeaconEntry + BeaconEntries []BeaconEntry + EligibleForMining bool +} + +type BlockTemplate struct { + Miner address.Address + Parents TipSetKey + Ticket *Ticket + Eproof *ElectionProof + BeaconValues []BeaconEntry + Messages []*SignedMessage + Epoch abi.ChainEpoch + Timestamp uint64 + WinningPoStProof []builtin.PoStProof +} + +type EstimateMessage struct { + Msg *Message + Spec *MessageSendSpec +} + +type EstimateResult struct { + Msg *Message + Err string +} + +type MessageSendSpec struct { + MaxFee abi.TokenAmount + GasOverEstimation float64 + GasOverPremium float64 +} + +// Version provides various build-time information +type Version struct { + Version string + + // APIVersion is a binary encoded semver version of the remote implementing + // this api + // + APIVersion APIVersion +} + +type ChannelAvailableFunds struct { + // Channel is the address of the channel + Channel *address.Address + // From is the from address of the channel (channel creator) + From address.Address + // To is the to 
address of the channel + To address.Address + // ConfirmedAmt is the total amount of funds that have been confirmed on-chain for the channel + ConfirmedAmt BigInt + // PendingAmt is the amount of funds that are pending confirmation on-chain + PendingAmt BigInt + // NonReservedAmt is part of ConfirmedAmt that is available for use (e.g. when the payment channel was pre-funded) + NonReservedAmt BigInt + // PendingAvailableAmt is the amount of funds that are pending confirmation on-chain that will become available once confirmed + PendingAvailableAmt BigInt + // PendingWaitSentinel can be used with PaychGetWaitReady to wait for + // confirmation of pending funds + PendingWaitSentinel *cid.Cid + // QueuedAmt is the amount that is queued up behind a pending request + QueuedAmt BigInt + // VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain + // and in the local datastore + VoucherReedeemedAmt BigInt +} + +type SyncState struct { + ActiveSyncs []ActiveSync + + VMApplied uint64 +} + +// just compatible code lotus +type SyncStateStage int + +const ( + StageIdle = SyncStateStage(iota) + StageHeaders + StagePersistHeaders + StageMessages + StageSyncComplete + StageSyncErrored + StageFetchingMessages +) + +func (v SyncStateStage) String() string { + switch v { + case StageHeaders: + return "header sync" + case StagePersistHeaders: + return "persisting headers" + case StageMessages: + return "message sync" + case StageSyncComplete: + return "complete" + case StageSyncErrored: + return "error" + case StageFetchingMessages: + return "fetching messages" + default: + return fmt.Sprintf("", v) + } +} + +type ActiveSync struct { + WorkerID uint64 + Base *TipSet + Target *TipSet + + Stage SyncStateStage + Height abi.ChainEpoch + + Start time.Time + End time.Time + Message string +} + +type Target struct { + State SyncStateStage + Base *TipSet + Current *TipSet + Start time.Time + End time.Time + Err error + ChainInfo +} + +type TargetTracker struct { + History 
[]*Target + Buckets []*Target +} + +type MsgGasCost struct { + Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed + GasUsed abi.TokenAmount + BaseFeeBurn abi.TokenAmount + OverEstimationBurn abi.TokenAmount + MinerPenalty abi.TokenAmount + MinerTip abi.TokenAmount + Refund abi.TokenAmount + TotalCost abi.TokenAmount +} + +type InvocResult struct { + MsgCid cid.Cid + Msg *Message + MsgRct *MessageReceipt + GasCost MsgGasCost + ExecutionTrace ExecutionTrace + Error string + Duration time.Duration +} + +type MinerInfo struct { + Owner address.Address // Must be an ID-address. + Worker address.Address // Must be an ID-address. + NewWorker address.Address // Must be an ID-address. + ControlAddresses []address.Address // Must be an ID-addresses. + WorkerChangeEpoch abi.ChainEpoch + PeerId *peer.ID // nolint + Multiaddrs []abi.Multiaddrs + WindowPoStProofType abi.RegisteredPoStProof + SectorSize abi.SectorSize + WindowPoStPartitionSectors uint64 + ConsensusFaultElapsed abi.ChainEpoch + Beneficiary address.Address + BeneficiaryTerm *BeneficiaryTerm + PendingBeneficiaryTerm *PendingBeneficiaryChange +} + +type NetworkParams struct { + NetworkName NetworkName + BlockDelaySecs uint64 + ConsensusMinerMinPower abi.StoragePower + SupportedProofTypes []abi.RegisteredSealProof + PreCommitChallengeDelay abi.ChainEpoch + ForkUpgradeParams ForkUpgradeParams +} + +type ForkUpgradeParams struct { + UpgradeSmokeHeight abi.ChainEpoch + UpgradeBreezeHeight abi.ChainEpoch + UpgradeIgnitionHeight abi.ChainEpoch + UpgradeLiftoffHeight abi.ChainEpoch + UpgradeAssemblyHeight abi.ChainEpoch + UpgradeRefuelHeight abi.ChainEpoch + UpgradeTapeHeight abi.ChainEpoch + UpgradeKumquatHeight abi.ChainEpoch + BreezeGasTampingDuration abi.ChainEpoch + UpgradeCalicoHeight abi.ChainEpoch + UpgradePersianHeight abi.ChainEpoch + UpgradeOrangeHeight abi.ChainEpoch + UpgradeClausHeight abi.ChainEpoch + UpgradeTrustHeight abi.ChainEpoch + 
UpgradeNorwegianHeight abi.ChainEpoch + UpgradeTurboHeight abi.ChainEpoch + UpgradeHyperdriveHeight abi.ChainEpoch + UpgradeChocolateHeight abi.ChainEpoch + UpgradeOhSnapHeight abi.ChainEpoch + UpgradeSkyrHeight abi.ChainEpoch + UpgradeSharkHeight abi.ChainEpoch +} + +type NodeStatus struct { + SyncStatus NodeSyncStatus + PeerStatus NodePeerStatus + ChainStatus NodeChainStatus +} + +type NodeSyncStatus struct { + Epoch uint64 + Behind uint64 +} + +type NodePeerStatus struct { + PeersToPublishMsgs int + PeersToPublishBlocks int +} + +type NodeChainStatus struct { + BlocksPerTipsetLast100 float64 + BlocksPerTipsetLastFinality float64 +} diff --git a/venus-shared/types/beacon.go b/venus-shared/types/beacon.go new file mode 100644 index 0000000000..51b07b0940 --- /dev/null +++ b/venus-shared/types/beacon.go @@ -0,0 +1,6 @@ +package types + +type BeaconEntry struct { + Round uint64 + Data []byte +} diff --git a/venus-shared/types/beacon_test.go b/venus-shared/types/beacon_test.go new file mode 100644 index 0000000000..1832d73450 --- /dev/null +++ b/venus-shared/types/beacon_test.go @@ -0,0 +1,43 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestBeaconEntryBasic(t *testing.T) { + tf.UnitTest(t) + dataLen := 32 + + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst BeaconEntry + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + require.Nil(t, src.Data) + }, + + ProvideOpts: []interface{}{ + testutil.BytesFixedProvider(dataLen), + }, + + Provided: func() { + require.Len(t, src.Data, dataLen) + }, + + Finished: func() { + require.Equal(t, src, dst, "from src to dst through cbor") + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/types/bigint.go 
b/venus-shared/types/bigint.go new file mode 100644 index 0000000000..9c301be04e --- /dev/null +++ b/venus-shared/types/bigint.go @@ -0,0 +1,22 @@ +package types + +import ( + "github.com/filecoin-project/venus/venus-shared/internal" +) + +var EmptyInt = internal.EmptyInt + +type BigInt = internal.BigInt + +var ( + NewInt = internal.NewInt + BigFromBytes = internal.BigFromBytes + BigFromString = internal.BigFromString + BigMul = internal.BigMul + BigDiv = internal.BigDiv + BigDivFloat = internal.BigDivFloat + BigMod = internal.BigMod + BigAdd = internal.BigAdd + BigSub = internal.BigSub + BigCmp = internal.BigCmp +) diff --git a/venus-shared/types/bigint_fil.go b/venus-shared/types/bigint_fil.go new file mode 100644 index 0000000000..14d5ad48c3 --- /dev/null +++ b/venus-shared/types/bigint_fil.go @@ -0,0 +1,22 @@ +package types + +import ( + "github.com/filecoin-project/venus/venus-shared/internal" +) + +var ZeroFIL = internal.ZeroFIL + +type FIL = internal.FIL + +var ( + AttoFil = internal.AttoFil + FemtoFil = internal.FemtoFil + PicoFil = internal.PicoFil + NanoFil = internal.NanoFil +) + +var ( + ParseFIL = internal.ParseFIL + MustParseFIL = internal.MustParseFIL + FromFil = internal.FromFil +) diff --git a/venus-shared/types/bigint_fil_test.go b/venus-shared/types/bigint_fil_test.go new file mode 100644 index 0000000000..068a30e24b --- /dev/null +++ b/venus-shared/types/bigint_fil_test.go @@ -0,0 +1,239 @@ +package types + +import ( + "encoding/json" + "fmt" + "math/big" + "strings" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/filecoin-project/venus/venus-shared/types/params" + "github.com/stretchr/testify/require" +) + +func TestFilRoundTrip(t *testing.T) { + tf.UnitTest(t) + testValues := []string{ + "0 FIL", "1 FIL", "1.001 FIL", "100.10001 FIL", "101100 FIL", "5000.01 FIL", "5000 FIL", + strings.Repeat("1", 50) + " FIL", + } + + for _, v := range 
testValues { + fval := MustParseFIL(v) + + if fval.String() != v { + t.Fatal("mismatch in values!", v, fval.String()) + } + + text, err := fval.MarshalText() + require.NoError(t, err, "marshal text for fval") + + fval2 := FIL(NewInt(0)) + err = fval2.UnmarshalText(text) + require.NoError(t, err, "unmarshal text for fval2") + require.True(t, BigInt{Int: fval.Int}.Equals(BigInt{Int: fval2.Int})) + } +} + +func TestParseAttoFils(t *testing.T) { + tf.UnitTest(t) + testValues := []string{ + "0 aFIL", "1 aFIL", "1 aFIL", "100 aFIL", "101100 aFIL", "5000 aFIL", + "0 attoFIL", "1 attoFIL", "1 attoFIL", "100 attoFIL", "101100 attoFIL", "5000 attoFIL", + } + + for _, v := range testValues { + fval := MustParseFIL(v) + + text, err := fval.MarshalText() + require.NoError(t, err, "marshal text for fval") + + fval2 := FIL(NewInt(0)) + err = fval2.UnmarshalText(text) + require.NoError(t, err, "unmarshal text for fval2") + require.True(t, BigInt{Int: fval.Int}.Equals(BigInt{Int: fval2.Int})) + } +} + +func TestInvalidFILString(t *testing.T) { + tf.UnitTest(t) + testValues := []string{ + "0 nFIL", "1 nFIL", "1.001 nFIL", "100.10001 nFIL", "101100 nFIL", "5000.01 nFIL", "5000 nFIL", + "1.001.1 FIL", + strings.Repeat("1", 51) + " FIL", + } + + for _, v := range testValues { + _, err := ParseFIL(v) + require.Errorf(t, err, "invalid fil string %s", v) + } +} + +func TestBigFromFIL(t *testing.T) { + tf.UnitTest(t) + ratio := NewInt(params.FilecoinPrecision) + + nums := make([]uint64, 32) + testutil.Provide(t, &nums, testutil.IntRangedProvider(10, 1000)) + + for i := range nums { + fval := FromFil(nums[i]) + require.True(t, fval.GreaterThan(ZeroFIL), "greater than zero") + require.True(t, ratio.Equals(BigDiv(fval, NewInt(nums[i]))), "fil precision") + } +} + +func TestFilShort(t *testing.T) { + tf.UnitTest(t) + for _, s := range []struct { + fil string + expect string + }{ + {fil: "1", expect: "1 FIL"}, + {fil: "1.1", expect: "1.1 FIL"}, + {fil: "12", expect: "12 FIL"}, + {fil: "123", 
expect: "123 FIL"}, + {fil: "123456", expect: "123456 FIL"}, + {fil: "123.23", expect: "123.23 FIL"}, + {fil: "123456.234", expect: "123456.234 FIL"}, + {fil: "123456.2341234", expect: "123456.234 FIL"}, + {fil: "123456.234123445", expect: "123456.234 FIL"}, + + {fil: "0.1", expect: "100 mFIL"}, + {fil: "0.01", expect: "10 mFIL"}, + {fil: "0.001", expect: "1 mFIL"}, + + {fil: "0.0001", expect: "100 μFIL"}, + {fil: "0.00001", expect: "10 μFIL"}, + {fil: "0.000001", expect: "1 μFIL"}, + + {fil: "0.0000001", expect: "100 nFIL"}, + {fil: "0.00000001", expect: "10 nFIL"}, + {fil: "0.000000001", expect: "1 nFIL"}, + + {fil: "0.0000000001", expect: "100 pFIL"}, + {fil: "0.00000000001", expect: "10 pFIL"}, + {fil: "0.000000000001", expect: "1 pFIL"}, + + {fil: "0.0000000000001", expect: "100 fFIL"}, + {fil: "0.00000000000001", expect: "10 fFIL"}, + {fil: "0.000000000000001", expect: "1 fFIL"}, + + {fil: "0.0000000000000001", expect: "100 aFIL"}, + {fil: "0.00000000000000001", expect: "10 aFIL"}, + {fil: "0.000000000000000001", expect: "1 aFIL"}, + + {fil: "0.0000012", expect: "1.2 μFIL"}, + {fil: "0.00000123", expect: "1.23 μFIL"}, + {fil: "0.000001234", expect: "1.234 μFIL"}, + {fil: "0.0000012344", expect: "1.234 μFIL"}, + {fil: "0.00000123444", expect: "1.234 μFIL"}, + + {fil: "0.0002212", expect: "221.2 μFIL"}, + {fil: "0.00022123", expect: "221.23 μFIL"}, + {fil: "0.000221234", expect: "221.234 μFIL"}, + {fil: "0.0002212344", expect: "221.234 μFIL"}, + {fil: "0.00022123444", expect: "221.234 μFIL"}, + + {fil: "-1", expect: "-1 FIL"}, + {fil: "-1.1", expect: "-1.1 FIL"}, + {fil: "-12", expect: "-12 FIL"}, + {fil: "-123", expect: "-123 FIL"}, + {fil: "-123456", expect: "-123456 FIL"}, + {fil: "-123.23", expect: "-123.23 FIL"}, + {fil: "-123456.234", expect: "-123456.234 FIL"}, + {fil: "-123456.2341234", expect: "-123456.234 FIL"}, + {fil: "-123456.234123445", expect: "-123456.234 FIL"}, + + {fil: "-0.1", expect: "-100 mFIL"}, + {fil: "-0.01", expect: "-10 mFIL"}, + 
{fil: "-0.001", expect: "-1 mFIL"}, + + {fil: "-0.0001", expect: "-100 μFIL"}, + {fil: "-0.00001", expect: "-10 μFIL"}, + {fil: "-0.000001", expect: "-1 μFIL"}, + + {fil: "-0.0000001", expect: "-100 nFIL"}, + {fil: "-0.00000001", expect: "-10 nFIL"}, + {fil: "-0.000000001", expect: "-1 nFIL"}, + + {fil: "-0.0000000001", expect: "-100 pFIL"}, + {fil: "-0.00000000001", expect: "-10 pFIL"}, + {fil: "-0.000000000001", expect: "-1 pFIL"}, + + {fil: "-0.0000000000001", expect: "-100 fFIL"}, + {fil: "-0.00000000000001", expect: "-10 fFIL"}, + {fil: "-0.000000000000001", expect: "-1 fFIL"}, + + {fil: "-0.0000000000000001", expect: "-100 aFIL"}, + {fil: "-0.00000000000000001", expect: "-10 aFIL"}, + {fil: "-0.000000000000000001", expect: "-1 aFIL"}, + + {fil: "-0.0000012", expect: "-1.2 μFIL"}, + {fil: "-0.00000123", expect: "-1.23 μFIL"}, + {fil: "-0.000001234", expect: "-1.234 μFIL"}, + {fil: "-0.0000012344", expect: "-1.234 μFIL"}, + {fil: "-0.00000123444", expect: "-1.234 μFIL"}, + + {fil: "-0.0002212", expect: "-221.2 μFIL"}, + {fil: "-0.00022123", expect: "-221.23 μFIL"}, + {fil: "-0.000221234", expect: "-221.234 μFIL"}, + {fil: "-0.0002212344", expect: "-221.234 μFIL"}, + {fil: "-0.00022123444", expect: "-221.234 μFIL"}, + } { + s := s + t.Run(s.fil, func(t *testing.T) { + f, err := ParseFIL(s.fil) + require.NoError(t, err) + require.Equal(t, s.expect, f.Short()) + }) + } +} + +func TestMarshal(t *testing.T) { + tf.UnitTest(t) + type A struct { + Fil FIL + } + a := A{ + Fil: FIL{Int: big.NewInt(1000000)}, + } + + aBytes, err := json.Marshal(a) + require.NoError(t, err) + + require.Equal(t, aBytes, []byte("{\"Fil\":\"0.000000000001 FIL\"}")) + fmt.Println(string(aBytes)) +} + +func TestUnMarshal(t *testing.T) { + tf.UnitTest(t) + type A struct { + Fil FIL + } + bigFIl, _ := big.NewInt(0).SetString("100000000000000000000", 10) + for _, s := range []struct { + fil string + expect FIL + }{ + { + fil: "{\"Fil\":\"0.000000000001 FIL\"}", + expect: FIL{Int: 
big.NewInt(1000000)}, + }, + { + fil: "{\"Fil\":\"1 FIL\"}", + expect: FIL{Int: big.NewInt(1000000000000000000)}, + }, + { + fil: "{\"Fil\":\"100 FIL\"}", + expect: FIL{Int: bigFIl}, + }, + } { + a := A{} + err := json.Unmarshal([]byte(s.fil), &a) + require.NoError(t, err) + require.Equal(t, a.Fil.String(), s.expect.String()) + } +} diff --git a/venus-shared/types/bigint_test.go b/venus-shared/types/bigint_test.go new file mode 100644 index 0000000000..cb0e8efb4b --- /dev/null +++ b/venus-shared/types/bigint_test.go @@ -0,0 +1,94 @@ +package types + +import ( + "bytes" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/stretchr/testify/require" +) + +func TestBigIntSerializationRoundTrip(t *testing.T) { + tf.UnitTest(t) + testValues := []string{ + "0", "1", "10", "-10", "9999", "12345678901234567891234567890123456789012345678901234567890", + } + + for _, v := range testValues { + bi, err := BigFromString(v) + if err != nil { + t.Fatal(err) + } + + buf := new(bytes.Buffer) + if err := bi.MarshalCBOR(buf); err != nil { + t.Fatal(err) + } + + var out BigInt + if err := out.UnmarshalCBOR(buf); err != nil { + t.Fatal(err) + } + + if BigCmp(out, bi) != 0 { + t.Fatal("failed to round trip BigInt through cbor") + } + + } +} + +func TestBigIntParseErr(t *testing.T) { + tf.UnitTest(t) + testValues := []string{ + "a0", "1b", "10c", "-1d0", "9e999", "f12345678901234567891234567890123456789012345678901234567890", + } + + for _, v := range testValues { + _, err := BigFromString(v) + require.Error(t, err, "from invalid big int string") + } +} + +func TestBigIntCalculating(t *testing.T) { + tf.UnitTest(t) + zero := NewInt(0) + maxProvideAttempts := 8 + for i := 0; i < 32; i++ { + var a, b BigInt + for attempt := 0; ; i++ { + if attempt == maxProvideAttempts { + t.Fatal("unable to get required numbers") + } + + testutil.Provide(t, &a) + testutil.Provide(t, &b) + + if a == 
EmptyInt || b == EmptyInt { + t.Fatal("BigInt not provided") + } + + if !a.Equals(zero) || !b.Equals(zero) { + break + } + } + + sum := BigAdd(a, b) + product := BigMul(a, b) + + require.True(t, BigSub(sum, a).Equals(b)) + require.True(t, BigDiv(product, a).Equals(b)) + + base := a + if base.IsZero() { + base = b + } + + base4 := BigMul(base, NewInt(4)) + require.Equal(t, BigDivFloat(base4, base), 4.0) + require.Equal(t, BigDivFloat(base, base4), 0.25) + + abs := base.Abs() + require.True(t, BigMod(abs, BigAdd(abs, NewInt(1))).Equals(abs)) + } +} diff --git a/venus-shared/types/bigint_unit.go b/venus-shared/types/bigint_unit.go new file mode 100644 index 0000000000..f9687d42a4 --- /dev/null +++ b/venus-shared/types/bigint_unit.go @@ -0,0 +1,34 @@ +package types + +import ( + "fmt" + "math/big" +) + +var byteSizeUnits = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB"} + +func SizeStr(bi BigInt) string { + f, i := unitNumber(bi, byteSizeUnits) + return fmt.Sprintf("%.4g %s", f, byteSizeUnits[i]) +} + +var deciUnits = []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"} + +func DeciStr(bi BigInt) string { + f, i := unitNumber(bi, deciUnits) + return fmt.Sprintf("%.3g %s", f, deciUnits[i]) +} + +func unitNumber(n BigInt, units []string) (float64, int) { + r := new(big.Rat).SetInt(n.Int) + den := big.NewRat(1, 1024) + + var i int + for f, _ := r.Float64(); f >= 1024 && i+1 < len(units); f, _ = r.Float64() { + i++ + r = r.Mul(r, den) + } + + f, _ := r.Float64() + return f, i +} diff --git a/venus-shared/types/bigint_unit_test.go b/venus-shared/types/bigint_unit_test.go new file mode 100644 index 0000000000..49bf13c707 --- /dev/null +++ b/venus-shared/types/bigint_unit_test.go @@ -0,0 +1,62 @@ +package types + +import ( + "math/big" + "math/rand" + "strings" + "testing" + "time" + + "github.com/docker/go-units" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/stretchr/testify/require" +) + +func TestUnitStrs(t *testing.T) 
{ + tf.UnitTest(t) + cases := []struct { + in uint64 + size string + deci string + }{ + {0, "0 B", "0 "}, + {1, "1 B", "1 "}, + {1016, "1016 B", "1.02e+03 "}, + {1024, "1 KiB", "1 Ki"}, + {1000 * 1024, "1000 KiB", "1e+03 Ki"}, + {2000, "1.953 KiB", "1.95 Ki"}, + {5 << 20, "5 MiB", "5 Mi"}, + {11 << 60, "11 EiB", "11 Ei"}, + } + + for _, c := range cases { + require.Equal(t, c.size, SizeStr(NewInt(c.in)), "result of SizeStr") + require.Equal(t, c.deci, DeciStr(NewInt(c.in)), "result of DeciStr") + } +} + +func TestSizeStrUnitsSymmetry(t *testing.T) { + tf.UnitTest(t) + s := rand.NewSource(time.Now().UnixNano()) + r := rand.New(s) + + for i := 0; i < 10000; i++ { + n := r.Uint64() + l := strings.ReplaceAll(units.BytesSize(float64(n)), " ", "") + r := strings.ReplaceAll(SizeStr(NewInt(n)), " ", "") + + require.NotContains(t, l, "e+") + require.NotContains(t, r, "e+") + + require.Equal(t, l, r, "wrong formatting for %d", n) + } +} + +func TestSizeStrBig(t *testing.T) { + tf.UnitTest(t) + ZiB := big.NewInt(50000) + ZiB = ZiB.Lsh(ZiB, 70) + + require.Equal(t, "5e+04 ZiB", SizeStr(BigInt{Int: ZiB}), "inout %+v, produced wrong result", ZiB) +} diff --git a/venus-shared/types/block_header.go b/venus-shared/types/block_header.go new file mode 100644 index 0000000000..1eb878695d --- /dev/null +++ b/venus-shared/types/block_header.go @@ -0,0 +1,205 @@ +package types + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/proof" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + node "github.com/ipfs/go-ipld-format" +) + +// DecodeBlock decodes raw cbor bytes into a BlockHeader. 
+func DecodeBlock(b []byte) (*BlockHeader, error) { + var out BlockHeader + if err := out.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, err + } + + return &out, nil +} + +// BlockHeader is a newBlock in the blockchain. +type BlockHeader struct { + // Miner is the address of the miner actor that mined this newBlock. + Miner address.Address + + // Ticket is the ticket submitted with this newBlock. + Ticket *Ticket + + // ElectionProof is the vrf proof giving this newBlock's miner authoring rights + ElectionProof *ElectionProof + + // BeaconEntries contain the verifiable oracle randomness used to elect + // this newBlock's author leader + BeaconEntries []BeaconEntry + + // WinPoStProof are the winning post proofs + WinPoStProof []proof.PoStProof + + // Parents is the set of parents this newBlock was based on. Typically one, + // but can be several in the case where there were multiple winning ticket- + // holders for an epoch. + Parents []cid.Cid + + // ParentWeight is the aggregate chain weight of the parent set. + ParentWeight big.Int + + // Height is the chain height of this newBlock. + Height abi.ChainEpoch + + // ParentStateRoot is the CID of the root of the state tree after application of the messages in the parent tipset + // to the parent tipset's state root. + ParentStateRoot cid.Cid + + // ParentMessageReceipts is a list of receipts corresponding to the application of the messages in the parent tipset + // to the parent tipset's state root (corresponding to this newBlock's ParentStateRoot). + ParentMessageReceipts cid.Cid + + // Messages is the set of messages included in this newBlock + Messages cid.Cid + + // The aggregate signature of all BLS signed messages in the newBlock + BLSAggregate *crypto.Signature + + // The timestamp, in seconds since the Unix epoch, at which this newBlock was created. 
+ Timestamp uint64 + + // The signature of the miner's worker key over the newBlock + BlockSig *crypto.Signature + + // ForkSignaling is extra data used by miners to communicate + ForkSignaling uint64 + + // identical for all blocks in same tipset: the base fee after executing parent tipset + ParentBaseFee abi.TokenAmount + + validated bool // internal, true if the signature has been validated +} + +// Cid returns the content id of this newBlock. +func (b *BlockHeader) Cid() cid.Cid { + c, _, err := b.SerializeWithCid() + if err != nil { + panic(err) + } + + return c +} + +func (b *BlockHeader) String() string { + errStr := "(error encoding BlockHeader)" + c, _, err := b.SerializeWithCid() + if err != nil { + return errStr + } + + js, err := json.MarshalIndent(b, "", " ") + if err != nil { + return errStr + } + + return fmt.Sprintf("BlockHeader cid=[%v]: %s", c, string(js)) +} + +// Equals returns true if the BlockHeader is equal to other. +func (b *BlockHeader) Equals(other *BlockHeader) bool { + return b.Cid().Equals(other.Cid()) +} + +// SignatureData returns the newBlock's bytes with a null signature field for +// signature creation and verification +func (b *BlockHeader) SignatureData() ([]byte, error) { + tmp := *b + tmp.BlockSig = nil + return tmp.Serialize() +} + +// Serialize serialize blockheader to binary +func (b *BlockHeader) Serialize() ([]byte, error) { + buf := new(bytes.Buffer) + if err := b.MarshalCBOR(buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (b *BlockHeader) SerializeWithCid() (cid.Cid, []byte, error) { + data, err := b.Serialize() + if err != nil { + return cid.Undef, nil, err + } + + c, err := abi.CidBuilder.Sum(data) + if err != nil { + return cid.Undef, nil, err + } + + return c, data, nil +} + +// ToStorageBlock convert blockheader to data block with cid +func (b *BlockHeader) ToStorageBlock() (blocks.Block, error) { + c, data, err := b.SerializeWithCid() + if err != nil { + return nil, err + } + + 
return blocks.NewBlockWithCid(data, c) +} + +// LastTicket get ticket in block +func (b *BlockHeader) LastTicket() *Ticket { + return b.Ticket +} + +// SetValidated set block signature is valid after checkout blocksig +func (b *BlockHeader) SetValidated() { + b.validated = true +} + +// IsValidated check whether block signature is valid from memory +func (b *BlockHeader) IsValidated() bool { + return b.validated +} + +// ToNode converts the BlockHeader to an IPLD node. +func (b *BlockHeader) ToNode() node.Node { + buf := new(bytes.Buffer) + err := b.MarshalCBOR(buf) + if err != nil { + panic(err) + } + data := buf.Bytes() + c, err := DefaultCidBuilder.Sum(data) + if err != nil { + panic(err) + } + + blk, err := blocks.NewBlockWithCid(data, c) + if err != nil { + panic(err) + } + n, err := cbor.DecodeBlock(blk) + if err != nil { + panic(err) + } + return n +} + +func CidArrsContains(a []cid.Cid, b cid.Cid) bool { + for _, elem := range a { + if elem.Equals(b) { + return true + } + } + return false +} diff --git a/venus-shared/types/block_header_test.go b/venus-shared/types/block_header_test.go new file mode 100644 index 0000000000..470549d1cc --- /dev/null +++ b/venus-shared/types/block_header_test.go @@ -0,0 +1,109 @@ +package types + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestBlockHeaderMarshal(t *testing.T) { + tf.UnitTest(t) + const mdata = "f021344" + const cdata = "bafy2bzaced35aqx5wnwp4ohegreumsheigitrhcqlr3lmz4phyzwikmi44sww" + const bdata = 
"904400e0a601815860b48541b503b47535334553bf0d1fc702d395133e28b48bbae7550e955025ee09f642cb82c79de036099c657b7efb78e10b685aa99d817f6e560805787c5df894ac6758b0d68c72fb498113a5e763ef65d5bff576cd7fc3a847684a410b89c63c82025860a233b638f28312f9014728cf42e2c353bbf2031506488fa3192d137881e8b9666a2d131218e96b51b701f826940ef6630ae13b20c7bf115155e349c88363949fd7704f8026c3c3b1f872d9085e912856d2b56b8ce8a6fb8cf4e996aaa8476e2c81821a001506545860a699f4d93c54d66d46a46cab8da059fbf7ef82e40dd19a67fc1c5a402422304e7b8eec5c2dc6107e0cf1be676f12ce9b05675160e1c66f0ae5ebcf056e303ca39aa813acd5403844604b51f1e3dd5fcd271978346b85cdfa6d75cb46e2c6609581820358c09095232420f9caff389c0709ace12897ad0b5734b104011849e0f008febf39d8c64079236d3e75c24c08a613bcd946538a4b966b3c5a79cf61832c673f2ec90d22d02c16e28073c20995f5259567736d6e6f2fee588c7c23ca1946d753a783fe14404c3f4684a0a7cf5ceadc8a7a2cd9ad0387b96608eca6d3604ed7beb948f7fa2e235f1d611114f66752c6c36ac9aeb17c2f36d70accbd7554678034381486a1a95ea36db4dc549ee152a00c1b454da4f47b33327609be8b055f14681a2edf84d82a5827000171a0e4022084da38b952ab5644c5418c3305b3c22b5eca92eab9e23cdb675163773a964c53d82a5827000171a0e402201ef87dc542d008d961a30a36935a06d28ef05e4ed5e22f7779c4f3f8002c451dd82a5827000171a0e4022079bfefc62c740cda4b0463ceba68e9613c5c47ef7bbed968b179763ae7978bf4d82a5827000171a0e402202f9becc403d7228035f153d9cba42db472ed3c1cb3f22c7ae02eaaabca53e80f460001d7dbb8171a00067680d82a5827000171a0e402204f2120d6581f3d69a5d62e25dd993d1825ce6a446ffa801a4092e7e3a28d4b73d82a5827000171a0e402204bc482ae9a6a1afd1a252264a4bcae9fb2150faf9910b80703e9fbb91ab041e3d82a5827000171a0e4022017d2c80f5b157b61e96ea4ef3888762fa81a9a853bffa624f4bfb9c388859a88586102b3f7f6dc71591af0a61bbcad978178fc123a6edbec959716c028ec976b997df83af557a5ad1d05544d5ce82e5461c562196ea998b437bf0ceb7965871bd6d9e16a2df9cfaaf50b627f5a406d344f1ae0d8e0eaa5835f9c092fe24681cbc7761d1a618f1680586102920f0a831f86073b12641e6c880ddc2823a9c7b1b14b56f7995eaafc35df9c8f3066cd3ab9693c53b388e4c46d7680b50dcd242471d763a5114274c475eeb7d6561e35f51db3b6a
c46c4fb8f4218ddc6d6fae3c1cd09fa70c21e6e87bd94e33100420064" + const sdata = "904400e0a601815860b48541b503b47535334553bf0d1fc702d395133e28b48bbae7550e955025ee09f642cb82c79de036099c657b7efb78e10b685aa99d817f6e560805787c5df894ac6758b0d68c72fb498113a5e763ef65d5bff576cd7fc3a847684a410b89c63c82025860a233b638f28312f9014728cf42e2c353bbf2031506488fa3192d137881e8b9666a2d131218e96b51b701f826940ef6630ae13b20c7bf115155e349c88363949fd7704f8026c3c3b1f872d9085e912856d2b56b8ce8a6fb8cf4e996aaa8476e2c81821a001506545860a699f4d93c54d66d46a46cab8da059fbf7ef82e40dd19a67fc1c5a402422304e7b8eec5c2dc6107e0cf1be676f12ce9b05675160e1c66f0ae5ebcf056e303ca39aa813acd5403844604b51f1e3dd5fcd271978346b85cdfa6d75cb46e2c6609581820358c09095232420f9caff389c0709ace12897ad0b5734b104011849e0f008febf39d8c64079236d3e75c24c08a613bcd946538a4b966b3c5a79cf61832c673f2ec90d22d02c16e28073c20995f5259567736d6e6f2fee588c7c23ca1946d753a783fe14404c3f4684a0a7cf5ceadc8a7a2cd9ad0387b96608eca6d3604ed7beb948f7fa2e235f1d611114f66752c6c36ac9aeb17c2f36d70accbd7554678034381486a1a95ea36db4dc549ee152a00c1b454da4f47b33327609be8b055f14681a2edf84d82a5827000171a0e4022084da38b952ab5644c5418c3305b3c22b5eca92eab9e23cdb675163773a964c53d82a5827000171a0e402201ef87dc542d008d961a30a36935a06d28ef05e4ed5e22f7779c4f3f8002c451dd82a5827000171a0e4022079bfefc62c740cda4b0463ceba68e9613c5c47ef7bbed968b179763ae7978bf4d82a5827000171a0e402202f9becc403d7228035f153d9cba42db472ed3c1cb3f22c7ae02eaaabca53e80f460001d7dbb8171a00067680d82a5827000171a0e402204f2120d6581f3d69a5d62e25dd993d1825ce6a446ffa801a4092e7e3a28d4b73d82a5827000171a0e402204bc482ae9a6a1afd1a252264a4bcae9fb2150faf9910b80703e9fbb91ab041e3d82a5827000171a0e4022017d2c80f5b157b61e96ea4ef3888762fa81a9a853bffa624f4bfb9c388859a88586102b3f7f6dc71591af0a61bbcad978178fc123a6edbec959716c028ec976b997df83af557a5ad1d05544d5ce82e5461c562196ea998b437bf0ceb7965871bd6d9e16a2df9cfaaf50b627f5a406d344f1ae0d8e0eaa5835f9c092fe24681cbc7761d1a618f1680f600420064" + + maddr, err := address.NewFromString(mdata) + 
require.NoErrorf(t, err, "parse miner address %s", mdata) + + c, err := cid.Decode(cdata) + require.NoErrorf(t, err, "decode cid %s", cdata) + + require.NotEqual(t, bdata, sdata, "check raw sign bytes") + + b, err := hex.DecodeString(bdata) + require.NoError(t, err, "decode block header binary") + + signb, err := hex.DecodeString(sdata) + require.NoError(t, err, "decode sign bytes") + + bh, err := DecodeBlock(b) + require.NoError(t, err, "decode block header") + + require.Equal(t, maddr, bh.Miner, "check for miner") + signdata, err := bh.SignatureData() + require.NoError(t, err, "call bh.SignatureData") + require.Equal(t, signb, signdata, "check for signature data") + + require.Equal(t, c, bh.Cid(), "check for bh.Cid()") + serialized, err := bh.Serialize() + require.NoError(t, err, "call bh.Serialize") + require.Equal(t, b, serialized, "check for bh.Serialize()") + + blk, err := bh.ToStorageBlock() + require.NoError(t, err, "call bh.ToStorageBlock") + + require.Equal(t, c, blk.Cid(), "check for blk.Cid()") + require.Equal(t, b, blk.RawData(), "check for blk.RawData()") +} + +func TestBlockHeaderBasic(t *testing.T) { + tf.UnitTest(t) + var buf bytes.Buffer + sliceLen := 5 + bytesLen := 32 + for i := 0; i < 64; i++ { + var src, dst BlockHeader + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst) + }, + + ProvideOpts: []interface{}{ + testutil.WithSliceLen(sliceLen), + testutil.BytesFixedProvider(bytesLen), + testutil.IDAddressProvider(), + }, + + Provided: func() { + require.Equal(t, src.Miner.Protocol(), address.ID, "miner addr proto") + require.Len(t, src.Parents, sliceLen, "parents length") + require.NotNil(t, src.ElectionProof, "ElectionProof") + require.Len(t, src.ElectionProof.VRFProof, bytesLen, "VRFProof len") + require.NotNil(t, src.BlockSig, "BlockSig") + require.Len(t, src.BlockSig.Data, bytesLen, "BlockSig.Data len") + require.NotNil(t, src.BLSAggregate, "BLSAggregate") + require.Len(t, 
src.BLSAggregate.Data, bytesLen, "BLSAggregate.Data len") + }, + + Marshaled: func(b []byte) { + decoded, err := DecodeBlock(b) + require.NoError(t, err, "DecodeBlock") + require.Equal(t, src, *decoded) + }, + + Finished: func() { + require.Equal(t, src.LastTicket(), dst.LastTicket()) + require.Equal(t, src, dst) + require.Equal(t, src.String(), dst.String()) + require.True(t, src.Equals(&dst)) + + require.False(t, src.IsValidated(), "check validated before set") + + src.SetValidated() + require.True(t, src.IsValidated(), "check validated before set") + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/types/block_message.go b/venus-shared/types/block_message.go new file mode 100644 index 0000000000..36b5fa31b7 --- /dev/null +++ b/venus-shared/types/block_message.go @@ -0,0 +1,9 @@ +package types + +// BlockMessagesInfo contains messages for one newBlock in a tipset. +type BlockMessagesInfo struct { //nolint + BlsMessages []ChainMsg + SecpkMessages []ChainMsg + + Block *BlockHeader +} diff --git a/venus-shared/types/block_message_test.go b/venus-shared/types/block_message_test.go new file mode 100644 index 0000000000..5d24852c3d --- /dev/null +++ b/venus-shared/types/block_message_test.go @@ -0,0 +1,11 @@ +package types + +import ( + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" +) + +func TestBlockMessageBasic(t *testing.T) { + tf.UnitTest(t) +} diff --git a/venus-shared/types/block_msg.go b/venus-shared/types/block_msg.go new file mode 100644 index 0000000000..e884d03b9e --- /dev/null +++ b/venus-shared/types/block_msg.go @@ -0,0 +1,28 @@ +package types + +import ( + "bytes" + + "github.com/ipfs/go-cid" +) + +type BlockMsg struct { // nolint: golint + Header *BlockHeader + BlsMessages []cid.Cid + SecpkMessages []cid.Cid +} + +// Cid return block cid +func (bm *BlockMsg) Cid() cid.Cid { + return bm.Header.Cid() +} + +// Serialize return blockmsg binary +func (bm *BlockMsg) Serialize() ([]byte, 
error) { + buf := new(bytes.Buffer) + if err := bm.MarshalCBOR(buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/venus-shared/types/block_msg_test.go b/venus-shared/types/block_msg_test.go new file mode 100644 index 0000000000..d39a6cc922 --- /dev/null +++ b/venus-shared/types/block_msg_test.go @@ -0,0 +1,65 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestBlockMsgBasic(t *testing.T) { + tf.UnitTest(t) + msgLen := 16 + emptyCids := make([]cid.Cid, msgLen) + + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst BlockMsg + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst) + require.Nil(t, src.Header) + require.Nil(t, src.BlsMessages) + require.Nil(t, src.SecpkMessages) + }, + + ProvideOpts: []interface{}{ + testutil.WithSliceLen(msgLen), + }, + + Provided: func() { + require.NotEqual(t, src, dst, "value provided") + require.NotNil(t, src.Header) + require.NotEqual(t, emptyCids, src.BlsMessages) + require.NotEqual(t, emptyCids, src.SecpkMessages) + }, + + Marshaled: func(b []byte) { + bmCid := src.Cid() + require.Equal(t, bmCid, src.Header.Cid(), "Cid() result for BlockMsg") + + sumCid, err := abi.CidBuilder.Sum(b) + require.NoError(t, err, "CidBuilder.Sum") + + require.NotEqual(t, bmCid, sumCid) + + serialized, err := src.Serialize() + require.NoError(t, err, "Serialize") + require.Equal(t, b, serialized) + }, + + Finished: func() { + require.Equal(t, src, dst, "after unmarshaling") + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/types/cbor_gen.go b/venus-shared/types/cbor_gen.go new file mode 100644 index 0000000000..78c56e72b3 --- /dev/null +++ 
b/venus-shared/types/cbor_gen.go @@ -0,0 +1,1473 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package types + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" + crypto "github.com/filecoin-project/go-state-types/crypto" + exitcode "github.com/filecoin-project/go-state-types/exitcode" + proof "github.com/filecoin-project/go-state-types/proof" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufBlockHeader = []byte{144} + +func (t *BlockHeader) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufBlockHeader); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + + // t.Ticket (types.Ticket) (struct) + if err := t.Ticket.MarshalCBOR(cw); err != nil { + return err + } + + // t.ElectionProof (types.ElectionProof) (struct) + if err := t.ElectionProof.MarshalCBOR(cw); err != nil { + return err + } + + // t.BeaconEntries ([]types.BeaconEntry) (slice) + if len(t.BeaconEntries) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.BeaconEntries was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.BeaconEntries))); err != nil { + return err + } + for _, v := range t.BeaconEntries { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.WinPoStProof ([]proof.PoStProof) (slice) + if len(t.WinPoStProof) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.WinPoStProof was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.WinPoStProof))); err != nil { + return err + } + for _, v 
:= range t.WinPoStProof { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.Parents ([]cid.Cid) (slice) + if len(t.Parents) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Parents was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Parents))); err != nil { + return err + } + for _, v := range t.Parents { + if err := cbg.WriteCid(w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Parents: %w", err) + } + } + + // t.ParentWeight (big.Int) (struct) + if err := t.ParentWeight.MarshalCBOR(cw); err != nil { + return err + } + + // t.Height (abi.ChainEpoch) (int64) + if t.Height >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Height)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Height-1)); err != nil { + return err + } + } + + // t.ParentStateRoot (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.ParentStateRoot); err != nil { + return xerrors.Errorf("failed to write cid field t.ParentStateRoot: %w", err) + } + + // t.ParentMessageReceipts (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.ParentMessageReceipts); err != nil { + return xerrors.Errorf("failed to write cid field t.ParentMessageReceipts: %w", err) + } + + // t.Messages (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Messages); err != nil { + return xerrors.Errorf("failed to write cid field t.Messages: %w", err) + } + + // t.BLSAggregate (crypto.Signature) (struct) + if err := t.BLSAggregate.MarshalCBOR(cw); err != nil { + return err + } + + // t.Timestamp (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + + // t.BlockSig (crypto.Signature) (struct) + if err := t.BlockSig.MarshalCBOR(cw); err != nil { + return err + } + + // t.ForkSignaling (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ForkSignaling)); err 
!= nil { + return err + } + + // t.ParentBaseFee (big.Int) (struct) + if err := t.ParentBaseFee.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { + *t = BlockHeader{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 16 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Miner (address.Address) (struct) + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.Ticket (types.Ticket) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ticket = new(Ticket) + if err := t.Ticket.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ticket pointer: %w", err) + } + } + + } + // t.ElectionProof (types.ElectionProof) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.ElectionProof = new(ElectionProof) + if err := t.ElectionProof.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ElectionProof pointer: %w", err) + } + } + + } + // t.BeaconEntries ([]types.BeaconEntry) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.BeaconEntries: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.BeaconEntries = make([]BeaconEntry, extra) + } + + for i := 0; i < int(extra); i++ { + + var v BeaconEntry + if err := v.UnmarshalCBOR(cr); err != nil { + 
return err + } + + t.BeaconEntries[i] = v + } + + // t.WinPoStProof ([]proof.PoStProof) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.WinPoStProof: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.WinPoStProof = make([]proof.PoStProof, extra) + } + + for i := 0; i < int(extra); i++ { + + var v proof.PoStProof + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.WinPoStProof[i] = v + } + + // t.Parents ([]cid.Cid) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Parents: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Parents = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("reading cid field t.Parents failed: %w", err) + } + t.Parents[i] = c + } + + // t.ParentWeight (big.Int) (struct) + + { + + if err := t.ParentWeight.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ParentWeight: %w", err) + } + + } + // t.Height (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Height = abi.ChainEpoch(extraI) + } + // t.ParentStateRoot (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ParentStateRoot: %w", err) + } + + 
t.ParentStateRoot = c + + } + // t.ParentMessageReceipts (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ParentMessageReceipts: %w", err) + } + + t.ParentMessageReceipts = c + + } + // t.Messages (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Messages: %w", err) + } + + t.Messages = c + + } + // t.BLSAggregate (crypto.Signature) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.BLSAggregate = new(crypto.Signature) + if err := t.BLSAggregate.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.BLSAggregate pointer: %w", err) + } + } + + } + // t.Timestamp (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Timestamp = uint64(extra) + + } + // t.BlockSig (crypto.Signature) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.BlockSig = new(crypto.Signature) + if err := t.BlockSig.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.BlockSig pointer: %w", err) + } + } + + } + // t.ForkSignaling (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ForkSignaling = uint64(extra) + + } + // t.ParentBaseFee (big.Int) (struct) + + { + + if err := t.ParentBaseFee.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ParentBaseFee: %w", err) + } + + } + return nil +} + +var lengthBufTicket = []byte{129} + +func (t *Ticket) MarshalCBOR(w io.Writer) error { + if t == nil { 
+ _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufTicket); err != nil { + return err + } + + // t.VRFProof (types.VRFPi) (slice) + if len(t.VRFProof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.VRFProof was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.VRFProof))); err != nil { + return err + } + + if _, err := cw.Write(t.VRFProof[:]); err != nil { + return err + } + return nil +} + +func (t *Ticket) UnmarshalCBOR(r io.Reader) (err error) { + *t = Ticket{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.VRFProof (types.VRFPi) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.VRFProof = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.VRFProof[:]); err != nil { + return err + } + return nil +} + +var lengthBufElectionProof = []byte{130} + +func (t *ElectionProof) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufElectionProof); err != nil { + return err + } + + // t.WinCount (int64) (int64) + if t.WinCount >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.WinCount)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.WinCount-1)); err != nil { + return err + } + } + + // t.VRFProof 
(types.VRFPi) (slice) + if len(t.VRFProof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.VRFProof was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.VRFProof))); err != nil { + return err + } + + if _, err := cw.Write(t.VRFProof[:]); err != nil { + return err + } + return nil +} + +func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) { + *t = ElectionProof{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.WinCount (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.WinCount = int64(extraI) + } + // t.VRFProof (types.VRFPi) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.VRFProof = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.VRFProof[:]); err != nil { + return err + } + return nil +} + +var lengthBufBeaconEntry = []byte{130} + +func (t *BeaconEntry) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := 
cw.Write(lengthBufBeaconEntry); err != nil { + return err + } + + // t.Round (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Round)); err != nil { + return err + } + + // t.Data ([]uint8) (slice) + if len(t.Data) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Data was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Data))); err != nil { + return err + } + + if _, err := cw.Write(t.Data[:]); err != nil { + return err + } + return nil +} + +func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) (err error) { + *t = BeaconEntry{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Round (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Round = uint64(extra) + + } + // t.Data ([]uint8) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Data: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Data = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.Data[:]); err != nil { + return err + } + return nil +} + +var lengthBufSignedMessage = []byte{130} + +func (t *SignedMessage) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufSignedMessage); err != nil { + return err + } + + // t.Message (internal.Message) (struct) + if err := 
t.Message.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedMessage) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedMessage{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Message (internal.Message) (struct) + + { + + if err := t.Message.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Message: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + return nil +} + +var lengthBufMessageRoot = []byte{130} + +func (t *MessageRoot) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufMessageRoot); err != nil { + return err + } + + // t.BlsRoot (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.BlsRoot); err != nil { + return xerrors.Errorf("failed to write cid field t.BlsRoot: %w", err) + } + + // t.SecpkRoot (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.SecpkRoot); err != nil { + return xerrors.Errorf("failed to write cid field t.SecpkRoot: %w", err) + } + + return nil +} + +func (t *MessageRoot) UnmarshalCBOR(r io.Reader) (err error) { + *t = MessageRoot{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type 
array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.BlsRoot (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.BlsRoot: %w", err) + } + + t.BlsRoot = c + + } + // t.SecpkRoot (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.SecpkRoot: %w", err) + } + + t.SecpkRoot = c + + } + return nil +} + +var lengthBufMessageReceipt = []byte{131} + +func (t *MessageReceipt) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufMessageReceipt); err != nil { + return err + } + + // t.ExitCode (exitcode.ExitCode) (int64) + if t.ExitCode >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ExitCode)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ExitCode-1)); err != nil { + return err + } + } + + // t.Return ([]uint8) (slice) + if len(t.Return) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Return was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Return))); err != nil { + return err + } + + if _, err := cw.Write(t.Return[:]); err != nil { + return err + } + + // t.GasUsed (int64) (int64) + if t.GasUsed >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.GasUsed)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.GasUsed-1)); err != nil { + return err + } + } + return nil +} + +func (t *MessageReceipt) UnmarshalCBOR(r io.Reader) (err error) { + *t = MessageReceipt{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj 
!= cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ExitCode (exitcode.ExitCode) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.ExitCode = exitcode.ExitCode(extraI) + } + // t.Return ([]uint8) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Return: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Return = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.Return[:]); err != nil { + return err + } + // t.GasUsed (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.GasUsed = int64(extraI) + } + return nil +} + +var lengthBufBlockMsg = []byte{131} + +func (t *BlockMsg) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufBlockMsg); err != nil { + return err + } + + // t.Header (types.BlockHeader) (struct) + if err := 
t.Header.MarshalCBOR(cw); err != nil { + return err + } + + // t.BlsMessages ([]cid.Cid) (slice) + if len(t.BlsMessages) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.BlsMessages was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.BlsMessages))); err != nil { + return err + } + for _, v := range t.BlsMessages { + if err := cbg.WriteCid(w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.BlsMessages: %w", err) + } + } + + // t.SecpkMessages ([]cid.Cid) (slice) + if len(t.SecpkMessages) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.SecpkMessages was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.SecpkMessages))); err != nil { + return err + } + for _, v := range t.SecpkMessages { + if err := cbg.WriteCid(w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.SecpkMessages: %w", err) + } + } + return nil +} + +func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { + *t = BlockMsg{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Header (types.BlockHeader) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Header = new(BlockHeader) + if err := t.Header.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Header pointer: %w", err) + } + } + + } + // t.BlsMessages ([]cid.Cid) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.BlsMessages: array too large (%d)", extra) + } + + if maj != cbg.MajArray 
{ + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.BlsMessages = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("reading cid field t.BlsMessages failed: %w", err) + } + t.BlsMessages[i] = c + } + + // t.SecpkMessages ([]cid.Cid) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.SecpkMessages: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.SecpkMessages = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("reading cid field t.SecpkMessages failed: %w", err) + } + t.SecpkMessages[i] = c + } + + return nil +} + +var lengthBufExpTipSet = []byte{131} + +func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufExpTipSet); err != nil { + return err + } + + // t.Cids ([]cid.Cid) (slice) + if len(t.Cids) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Cids was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Cids))); err != nil { + return err + } + for _, v := range t.Cids { + if err := cbg.WriteCid(w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Cids: %w", err) + } + } + + // t.Blocks ([]*types.BlockHeader) (slice) + if len(t.Blocks) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Blocks was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Blocks))); err != nil { + return err + } + for _, v := range t.Blocks { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.Height (abi.ChainEpoch) (int64) + if t.Height >= 0 { + if err := 
cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Height)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Height-1)); err != nil { + return err + } + } + return nil +} + +func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { + *t = ExpTipSet{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Cids ([]cid.Cid) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Cids: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Cids = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("reading cid field t.Cids failed: %w", err) + } + t.Cids[i] = c + } + + // t.Blocks ([]*types.BlockHeader) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Blocks: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Blocks = make([]*BlockHeader, extra) + } + + for i := 0; i < int(extra); i++ { + + var v BlockHeader + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Blocks[i] = &v + } + + // t.Height (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = 
int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Height = abi.ChainEpoch(extraI) + } + return nil +} + +var lengthBufPaymentInfo = []byte{131} + +func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufPaymentInfo); err != nil { + return err + } + + // t.Channel (address.Address) (struct) + if err := t.Channel.MarshalCBOR(cw); err != nil { + return err + } + + // t.WaitSentinel (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil { + return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err) + } + + // t.Vouchers ([]*paych.SignedVoucher) (slice) + if len(t.Vouchers) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Vouchers was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Vouchers))); err != nil { + return err + } + for _, v := range t.Vouchers { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = PaymentInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Channel (address.Address) (struct) + + { + + if err := t.Channel.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Channel: %w", err) + } + + } + // t.WaitSentinel (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err) + } + + 
t.WaitSentinel = c + + } + // t.Vouchers ([]*paych.SignedVoucher) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Vouchers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Vouchers = make([]*paych.SignedVoucher, extra) + } + + for i := 0; i < int(extra); i++ { + + var v paych.SignedVoucher + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Vouchers[i] = &v + } + + return nil +} diff --git a/venus-shared/types/chain_info.go b/venus-shared/types/chain_info.go new file mode 100644 index 0000000000..6ab4ddd3b0 --- /dev/null +++ b/venus-shared/types/chain_info.go @@ -0,0 +1,31 @@ +package types + +import ( + "fmt" + + "github.com/libp2p/go-libp2p/core/peer" +) + +// ChainInfo is used to track metadata about a peer and its chain. +type ChainInfo struct { + // The originator of the TipSetKey propagation wave. + Source peer.ID + // The peer that sent us the TipSetKey message. + Sender peer.ID + Head *TipSet +} + +// NewChainInfo creates a chain info from a peer id a head tipset key and a +// chain height. +func NewChainInfo(source peer.ID, sender peer.ID, head *TipSet) *ChainInfo { + return &ChainInfo{ + Source: source, + Sender: sender, + Head: head, + } +} + +// String returns a human-readable string representation of a chain info +func (i *ChainInfo) String() string { + return fmt.Sprintf("{source=%s sender:%s height=%d head=%s}", i.Source, i.Sender, i.Head.Height(), i.Head.Key()) +} diff --git a/venus-shared/types/checkstatuscode_string.go b/venus-shared/types/checkstatuscode_string.go new file mode 100644 index 0000000000..7cfc3785c9 --- /dev/null +++ b/venus-shared/types/checkstatuscode_string.go @@ -0,0 +1,35 @@ +// Code generated by "stringer -type=CheckStatusCode -trimprefix=CheckStatus"; DO NOT EDIT. 
+ +package types + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[CheckStatusMessageSerialize-1] + _ = x[CheckStatusMessageSize-2] + _ = x[CheckStatusMessageValidity-3] + _ = x[CheckStatusMessageMinGas-4] + _ = x[CheckStatusMessageMinBaseFee-5] + _ = x[CheckStatusMessageBaseFee-6] + _ = x[CheckStatusMessageBaseFeeLowerBound-7] + _ = x[CheckStatusMessageBaseFeeUpperBound-8] + _ = x[CheckStatusMessageGetStateNonce-9] + _ = x[CheckStatusMessageNonce-10] + _ = x[CheckStatusMessageGetStateBalance-11] + _ = x[CheckStatusMessageBalance-12] +} + +const _CheckStatusCode_name = "MessageSerializeMessageSizeMessageValidityMessageMinGasMessageMinBaseFeeMessageBaseFeeMessageBaseFeeLowerBoundMessageBaseFeeUpperBoundMessageGetStateNonceMessageNonceMessageGetStateBalanceMessageBalance" + +var _CheckStatusCode_index = [...]uint8{0, 16, 27, 42, 55, 72, 86, 110, 134, 154, 166, 188, 202} + +func (i CheckStatusCode) String() string { + i -= 1 + if i < 0 || i >= CheckStatusCode(len(_CheckStatusCode_index)-1) { + return "CheckStatusCode(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _CheckStatusCode_name[_CheckStatusCode_index[i]:_CheckStatusCode_index[i+1]] +} diff --git a/venus-shared/types/complex_provider.go b/venus-shared/types/complex_provider.go new file mode 100644 index 0000000000..a328623584 --- /dev/null +++ b/venus-shared/types/complex_provider.go @@ -0,0 +1,77 @@ +package types + +import ( + "math" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/filecoin-project/venus/venus-shared/types/params" + "github.com/ipfs/go-cid" +) + +func init() { + testutil.MustRegisterDefaultValueProvier(TipsetProvider()) + testutil.MustRegisterDefaultValueProvier(MessageProvider()) +} + 
+func TipsetProvider() func(*testing.T) *TipSet { + const ( + minBlkNumInTipset = 1 + maxBlkNumInTipset = 5 + ) + + return func(t *testing.T) *TipSet { + var ( + blkNum, parentNum int + parentWeight big.Int + epoch abi.ChainEpoch + blocks []*BlockHeader + parents []cid.Cid + ) + + testutil.Provide(t, &parentNum, testutil.IntRangedProvider(minBlkNumInTipset, maxBlkNumInTipset)) + testutil.Provide(t, &parents, testutil.WithSliceLen(parentNum)) + + testutil.Provide(t, &blkNum, testutil.IntRangedProvider(minBlkNumInTipset+1, maxBlkNumInTipset)) + testutil.Provide(t, &blocks, testutil.WithSliceLen(blkNum), + // blocks in one tipset must be with the same parents. + func(t *testing.T) []cid.Cid { + return parents + }) + + testutil.Provide(t, &epoch, testutil.IntRangedProvider(0, math.MaxUint32)) + testutil.Provide(t, &parentWeight, testutil.PositiveBigProvider()) + + // ensure that random assignments won't break the validation + for _, blk := range blocks { + blk.Height = epoch + blk.ParentWeight.Set(parentWeight.Int) + } + + tipset, err := NewTipSet(blocks) + if err != nil { + t.Fatalf("create new tipset failed: %s", err.Error()) + } + + return tipset + } +} + +func MessageProvider() func(t *testing.T) *Message { + return func(t *testing.T) *Message { + var msg Message + testutil.Provide(t, &msg, + testutil.IntRangedProvider(0, params.BlockGasLimit), + func(t *testing.T) big.Int { + ip := testutil.IntRangedProvider(0, int(params.FilBase)) + return FromFil(uint64(ip(t))) + }, + ) + // ensure that random assignments won't break the validation + msg.Version = 0 + msg.GasPremium = msg.GasFeeCap + return &msg + } +} diff --git a/venus-shared/types/complex_provider_test.go b/venus-shared/types/complex_provider_test.go new file mode 100644 index 0000000000..a5636b94dc --- /dev/null +++ b/venus-shared/types/complex_provider_test.go @@ -0,0 +1,43 @@ +package types + +import ( + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + 
"github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/stretchr/testify/require" +) + +func TestTipsetProvider(t *testing.T) { + tf.UnitTest(t) + tipset := &TipSet{} + testutil.Provide(t, &tipset) + require.Greater(t, len(tipset.blocks), 0, "blocks in a tipset must greater than 0") +} + +func TestMessageProvider(t *testing.T) { + tf.UnitTest(t) + var message *Message + testutil.Provide(t, &message) + require.NotEqual(t, message.Cid().String(), "", "message cid can't be empty") +} + +func TestBlockProvider(t *testing.T) { + tf.UnitTest(t) + var block *BlockHeader + testutil.Provide(t, &block) + require.NotNil(t, block, "block must not be nil") +} + +func TestComplexProvider(t *testing.T) { + tf.UnitTest(t) + + tests := map[string]func(*testing.T){ + "Tipset": TestTipsetProvider, + "Message": TestMessageProvider, + "Block": TestBlockProvider, + } + for testName, f := range tests { + t.Run(testName, f) + } +} diff --git a/venus-shared/types/election_proof.go b/venus-shared/types/election_proof.go new file mode 100644 index 0000000000..dfa3c55374 --- /dev/null +++ b/venus-shared/types/election_proof.go @@ -0,0 +1,250 @@ +package types + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/minio/blake2b-simd" + + "github.com/filecoin-project/venus/venus-shared/types/params" +) + +// A Ticket is a marker of a tick of the blockchain's clock. It is the source +// of randomness for proofs of storage and leader election. It is generated +// by the miner of a newBlock using a VRF. 
+type Ticket struct { + // A proof output by running a VRF on the VRFProof of the parent ticket + VRFProof VRFPi +} + +// String returns the string representation of the VRFProof of the ticket +func (t Ticket) String() string { + return fmt.Sprintf("%x", t.VRFProof) +} + +func (t *Ticket) Compare(o *Ticket) int { + tDigest := t.VRFProof.Digest() + oDigest := o.VRFProof.Digest() + return bytes.Compare(tDigest[:], oDigest[:]) +} + +func (t *Ticket) Less(o *Ticket) bool { + return t.Compare(o) < 0 +} + +func (t *Ticket) Quality() float64 { + ticketHash := blake2b.Sum256(t.VRFProof) + ticketNum := BigFromBytes(ticketHash[:]).Int + ticketDenu := big.NewInt(1) + ticketDenu.Lsh(ticketDenu, 256) + tv, _ := new(big.Rat).SetFrac(ticketNum, ticketDenu).Float64() + tq := 1 - tv + return tq +} + +// VRFPi is the proof output from running a VRF. +type VRFPi []byte + +// Digest returns the digest (hash) of a proof, for use generating challenges etc. +func (p VRFPi) Digest() [32]byte { + proofDigest := blake2b.Sum256(p) + return proofDigest +} + +type ElectionProof struct { + WinCount int64 + + // A proof output by running a VRF on the VRFProof of the parent ticket + VRFProof VRFPi +} + +const precision = 256 + +var ( + expNumCoef []*big.Int + expDenoCoef []*big.Int +) + +func init() { + parse := func(coefs []string) []*big.Int { + out := make([]*big.Int, len(coefs)) + for i, coef := range coefs { + c, ok := new(big.Int).SetString(coef, 10) + if !ok { + panic("could not parse exp paramemter") + } + // << 256 (Q.0 to Q.256), >> 128 to transform integer params to coefficients + c = c.Lsh(c, precision-128) + out[i] = c + } + return out + } + + // parameters are in integer format, + // coefficients are *2^-128 of that + num := []string{ + "-648770010757830093818553637600", + "67469480939593786226847644286976", + "-3197587544499098424029388939001856", + "89244641121992890118377641805348864", + "-1579656163641440567800982336819953664", + "17685496037279256458459817590917169152", + 
"-115682590513835356866803355398940131328", + "340282366920938463463374607431768211456", + } + expNumCoef = parse(num) + + deno := []string{ + "1225524182432722209606361", + "114095592300906098243859450", + "5665570424063336070530214243", + "194450132448609991765137938448", + "5068267641632683791026134915072", + "104716890604972796896895427629056", + "1748338658439454459487681798864896", + "23704654329841312470660182937960448", + "259380097567996910282699886670381056", + "2250336698853390384720606936038375424", + "14978272436876548034486263159246028800", + "72144088983913131323343765784380833792", + "224599776407103106596571252037123047424", + "340282366920938463463374607431768211456", + } + expDenoCoef = parse(deno) +} + +// expneg accepts x in Q.256 format and computes e^-x. +// It is most precise within [0, 1.725) range, where error is less than 3.4e-30. +// Over the [0, 5) range its error is less than 4.6e-15. +// Output is in Q.256 format. +func expneg(x *big.Int) *big.Int { + // exp is approximated by rational function + // polynomials of the rational function are evaluated using Horner's method + num := polyval(expNumCoef, x) // Q.256 + deno := polyval(expDenoCoef, x) // Q.256 + + num = num.Lsh(num, precision) // Q.512 + return num.Div(num, deno) // Q.512 / Q.256 => Q.256 +} + +// polyval evaluates a polynomial given by coefficients `p` in Q.256 format +// at point `x` in Q.256 format. Output is in Q.256. +// Coefficients should be ordered from the highest order coefficient to the lowest. 
+func polyval(p []*big.Int, x *big.Int) *big.Int { + // evaluation using Horner's method + res := new(big.Int).Set(p[0]) // Q.256 + tmp := new(big.Int) // big.Int.Mul doesn't like when input is reused as output + for _, c := range p[1:] { + tmp = tmp.Mul(res, x) // Q.256 * Q.256 => Q.512 + res = res.Rsh(tmp, precision) // Q.512 >> 256 => Q.256 + res = res.Add(res, c) + } + + return res +} + +// computes lambda in Q.256 +func lambda(power, totalPower *big.Int) *big.Int { + lam := new(big.Int).Mul(power, blocksPerEpochBig) // Q.0 + lam = lam.Lsh(lam, precision) // Q.256 + lam = lam.Div(lam /* Q.256 */, totalPower /* Q.0 */) // Q.256 + return lam +} + +type poiss struct { + lam *big.Int + pmf *big.Int + icdf *big.Int + + tmp *big.Int // temporary variable for optimization + + k uint64 +} + +// newPoiss starts poisson inverted CDF +// lambda is in Q.256 format +// returns (instance, `1-poisscdf(0, lambda)`) +// CDF value returend is reused when calling `next` +func newPoiss(lambda *big.Int) (*poiss, *big.Int) { + // pmf(k) = (lambda^k)*(e^lambda) / k! + // k = 0 here, so it simplifies to just e^-lambda + elam := expneg(lambda) // Q.256 + pmf := new(big.Int).Set(elam) + + // icdf(k) = 1 - ∑ᵏᵢ₌₀ pmf(i) + // icdf(0) = 1 - pmf(0) + icdf := big.NewInt(1) + icdf = icdf.Lsh(icdf, precision) // Q.256 + icdf = icdf.Sub(icdf, pmf) // Q.256 + + k := uint64(0) + + p := &poiss{ + lam: lambda, + pmf: pmf, + + tmp: elam, + icdf: icdf, + + k: k, + } + + return p, icdf +} + +// next computes `k++, 1-poisscdf(k, lam)` +// return is in Q.256 format +func (p *poiss) next() *big.Int { + // incrementally compute next pmf and icdf + + // pmf(k) = (lambda^k)*(e^lambda) / k! 
+ // so pmf(k) = pmf(k-1) * lambda / k + p.k++ + p.tmp.SetUint64(p.k) // Q.0 + + // calculate pmf for k + p.pmf = p.pmf.Div(p.pmf, p.tmp) // Q.256 / Q.0 => Q.256 + // we are using `tmp` as target for multiplication as using an input as output + // for Int.Mul causes allocations + p.tmp = p.tmp.Mul(p.pmf, p.lam) // Q.256 * Q.256 => Q.512 + p.pmf = p.pmf.Rsh(p.tmp, precision) // Q.512 >> 256 => Q.256 + + // calculate output + // icdf(k) = icdf(k-1) - pmf(k) + p.icdf = p.icdf.Sub(p.icdf, p.pmf) // Q.256 + return p.icdf +} + +// ComputeWinCount uses VRFProof to compute number of wins +// The algorithm is based on Algorand's Sortition with Binomial distribution +// replaced by Poisson distribution. +func (ep *ElectionProof) ComputeWinCount(power abi.StoragePower, totalPower abi.StoragePower) int64 { + h := blake2b.Sum256(ep.VRFProof) + + lhs := big.NewInt(0).SetBytes(h[:]) // 256bits, assume Q.256 so [0, 1) + + // We are calculating upside-down CDF of Poisson distribution with + // rate λ=power*E/totalPower + // Steps: + // 1. calculate λ=power*E/totalPower + // 2. calculate elam = exp(-λ) + // 3. 
Check how many times we win: + // j = 0 + // pmf = elam + // rhs = 1 - pmf + // for h(vrf) < rhs: j++; pmf = pmf * lam / j; rhs = rhs - pmf + + lam := lambda(power.Int, totalPower.Int) // Q.256 + + p, rhs := newPoiss(lam) + + var j int64 + for lhs.Cmp(rhs) < 0 && j < params.MaxWinCount { + rhs = p.next() + j++ + } + + return j +} diff --git a/venus-shared/types/election_proof_test.go b/venus-shared/types/election_proof_test.go new file mode 100644 index 0000000000..119bd84261 --- /dev/null +++ b/venus-shared/types/election_proof_test.go @@ -0,0 +1,117 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/filecoin-project/venus/venus-shared/types/params" +) + +func TestElectionProofBasic(t *testing.T) { + tf.UnitTest(t) + vrfLen := 32 + winCountMin := int64(3) + winCountMax := int64(10) + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst ElectionProof + + var power abi.StoragePower + testutil.Provide(t, &power, testutil.PositiveBigProvider()) + require.True(t, power.GreaterThan(big.Zero()), "positive storage power") + totalPower := BigMul(power, NewInt(7)) + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + }, + + ProvideOpts: []interface{}{ + testutil.BytesFixedProvider(vrfLen), + testutil.IntRangedProvider(int(winCountMin), int(winCountMax)), + }, + + Provided: func() { + require.NotEqual(t, src, dst, "src value provided") + require.Len(t, src.VRFProof, vrfLen, "vrf length") + require.GreaterOrEqual(t, src.WinCount, winCountMin, "win count min") + require.Less(t, src.WinCount, winCountMax, "win count max") + }, + + Finished: func() { + require.Equal(t, src, dst, "from src to dst through cbor") + 
winCount := src.ComputeWinCount(power, totalPower) + require.GreaterOrEqual(t, winCount, int64(0), "win count >=0") + require.LessOrEqual(t, winCount, params.MaxWinCount, "win count <= MaxWinCount") + require.Equal(t, winCount, dst.ComputeWinCount(power, totalPower)) + + t1, t2 := Ticket{ + VRFProof: src.VRFProof, + }, Ticket{ + VRFProof: dst.VRFProof, + } + + require.Equal(t, t1, t2, "ticket") + + require.True(t, t1.Compare(&t2) == 0, "ticket equal") + require.Equal(t, t1.Quality(), t2.Quality()) + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} + +func TestTicketBasic(t *testing.T) { + tf.UnitTest(t) + vrfLen := 32 + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst, another Ticket + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + }, + + ProvideOpts: []interface{}{ + testutil.BytesFixedProvider(vrfLen), + }, + + Provided: func() { + require.NotEqual(t, src, dst, "src value provided") + require.Len(t, src.VRFProof, vrfLen, "vrf length") + }, + + Finished: func() { + require.Equal(t, src, dst, "from src to dst through cbor") + + t1, t2 := Ticket{ + VRFProof: src.VRFProof, + }, Ticket{ + VRFProof: dst.VRFProof, + } + + require.Equal(t, t1, t2, "ticket") + + require.True(t, t1.Compare(&t2) == 0, "ticket equal") + require.Equal(t, t1.Quality(), t2.Quality()) + require.Equal(t, t1.String(), t2.String(), "ticket string") + + testutil.Provide(t, &another, testutil.BytesFixedProvider(vrfLen)) + require.Len(t, another.VRFProof, vrfLen, "vrf length") + require.True(t, src.Less(&another) == (src.Compare(&another) < 0)) + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/types/exec.go b/venus-shared/types/exec.go new file mode 100644 index 0000000000..c9ad82b823 --- /dev/null +++ b/venus-shared/types/exec.go @@ -0,0 +1,106 @@ +package types + +import ( + "encoding/json" + "fmt" + "regexp" + "runtime" + "strings" + "time" +) + +type 
ExecutionTrace struct { + Msg *Message + MsgRct *MessageReceipt + Error string + Duration time.Duration + GasCharges []*GasTrace + + Subcalls []ExecutionTrace +} + +type GasTrace struct { + Name string + + Location []Loc `json:"loc"` + TotalGas int64 `json:"tg"` + ComputeGas int64 `json:"cg"` + StorageGas int64 `json:"sg"` + TotalVirtualGas int64 `json:"vtg"` + VirtualComputeGas int64 `json:"vcg"` + VirtualStorageGas int64 `json:"vsg"` + + TimeTaken time.Duration `json:"tt"` + Extra interface{} `json:"ex,omitempty"` + + Callers []uintptr `json:"-"` +} + +type Loc struct { + File string + Line int + Function string +} + +// TODO: ??? +// func (l Loc) Show() bool { +// ignorePrefix := []string{ +// "reflect.", +// "github.com/filecoin-project/lotus/chain/vm.(*Invoker).transform", +// "github.com/filecoin-project/go-amt-ipld/", +// } +// for _, pre := range ignorePrefix { +// if strings.HasPrefix(l.Function, pre) { +// return false +// } +// } +// return true +// } + +func (l Loc) String() string { + file := strings.Split(l.File, "/") + + fn := strings.Split(l.Function, "/") + var fnpkg string + if len(fn) > 2 { + fnpkg = strings.Join(fn[len(fn)-2:], "/") + } else { + fnpkg = l.Function + } + + return fmt.Sprintf("%s@%s:%d", fnpkg, file[len(file)-1], l.Line) +} + +var importantRegex = regexp.MustCompile(`github.com/filecoin-project/specs-actors/(v\d+/)?actors/builtin`) + +func (l Loc) Important() bool { + return importantRegex.MatchString(l.Function) +} + +func (gt *GasTrace) MarshalJSON() ([]byte, error) { + type GasTraceCopy GasTrace + if len(gt.Location) == 0 { + if len(gt.Callers) != 0 { + frames := runtime.CallersFrames(gt.Callers) + for { + frame, more := frames.Next() + // TODO: this func name must be fixed + if frame.Function == "github.com/filecoin-project/lotus/chain/vm.(*LegacyVM).ApplyMessage" { + break + } + l := Loc{ + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } + gt.Location = append(gt.Location, l) + if !more { + break + } + } 
+ } + } + + cpy := (*GasTraceCopy)(gt) + return json.Marshal(cpy) +} diff --git a/venus-shared/types/full_block.go b/venus-shared/types/full_block.go new file mode 100644 index 0000000000..bb68723fa5 --- /dev/null +++ b/venus-shared/types/full_block.go @@ -0,0 +1,18 @@ +package types + +import ( + "github.com/ipfs/go-cid" +) + +// FullBlock carries a newBlock header and the message and receipt collections +// referenced from the header. +type FullBlock struct { + Header *BlockHeader + BLSMessages []*Message + SECPMessages []*SignedMessage +} + +// Cid returns the FullBlock's header's Cid +func (fb *FullBlock) Cid() cid.Cid { + return fb.Header.Cid() +} diff --git a/venus-shared/types/full_tipset.go b/venus-shared/types/full_tipset.go new file mode 100644 index 0000000000..9c031b3861 --- /dev/null +++ b/venus-shared/types/full_tipset.go @@ -0,0 +1,71 @@ +package types + +import ( + "github.com/ipfs/go-cid" +) + +// FullTipSet is an expanded version of the TipSet that contains all the blocks and messages +type FullTipSet struct { + Blocks []*FullBlock + tipset *TipSet + cids []cid.Cid +} + +func NewFullTipSet(blks []*FullBlock) *FullTipSet { + return &FullTipSet{ + Blocks: blks, + } +} + +func (fts *FullTipSet) Cids() []cid.Cid { + if fts.cids != nil { + return fts.cids + } + + var cids []cid.Cid + for _, b := range fts.Blocks { + cids = append(cids, b.Cid()) + } + fts.cids = cids + + return cids +} + +// TipSet returns a narrower view of this FullTipSet elliding the newBlock +// messages. +func (fts *FullTipSet) TipSet() *TipSet { + if fts.tipset != nil { + // FIXME: fts.tipset is actually never set. Should it memoize? + return fts.tipset + } + + var headers []*BlockHeader + for _, b := range fts.Blocks { + headers = append(headers, b.Header) + } + + ts, err := NewTipSet(headers) + if err != nil { + panic(err) + } + + return ts +} + +// Reverse reverses the order of the slice `chain`. 
+func ReverseFullBlock(chain []*FullTipSet) { + // https://github.com/golang/go/wiki/SliceTricks#reversing + for i := len(chain)/2 - 1; i >= 0; i-- { + opp := len(chain) - 1 - i + chain[i], chain[opp] = chain[opp], chain[i] + } +} + +// Reverse reverses the order of the slice `chain`. +func ReverseTipSet(chain []*TipSet) { + // https://github.com/golang/go/wiki/SliceTricks#reversing + for i := len(chain)/2 - 1; i >= 0; i-- { + opp := len(chain) - 1 - i + chain[i], chain[opp] = chain[opp], chain[i] + } +} diff --git a/venus-shared/types/gateway/common.go b/venus-shared/types/gateway/common.go new file mode 100644 index 0000000000..b7a8bc3a18 --- /dev/null +++ b/venus-shared/types/gateway/common.go @@ -0,0 +1,39 @@ +package gateway + +import ( + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type RequestEvent struct { + ID types.UUID `json:"Id"` + Method string + Payload []byte + CreateTime time.Time `json:"-"` + Result chan *ResponseEvent `json:"-"` +} + +type ResponseEvent struct { + ID types.UUID `json:"Id"` + Payload []byte + Error string +} + +type ConnectionStates struct { + Connections []*ConnectState + ConnectionCount int +} + +type ConnectState struct { + Addrs []address.Address + ChannelID types.UUID `json:"ChannelId"` + IP string `json:"Ip"` + RequestCount int + CreateTime time.Time +} + +type ConnectedCompleted struct { + ChannelId types.UUID // nolint +} diff --git a/venus-shared/types/gateway/market_event.go b/venus-shared/types/gateway/market_event.go new file mode 100644 index 0000000000..2e5aeecfa2 --- /dev/null +++ b/venus-shared/types/gateway/market_event.go @@ -0,0 +1,37 @@ +package gateway + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +type MarketRegisterPolicy struct { + Miner 
address.Address +} + +type IsUnsealRequest struct { + PieceCid cid.Cid + Sector storage.SectorRef + Offset types.PaddedByteIndex + Size abi.PaddedPieceSize +} + +type IsUnsealResponse struct{} + +type UnsealRequest struct { + PieceCid cid.Cid + Sector storage.SectorRef + Offset types.PaddedByteIndex + Size abi.PaddedPieceSize + Dest string +} + +type UnsealResponse struct{} + +type MarketConnectionState struct { + Addr address.Address + Conn ConnectionStates +} diff --git a/venus-shared/types/gateway/proof_event.go b/venus-shared/types/gateway/proof_event.go new file mode 100644 index 0000000000..f148294485 --- /dev/null +++ b/venus-shared/types/gateway/proof_event.go @@ -0,0 +1,25 @@ +package gateway + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/venus/venus-shared/actors/builtin" +) + +type MinerState struct { + Connections []*ConnectState + ConnectionCount int +} + +type ProofRegisterPolicy struct { + MinerAddress address.Address +} + +type ComputeProofRequest struct { + SectorInfos []builtin.ExtendedSectorInfo + Rand abi.PoStRandomness + Height abi.ChainEpoch + NWVersion network.Version +} diff --git a/venus-shared/types/gateway/wallet_event.go b/venus-shared/types/gateway/wallet_event.go new file mode 100644 index 0000000000..510abaf4da --- /dev/null +++ b/venus-shared/types/gateway/wallet_event.go @@ -0,0 +1,25 @@ +package gateway + +import ( + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type WalletDetail struct { + Account string + SupportAccounts []string + ConnectStates []ConnectState +} + +type WalletRegisterPolicy struct { + SupportAccounts []string + // a slice byte provide by wallet, using to verify address is really exist + SignBytes []byte +} + +type WalletSignRequest struct { + Signer address.Address + ToSign []byte + Meta types.MsgMeta +} diff 
--git a/venus-shared/types/key_info.go b/venus-shared/types/key_info.go new file mode 100644 index 0000000000..66cb10a89f --- /dev/null +++ b/venus-shared/types/key_info.go @@ -0,0 +1,92 @@ +package types + +import ( + "encoding/json" + "fmt" + "math" + + "github.com/filecoin-project/go-state-types/crypto" +) + +var ( + ErrKeyInfoNotFound = fmt.Errorf("key info not found") + ErrKeyExists = fmt.Errorf("key already exists") +) + +// KeyType defines a type of a key +type KeyType string + +func (kt *KeyType) UnmarshalJSON(bb []byte) error { + { + // first option, try unmarshaling as string + var s string + err := json.Unmarshal(bb, &s) + if err == nil { + *kt = KeyType(s) + return nil + } + } + + { + var b byte + err := json.Unmarshal(bb, &b) + if err != nil { + return fmt.Errorf("could not unmarshal KeyType either as string nor integer: %w", err) + } + bst := crypto.SigType(b) + + switch bst { + case crypto.SigTypeBLS: + *kt = KTBLS + case crypto.SigTypeSecp256k1: + *kt = KTSecp256k1 + default: + return fmt.Errorf("unknown sigtype: %d", bst) + } + return nil + } +} + +type SigType = crypto.SigType + +const ( + SigTypeUnknown = SigType(math.MaxUint8) + + SigTypeSecp256k1 = SigType(iota) + SigTypeBLS +) + +const ( + KTUnknown KeyType = "unknown" + KTBLS KeyType = "bls" + KTSecp256k1 KeyType = "secp256k1" + KTSecp256k1Ledger KeyType = "secp256k1-ledger" +) + +func KeyType2Sign(kt KeyType) SigType { + switch kt { + case KTSecp256k1: + return SigTypeSecp256k1 + case KTBLS: + return SigTypeBLS + default: + return SigTypeUnknown + } +} + +func SignType2Key(kt SigType) KeyType { + switch kt { + case SigTypeSecp256k1: + return KTSecp256k1 + case SigTypeBLS: + return KTBLS + default: + return KTUnknown + } +} + +// KeyInfo is used for storing keys in KeyStore +type KeyInfo struct { + Type KeyType + PrivateKey []byte +} diff --git a/venus-shared/types/market/assigner_type.go b/venus-shared/types/market/assigner_type.go new file mode 100644 index 0000000000..b5f122e8b1 --- 
/dev/null +++ b/venus-shared/types/market/assigner_type.go @@ -0,0 +1,75 @@ +package market + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type PieceStatus string + +const ( + Undefine PieceStatus = "Undefine" + Assigned PieceStatus = "Assigned" + Packing PieceStatus = "Packing" + Proving PieceStatus = "Proving" +) + +type DealInfo struct { + piecestore.DealInfo + types.ClientDealProposal + + TransferType string + Root cid.Cid + PublishCid cid.Cid + FastRetrieval bool + Status PieceStatus +} + +type GetDealSpec struct { + // max limit of deal count + MaxPiece int + + // max limit of date size in one single deal + MaxPieceSize uint64 + + // min limit of deal count + MinPiece int + + // min limit of data size in one single deal + MinPieceSize uint64 + + // min limit of total space used by deals + MinUsedSpace uint64 + + // start epoch limit of the chosen deals + // if set, the deals should not be activated before or equal than the this epoch + StartEpoch abi.ChainEpoch + + // end epoch limit of the chosen deals + // if set, the deals should not be alive after or equal than the this epoch + EndEpoch abi.ChainEpoch +} + +type DealInfoIncludePath struct { + types.DealProposal + Offset abi.PaddedPieceSize + Length abi.PaddedPieceSize + PayloadSize uint64 + DealID abi.DealID + TotalStorageFee abi.TokenAmount + FastRetrieval bool + PublishCid cid.Cid +} + +type PieceInfo struct { + PieceCID cid.Cid + Deals []*DealInfo +} + +type TimeStamp struct { + CreatedAt uint64 + UpdatedAt uint64 +} diff --git a/venus-shared/types/market/cbor_gen.go b/venus-shared/types/market/cbor_gen.go new file mode 100644 index 0000000000..1a11554f43 --- /dev/null +++ b/venus-shared/types/market/cbor_gen.go @@ -0,0 +1,1930 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package market + +import ( + "fmt" + "io" + "math" + "sort" + + address "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + filestore "github.com/filecoin-project/go-fil-markets/filestore" + retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" + abi "github.com/filecoin-project/go-state-types/abi" + paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" + crypto "github.com/filecoin-project/go-state-types/crypto" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufFundedAddressState = []byte{132} + +func (t *FundedAddressState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufFundedAddressState); err != nil { + return err + } + + // t.Addr (address.Address) (struct) + if err := t.Addr.MarshalCBOR(cw); err != nil { + return err + } + + // t.AmtReserved (big.Int) (struct) + if err := t.AmtReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.MsgCid (cid.Cid) (struct) + + if t.MsgCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.MsgCid); err != nil { + return xerrors.Errorf("failed to write cid field t.MsgCid: %w", err) + } + } + + // t.TimeStamp (market.TimeStamp) (struct) + if err := t.TimeStamp.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) (err error) { + *t = FundedAddressState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = 
io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Addr (address.Address) (struct) + + { + + if err := t.Addr.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Addr: %w", err) + } + + } + // t.AmtReserved (big.Int) (struct) + + { + + if err := t.AmtReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.AmtReserved: %w", err) + } + + } + // t.MsgCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.MsgCid: %w", err) + } + + t.MsgCid = &c + } + + } + // t.TimeStamp (market.TimeStamp) (struct) + + { + + if err := t.TimeStamp.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TimeStamp: %w", err) + } + + } + return nil +} + +var lengthBufMsgInfo = []byte{133} + +func (t *MsgInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufMsgInfo); err != nil { + return err + } + + // t.ChannelID (string) (string) + if len(t.ChannelID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ChannelID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ChannelID))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ChannelID)); err != nil { + return err + } + + // t.MsgCid (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.MsgCid); err != nil { + return xerrors.Errorf("failed to write cid field t.MsgCid: %w", err) + } + + // t.Received (bool) (bool) + if err := cbg.WriteBool(w, t.Received); err != nil { + return err + } + + // t.Err (string) (string) + 
if len(t.Err) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Err was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Err))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Err)); err != nil { + return err + } + + // t.TimeStamp (market.TimeStamp) (struct) + if err := t.TimeStamp.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *MsgInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = MsgInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 5 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ChannelID (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.ChannelID = string(sval) + } + // t.MsgCid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.MsgCid: %w", err) + } + + t.MsgCid = c + + } + // t.Received (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Received = false + case 21: + t.Received = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Err (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Err = string(sval) + } + // t.TimeStamp (market.TimeStamp) (struct) + + { + + if err := t.TimeStamp.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TimeStamp: %w", err) + } + + } + return nil +} + +var lengthBufChannelInfo = []byte{143} + +func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { + if t == nil { 
+ _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufChannelInfo); err != nil { + return err + } + + // t.ChannelID (string) (string) + if len(t.ChannelID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ChannelID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ChannelID))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ChannelID)); err != nil { + return err + } + + // t.Channel (address.Address) (struct) + if err := t.Channel.MarshalCBOR(cw); err != nil { + return err + } + + // t.Control (address.Address) (struct) + if err := t.Control.MarshalCBOR(cw); err != nil { + return err + } + + // t.Target (address.Address) (struct) + if err := t.Target.MarshalCBOR(cw); err != nil { + return err + } + + // t.Direction (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Direction)); err != nil { + return err + } + + // t.Vouchers ([]*market.VoucherInfo) (slice) + if len(t.Vouchers) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Vouchers was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Vouchers))); err != nil { + return err + } + for _, v := range t.Vouchers { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.NextLane (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.NextLane)); err != nil { + return err + } + + // t.Amount (big.Int) (struct) + if err := t.Amount.MarshalCBOR(cw); err != nil { + return err + } + + // t.AvailableAmount (big.Int) (struct) + if err := t.AvailableAmount.MarshalCBOR(cw); err != nil { + return err + } + + // t.PendingAvailableAmount (big.Int) (struct) + if err := t.PendingAvailableAmount.MarshalCBOR(cw); err != nil { + return err + } + + // t.PendingAmount (big.Int) (struct) + if err := t.PendingAmount.MarshalCBOR(cw); err != nil { + return err + } + + // 
t.CreateMsg (cid.Cid) (struct) + + if t.CreateMsg == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.CreateMsg); err != nil { + return xerrors.Errorf("failed to write cid field t.CreateMsg: %w", err) + } + } + + // t.AddFundsMsg (cid.Cid) (struct) + + if t.AddFundsMsg == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsMsg); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsMsg: %w", err) + } + } + + // t.Settling (bool) (bool) + if err := cbg.WriteBool(w, t.Settling); err != nil { + return err + } + + // t.TimeStamp (market.TimeStamp) (struct) + if err := t.TimeStamp.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = ChannelInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 15 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ChannelID (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.ChannelID = string(sval) + } + // t.Channel (address.Address) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Channel = new(address.Address) + if err := t.Channel.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Channel pointer: %w", err) + } + } + + } + // t.Control (address.Address) (struct) + + { + + if err := t.Control.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Control: %w", err) + } + + } + // t.Target (address.Address) (struct) + + { + + if err 
:= t.Target.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Target: %w", err) + } + + } + // t.Direction (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Direction = uint64(extra) + + } + // t.Vouchers ([]*market.VoucherInfo) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Vouchers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Vouchers = make([]*VoucherInfo, extra) + } + + for i := 0; i < int(extra); i++ { + + var v VoucherInfo + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Vouchers[i] = &v + } + + // t.NextLane (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.NextLane = uint64(extra) + + } + // t.Amount (big.Int) (struct) + + { + + if err := t.Amount.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Amount: %w", err) + } + + } + // t.AvailableAmount (big.Int) (struct) + + { + + if err := t.AvailableAmount.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.AvailableAmount: %w", err) + } + + } + // t.PendingAvailableAmount (big.Int) (struct) + + { + + if err := t.PendingAvailableAmount.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PendingAvailableAmount: %w", err) + } + + } + // t.PendingAmount (big.Int) (struct) + + { + + if err := t.PendingAmount.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PendingAmount: %w", err) + } + + } + // t.CreateMsg (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := 
cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CreateMsg: %w", err) + } + + t.CreateMsg = &c + } + + } + // t.AddFundsMsg (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsMsg: %w", err) + } + + t.AddFundsMsg = &c + } + + } + // t.Settling (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Settling = false + case 21: + t.Settling = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.TimeStamp (market.TimeStamp) (struct) + + { + + if err := t.TimeStamp.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TimeStamp: %w", err) + } + + } + return nil +} + +var lengthBufVoucherInfo = []byte{131} + +func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufVoucherInfo); err != nil { + return err + } + + // t.Voucher (paych.SignedVoucher) (struct) + if err := t.Voucher.MarshalCBOR(cw); err != nil { + return err + } + + // t.Proof ([]uint8) (slice) + if len(t.Proof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Proof was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Proof))); err != nil { + return err + } + + if _, err := cw.Write(t.Proof[:]); err != nil { + return err + } + + // t.Submitted (bool) (bool) + if err := cbg.WriteBool(w, t.Submitted); err != nil { + return err + } + return nil +} + +func (t *VoucherInfo) 
UnmarshalCBOR(r io.Reader) (err error) { + *t = VoucherInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Voucher (paych.SignedVoucher) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Voucher = new(paych.SignedVoucher) + if err := t.Voucher.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Voucher pointer: %w", err) + } + } + + } + // t.Proof ([]uint8) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Proof: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Proof = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.Proof[:]); err != nil { + return err + } + // t.Submitted (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Submitted = false + case 21: + t.Submitted = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + return nil +} + +var lengthBufMinerDeal = []byte{152, 24} + +func (t *MinerDeal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufMinerDeal); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if err := t.ClientDealProposal.MarshalCBOR(cw); 
err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.Miner (peer.ID) (string) + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Miner)); err != nil { + return err + } + + // t.Client (peer.ID) (string) + if len(t.Client) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Client was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Client))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Client)); err != nil { + return err + } + + // t.State (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.PiecePath (filestore.Path) (string) + if len(t.PiecePath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.PiecePath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.PiecePath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.PiecePath)); err != nil { + return err + } + + // t.PayloadSize (uint64) (uint64) + + if err := 
cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PayloadSize)); err != nil { + return err + } + + // t.MetadataPath (filestore.Path) (string) + if len(t.MetadataPath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.MetadataPath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MetadataPath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.MetadataPath)); err != nil { + return err + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.FastRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.FundsReserved (big.Int) (struct) + if err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.Ref (storagemarket.DataRef) (struct) + if err := t.Ref.MarshalCBOR(cw); err != nil { + return err + } + + // t.AvailableForRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.AvailableForRetrieval); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.TransferChannelID (datatransfer.ChannelID) (struct) + if err := t.TransferChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // 
t.SectorNumber (abi.SectorNumber) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + // t.Offset (abi.PaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { + return err + } + + // t.PieceStatus (market.PieceStatus) (string) + if len(t.PieceStatus) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.PieceStatus was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.PieceStatus))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.PieceStatus)); err != nil { + return err + } + + // t.InboundCAR (string) (string) + if len(t.InboundCAR) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.InboundCAR was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.InboundCAR))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.InboundCAR)); err != nil { + return err + } + + // t.TimeStamp (market.TimeStamp) (struct) + if err := t.TimeStamp.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *MinerDeal) UnmarshalCBOR(r io.Reader) (err error) { + *t = MinerDeal{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 24 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + 
t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.Miner (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.Client (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Client = peer.ID(sval) + } + // t.State (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.PiecePath (filestore.Path) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.PiecePath = filestore.Path(sval) + } + // t.PayloadSize (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PayloadSize = uint64(extra) + + } + // t.MetadataPath (filestore.Path) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.MetadataPath = filestore.Path(sval) + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = 
int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.FastRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.FundsReserved (big.Int) (struct) + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.Ref (storagemarket.DataRef) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ref = new(storagemarket.DataRef) + if err := t.Ref.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + } + } + + } + // t.AvailableForRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AvailableForRetrieval = false + case 21: + t.AvailableForRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DealID (abi.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return 
fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.CreationTime (typegen.CborTime) (struct) + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + // t.TransferChannelID (datatransfer.ChannelID) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.TransferChannelID = new(datatransfer.ChannelID) + if err := t.TransferChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelID pointer: %w", err) + } + } + + } + // t.SectorNumber (abi.SectorNumber) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + // t.Offset (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Offset = abi.PaddedPieceSize(extra) + + } + // t.PieceStatus (market.PieceStatus) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.PieceStatus = PieceStatus(sval) + } + // t.InboundCAR (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.InboundCAR = string(sval) + } + // t.TimeStamp (market.TimeStamp) (struct) + + { + + if err := t.TimeStamp.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TimeStamp: %w", err) + } + + } + return nil +} + +var lengthBufRetrievalAsk = []byte{134} + +func (t *RetrievalAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufRetrievalAsk); err != nil { + return 
err + } + + // t.Miner (address.Address) (struct) + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + + // t.PricePerByte (big.Int) (struct) + if err := t.PricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + // t.TimeStamp (market.TimeStamp) (struct) + if err := t.TimeStamp.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *RetrievalAsk) UnmarshalCBOR(r io.Reader) (err error) { + *t = RetrievalAsk{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 6 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Miner (address.Address) (struct) + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.PricePerByte (big.Int) (struct) + + { + + if err := t.PricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.UnsealPrice (big.Int) (struct) + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + 
t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + // t.TimeStamp (market.TimeStamp) (struct) + + { + + if err := t.TimeStamp.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TimeStamp: %w", err) + } + + } + return nil +} + +var lengthBufProviderDealState = []byte{140} + +func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufProviderDealState); err != nil { + return err + } + + // t.DealProposal (retrievalmarket.DealProposal) (struct) + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StoreID)); err != nil { + return err + } + + // t.SelStorageProposalCid (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.SelStorageProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.SelStorageProposalCid: %w", err) + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Receiver (peer.ID) (string) + if len(t.Receiver) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Receiver was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Receiver))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Receiver)); err != nil { + return err + } + + // t.TotalSent (uint64) (uint64) + + if err := 
cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalSent)); err != nil { + return err + } + + // t.FundsReceived (big.Int) (struct) + if err := t.FundsReceived.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + + // t.TimeStamp (market.TimeStamp) (struct) + if err := t.TimeStamp.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *ProviderDealState) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 12 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealProposal (retrievalmarket.DealProposal) (struct) + + { + + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.StoreID (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.StoreID = uint64(extra) + + } + // t.SelStorageProposalCid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid 
field t.SelStorageProposalCid: %w", err) + } + + t.SelStorageProposalCid = c + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.ChannelID = new(datatransfer.ChannelID) + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID pointer: %w", err) + } + } + + } + // t.Status (retrievalmarket.DealStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = retrievalmarket.DealStatus(extra) + + } + // t.Receiver (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Receiver = peer.ID(sval) + } + // t.TotalSent (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSent = uint64(extra) + + } + // t.FundsReceived (big.Int) (struct) + + { + + if err := t.FundsReceived.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReceived: %w", err) + } + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.CurrentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.LegacyProtocol (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + 
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.TimeStamp (market.TimeStamp) (struct) + + { + + if err := t.TimeStamp.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TimeStamp: %w", err) + } + + } + return nil +} + +var lengthBufTimeStamp = []byte{130} + +func (t *TimeStamp) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufTimeStamp); err != nil { + return err + } + + // t.CreatedAt (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CreatedAt)); err != nil { + return err + } + + // t.UpdatedAt (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.UpdatedAt)); err != nil { + return err + } + + return nil +} + +func (t *TimeStamp) UnmarshalCBOR(r io.Reader) (err error) { + *t = TimeStamp{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.CreatedAt (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CreatedAt = uint64(extra) + + } + // t.UpdatedAt (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.UpdatedAt = uint64(extra) + + } + return nil +} + +var lengthBufSignedStorageAsk = []byte{131} + +func (t *SignedStorageAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := 
cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufSignedStorageAsk); err != nil { + return err + } + + // t.Ask (storagemarket.StorageAsk) (struct) + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + + // t.TimeStamp (market.TimeStamp) (struct) + if err := t.TimeStamp.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedStorageAsk) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedStorageAsk{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Ask (storagemarket.StorageAsk) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(storagemarket.StorageAsk) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + // t.Signature (crypto.Signature) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + // t.TimeStamp (market.TimeStamp) (struct) + + { + + if err := t.TimeStamp.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TimeStamp: %w", err) + } + + } + return nil +} diff --git a/venus-shared/types/market/client/data.go b/venus-shared/types/market/client/data.go new file mode 100644 index 
0000000000..3a38e2d2f1 --- /dev/null +++ b/venus-shared/types/market/client/data.go @@ -0,0 +1,98 @@ +package client + +import ( + "fmt" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" +) + +// DataSelector specifies ipld selector string +// - if the string starts with '{', it's interpreted as json selector string +// see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/ +// - otherwise the string is interpreted as ipld-selector-text-lite (simple ipld path) +// see https://github.com/ipld/go-ipld-selector-text-lite +type DataSelector string + +type FileRef struct { + Path string + IsCAR bool +} + +type ImportID uint64 + +func (id ImportID) DsKey() datastore.Key { + return datastore.NewKey(fmt.Sprintf("%d", id)) +} + +type ImportRes struct { + Root cid.Cid + ImportID ImportID +} + +type Import struct { + Key ImportID + Err string + + Root *cid.Cid + + // Source is the provenance of the import, e.g. "import", "unknown", else. + // Currently useless but may be used in the future. + Source string + + // FilePath is the path of the original file. It is important that the file + // is retained at this path, because it will be referenced during + // the transfer (when we do the UnixFS chunking, we don't duplicate the + // leaves, but rather point to chunks of the original data through + // positional references). + FilePath string + + // CARPath is the path of the CAR file containing the DAG for this import. 
+ CARPath string +} + +type ExportRef struct { + Root cid.Cid + + // DAGs array specifies a list of DAGs to export + // - If exporting into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node + // - If exporting into a car file + // - When exactly one text-path DataSelector is specified exports the subgraph and its full merkle-path from the original root + // - Otherwise ( multiple paths and/or JSON selector specs) determines each individual subroot and exports the subtrees as a multi-root car + // - When not specified defaults to a single DAG: + // - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}` + DAGs []DagSpec + + FromLocalCAR string // if specified, get data from a local CARv2 file. + DealID retrievalmarket.DealID +} + +type DagSpec struct { + // DataSelector matches data to be retrieved + // - when using textselector, the path specifies subtree + // - the matched graph must have a single root + DataSelector *DataSelector + + // ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector + // When true, in addition to the selection target, the resulting CAR will contain every block along the + // path back to, and including the original root + // When false the resulting CAR contains only the blocks of the target subdag + ExportMerkleProof bool +} + +type CommPRet struct { + Root cid.Cid + Size abi.UnpaddedPieceSize +} + +type DataSize struct { + PayloadSize int64 + PieceSize abi.PaddedPieceSize +} +type DataCIDSize struct { + PayloadSize int64 + PieceSize abi.PaddedPieceSize + PieceCID cid.Cid +} diff --git a/venus-shared/types/market/client/deal.go b/venus-shared/types/market/client/deal.go new file mode 100644 index 0000000000..b85663caf9 --- /dev/null +++ b/venus-shared/types/market/client/deal.go @@ -0,0 +1,49 @@ +package client + +import ( + "time" + + "github.com/filecoin-project/go-address" + datatransfer 
"github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/types/market" +) + +type StartDealParams struct { + Data *storagemarket.DataRef + Wallet address.Address + Miner address.Address + EpochPrice types.BigInt + MinBlocksDuration uint64 + ProviderCollateral types.BigInt + DealStartEpoch abi.ChainEpoch + FastRetrieval bool + VerifiedDeal bool +} + +type DealInfo struct { + ProposalCid cid.Cid + State storagemarket.StorageDealStatus + Message string // more information about deal state, particularly errors + DealStages *storagemarket.DealStages + Provider address.Address + + DataRef *storagemarket.DataRef + PieceCID cid.Cid + Size uint64 + + PricePerEpoch types.BigInt + Duration uint64 + + DealID abi.DealID + + CreationTime time.Time + Verified bool + + TransferChannelID *datatransfer.ChannelID + DataTransfer *market.DataTransferChannel +} diff --git a/venus-shared/types/market/client/query.go b/venus-shared/types/market/client/query.go new file mode 100644 index 0000000000..3351d07b9a --- /dev/null +++ b/venus-shared/types/market/client/query.go @@ -0,0 +1,42 @@ +package client + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type QueryOffer struct { + Err string + + Root cid.Cid + Piece *cid.Cid + + Size uint64 + MinPrice types.BigInt + UnsealPrice types.BigInt + PricePerByte abi.TokenAmount + PaymentInterval uint64 + PaymentIntervalIncrease uint64 + Miner address.Address + MinerPeer retrievalmarket.RetrievalPeer +} + +func (o *QueryOffer) Order(client address.Address) RetrievalOrder { + return RetrievalOrder{ + Root: 
o.Root, + Piece: o.Piece, + Size: o.Size, + Total: o.MinPrice, + UnsealPrice: o.UnsealPrice, + PaymentInterval: o.PaymentInterval, + PaymentIntervalIncrease: o.PaymentIntervalIncrease, + Client: client, + + Miner: o.Miner, + MinerPeer: &o.MinerPeer, + } +} diff --git a/venus-shared/types/market/client/retrieval_order.go b/venus-shared/types/market/client/retrieval_order.go new file mode 100644 index 0000000000..b526e751f8 --- /dev/null +++ b/venus-shared/types/market/client/retrieval_order.go @@ -0,0 +1,35 @@ +package client + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/google/uuid" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type RetrievalOrder struct { + // TODO: make this less unixfs specific + Root cid.Cid + Piece *cid.Cid + DataSelector *DataSelector + + Size uint64 + Total types.BigInt + + UnsealPrice types.BigInt + PaymentInterval uint64 + PaymentIntervalIncrease uint64 + Client address.Address + Miner address.Address + MinerPeer *retrievalmarket.RetrievalPeer + + RemoteStore *RemoteStoreID `json:"RemoteStore,omitempty"` +} + +type RemoteStoreID = uuid.UUID + +type RestrievalRes struct { + DealID retrievalmarket.DealID +} diff --git a/venus-shared/types/market/client/retriveval_info.go b/venus-shared/types/market/client/retriveval_info.go new file mode 100644 index 0000000000..f1a6661014 --- /dev/null +++ b/venus-shared/types/market/client/retriveval_info.go @@ -0,0 +1,32 @@ +package client + +import ( + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/filecoin-project/venus/venus-shared/types/market" +) + +type RetrievalInfo struct { + PayloadCID cid.Cid + ID retrievalmarket.DealID + PieceCID *cid.Cid + PricePerByte 
abi.TokenAmount + UnsealPrice abi.TokenAmount + + Status retrievalmarket.DealStatus + Message string // more information about deal state, particularly errors + Provider peer.ID + BytesReceived uint64 + BytesPaidFor uint64 + TotalPaid abi.TokenAmount + + TransferChannelID *datatransfer.ChannelID + DataTransfer *market.DataTransferChannel + + // optional event if part of ClientGetRetrievalUpdates + Event *retrievalmarket.ClientEvent +} diff --git a/venus-shared/types/market/common.go b/venus-shared/types/market/common.go new file mode 100644 index 0000000000..f80e18cbc0 --- /dev/null +++ b/venus-shared/types/market/common.go @@ -0,0 +1,24 @@ +package market + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type SignInfo struct { + Data interface{} + Type types.MsgType + Addr address.Address +} + +type User struct { + Addr address.Address + Account string +} + +type MarketBalance struct { //nolint + Escrow big.Int + Locked big.Int +} diff --git a/venus-shared/types/market/dagstore.go b/venus-shared/types/market/dagstore.go new file mode 100644 index 0000000000..f0166e3059 --- /dev/null +++ b/venus-shared/types/market/dagstore.go @@ -0,0 +1,32 @@ +package market + +// DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that +// we expose through JSON-RPC to avoid clients having to depend on the +// dagstore lib. +type DagstoreShardInfo struct { + Key string + State string + Error string +} + +// DagstoreShardResult enumerates results per shard. +type DagstoreShardResult struct { + Key string + Success bool + Error string +} + +type DagstoreInitializeAllParams struct { + MaxConcurrency int + IncludeSealed bool +} + +// DagstoreInitializeAllEvent represents an initialization event. 
+type DagstoreInitializeAllEvent struct { + Key string + Event string // "start", "end" + Success bool + Error string + Total int + Current int +} diff --git a/venus-shared/types/market/data_transfer.go b/venus-shared/types/market/data_transfer.go new file mode 100644 index 0000000000..ee65cd7e53 --- /dev/null +++ b/venus-shared/types/market/data_transfer.go @@ -0,0 +1,55 @@ +package market + +import ( + "encoding/json" + "fmt" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" +) + +type DataTransferChannel struct { + TransferID datatransfer.TransferID + Status datatransfer.Status + BaseCID cid.Cid + IsInitiator bool + IsSender bool + Voucher string + Message string + OtherPeer peer.ID + Transferred uint64 + Stages *datatransfer.ChannelStages +} + +// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id +func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelState) DataTransferChannel { + channel := DataTransferChannel{ + TransferID: channelState.TransferID(), + Status: channelState.Status(), + BaseCID: channelState.BaseCID(), + IsSender: channelState.Sender() == hostID, + Message: channelState.Message(), + } + stringer, ok := channelState.Voucher().(fmt.Stringer) + if ok { + channel.Voucher = stringer.String() + } else { + voucherJSON, err := json.Marshal(channelState.Voucher()) + if err != nil { + channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error() //nolint:stylecheck + } else { + channel.Voucher = string(voucherJSON) + } + } + if channel.IsSender { + channel.IsInitiator = !channelState.IsPull() + channel.Transferred = channelState.Sent() + channel.OtherPeer = channelState.Recipient() + } else { + channel.IsInitiator = channelState.IsPull() + channel.Transferred = channelState.Received() + channel.OtherPeer = channelState.Sender() + } + return channel +} diff --git 
a/venus-shared/types/market/exported.go b/venus-shared/types/market/exported.go new file mode 100644 index 0000000000..3dd7a4330d --- /dev/null +++ b/venus-shared/types/market/exported.go @@ -0,0 +1,12 @@ +package market + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" +) + +type ( + SectorSize = abi.SectorSize + MinerAddress = address.Address + MinerID = abi.ActorID +) diff --git a/venus-shared/types/market/funded_address_state.go b/venus-shared/types/market/funded_address_state.go new file mode 100644 index 0000000000..c46dec78a0 --- /dev/null +++ b/venus-shared/types/market/funded_address_state.go @@ -0,0 +1,19 @@ +package market + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" +) + +// FundedAddressState keeps track of the state of an address with funds in the +// datastore +type FundedAddressState struct { + Addr address.Address + // AmtReserved is the amount that must be kept in the address (cannot be + // withdrawn) + AmtReserved abi.TokenAmount + // MsgCid is the cid of an in-progress on-chain message + MsgCid *cid.Cid + TimeStamp +} diff --git a/venus-shared/types/market/miner_deal.go b/venus-shared/types/market/miner_deal.go new file mode 100644 index 0000000000..7211234b05 --- /dev/null +++ b/venus-shared/types/market/miner_deal.go @@ -0,0 +1,72 @@ +package market + +import ( + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + cbg "github.com/whyrusleeping/cbor-gen" +) + +type MinerDeal struct { + types.ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + Miner peer.ID + Client 
peer.ID + State storagemarket.StorageDealStatus + PiecePath filestore.Path + PayloadSize uint64 + MetadataPath filestore.Path + SlashEpoch abi.ChainEpoch + FastRetrieval bool + Message string + FundsReserved abi.TokenAmount + Ref *storagemarket.DataRef + AvailableForRetrieval bool + + DealID abi.DealID + CreationTime cbg.CborTime + + TransferChannelID *datatransfer.ChannelID `json:"TransferChannelId"` + SectorNumber abi.SectorNumber + + Offset abi.PaddedPieceSize + PieceStatus PieceStatus + + InboundCAR string + + TimeStamp +} + +func (deal *MinerDeal) FilMarketMinerDeal() *storagemarket.MinerDeal { + return &storagemarket.MinerDeal{ + ClientDealProposal: deal.ClientDealProposal, + ProposalCid: deal.ProposalCid, + AddFundsCid: deal.AddFundsCid, + PublishCid: deal.PublishCid, + Miner: deal.Miner, + Client: deal.Client, + State: deal.State, + PiecePath: deal.PiecePath, + MetadataPath: deal.MetadataPath, + SlashEpoch: deal.SlashEpoch, + FastRetrieval: deal.FastRetrieval, + Message: deal.Message, + FundsReserved: deal.FundsReserved, + Ref: deal.Ref, + AvailableForRetrieval: deal.AvailableForRetrieval, + + DealID: deal.DealID, + CreationTime: deal.CreationTime, + + TransferChannelId: deal.TransferChannelID, + SectorNumber: deal.SectorNumber, + + InboundCAR: deal.InboundCAR, + } +} diff --git a/venus-shared/types/market/paych.go b/venus-shared/types/market/paych.go new file mode 100644 index 0000000000..3fa671e0f7 --- /dev/null +++ b/venus-shared/types/market/paych.go @@ -0,0 +1,165 @@ +package market + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/ipfs/go-cid" +) + +type VoucherInfo struct { + Voucher *types.SignedVoucher + Proof []byte // ignored + Submitted bool +} + +type VoucherInfos []*VoucherInfo + +func (info *VoucherInfos) 
Scan(value interface{}) error { + data, ok := value.([]byte) + if !ok { + return fmt.Errorf("value must be []byte") + } + return json.Unmarshal(data, info) +} + +func (info VoucherInfos) Value() (driver.Value, error) { + return json.Marshal(info) +} + +// ChannelInfo keeps track of information about a channel +type ChannelInfo struct { + // ChannelID is a uuid set at channel creation + ChannelID string + // Channel address - may be nil if the channel hasn't been created yet + Channel *address.Address + // Control is the address of the local node + Control address.Address + // Target is the address of the remote node (on the other end of the channel) + Target address.Address + // Direction indicates if the channel is inbound (Control is the "to" address) + // or outbound (Control is the "from" address) + Direction uint64 + // Vouchers is a list of all vouchers sent on the channel + Vouchers []*VoucherInfo + // NextLane is the number of the next lane that should be used when the + // client requests a new lane (eg to create a voucher for a new deal) + NextLane uint64 + // Amount added to the channel. + // Note: This amount is only used by GetPaych to keep track of how much + // has locally been added to the channel. It should reflect the channel's + // Balance on chain as long as all operations occur on the same datastore. 
+ Amount big.Int + // AvailableAmount indicates how much afil is non-reserved + AvailableAmount big.Int + // PendingAvailableAmount is available amount that we're awaiting confirmation of + PendingAvailableAmount big.Int + // PendingAmount is the amount that we're awaiting confirmation of + PendingAmount big.Int + // CreateMsg is the CID of a pending create message (while waiting for confirmation) + CreateMsg *cid.Cid + // AddFundsMsg is the CID of a pending add funds message (while waiting for confirmation) + AddFundsMsg *cid.Cid + // Settling indicates whether the channel has entered into the settling state + Settling bool + + TimeStamp +} + +func (ci *ChannelInfo) From() address.Address { + if ci.Direction == DirOutbound { + return ci.Control + } + return ci.Target +} + +func (ci *ChannelInfo) To() address.Address { + if ci.Direction == DirOutbound { + return ci.Target + } + return ci.Control +} + +// infoForVoucher gets the VoucherInfo for the given voucher. +// returns nil if the channel doesn't have the voucher. +func (ci *ChannelInfo) InfoForVoucher(sv *types.SignedVoucher) (*VoucherInfo, error) { + for _, v := range ci.Vouchers { + eq, err := cborutil.Equals(sv, v.Voucher) + if err != nil { + return nil, err + } + if eq { + return v, nil + } + } + return nil, nil +} + +func (ci *ChannelInfo) HasVoucher(sv *types.SignedVoucher) (bool, error) { + vi, err := ci.InfoForVoucher(sv) + return vi != nil, err +} + +// markVoucherSubmitted marks the voucher, and any vouchers of lower nonce +// in the same lane, as being submitted. +// Note: This method doesn't write anything to the store. 
+func (ci *ChannelInfo) MarkVoucherSubmitted(sv *types.SignedVoucher) error { + vi, err := ci.InfoForVoucher(sv) + if err != nil { + return err + } + if vi == nil { + return fmt.Errorf("cannot submit voucher that has not been added to channel") + } + + // Mark the voucher as submitted + vi.Submitted = true + + // Mark lower-nonce vouchers in the same lane as submitted (lower-nonce + // vouchers are superseded by the submitted voucher) + for _, vi := range ci.Vouchers { + if vi.Voucher.Lane == sv.Lane && vi.Voucher.Nonce < sv.Nonce { + vi.Submitted = true + } + } + + return nil +} + +// wasVoucherSubmitted returns true if the voucher has been submitted +func (ci *ChannelInfo) WasVoucherSubmitted(sv *types.SignedVoucher) (bool, error) { + vi, err := ci.InfoForVoucher(sv) + if err != nil { + return false, err + } + if vi == nil { + return false, fmt.Errorf("cannot submit voucher that has not been added to channel") + } + return vi.Submitted, nil +} + +// MsgInfo stores information about a create channel / add funds message +// that has been sent +type MsgInfo struct { + // ChannelID links the message to a channel + ChannelID string + // MsgCid is the CID of the message + MsgCid cid.Cid + // Received indicates whether a response has been received + Received bool + // Err is the error received in the response + Err string + TimeStamp +} + +const ( + DirInbound = 1 + DirOutbound = 2 +) + +var ErrChannelNotFound = fmt.Errorf("channel not found") diff --git a/venus-shared/types/market/piecestorage.go b/venus-shared/types/market/piecestorage.go new file mode 100644 index 0000000000..09139fb6f7 --- /dev/null +++ b/venus-shared/types/market/piecestorage.go @@ -0,0 +1,22 @@ +package market + +type PieceStorageInfos struct { + FsStorage []FsStorage + S3Storage []S3Storage +} + +type FsStorage struct { + Path string + Name string + ReadOnly bool + Status StorageStatus +} + +type S3Storage struct { + Name string + ReadOnly bool + EndPoint string + Bucket string + SubDir string + 
Status StorageStatus +} diff --git a/venus-shared/types/market/retrieval_ask.go b/venus-shared/types/market/retrieval_ask.go new file mode 100644 index 0000000000..fc846894a6 --- /dev/null +++ b/venus-shared/types/market/retrieval_ask.go @@ -0,0 +1,15 @@ +package market + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" +) + +type RetrievalAsk struct { + Miner address.Address + PricePerByte abi.TokenAmount + UnsealPrice abi.TokenAmount + PaymentInterval uint64 + PaymentIntervalIncrease uint64 + TimeStamp +} diff --git a/venus-shared/types/market/retrieval_deal.go b/venus-shared/types/market/retrieval_deal.go new file mode 100644 index 0000000000..3e2a6d30c6 --- /dev/null +++ b/venus-shared/types/market/retrieval_deal.go @@ -0,0 +1,48 @@ +package market + +import ( + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" +) + +// ProviderDealState is the current state of a deal from the point of view +// of a retrieval provider +type ProviderDealState struct { + retrievalmarket.DealProposal + StoreID uint64 + SelStorageProposalCid cid.Cid + ChannelID *datatransfer.ChannelID + Status retrievalmarket.DealStatus + Receiver peer.ID + TotalSent uint64 + FundsReceived abi.TokenAmount + Message string + CurrentInterval uint64 + LegacyProtocol bool + TimeStamp +} + +func (deal *ProviderDealState) TotalPaidFor() uint64 { + totalPaidFor := uint64(0) + if !deal.PricePerByte.IsZero() { + totalPaidFor = big.Div(big.Max(big.Sub(deal.FundsReceived, deal.UnsealPrice), big.Zero()), deal.PricePerByte).Uint64() + } + return totalPaidFor +} + +func (deal *ProviderDealState) IntervalLowerBound() uint64 { + return deal.Params.IntervalLowerBound(deal.CurrentInterval) +} + +func (deal 
*ProviderDealState) NextInterval() uint64 { + return deal.Params.NextInterval(deal.CurrentInterval) +} + +// Identifier provides a unique id for this provider deal +func (deal ProviderDealState) Identifier() retrievalmarket.ProviderDealIdentifier { + return retrievalmarket.ProviderDealIdentifier{Receiver: deal.Receiver, DealID: deal.ID} +} diff --git a/venus-shared/types/market/storage.go b/venus-shared/types/market/storage.go new file mode 100644 index 0000000000..54ecfcc119 --- /dev/null +++ b/venus-shared/types/market/storage.go @@ -0,0 +1,155 @@ +package market + +import ( + "time" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/venus/venus-shared/types" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" +) + +// todo move to sealer + +// PendingDealInfo has info about pending deals and when they are due to be +// published +type PendingDealInfo struct { + Deals []types.ClientDealProposal + PublishPeriodStart time.Time + PublishPeriod time.Duration +} + +type SectorOffset struct { + Sector abi.SectorNumber + Offset abi.PaddedPieceSize +} + +// DealInfo is a tuple of deal identity and its schedule +type PieceDealInfo struct { + PublishCid *cid.Cid + DealID abi.DealID + DealProposal *types.DealProposal + DealSchedule DealSchedule + KeepUnsealed bool +} + +// DealSchedule communicates the time interval of a piecestorage deal. The deal must +// appear in a sealed (proven) sector no later than StartEpoch, otherwise it +// is invalid. 
+type DealSchedule struct { + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch +} + +type SectorState string + +type SealTicket struct { + Value abi.SealRandomness + Epoch abi.ChainEpoch +} + +type SealSeed struct { + Value abi.InteractiveSealRandomness + Epoch abi.ChainEpoch +} + +type SectorLog struct { + Kind string + Timestamp uint64 + + Trace string + + Message string +} + +type SectorInfo struct { + SectorID abi.SectorNumber + State SectorState + CommD *cid.Cid + CommR *cid.Cid + Proof []byte + Deals []abi.DealID + Ticket SealTicket + Seed SealSeed + PreCommitMsg *cid.Cid + CommitMsg *cid.Cid + Retries uint64 + ToUpgrade bool + + LastErr string + + Log []SectorLog + + // On Chain Info + SealProof abi.RegisteredSealProof // The seal proof type implies the PoSt proof/s + Activation abi.ChainEpoch // Epoch during which the sector proof was accepted + Expiration abi.ChainEpoch // Epoch during which the sector expires + DealWeight abi.DealWeight // Integral of active deals over sector lifetime + VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime + InitialPledge abi.TokenAmount // Pledge collected to commit this sector + // Expiration Info + OnTime abi.ChainEpoch + // non-zero if sector is faulty, epoch at which it will be permanently + // removed if it doesn't recover + Early abi.ChainEpoch +} + +type SealedRef struct { + SectorID abi.SectorNumber + Offset abi.PaddedPieceSize + Size abi.UnpaddedPieceSize +} + +type SealedRefs struct { + Refs []SealedRef +} + +type AddrUse int + +const ( + PreCommitAddr AddrUse = iota + CommitAddr + DealPublishAddr + PoStAddr + + TerminateSectorsAddr +) + +// StorageDealStatistic storage statistical information +// The struct is used here for statistical information that may need to be added in the future +type StorageDealStatistic struct { + DealsStatus map[storagemarket.StorageDealStatus]int64 +} + +// RetrievalDealStatistic storage statistical information +// The struct is used here 
for statistical information that may need to be added in the future +type RetrievalDealStatistic struct { + DealsStatus map[retrievalmarket.DealStatus]int64 +} + +type StorageStatus struct { + Capacity int64 + Available int64 + Reserved int64 +} + +type SignedStorageAsk struct { + Ask *storagemarket.StorageAsk + Signature *crypto.Signature + TimeStamp +} + +func (sa *SignedStorageAsk) ToChainAsk() *storagemarket.SignedStorageAsk { + return &storagemarket.SignedStorageAsk{ + Ask: sa.Ask, + Signature: sa.Signature, + } +} + +func FromChainAsk(s *storagemarket.SignedStorageAsk) *SignedStorageAsk { + return &SignedStorageAsk{Ask: s.Ask, Signature: s.Signature} +} diff --git a/venus-shared/types/message.go b/venus-shared/types/message.go new file mode 100644 index 0000000000..e419ad5ff5 --- /dev/null +++ b/venus-shared/types/message.go @@ -0,0 +1,13 @@ +package types + +import ( + "github.com/filecoin-project/venus/venus-shared/internal" +) + +const MessageVersion = internal.MessageVersion + +type ChainMsg = internal.ChainMsg + +var DecodeMessage = internal.DecodeMessage + +type Message = internal.Message diff --git a/venus-shared/types/message_marshal.go b/venus-shared/types/message_marshal.go new file mode 100644 index 0000000000..05aba47cb6 --- /dev/null +++ b/venus-shared/types/message_marshal.go @@ -0,0 +1,7 @@ +package types + +import ( + "github.com/filecoin-project/venus/venus-shared/internal" +) + +type RawMessage = internal.RawMessage diff --git a/venus-shared/types/message_receipt.go b/venus-shared/types/message_receipt.go new file mode 100644 index 0000000000..8140272037 --- /dev/null +++ b/venus-shared/types/message_receipt.go @@ -0,0 +1,26 @@ +package types + +import ( + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-state-types/exitcode" +) + +// MessageReceipt is what is returned by executing a message on the vm. 
+type MessageReceipt struct { + ExitCode exitcode.ExitCode + Return []byte + GasUsed int64 +} + +func (r *MessageReceipt) String() string { + errStr := "(error encoding MessageReceipt)" + + js, err := json.MarshalIndent(r, "", " ") + if err != nil { + return errStr + } + + return fmt.Sprintf("MessageReceipt: %s", string(js)) +} diff --git a/venus-shared/types/message_receipt_test.go b/venus-shared/types/message_receipt_test.go new file mode 100644 index 0000000000..546a29fe74 --- /dev/null +++ b/venus-shared/types/message_receipt_test.go @@ -0,0 +1,57 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/exitcode" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestMessageReceiptBasic(t *testing.T) { + tf.UnitTest(t) + dataLen := 32 + + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst MessageReceipt + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + require.Nil(t, src.Return) + }, + + ProvideOpts: []interface{}{ + testutil.BytesFixedProvider(dataLen), + testutil.IntRangedProvider(10_000_000, 50_000_000), + func(t *testing.T) exitcode.ExitCode { + p := testutil.IntRangedProvider(0, 20) + next := p(t) + return exitcode.ExitCode(next) + }, + }, + + Provided: func() { + require.Len(t, src.Return, dataLen) + + require.GreaterOrEqual(t, src.ExitCode, exitcode.ExitCode(0)) + require.Less(t, src.ExitCode, exitcode.ExitCode(20)) + + require.GreaterOrEqual(t, src.GasUsed, int64(10_000_000)) + require.Less(t, src.GasUsed, int64(50_000_000)) + }, + + Finished: func() { + require.Equal(t, src, dst, "from src to dst through cbor") + require.Equal(t, src.String(), dst.String(), "string representation") + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/types/message_root.go 
b/venus-shared/types/message_root.go new file mode 100644 index 0000000000..9211f1f9db --- /dev/null +++ b/venus-shared/types/message_root.go @@ -0,0 +1,55 @@ +package types + +import ( + "bytes" + + "github.com/filecoin-project/go-state-types/abi" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +type MessageRoot struct { + BlsRoot cid.Cid + SecpkRoot cid.Cid +} + +func (mr *MessageRoot) Serialize() ([]byte, error) { + buf := new(bytes.Buffer) + if err := mr.MarshalCBOR(buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (mr *MessageRoot) SerializeWithCid() (cid.Cid, []byte, error) { + data, err := mr.Serialize() + if err != nil { + return cid.Undef, nil, err + } + + c, err := abi.CidBuilder.Sum(data) + if err != nil { + return cid.Undef, nil, err + } + + return c, data, nil +} + +func (mr *MessageRoot) ToStorageBlock() (blocks.Block, error) { + c, data, err := mr.SerializeWithCid() + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, c) +} + +func (mr *MessageRoot) Cid() cid.Cid { + c, _, err := mr.SerializeWithCid() + if err != nil { + panic(err) + } + + return c +} diff --git a/venus-shared/types/message_root_test.go b/venus-shared/types/message_root_test.go new file mode 100644 index 0000000000..a5de123ed2 --- /dev/null +++ b/venus-shared/types/message_root_test.go @@ -0,0 +1,47 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestMessageRootBasic(t *testing.T) { + tf.UnitTest(t) + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst MessageRoot + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + require.Equal(t, src.BlsRoot, cid.Undef) + require.Equal(t, src.SecpkRoot, cid.Undef) + }, + + 
Provided: func() { + require.NotEqual(t, src.BlsRoot, cid.Undef) + require.NotEqual(t, src.SecpkRoot, cid.Undef) + }, + + Finished: func() { + require.Equal(t, src, dst, "from src to dst through cbor") + + blk, err := src.ToStorageBlock() + require.NoError(t, err, "ToStorageBlock") + + srcCid := src.Cid() + require.Equal(t, srcCid, dst.Cid(), "cid compare to dst") + require.Equal(t, srcCid, blk.Cid(), "cid compare to sblk") + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/types/message_test.go b/venus-shared/types/message_test.go new file mode 100644 index 0000000000..4eab547106 --- /dev/null +++ b/venus-shared/types/message_test.go @@ -0,0 +1,210 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/filecoin-project/venus/venus-shared/types/params" + blocks "github.com/ipfs/go-block-format" + "github.com/stretchr/testify/require" +) + +func TestMessageBasic(t *testing.T) { + tf.UnitTest(t) + paramsLen := 32 + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst Message + var blk blocks.Block + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + }, + + ProvideOpts: []interface{}{ + testutil.BytesFixedProvider(paramsLen), + testutil.BlsAddressProvider(), + }, + + Provided: func() { + require.NotEqual(t, src, dst, "value provided") + require.Equal(t, src.From.Protocol(), address.BLS, "from addr proto") + require.Equal(t, src.To.Protocol(), address.BLS, "to addr proto") + require.Len(t, src.Params, paramsLen, "params length") + + src.Version = MessageVersion + + sblk, err := src.ToStorageBlock() + require.NoError(t, err, "ToStorageBlock") + blk = sblk + }, + + 
Marshaled: func(b []byte) { + decoded, err := DecodeMessage(b) + require.NoError(t, err, "DecodeMessage") + require.True(t, src.Equals(decoded)) + }, + + Finished: func() { + require.Equal(t, src, dst) + require.True(t, src.Equals(&dst)) + require.True(t, src.EqualCall(&dst)) + require.Equal(t, src.Cid(), dst.Cid()) + require.Equal(t, src.Cid(), blk.Cid()) + require.Equal(t, src.String(), dst.String()) + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} + +func TestMessageValidForBlockInclusion(t *testing.T) { + tf.UnitTest(t) + var msg Message + testutil.Provide( + t, + &msg, + testutil.IntRangedProvider(0, params.BlockGasLimit), + func(t *testing.T) big.Int { + ip := testutil.IntRangedProvider(0, int(params.FilBase)) + return FromFil(uint64(ip(t))) + }, + ) + + // ensure that random assignments won't break the validation + msg.Version = 0 + msg.GasPremium = msg.GasFeeCap + + err := msg.ValidForBlockInclusion(0, network.Version7) + require.NoError(t, err, "ValidForBlockInclusion") + + neg := NewInt(1).Neg() + + valCases := []struct { + name string + sets []interface{} + minGas int64 + version network.Version + }{ + { + name: "Version != 0", + sets: []interface{}{ + &msg.Version, + uint64(MessageVersion + 1), + }, + }, + { + name: "To:Undef", + sets: []interface{}{ + &msg.To, + address.Undef, + }, + }, + { + name: "To:ZeroAddress", + sets: []interface{}{ + &msg.To, + ZeroAddress, + }, + version: network.Version7, + }, + { + name: "From:Undef", + sets: []interface{}{ + &msg.From, + address.Undef, + }, + }, + { + name: "Value:nil", + sets: []interface{}{ + &msg.Value, + EmptyInt, + }, + }, + { + name: "Value:neg", + sets: []interface{}{ + &msg.Value, + neg, + }, + }, + { + name: "Value:TooLarge", + sets: []interface{}{ + &msg.Value, + BigAdd(TotalFilecoinInt, NewInt(1)), + }, + }, + { + name: "GasFeeCap:nil", + sets: []interface{}{ + &msg.GasFeeCap, + EmptyInt, + }, + }, + { + name: "GasFeeCap:neg", + sets: []interface{}{ + &msg.GasFeeCap, + neg, + }, + 
}, + { + name: "GasPremium:nil", + sets: []interface{}{ + &msg.GasPremium, + EmptyInt, + }, + }, + { + name: "GasPremium:neg", + sets: []interface{}{ + &msg.GasPremium, + neg, + }, + }, + { + name: "GasPremium: > GasFeeCap", + sets: []interface{}{ + &msg.GasPremium, + BigAdd(msg.GasFeeCap, NewInt(1)), + }, + }, + { + name: "GasLimit: > BlockGasLimit", + sets: []interface{}{ + &msg.GasLimit, + int64(params.BlockGasLimit) + 1, + }, + }, + { + name: "GasLimit: < minGas", + sets: []interface{}{ + &msg.GasLimit, + int64(-1), + }, + }, + } + + for _, c := range valCases { + onSet := func() { + err := msg.ValidForBlockInclusion(c.minGas, c.version) + require.Errorf(t, err, "after invalid values set for %s", c.name) + } + + onReset := func() { + err := msg.ValidForBlockInclusion(c.minGas, c.version) + require.NoErrorf(t, err, "after values reset for %s", c.name) + } + + testutil.ValueSetNReset(t, c.name, onSet, onReset, c.sets...) + } +} diff --git a/venus-shared/types/messager/address.go b/venus-shared/types/messager/address.go new file mode 100644 index 0000000000..3dfeb90d81 --- /dev/null +++ b/venus-shared/types/messager/address.go @@ -0,0 +1,60 @@ +package messager + +import ( + "fmt" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/venus/venus-shared/types" +) + +type AddressState int + +const ( + _ AddressState = iota + AddressStateAlive + AddressStateRemoving + AddressStateRemoved + AddressStateForbbiden // forbbiden received message +) + +func (as AddressState) String() string { + switch as { + case AddressStateAlive: + return "Alive" + case AddressStateRemoving: + return "Removing" + case AddressStateRemoved: + return "Removed" + case AddressStateForbbiden: + return "Forbbiden" + default: + return fmt.Sprintf("unknow state %d", as) + } +} + +func AddressStateToString(state AddressState) string { + return state.String() +} + +type Address struct { + ID types.UUID `json:"id"` + 
Addr address.Address `json:"addr"` + // max for current, use nonce and +1 + Nonce uint64 `json:"nonce"` + Weight int64 `json:"weight"` + // number of address selection messages + SelMsgNum uint64 `json:"selMsgNum"` + State AddressState `json:"state"` + GasOverEstimation float64 `json:"gasOverEstimation"` + MaxFee big.Int `json:"maxFee,omitempty"` + GasFeeCap big.Int `json:"gasFeeCap"` + GasOverPremium float64 `json:"gasOverPremium"` + BaseFee big.Int `json:"baseFee"` + + IsDeleted int `json:"isDeleted"` // 是否删除 1:是 -1:否 + CreatedAt time.Time `json:"createAt"` // 创建时间 + UpdatedAt time.Time `json:"updateAt"` // 更新时间 +} diff --git a/venus-shared/types/messager/message.go b/venus-shared/types/messager/message.go new file mode 100644 index 0000000000..3805de40d2 --- /dev/null +++ b/venus-shared/types/messager/message.go @@ -0,0 +1,164 @@ +package messager + +import ( + "encoding/json" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" + + shared "github.com/filecoin-project/venus/venus-shared/types" +) + +// ---> FailedMsg <------ +// | | +// UnFillMsg ---------------> FillMsg --------> OnChainMsg +// | | +// NoWalletMsg <--- ---->ReplacedMsg +// + +type MessageState int + +const ( + UnKnown MessageState = iota + UnFillMsg + FillMsg + OnChainMsg + FailedMsg + ReplacedMsg + NoWalletMsg +) + +func (mst MessageState) String() string { + switch mst { + case UnFillMsg: + return "UnFillMsg" + case FillMsg: + return "FillMsg" + case OnChainMsg: + return "OnChainMsg" + case FailedMsg: + return "Failed" + case ReplacedMsg: + return "ReplacedMsg" + case NoWalletMsg: + return "NoWalletMsg" + default: + return "UnKnown" + } +} + +func MessageStateToString(state MessageState) string { + return state.String() +} + +type MessageWithUID struct { + UnsignedMessage shared.Message + ID string +} + +func FromUnsignedMessage(unsignedMsg shared.Message) 
*Message { + return &Message{ + Message: unsignedMsg, + } +} + +type Message struct { + ID string + + UnsignedCid *cid.Cid + SignedCid *cid.Cid + shared.Message + Signature *crypto.Signature + + Height int64 + Confidence int64 + Receipt *shared.MessageReceipt + TipSetKey shared.TipSetKey + Meta *SendSpec + WalletName string + + State MessageState + + // Error is set if the message failed to fill + ErrorMsg string + + CreatedAt time.Time + UpdatedAt time.Time +} + +// todo ignore use message MarshalJSON method +func (m *Message) MarshalJSON() ([]byte, error) { + type msg struct { + Version uint64 + To address.Address + From address.Address + Nonce uint64 + Value abi.TokenAmount + GasLimit int64 + GasFeeCap abi.TokenAmount + GasPremium abi.TokenAmount + Method abi.MethodNum + Params []byte + } + type fMsg struct { + ID string + + UnsignedCid *cid.Cid + SignedCid *cid.Cid + msg + Signature *crypto.Signature + + Height int64 + Confidence int64 + Receipt *shared.MessageReceipt + TipSetKey shared.TipSetKey + Meta *SendSpec + WalletName string + + State MessageState + ErrorMsg string + CreatedAt time.Time + UpdatedAt time.Time + } + return json.Marshal(fMsg{ + ID: m.ID, + UnsignedCid: m.UnsignedCid, + SignedCid: m.SignedCid, + msg: msg{ + Version: m.Message.Version, + To: m.Message.To, + From: m.Message.From, + Nonce: m.Message.Nonce, + Value: m.Message.Value, + GasLimit: m.Message.GasLimit, + GasFeeCap: m.Message.GasFeeCap, + GasPremium: m.Message.GasPremium, + Method: m.Message.Method, + Params: m.Message.Params, + }, + Signature: m.Signature, + Height: m.Height, + Confidence: m.Confidence, + Receipt: m.Receipt, + TipSetKey: m.TipSetKey, + Meta: m.Meta, + WalletName: m.WalletName, + State: m.State, + ErrorMsg: m.ErrorMsg, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + }) +} + +type ReplacMessageParams struct { + ID string + Auto bool + MaxFee abi.TokenAmount + GasLimit int64 + GasPremium abi.TokenAmount + GasFeecap abi.TokenAmount + GasOverPremium float64 +} diff 
--git a/venus-shared/types/messager/node.go b/venus-shared/types/messager/node.go new file mode 100644 index 0000000000..9ad468a386 --- /dev/null +++ b/venus-shared/types/messager/node.go @@ -0,0 +1,22 @@ +package messager + +import ( + "github.com/filecoin-project/venus/venus-shared/types" +) + +type NodeType int + +const ( + _ NodeType = iota + FullNode + LightNode +) + +type Node struct { + ID types.UUID + + Name string + URL string + Token string + Type NodeType +} diff --git a/venus-shared/types/messager/quick_send.go b/venus-shared/types/messager/quick_send.go new file mode 100644 index 0000000000..9d3d7b3a49 --- /dev/null +++ b/venus-shared/types/messager/quick_send.go @@ -0,0 +1,28 @@ +package messager + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" +) + +type QuickSendParamsCodec string + +const ( + QuickSendParamsCodecJSON QuickSendParamsCodec = "json" + QuickSendParamsCodecHex QuickSendParamsCodec = "hex" +) + +type QuickSendParams struct { + To address.Address + From address.Address + Val abi.TokenAmount + Account string + + GasPremium *abi.TokenAmount + GasFeeCap *abi.TokenAmount + GasLimit *int64 + + Method abi.MethodNum + Params string + ParamsType QuickSendParamsCodec // json or hex +} diff --git a/venus-shared/types/messager/spec.go b/venus-shared/types/messager/spec.go new file mode 100644 index 0000000000..999d2fffdf --- /dev/null +++ b/venus-shared/types/messager/spec.go @@ -0,0 +1,47 @@ +package messager + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" +) + +type SendSpec struct { + ExpireEpoch abi.ChainEpoch `json:"expireEpoch"` + GasOverEstimation float64 `json:"gasOverEstimation"` + MaxFee big.Int `json:"maxFee,omitempty"` + GasOverPremium float64 `json:"gasOverPremium"` +} + +type SharedSpec struct { + ID uint `json:"id"` + + GasOverEstimation float64 `json:"gasOverEstimation"` 
+ MaxFee big.Int `json:"maxFee,omitempty"` + GasFeeCap big.Int `json:"gasFeeCap"` + GasOverPremium float64 `json:"gasOverPremium"` + BaseFee big.Int `json:"baseFee"` + + SelMsgNum uint64 `json:"selMsgNum"` +} + +type AddressSpec struct { + Address address.Address `json:"address"` + GasOverEstimation float64 `json:"gasOverEstimation"` + GasOverPremium float64 `json:"gasOverPremium"` + MaxFeeStr string `json:"maxFeeStr"` + GasFeeCapStr string `json:"gasFeeCapStr"` + BaseFeeStr string `json:"baseFeeStr"` +} + +func (ss *SharedSpec) GetSendSpec() *SendSpec { + if ss == nil { + return nil + } + + return &SendSpec{ + GasOverEstimation: ss.GasOverEstimation, + MaxFee: ss.MaxFee, + GasOverPremium: ss.GasOverPremium, + } +} diff --git a/venus-shared/types/mpool.go b/venus-shared/types/mpool.go new file mode 100644 index 0000000000..1f071d84d8 --- /dev/null +++ b/venus-shared/types/mpool.go @@ -0,0 +1,54 @@ +package types + +import ( + "github.com/ipfs/go-cid" +) + +type CheckStatusCode int + +//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckStatusCode -trimprefix=CheckStatus +const ( + _ CheckStatusCode = iota + // Message Checks + CheckStatusMessageSerialize + CheckStatusMessageSize + CheckStatusMessageValidity + CheckStatusMessageMinGas + CheckStatusMessageMinBaseFee + CheckStatusMessageBaseFee + CheckStatusMessageBaseFeeLowerBound + CheckStatusMessageBaseFeeUpperBound + CheckStatusMessageGetStateNonce + CheckStatusMessageNonce + CheckStatusMessageGetStateBalance + CheckStatusMessageBalance +) + +type CheckStatus struct { + Code CheckStatusCode + OK bool + Err string + Hint map[string]interface{} +} + +type MessageCheckStatus struct { + Cid cid.Cid + CheckStatus +} + +type MessagePrototype struct { + Message Message + ValidNonce bool +} + +type MpoolChange int + +const ( + MpoolAdd MpoolChange = iota + MpoolRemove +) + +type MpoolUpdate struct { + Type MpoolChange + Message *SignedMessage +} diff --git a/venus-shared/types/mpool_config.go 
b/venus-shared/types/mpool_config.go new file mode 100644 index 0000000000..b48112b847 --- /dev/null +++ b/venus-shared/types/mpool_config.go @@ -0,0 +1,16 @@ +package types + +import ( + "time" + + "github.com/filecoin-project/go-address" +) + +type MpoolConfig struct { + PriorityAddrs []address.Address + SizeLimitHigh int + SizeLimitLow int + ReplaceByFeeRatio float64 + PruneCooldown time.Duration + GasLimitOverestimation float64 +} diff --git a/venus-shared/types/msg_meta.go b/venus-shared/types/msg_meta.go new file mode 100644 index 0000000000..2efb4e7e8a --- /dev/null +++ b/venus-shared/types/msg_meta.go @@ -0,0 +1,41 @@ +package types + +type MsgType string + +const ( + MTUnknown = MsgType("unknown") + + // Signing message CID. MsgMeta.Extra contains raw cbor message bytes + MTChainMsg = MsgType("message") + + // Signing a blockheader. signing raw cbor block bytes (MsgMeta.Extra is empty) + MTBlock = MsgType("block") + + // Signing a deal proposal. signing raw cbor proposal bytes (MsgMeta.Extra is empty) + MTDealProposal = MsgType("dealproposal") + // extra is nil, 'toSign' is cbor raw bytes of 'DrawRandomParams' + // following types follow above rule + MTDrawRandomParam = MsgType("drawrandomparam") + MTSignedVoucher = MsgType("signedvoucher") + MTStorageAsk = MsgType("storageask") + MTAskResponse = MsgType("askresponse") + MTNetWorkResponse = MsgType("networkresposne") + + // reference : storagemarket/impl/remotecli.go:330 + // sign storagemarket.ClientDeal.ProposalCid, + // MsgMeta.Extra is nil, 'toSign' is market.ClientDealProposal + // storagemarket.ClientDeal.ProposalCid equals cborutil.AsIpld(market.ClientDealProposal).Cid() + MTClientDeal = MsgType("clientdeal") + + MTProviderDealState = MsgType("providerdealstate") + + MTVerifyAddress = MsgType("verifyaddress") +) + +type MsgMeta struct { + Type MsgType + + // Additional data related to what is signed. Should be verifiable with the + // signed bytes (e.g. 
CID(Extra).Bytes() == toSign) + Extra []byte +} diff --git a/venus-shared/types/net.go b/venus-shared/types/net.go new file mode 100644 index 0000000000..62d914ec9b --- /dev/null +++ b/venus-shared/types/net.go @@ -0,0 +1,31 @@ +package types + +import ( + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" +) + +type RawHost host.Host + +type ExtendedPeerInfo struct { + ID peer.ID + Agent string + Addrs []string + Protocols []string + ConnMgrMeta *ConnMgrInfo +} + +type ConnMgrInfo struct { + FirstSeen time.Time + Value int + Tags map[string]int + Conns map[string]time.Time +} + +type NatInfo struct { + Reachability network.Reachability + PublicAddr string +} diff --git a/venus-shared/types/padded_byte.go b/venus-shared/types/padded_byte.go new file mode 100644 index 0000000000..66e520c0a7 --- /dev/null +++ b/venus-shared/types/padded_byte.go @@ -0,0 +1,23 @@ +package types + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" +) + +type UnpaddedByteIndex uint64 + +func (i UnpaddedByteIndex) Padded() PaddedByteIndex { + return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded()) +} + +func (i UnpaddedByteIndex) Valid() error { + if i%127 != 0 { + return fmt.Errorf("unpadded byte index must be a multiple of 127") + } + + return nil +} + +type PaddedByteIndex uint64 diff --git a/venus-shared/types/param.go b/venus-shared/types/param.go new file mode 100644 index 0000000000..50cd96a165 --- /dev/null +++ b/venus-shared/types/param.go @@ -0,0 +1,29 @@ +package types + +import ( + "math/big" + + "github.com/filecoin-project/venus/venus-shared/internal" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + + "github.com/filecoin-project/venus/venus-shared/types/params" +) + +var blocksPerEpochBig = big.NewInt(0).SetUint64(params.BlocksPerEpoch) + +var TotalFilecoinInt = internal.TotalFilecoinInt + 
+var ZeroAddress = internal.ZeroAddress + +var EmptyTokenAmount = abi.TokenAmount{} + +// The multihash function identifier to use for content addresses. +const DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31) + +// A builder for all blockchain CIDs. +// Note that sector commitments use a different scheme. +var DefaultCidBuilder = cid.V1Builder{Codec: cid.DagCBOR, MhType: DefaultHashFunction} diff --git a/venus-shared/types/params/chain.go b/venus-shared/types/params/chain.go new file mode 100644 index 0000000000..57fd6273a4 --- /dev/null +++ b/venus-shared/types/params/chain.go @@ -0,0 +1,26 @@ +package params + +import ( + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" +) + +var ( + BlocksPerEpoch = uint64(builtin0.ExpectedLeadersPerEpoch) + MaxWinCount = 3 * int64(BlocksPerEpoch) +) + +// /////// +// Limits + +// TODO: If this is gonna stay, it should move to specs-actors +const BlockMessageLimit = 10000 + +const ( + BlockGasLimit = 10_000_000_000 + BlockGasTarget = BlockGasLimit / 2 + BaseFeeMaxChangeDenom = 8 // 12.5% + InitialBaseFee = 100e6 + MinimumBaseFee = 100 + PackingEfficiencyNum = 4 + PackingEfficiencyDenom = 5 +) diff --git a/venus-shared/types/params/circulating.go b/venus-shared/types/params/circulating.go new file mode 100644 index 0000000000..34d0d13c5e --- /dev/null +++ b/venus-shared/types/params/circulating.go @@ -0,0 +1,11 @@ +package params + +const ( + FilBase = uint64(2_000_000_000) + FilAllocStorageMining = uint64(1_100_000_000) +) + +const ( + FilecoinPrecision = uint64(1_000_000_000_000_000_000) + FilReserved = uint64(300_000_000) +) diff --git a/venus-shared/types/paych.go b/venus-shared/types/paych.go new file mode 100644 index 0000000000..49b90a30e7 --- /dev/null +++ b/venus-shared/types/paych.go @@ -0,0 +1,54 @@ +package types + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" +) + +type PCHDir int + +const ( + PCHUndef PCHDir = 
iota + PCHInbound + PCHOutbound +) + +type Status struct { + ControlAddr address.Address + Direction PCHDir +} + +type PaychGetOpts struct { + OffChain bool +} + +type ChannelInfo struct { + Channel address.Address + WaitSentinel cid.Cid +} + +type PaymentInfo struct { + Channel address.Address + WaitSentinel cid.Cid + Vouchers []*SignedVoucher +} + +type VoucherSpec struct { + Amount BigInt + TimeLockMin abi.ChainEpoch + TimeLockMax abi.ChainEpoch + MinSettle abi.ChainEpoch + + Extra *ModVerifyParams +} + +// VoucherCreateResult is the response to calling PaychVoucherCreate +type VoucherCreateResult struct { + // Voucher that was created, or nil if there was an error or if there + // were insufficient funds in the channel + Voucher *SignedVoucher + // Shortfall is the additional amount that would be needed in the channel + // in order to be able to create the voucher + Shortfall BigInt +} diff --git a/venus-shared/types/shared_func.go b/venus-shared/types/shared_func.go new file mode 100644 index 0000000000..c06cf021fe --- /dev/null +++ b/venus-shared/types/shared_func.go @@ -0,0 +1,68 @@ +package types + +import ( + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" +) + +func MustParseAddress(addr string) address.Address { + ret, err := address.NewFromString(addr) + if err != nil { + panic(err) + } + + return ret +} + +func MustParseCid(c string) cid.Cid { + ret, err := cid.Decode(c) + if err != nil { + panic(err) + } + + return ret +} + +func NewGasFeeCap(price int64) abi.TokenAmount { + return abi.NewTokenAmount(price) +} + +func NewGasPremium(price int64) abi.TokenAmount { + return abi.NewTokenAmount(price) +} + +// BlockTopic returns the network pubsub topic identifier on which new blocks are announced. 
+func BlockTopic(networkName string) string { + return fmt.Sprintf("/fil/blocks/%s", networkName) +} + +// MessageTopic returns the network pubsub topic identifier on which new messages are announced. +// The message payload is just a SignedMessage. +func MessageTopic(networkName string) string { + return fmt.Sprintf("/fil/msgs/%s", networkName) +} + +func IndexerIngestTopic(networkName string) string { + // The network name testnetnet is here for historical reasons. + // Going forward we aim to use the name `mainnet` where possible. + if networkName == "testnetnet" { + networkName = "mainnet" + } + + return "/indexer/ingest/" + networkName +} + +func DrandTopic(chainInfoJSON string) (string, error) { + drandInfo := struct { + Hash string `json:"hash"` + }{} + err := json.Unmarshal([]byte(chainInfoJSON), &drandInfo) + if err != nil { + return "", fmt.Errorf("could not unmarshal drand chain info: %w", err) + } + return "/drand/pubsub/v0.0.0/" + drandInfo.Hash, nil +} diff --git a/venus-shared/types/signed_message.go b/venus-shared/types/signed_message.go new file mode 100644 index 0000000000..f1797fe9e1 --- /dev/null +++ b/venus-shared/types/signed_message.go @@ -0,0 +1,111 @@ +package types + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +// SignedMessage contains a message and its signature +type SignedMessage struct { + Message Message + Signature crypto.Signature +} + +// nolint +func (smsg *SignedMessage) ChainLength() int { + var data []byte + var err error + if smsg.Signature.Type == crypto.SigTypeBLS { + data, err = smsg.Message.Serialize() + } else { + data, err = smsg.Serialize() + } + + if err != nil { + panic(err) + } + + return len(data) +} + +// Serialize return message binary +func (smsg *SignedMessage) Serialize() ([]byte, error) { + buf := new(bytes.Buffer) + if err := 
smsg.MarshalCBOR(buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// Serialize return message binary +func (smsg *SignedMessage) SerializeWithCid() (cid.Cid, []byte, error) { + data, err := smsg.Serialize() + if err != nil { + return cid.Undef, nil, err + } + + c, err := abi.CidBuilder.Sum(data) + if err != nil { + return cid.Undef, nil, err + } + + return c, data, nil +} + +func (smsg *SignedMessage) ToStorageBlock() (blocks.Block, error) { + var c cid.Cid + var data []byte + var err error + if smsg.Signature.Type == crypto.SigTypeBLS { + c, data, err = smsg.Message.SerializeWithCid() + } else { + c, data, err = smsg.SerializeWithCid() + } + + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, c) +} + +func (smsg *SignedMessage) Cid() cid.Cid { + if smsg.Signature.Type == crypto.SigTypeBLS { + return smsg.Message.Cid() + } + + c, _, err := smsg.SerializeWithCid() + if err != nil { + panic(fmt.Errorf("failed to marshal signed-message: %w", err)) + } + + return c +} + +// String return message json string +func (smsg *SignedMessage) String() string { + errStr := "(error encoding SignedMessage)" + c, _, err := smsg.SerializeWithCid() + if err != nil { + return errStr + } + + js, err := json.MarshalIndent(smsg, "", " ") + if err != nil { + return errStr + } + + return fmt.Sprintf("SignedMessage cid=[%v]: %s", c, string(js)) +} + +func (smsg *SignedMessage) VMMessage() *Message { + return &smsg.Message +} + +var _ ChainMsg = (*SignedMessage)(nil) diff --git a/venus-shared/types/signed_message_marshal.go b/venus-shared/types/signed_message_marshal.go new file mode 100644 index 0000000000..8d14d6ede3 --- /dev/null +++ b/venus-shared/types/signed_message_marshal.go @@ -0,0 +1,21 @@ +package types + +import ( + "encoding/json" + + "github.com/ipfs/go-cid" +) + +type smCid struct { + *RawSignedMessage + CID cid.Cid +} + +type RawSignedMessage SignedMessage + +func (sm *SignedMessage) MarshalJSON() ([]byte, error) { + return 
json.Marshal(&smCid{ + RawSignedMessage: (*RawSignedMessage)(sm), + CID: sm.Cid(), + }) +} diff --git a/venus-shared/types/signed_message_test.go b/venus-shared/types/signed_message_test.go new file mode 100644 index 0000000000..18995bb3dd --- /dev/null +++ b/venus-shared/types/signed_message_test.go @@ -0,0 +1,69 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/crypto" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestSignedMessageBasic(t *testing.T) { + tf.UnitTest(t) + sliceLen := 16 + bytesLen := 32 + + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst SignedMessage + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst) + require.Nil(t, src.Signature.Data) + }, + + ProvideOpts: []interface{}{ + testutil.WithSliceLen(sliceLen), + testutil.BytesFixedProvider(bytesLen), + }, + + Provided: func() { + require.NotEqual(t, src, dst, "value provided") + require.Len(t, src.Signature.Data, bytesLen) + }, + + Finished: func() { + require.Equal(t, src, dst, "after unmarshaling") + require.Equal(t, src.String(), dst.String()) + + c := src.Cid() + + blk, err := src.ToStorageBlock() + require.NoError(t, err, "ToStorageBlock") + + require.Equal(t, c, blk.Cid()) + require.Equal(t, c, dst.Cid()) + + switch src.Signature.Type { + case crypto.SigTypeBLS: + require.Equal(t, c, src.Message.Cid()) + require.Equal(t, src.ChainLength(), src.Message.ChainLength()) + + case crypto.SigTypeSecp256k1: + require.NotEqual(t, c, src.Message.Cid()) + require.Greater(t, src.ChainLength(), src.Message.ChainLength()) + + default: + t.Fatalf("unexpected sig type %d", src.Signature.Type) + } + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/types/signer.go b/venus-shared/types/signer.go new file mode 100644 index 
0000000000..4b7aeaf1e0 --- /dev/null +++ b/venus-shared/types/signer.go @@ -0,0 +1,14 @@ +package types + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" +) + +// Signer signs data with a private key obtained internally from a provided address. +type Signer interface { + SignBytes(ctx context.Context, data []byte, addr address.Address) (*crypto.Signature, error) + HasAddress(ctx context.Context, addr address.Address) (bool, error) +} diff --git a/venus-shared/types/state_types_gen.go b/venus-shared/types/state_types_gen.go new file mode 100755 index 0000000000..955f388f4f --- /dev/null +++ b/venus-shared/types/state_types_gen.go @@ -0,0 +1,232 @@ +// Code generated by github.com/filecoin-project/venus/venus-devtool/state-type-gen. DO NOT EDIT. +package types + +import ( + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" +) + +////////// market ////////// +const ( + DealMaxLabelSize = market.DealMaxLabelSize + EpochUndefined = market.EpochUndefined + ProposalsAmtBitwidth = market.ProposalsAmtBitwidth + StatesAmtBitwidth = market.StatesAmtBitwidth +) + +type ( + ActivateDealsParams = market.ActivateDealsParams + ActivateDealsResult = market.ActivateDealsResult + ClientDealProposal = market.ClientDealProposal + ComputeDataCommitmentParams = market.ComputeDataCommitmentParams + ComputeDataCommitmentReturn = market.ComputeDataCommitmentReturn + DealArray = market.DealArray + DealLabel = market.DealLabel + DealProposal = market.DealProposal + DealSpaces = market.DealSpaces + DealState = market.DealState + OnMinerSectorsTerminateParams = market.OnMinerSectorsTerminateParams + PublishStorageDealsParams = market.PublishStorageDealsParams + PublishStorageDealsReturn = 
market.PublishStorageDealsReturn + SectorDataSpec = market.SectorDataSpec + SectorDealData = market.SectorDealData + SectorDeals = market.SectorDeals + SetMultimap = market.SetMultimap + VerifiedDealInfo = market.VerifiedDealInfo + VerifyDealsForActivationParams = market.VerifyDealsForActivationParams + VerifyDealsForActivationReturn = market.VerifyDealsForActivationReturn + MarketWithdrawBalanceParams = market.WithdrawBalanceParams +) + +var ( + AsDealProposalArray = market.AsDealProposalArray + AsSetMultimap = market.AsSetMultimap + DealClientCollateralBounds = market.DealClientCollateralBounds + DealDurationBounds = market.DealDurationBounds + DealPricePerEpochBounds = market.DealPricePerEpochBounds + DealProviderCollateralBounds = market.DealProviderCollateralBounds + DealWeight = market.DealWeight + MakeEmptySetMultimap = market.MakeEmptySetMultimap + NewLabelFromBytes = market.NewLabelFromBytes + NewLabelFromString = market.NewLabelFromString + StoreEmptySetMultimap = market.StoreEmptySetMultimap + ValidateDealsForActivation = market.ValidateDealsForActivation +) + +////////// miner ////////// +const ( + AddressedPartitionsMax = miner.AddressedPartitionsMax + AddressedSectorsMax = miner.AddressedSectorsMax + ChainFinality = miner.ChainFinality + CronEventProcessEarlyTerminations = miner.CronEventProcessEarlyTerminations + CronEventProvingDeadline = miner.CronEventProvingDeadline + CronEventWorkerKeyChange = miner.CronEventWorkerKeyChange + DeadlineExpirationAmtBitwidth = miner.DeadlineExpirationAmtBitwidth + DeadlineOptimisticPoStSubmissionsAmtBitwidth = miner.DeadlineOptimisticPoStSubmissionsAmtBitwidth + DeadlinePartitionsAmtBitwidth = miner.DeadlinePartitionsAmtBitwidth + DeclarationsMax = miner.DeclarationsMax + FaultDeclarationCutoff = miner.FaultDeclarationCutoff + MaxAggregateProofSize = miner.MaxAggregateProofSize + MaxAggregatedSectors = miner.MaxAggregatedSectors + MaxPartitionsPerDeadline = miner.MaxPartitionsPerDeadline + 
MaxSectorExpirationExtension = miner.MaxSectorExpirationExtension + MinAggregatedSectors = miner.MinAggregatedSectors + MinSectorExpiration = miner.MinSectorExpiration + PartitionEarlyTerminationArrayAmtBitwidth = miner.PartitionEarlyTerminationArrayAmtBitwidth + PartitionExpirationAmtBitwidth = miner.PartitionExpirationAmtBitwidth + PreCommitSectorBatchMaxSize = miner.PreCommitSectorBatchMaxSize + PrecommitCleanUpAmtBitwidth = miner.PrecommitCleanUpAmtBitwidth + ProveReplicaUpdatesMaxSize = miner.ProveReplicaUpdatesMaxSize + SectorsAmtBitwidth = miner.SectorsAmtBitwidth + WPoStChallengeLookback = miner.WPoStChallengeLookback + WPoStPeriodDeadlines = miner.WPoStPeriodDeadlines + WorkerKeyChangeDelay = miner.WorkerKeyChangeDelay +) + +type ( + ActiveBeneficiary = miner.ActiveBeneficiary + ApplyRewardParams = miner.ApplyRewardParams + BeneficiaryTerm = miner.BeneficiaryTerm + ChangeBeneficiaryParams = miner.ChangeBeneficiaryParams + ChangeMultiaddrsParams = miner.ChangeMultiaddrsParams + ChangePeerIDParams = miner.ChangePeerIDParams + ChangeWorkerAddressParams = miner.ChangeWorkerAddressParams + CheckSectorProvenParams = miner.CheckSectorProvenParams + CompactPartitionsParams = miner.CompactPartitionsParams + CompactSectorNumbersParams = miner.CompactSectorNumbersParams + ConfirmSectorProofsParams = miner.ConfirmSectorProofsParams + CronEventPayload = miner.CronEventPayload + CronEventType = miner.CronEventType + Deadlines = miner.Deadlines + DeclareFaultsParams = miner.DeclareFaultsParams + DeclareFaultsRecoveredParams = miner.DeclareFaultsRecoveredParams + DeferredCronEventParams = miner.DeferredCronEventParams + DisputeWindowedPoStParams = miner.DisputeWindowedPoStParams + ExpirationExtension = miner.ExpirationExtension + ExpirationExtension2 = miner.ExpirationExtension2 + ExpirationQueue = miner.ExpirationQueue + ExpirationSet = miner.ExpirationSet + ExtendSectorExpiration2Params = miner.ExtendSectorExpiration2Params + ExtendSectorExpirationParams = 
miner.ExtendSectorExpirationParams + FaultDeclaration = miner.FaultDeclaration + GetBeneficiaryReturn = miner.GetBeneficiaryReturn + GetControlAddressesReturn = miner.GetControlAddressesReturn + PartitionKey = miner.PartitionKey + PendingBeneficiaryChange = miner.PendingBeneficiaryChange + PoStPartition = miner.PoStPartition + PowerPair = miner.PowerPair + PreCommitSectorBatchParams = miner.PreCommitSectorBatchParams + PreCommitSectorBatchParams2 = miner.PreCommitSectorBatchParams2 + PreCommitSectorParams = miner.PreCommitSectorParams + ProveCommitAggregateParams = miner.ProveCommitAggregateParams + ProveCommitSectorParams = miner.ProveCommitSectorParams + ProveReplicaUpdatesParams = miner.ProveReplicaUpdatesParams + ProveReplicaUpdatesParams2 = miner.ProveReplicaUpdatesParams2 + RecoveryDeclaration = miner.RecoveryDeclaration + ReplicaUpdate = miner.ReplicaUpdate + ReplicaUpdate2 = miner.ReplicaUpdate2 + ReportConsensusFaultParams = miner.ReportConsensusFaultParams + SectorClaim = miner.SectorClaim + SectorOnChainInfo = miner.SectorOnChainInfo + SectorPreCommitInfo = miner.SectorPreCommitInfo + SectorPreCommitOnChainInfo = miner.SectorPreCommitOnChainInfo + Sectors = miner.Sectors + SubmitWindowedPoStParams = miner.SubmitWindowedPoStParams + TerminateSectorsParams = miner.TerminateSectorsParams + TerminateSectorsReturn = miner.TerminateSectorsReturn + TerminationDeclaration = miner.TerminationDeclaration + VestSpec = miner.VestSpec + VestingFund = miner.VestingFund + VestingFunds = miner.VestingFunds + WindowedPoSt = miner.WindowedPoSt + MinerWithdrawBalanceParams = miner.WithdrawBalanceParams + WorkerKeyChange = miner.WorkerKeyChange +) + +var ( + AggregatePreCommitNetworkFee = miner.AggregatePreCommitNetworkFee + AggregateProveCommitNetworkFee = miner.AggregateProveCommitNetworkFee + ConstructDeadline = miner.ConstructDeadline + ConstructDeadlines = miner.ConstructDeadlines + ConstructVestingFunds = miner.ConstructVestingFunds + ExpectedRewardForPower = 
miner.ExpectedRewardForPower + ExpectedRewardForPowerClampedAtAttoFIL = miner.ExpectedRewardForPowerClampedAtAttoFIL + FindSector = miner.FindSector + InitialPledgeForPower = miner.InitialPledgeForPower + LoadExpirationQueue = miner.LoadExpirationQueue + LoadSectors = miner.LoadSectors + NewDeadlineInfo = miner.NewDeadlineInfo + NewPowerPair = miner.NewPowerPair + NewPowerPairZero = miner.NewPowerPairZero + PreCommitDepositForPower = miner.PreCommitDepositForPower + QAPowerForSector = miner.QAPowerForSector + QAPowerForWeight = miner.QAPowerForWeight + QAPowerMax = miner.QAPowerMax + QualityForWeight = miner.QualityForWeight + QuantSpecForDeadline = miner.QuantSpecForDeadline + SectorKey = miner.SectorKey +) + +////////// paych ////////// +const ( + LaneStatesAmtBitwidth = paych.LaneStatesAmtBitwidth + SettleDelay = paych.SettleDelay +) + +type ( + ConstructorParams = paych.ConstructorParams + LaneState = paych.LaneState + Merge = paych.Merge + ModVerifyParams = paych.ModVerifyParams + SignedVoucher = paych.SignedVoucher + UpdateChannelStateParams = paych.UpdateChannelStateParams +) + +////////// verifreg ////////// +const ( + EndOfLifeClaimDropPeriod = verifreg.EndOfLifeClaimDropPeriod + MaximumVerifiedAllocationExpiration = verifreg.MaximumVerifiedAllocationExpiration + MaximumVerifiedAllocationTerm = verifreg.MaximumVerifiedAllocationTerm + MinimumVerifiedAllocationSize = verifreg.MinimumVerifiedAllocationSize + MinimumVerifiedAllocationTerm = verifreg.MinimumVerifiedAllocationTerm + NoAllocationID = verifreg.NoAllocationID + SignatureDomainSeparation_RemoveDataCap = verifreg.SignatureDomainSeparation_RemoveDataCap +) + +type ( + AddVerifiedClientParams = verifreg.AddVerifiedClientParams + AddVerifierParams = verifreg.AddVerifierParams + Allocation = verifreg.Allocation + AllocationId = verifreg.AllocationId + AllocationRequest = verifreg.AllocationRequest + AllocationRequests = verifreg.AllocationRequests + AllocationsResponse = verifreg.AllocationsResponse + 
BatchReturn = verifreg.BatchReturn + Claim = verifreg.Claim + ClaimAllocationsParams = verifreg.ClaimAllocationsParams + ClaimAllocationsReturn = verifreg.ClaimAllocationsReturn + ClaimExtensionRequest = verifreg.ClaimExtensionRequest + ClaimId = verifreg.ClaimId + ClaimTerm = verifreg.ClaimTerm + ExtendClaimTermsParams = verifreg.ExtendClaimTermsParams + ExtendClaimTermsReturn = verifreg.ExtendClaimTermsReturn + FailCode = verifreg.FailCode + GetClaimsParams = verifreg.GetClaimsParams + GetClaimsReturn = verifreg.GetClaimsReturn + ReceiverType = verifreg.ReceiverType + RemoveDataCapParams = verifreg.RemoveDataCapParams + RemoveDataCapProposal = verifreg.RemoveDataCapProposal + RemoveDataCapRequest = verifreg.RemoveDataCapRequest + RemoveDataCapReturn = verifreg.RemoveDataCapReturn + RemoveExpiredAllocationsParams = verifreg.RemoveExpiredAllocationsParams + RemoveExpiredAllocationsReturn = verifreg.RemoveExpiredAllocationsReturn + RemoveExpiredClaimsParams = verifreg.RemoveExpiredClaimsParams + RemoveExpiredClaimsReturn = verifreg.RemoveExpiredClaimsReturn + RestoreBytesParams = verifreg.RestoreBytesParams + RmDcProposalID = verifreg.RmDcProposalID + SectorAllocationClaim = verifreg.SectorAllocationClaim + UniversalReceiverParams = verifreg.UniversalReceiverParams + UseBytesParams = verifreg.UseBytesParams +) diff --git a/venus-shared/types/supply.go b/venus-shared/types/supply.go new file mode 100644 index 0000000000..8a43a9375d --- /dev/null +++ b/venus-shared/types/supply.go @@ -0,0 +1,12 @@ +package types + +import "github.com/filecoin-project/go-state-types/abi" + +type CirculatingSupply struct { + FilVested abi.TokenAmount + FilMined abi.TokenAmount + FilBurnt abi.TokenAmount + FilLocked abi.TokenAmount + FilCirculating abi.TokenAmount + FilReserveDisbursed abi.TokenAmount +} diff --git a/venus-shared/types/tipset.go b/venus-shared/types/tipset.go new file mode 100644 index 0000000000..43e0a29cf0 --- /dev/null +++ b/venus-shared/types/tipset.go @@ -0,0 +1,294 
@@ +package types + +import ( + "bytes" + "fmt" + "sort" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" +) + +type blockHeaderWithCid struct { + c cid.Cid + b *BlockHeader +} + +// UndefTipSet is a singleton representing a nil or undefined tipset. +var UndefTipSet = &TipSet{} + +func NewTipSet(bhs []*BlockHeader) (*TipSet, error) { + if len(bhs) == 0 { + return nil, fmt.Errorf("no blocks for tipset") + } + + blks := make([]*blockHeaderWithCid, len(bhs)) + first := bhs[0] + blks[0] = &blockHeaderWithCid{ + c: first.Cid(), + b: first, + } + + seen := make(map[cid.Cid]struct{}) + seen[blks[0].c] = struct{}{} + + for i := 1; i < len(bhs); i++ { + blk := bhs[i] + if blk.Height != first.Height { + return nil, fmt.Errorf("inconsistent block heights %d and %d", first.Height, blk.Height) + } + + if !sortedCidArrsEqual(blk.Parents, first.Parents) { + return nil, fmt.Errorf("inconsistent block parents %s and %s", NewTipSetKey(first.Parents...), NewTipSetKey(blk.Parents...)) + } + + if !blk.ParentWeight.Equals(first.ParentWeight) { + return nil, fmt.Errorf("inconsistent block parent weights %d and %d", first.ParentWeight, blk.ParentWeight) + } + + bcid := blk.Cid() + if _, ok := seen[bcid]; ok { + return nil, fmt.Errorf("duplicate block %s", bcid) + } + + seen[bcid] = struct{}{} + blks[i] = &blockHeaderWithCid{ + c: bcid, + b: blk, + } + } + + sortBlockHeadersInTipSet(blks) + blocks := make([]*BlockHeader, len(blks)) + cids := make([]cid.Cid, len(blks)) + for i := range blks { + blocks[i] = blks[i].b + cids[i] = blks[i].c + } + + return &TipSet{ + blocks: blocks, + + key: NewTipSetKey(cids...), + cids: cids, + + height: first.Height, + + parentsKey: NewTipSetKey(first.Parents...), + }, nil +} + +// TipSet is a non-empty, immutable set of blocks at the same height with the same parent set. +// Blocks in a tipset are canonically ordered by ticket. 
Blocks may be iterated either via +// ToSlice() (which involves a shallow copy) or efficiently by index with At(). +// TipSet is a lightweight value type; passing by pointer is usually unnecessary. +// +// Canonical tipset newBlock ordering does not match the order of CIDs in a TipSetKey used as +// a tipset "key". +type TipSet struct { + // This slice is wrapped in a struct to enforce immutability. + blocks []*BlockHeader + // Key is computed at construction and cached. + key TipSetKey + cids []cid.Cid + + height abi.ChainEpoch + + parentsKey TipSetKey +} + +// Defined checks whether the tipset is defined. +// Invoking any other methods on an undefined tipset will result in undefined behaviour (c.f. cid.Undef) +func (ts *TipSet) Defined() bool { + return ts != nil && len(ts.blocks) > 0 +} + +func (ts *TipSet) Equals(ots *TipSet) bool { + if ts == nil && ots == nil { + return true + } + if ts == nil || ots == nil { + return false + } + + if ts.height != ots.height { + return false + } + + if len(ts.cids) != len(ots.cids) { + return false + } + + for i, cid := range ts.cids { + if cid != ots.cids[i] { + return false + } + } + + return true +} + +// Len returns the number of blocks in the tipset. +func (ts *TipSet) Len() int { + if ts == nil { + return 0 + } + return len(ts.blocks) +} + +// At returns the i'th newBlock in the tipset. +// An index outside the half-open range [0, Len()) will panic. +func (ts *TipSet) At(i int) *BlockHeader { + return ts.blocks[i] +} + +func (ts *TipSet) Blocks() []*BlockHeader { + return ts.blocks +} + +// Key returns a key for the tipset. +func (ts *TipSet) Key() TipSetKey { + if ts == nil { + return EmptyTSK + } + return ts.key +} + +func (ts *TipSet) Cids() []cid.Cid { + if !ts.Defined() { + return []cid.Cid{} + } + + dst := make([]cid.Cid, len(ts.cids)) + copy(dst, ts.cids) + return dst +} + +// Height returns the height of a tipset. 
+func (ts *TipSet) Height() abi.ChainEpoch { + if ts.Defined() { + return ts.height + } + + return 0 +} + +// Parents returns the CIDs of the parents of the blocks in the tipset. +func (ts *TipSet) Parents() TipSetKey { + if ts.Defined() { + return ts.parentsKey + } + + return EmptyTSK +} + +// Parents returns the CIDs of the parents of the blocks in the tipset. +func (ts *TipSet) ParentState() cid.Cid { + if ts.Defined() { + return ts.blocks[0].ParentStateRoot + } + return cid.Undef +} + +// ParentWeight returns the tipset's ParentWeight in fixed point form. +func (ts *TipSet) ParentWeight() big.Int { + if ts.Defined() { + return ts.blocks[0].ParentWeight + } + return big.Zero() +} + +// String returns a formatted string of the CIDs in the TipSet. +// "{ }" +// Note: existing callers use this as a unique key for the tipset. We should change them +// to use the TipSetKey explicitly +func (ts TipSet) String() string { + return ts.Key().String() +} + +func (ts *TipSet) IsChildOf(parent *TipSet) bool { + return CidArrsEqual(ts.Parents().Cids(), parent.key.Cids()) && + // FIXME: The height check might go beyond what is meant by + // "parent", but many parts of the code rely on the tipset's + // height for their processing logic at the moment to obviate it. + ts.Height() > parent.Height() +} + +func (ts *TipSet) MinTicketBlock() *BlockHeader { + min := ts.blocks[0] + + for _, b := range ts.blocks[1:] { + if b.LastTicket().Less(min.LastTicket()) { + min = b + } + } + + return min +} + +// MinTicket returns the smallest ticket of all blocks in the tipset. +func (ts *TipSet) MinTicket() *Ticket { + return ts.MinTicketBlock().Ticket +} + +func (ts *TipSet) MinTimestamp() uint64 { + minTS := ts.blocks[0].Timestamp + for _, bh := range ts.blocks[1:] { + if bh.Timestamp < minTS { + minTS = bh.Timestamp + } + } + return minTS +} + +// ToSlice returns an ordered slice of pointers to the tipset's blocks. 
+func (ts *TipSet) ToSlice() []*BlockHeader { + slice := make([]*BlockHeader, len(ts.blocks)) + copy(slice, ts.blocks) + return slice +} + +func sortedCidArrsEqual(a, b []cid.Cid) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + +func sortBlockHeadersInTipSet(blks []*blockHeaderWithCid) { + sort.Slice(blks, func(i, j int) bool { + cmp := blks[i].b.Ticket.Compare(blks[j].b.Ticket) + if cmp == 0 { + // Break ticket ties with the newBlock CIDs, which are distinct. + cmp = bytes.Compare(blks[i].c.Bytes(), blks[j].c.Bytes()) + } + return cmp < 0 + }) +} + +func CidArrsEqual(a, b []cid.Cid) bool { + if len(a) != len(b) { + return false + } + + // order ignoring compare... + s := make(map[cid.Cid]bool) + for _, c := range a { + s[c] = true + } + + for _, c := range b { + if !s[c] { + return false + } + } + return true +} diff --git a/venus-shared/types/tipset_exp_tipset_test.go b/venus-shared/types/tipset_exp_tipset_test.go new file mode 100644 index 0000000000..ccc60e179e --- /dev/null +++ b/venus-shared/types/tipset_exp_tipset_test.go @@ -0,0 +1,48 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestExpTipSet(t *testing.T) { + tf.UnitTest(t) + sliceLen := 5 + bytesLen := 32 + + var buf bytes.Buffer + for i := 0; i < 32; i++ { + var src, dst ExpTipSet + + opt := testutil.CborErBasicTestOptions{ + Buf: &buf, + Prepare: func() { + require.Equal(t, src, dst, "empty values") + require.Nil(t, src.Cids, "empty cids") + require.Nil(t, src.Blocks, "empty blocks") + }, + + ProvideOpts: []interface{}{ + testutil.WithSliceLen(sliceLen), + testutil.BytesFixedProvider(bytesLen), + }, + + Provided: func() { + require.NotEqual(t, src, dst, "src value provided") + require.Len(t, src.Cids, sliceLen, "cids length") + 
require.Len(t, src.Blocks, sliceLen, "blocks length") + }, + + Finished: func() { + require.Equal(t, src, dst) + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opt) + } +} diff --git a/venus-shared/types/tipset_key.go b/venus-shared/types/tipset_key.go new file mode 100644 index 0000000000..b0ba29558f --- /dev/null +++ b/venus-shared/types/tipset_key.go @@ -0,0 +1,222 @@ +package types + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" +) + +// TipSetKey is an immutable set of CIDs forming a unique key for a TipSet. +// Equal keys will have equivalent iteration order. CIDs are maintained in +// the same order as the canonical iteration order of blocks in a tipset (which is by ticket). +// This convention is maintained by the caller. The order of input cids to the constructor +// must be the same as this canonical order. It is the caller's responsibility to not +// construct a key with duplicate ids +// TipSetKey is a lightweight value type; passing by pointer is usually unnecessary. + +var ( + _ json.Marshaler = TipSetKey{} + _ json.Unmarshaler = (*TipSetKey)(nil) +) + +var EmptyTSK = TipSetKey{} + +// The length of a newBlock header CID in bytes. +var blockHeaderCIDLen int + +func init() { + // hash a large string of zeros so we don't estimate based on inlined CIDs. + var buf [256]byte + c, err := abi.CidBuilder.Sum(buf[:]) + if err != nil { + panic(err) + } + blockHeaderCIDLen = len(c.Bytes()) +} + +// NewTipSetKey builds a new key from a slice of CIDs. +// The CIDs are assumed to be ordered correctly. +func NewTipSetKey(cids ...cid.Cid) TipSetKey { + encoded := encodeKey(cids) + return TipSetKey{string(encoded)} +} + +// A TipSetKey is an immutable collection of CIDs forming a unique key for a tipset. +// The CIDs are assumed to be distinct and in canonical order. 
Two keys with the same +// CIDs in a different order are not considered equal. +// TipSetKey is a lightweight value type, and may be compared for equality with ==. +type TipSetKey struct { + // The internal representation is a concatenation of the bytes of the CIDs, which are + // self-describing, wrapped as a string. + // These gymnastics make the a TipSetKey usable as a map key. + // The empty key has value "". + value string +} + +// Cids returns a slice of the CIDs comprising this key. +func (tsk TipSetKey) Cids() []cid.Cid { + cids, err := decodeKey([]byte(tsk.value)) + if err != nil { + panic("invalid tipset key: " + err.Error()) + } + return cids +} + +// String returns a human-readable representation of the key. +func (tsk TipSetKey) String() string { + b := strings.Builder{} + b.WriteString("{") + for _, c := range tsk.Cids() { + b.Write([]byte(fmt.Sprintf(" %s", c.String()))) + } + b.WriteString(" }") + return b.String() +} + +// Bytes returns a binary representation of the key. +func (tsk TipSetKey) Bytes() []byte { + return []byte(tsk.value) +} + +func (tsk TipSetKey) MarshalJSON() ([]byte, error) { + return json.Marshal(tsk.Cids()) +} + +func (tsk *TipSetKey) UnmarshalJSON(b []byte) error { + var cids []cid.Cid + if err := json.Unmarshal(b, &cids); err != nil { + return err + } + tsk.value = string(encodeKey(cids)) + return nil +} + +func (tsk TipSetKey) IsEmpty() bool { + return len(tsk.value) == 0 +} + +// ContainsAll checks if another set is a subset of this one. +// We can assume that the relative order of members of one key is +// maintained in the other since we assume that all ids are sorted +// by corresponding newBlock ticket value. +func (tsk TipSetKey) ContainsAll(other TipSetKey) bool { + // Since we assume the ids must have the same relative sorting we can + // perform one pass over this set, advancing the other index whenever the + // values match. 
+ cids := tsk.Cids() + otherCids := other.Cids() + otherIdx := 0 + for i := 0; i < len(cids) && otherIdx < len(otherCids); i++ { + if cids[i].Equals(otherCids[otherIdx]) { + otherIdx++ + } + } + // otherIdx is advanced the full length only if every element was found in this set. + return otherIdx == len(otherCids) +} + +// Has checks whether the set contains `id`. +func (tsk TipSetKey) Has(id cid.Cid) bool { + // Find index of the first CID not less than id. + for _, cid := range tsk.Cids() { + if cid == id { + return true + } + } + return false +} + +// Equals checks whether the set contains exactly the same CIDs as another. +func (tsk TipSetKey) Equals(other TipSetKey) bool { + return tsk.value == other.value +} + +// TipSetKeyFromBytes wraps an encoded key, validating correct decoding. +func TipSetKeyFromBytes(encoded []byte) (TipSetKey, error) { + _, err := decodeKey(encoded) + if err != nil { + return TipSetKey{}, err + } + return TipSetKey{string(encoded)}, nil +} + +func (tsk *TipSetKey) UnmarshalCBOR(r io.Reader) error { + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Parents: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + cids := make([]cid.Cid, extra) + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return fmt.Errorf("reading cid field t.Parents failed: %v", err) + } + cids[i] = c + } + tsk.value = string(encodeKey(cids)) + } + return nil +} + +func (tsk TipSetKey) MarshalCBOR(w io.Writer) error { + cids := tsk.Cids() + if len(cids) > cbg.MaxLength { + return fmt.Errorf("slice value in field t.Parents was too long") + } + scratch := make([]byte, 9) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(cids))); err != nil { + return err + } + for _, v := range cids 
{ + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return fmt.Errorf("failed writing cid field t.Parents: %v", err) + } + } + return nil +} + +func encodeKey(cids []cid.Cid) []byte { + buffer := new(bytes.Buffer) + for _, c := range cids { + // bytes.Buffer.Write() err is documented to be always nil. + _, _ = buffer.Write(c.Bytes()) + } + return buffer.Bytes() +} + +func decodeKey(encoded []byte) ([]cid.Cid, error) { + // To avoid reallocation of the underlying array, estimate the number of CIDs to be extracted + // by dividing the encoded length by the expected CID length. + estimatedCount := len(encoded) / blockHeaderCIDLen + cids := make([]cid.Cid, 0, estimatedCount) + nextIdx := 0 + for nextIdx < len(encoded) { + nr, c, err := cid.CidFromBytes(encoded[nextIdx:]) + if err != nil { + return nil, err + } + cids = append(cids, c) + nextIdx += nr + } + return cids, nil +} diff --git a/venus-shared/types/tipset_key_test.go b/venus-shared/types/tipset_key_test.go new file mode 100644 index 0000000000..c062d8130c --- /dev/null +++ b/venus-shared/types/tipset_key_test.go @@ -0,0 +1,45 @@ +package types + +import ( + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" +) + +func TestTipSetKey(t *testing.T) { + tf.UnitTest(t) + var cids []cid.Cid + cidNum := 10 + + // value provided + testutil.Provide(t, &cids, testutil.WithSliceLen(cidNum)) + require.Len(t, cids, cidNum) + require.NotEqual(t, make([]cid.Cid, cidNum), cids) + + // construct + tsk := NewTipSetKey(cids...) 
+ require.False(t, tsk.IsEmpty()) + + require.NotEqual(t, tsk, EmptyTSK) + + // content + require.Equal(t, tsk.Cids(), cids) + tskStr := tsk.String() + for i := range cids { + require.Contains(t, tskStr, cids[i].String()) + } + + // marshal json + data, err := tsk.MarshalJSON() + require.NoError(t, err, "marshal json") + + var decoded TipSetKey + err = decoded.UnmarshalJSON(data) + require.NoError(t, err) + + require.Equal(t, tsk, decoded) +} diff --git a/venus-shared/types/tipset_marshal.go b/venus-shared/types/tipset_marshal.go new file mode 100644 index 0000000000..a6d178fc57 --- /dev/null +++ b/venus-shared/types/tipset_marshal.go @@ -0,0 +1,70 @@ +package types + +import ( + "encoding/json" + "io" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" +) + +type ExpTipSet struct { + Cids []cid.Cid + Blocks []*BlockHeader + Height abi.ChainEpoch +} + +func (ts *TipSet) MarshalJSON() ([]byte, error) { + // why didnt i just export the fields? 
Because the struct has methods with the + // same names already + return json.Marshal(ExpTipSet{ + Cids: ts.cids, + Blocks: ts.blocks, + Height: ts.height, + }) +} + +func (ts *TipSet) UnmarshalJSON(b []byte) error { + var ets ExpTipSet + if err := json.Unmarshal(b, &ets); err != nil { + return err + } + + ots, err := NewTipSet(ets.Blocks) + if err != nil { + return err + } + + *ts = *ots + + return nil +} + +func (ts *TipSet) MarshalCBOR(w io.Writer) error { + if ts == nil { + _, err := w.Write(cbg.CborNull) + return err + } + return (&ExpTipSet{ + Cids: ts.cids, + Blocks: ts.blocks, + Height: ts.height, + }).MarshalCBOR(w) +} + +func (ts *TipSet) UnmarshalCBOR(r io.Reader) error { + var ets ExpTipSet + if err := ets.UnmarshalCBOR(r); err != nil { + return err + } + + ots, err := NewTipSet(ets.Blocks) + if err != nil { + return err + } + + *ts = *ots + + return nil +} diff --git a/venus-shared/types/tipset_marshal_test.go b/venus-shared/types/tipset_marshal_test.go new file mode 100644 index 0000000000..5aca1b218b --- /dev/null +++ b/venus-shared/types/tipset_marshal_test.go @@ -0,0 +1,79 @@ +package types + +import ( + "bytes" + "encoding/json" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func checkForTipSetEqual(t *testing.T, a, b *TipSet) { + require.True(t, a.Equals(b)) + require.Equal(t, a.Len(), b.Len(), "Len equals") + require.Equal(t, a.Cids(), b.Cids(), "Cids equals") + require.Equal(t, a.MinTimestamp(), b.MinTimestamp(), "MinTimestamp equals") + require.Equal(t, a.ParentState(), b.ParentState(), "ParentState equals") + require.Equal(t, a.String(), b.String(), "String equals") + require.Equal(t, a.MinTicketBlock(), b.MinTicketBlock(), "MinTicketBlock equals") + require.Equal(t, a.MinTicket(), b.MinTicket(), "MinTicket equals") +} + +func TestTipSetMarshalJSON(t *testing.T) { + 
tf.UnitTest(t) + height, paretns, weight := constructTipSetKeyInfos(t) + ts := constructTipSet(t, height, paretns, weight) + + data, err := json.Marshal(ts) + require.NoError(t, err, "json mahrshal for TipSet") + + var dst TipSet + err = json.Unmarshal(data, &dst) + require.NoError(t, err, "json unmarshal for TipSet") + + checkForTipSetEqual(t, ts, &dst) +} + +func TestTipSetEquals(t *testing.T) { + tf.UnitTest(t) + height, paretns, weight := constructTipSetKeyInfos(t) + ts := constructTipSet(t, height, paretns, weight) + + assert.True(t, (*TipSet)(nil).Equals(nil), "nil tipset equals") + assert.False(t, ts.Equals(nil), "non-nil is always != nil") +} + +func TestTipSetBasic(t *testing.T) { + tf.UnitTest(t) + var buf bytes.Buffer + + for i := 0; i < 32; i++ { + height, paretns, weight := constructTipSetKeyInfos(t) + ts := constructTipSet(t, height, paretns, weight) + + var src, dst TipSet + + opts := testutil.CborErBasicTestOptions{ + Buf: &buf, + + Provided: func() { + // all fields in TipSet are private, so we assign the value manually + src = *ts + require.NotEqual(t, src, dst, "src provided") + }, + + Finished: func() { + require.Equal(t, src, dst, "struct equals") + checkForTipSetEqual(t, &src, &dst) + + src.height++ + require.False(t, src.Equals(&dst), "height matters in TipSet.Equals") + }, + } + + testutil.CborErBasicTest(t, &src, &dst, opts) + } +} diff --git a/venus-shared/types/tipset_test.go b/venus-shared/types/tipset_test.go new file mode 100644 index 0000000000..21497a90d4 --- /dev/null +++ b/venus-shared/types/tipset_test.go @@ -0,0 +1,122 @@ +package types + +import ( + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/testutil" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +const ( + minBlockHeaderNumForTest = 3 + 
maxBlockHeaderNumForTest = 10 +) + +func constructTipSetKeyInfos(t *testing.T) (abi.ChainEpoch, []cid.Cid, big.Int) { + now := time.Now().Unix() + height := abi.ChainEpoch(now) + + var parentNum int + testutil.Provide(t, &parentNum, testutil.IntRangedProvider(minBlockHeaderNumForTest, maxBlockHeaderNumForTest)) + + var parents []cid.Cid + testutil.Provide(t, &parents, testutil.WithSliceLen(parentNum)) + require.GreaterOrEqual(t, len(parents), minBlockHeaderNumForTest) + require.Less(t, len(parents), maxBlockHeaderNumForTest) + + var parentWeight big.Int + testutil.Provide(t, &parentWeight, testutil.PositiveBigProvider()) + require.True(t, parentWeight.GreaterThan(big.Zero())) + + return height, parents, parentWeight +} + +func constructTipSet(t *testing.T, height abi.ChainEpoch, parents []cid.Cid, parentWeight big.Int) *TipSet { + appliers := []struct { + fn func(*BlockHeader) + msg string + }{ + { + fn: func(bh *BlockHeader) { + bh.Height = height + }, + msg: "inconsistent block heights ", + }, + + { + fn: func(bh *BlockHeader) { + bh.Parents = make([]cid.Cid, len(parents)) + copy(bh.Parents, parents) + }, + msg: "inconsistent block parents ", + }, + + { + fn: func(bh *BlockHeader) { + bh.ParentWeight.Int.Set(parentWeight.Int) + }, + msg: "inconsistent block parent weights ", + }, + } + + var blkNum int + testutil.Provide(t, &blkNum, testutil.IntRangedProvider(minBlockHeaderNumForTest, maxBlockHeaderNumForTest)) + + var bhs []*BlockHeader + // use a max-int-limit of 1<<48 to prevent digit precision problem in json + testutil.Provide(t, &bhs, testutil.WithSliceLen(blkNum), testutil.IntRangedProvider(0, 1<<48)) + + require.GreaterOrEqual(t, len(bhs), minBlockHeaderNumForTest) + require.Less(t, len(bhs), maxBlockHeaderNumForTest) + + for ai := 0; ai < len(appliers); ai++ { + _, err := NewTipSet(bhs) + require.Errorf(t, err, "attempt to construct tipset before applier #%d", ai) + require.Containsf(t, err.Error(), appliers[ai].msg, "err msg content before applier #%d", 
ai) + + for bi := range bhs { + appliers[ai].fn(bhs[bi]) + } + } + + // duplicate bh + _, err := NewTipSet(append(bhs, bhs[0])) + require.Error(t, err, "attempt to construct tipset with duplicated bh") + require.Containsf(t, err.Error(), "duplicate block ", "err msg content for duplicate block") + + // construct + ts, err := NewTipSet(bhs) + require.NoError(t, err, "construct tipset") + + return ts +} + +func TestTipSetConstruct(t *testing.T) { + tf.UnitTest(t) + height, parents, parentWeight := constructTipSetKeyInfos(t) + constructTipSet(t, height, parents, parentWeight) +} + +func TestTipSetMethods(t *testing.T) { + tf.UnitTest(t) + height, parents, parentWeight := constructTipSetKeyInfos(t) + + ts := constructTipSet(t, height, parents, parentWeight) + require.True(t, ts.Defined()) + + tsk := ts.Key() + require.NotEqual(t, EmptyTSK, tsk, "tsk not empty") + + require.Equal(t, ts.Height(), height) + + require.True(t, ts.ParentWeight().Equals(parentWeight), "parent weight") + + child := constructTipSet(t, height+1, tsk.Cids(), BigMul(parentWeight, NewInt(2))) + require.True(t, child.IsChildOf(ts), "check if is child") + require.Equal(t, tsk.Cids(), child.Parents().Cids(), "child.Parents() == parent.Cids()") +} diff --git a/venus-shared/types/uuid.go b/venus-shared/types/uuid.go new file mode 100644 index 0000000000..53dbdf67c2 --- /dev/null +++ b/venus-shared/types/uuid.go @@ -0,0 +1,86 @@ +package types + +import ( + "bytes" + "database/sql/driver" + "fmt" + + "github.com/google/uuid" +) + +type UUID uuid.UUID + +func NewUUID() UUID { + return UUID(uuid.New()) +} + +func ParseUUID(uid string) (UUID, error) { + id, err := uuid.Parse(uid) + if err != nil { + return UUID{}, nil + } + + return UUID(id), nil +} + +// Value implement sql.Scanner +func (uid UUID) IsEmpty() bool { + return uid == UUID{} +} + +// Value implement sql.Scanner +func (uid UUID) String() string { + return uuid.UUID(uid).String() +} + +// Value implement sql.Scanner +func (uid UUID) Value() 
(driver.Value, error) { + return uuid.UUID(uid).String(), nil +} + +// Scan assigns a value from a database driver. +// An error should be returned if the value cannot be stored +// without loss of information. +// +// Reference types such as []byte are only valid until the next call to Scan +// and should not be retained. Their underlying memory is owned by the driver. +// If retention is necessary, copy their values before the next call to Scan. +func (uid *UUID) Scan(value interface{}) error { + var id uuid.UUID + var err error + switch value := value.(type) { + case string: + id, err = uuid.Parse(value) + case []byte: + id, err = uuid.ParseBytes(value) + default: + return fmt.Errorf("unsupport %t type for uuid", value) + } + if err != nil { + return err + } + *uid = (UUID)(id) + return nil +} + +func (uid UUID) MarshalJSON() ([]byte, error) { + return []byte("\"" + uid.String() + "\""), nil +} + +func (uid *UUID) UnmarshalJSON(b []byte) error { + b = bytes.Trim(b, "\"") + id, err := uuid.ParseBytes(b) + if err != nil { + return err + } + *uid = (UUID)(id) + return nil +} + +func (uid UUID) MarshalBinary() ([]byte, error) { + return uuid.UUID(uid).MarshalBinary() +} + +func (uid *UUID) UnmarshalBinary(b []byte) error { + return (*uuid.UUID)(uid).UnmarshalBinary(b) +} diff --git a/venus-shared/types/uuid_test.go b/venus-shared/types/uuid_test.go new file mode 100644 index 0000000000..4e12c26927 --- /dev/null +++ b/venus-shared/types/uuid_test.go @@ -0,0 +1,76 @@ +package types + +import ( + "encoding/json" + "testing" + + "github.com/google/uuid" +) + +func TestUUID_Scan(t *testing.T) { + uid := uuid.New() + newID := UUID{} + err := newID.Scan(uid.String()) + if err != nil { + t.Error(err) + } + + if newID.String() != uid.String() { + t.Errorf("convert value failed") + } +} + +func TestUUID_Value(t *testing.T) { + uid := uuid.New() + newID := UUID(uid) + + val, err := newID.Value() + if err != nil { + t.Error(err) + } + if val.(string) != uid.String() { + 
t.Errorf("convert value failed") + } +} + +func TestUUID_JsonMarshal(t *testing.T) { + type T struct { + ID UUID + } + + val := T{ID: NewUUID()} + + marshallBytes, err := json.Marshal(&val) + if err != nil { + t.Error(err) + } + + var val2 T + err = json.Unmarshal(marshallBytes, &val2) + if err != nil { + t.Error(err) + } + + if val2.ID != val.ID { + t.Errorf("UUID json marshal fail") + } +} + +func TestUUID_MarshalBinary(t *testing.T) { + uuid := NewUUID() + + marshallBytes, err := uuid.MarshalBinary() + if err != nil { + t.Error(err) + } + + uuid2 := &UUID{} + err = uuid2.UnmarshalBinary(marshallBytes) + if err != nil { + t.Error(err) + } + + if uuid != *uuid2 { + t.Errorf("UUID binary marshal fail") + } +} diff --git a/venus-shared/types/version.go b/venus-shared/types/version.go new file mode 100644 index 0000000000..699f44640b --- /dev/null +++ b/venus-shared/types/version.go @@ -0,0 +1,37 @@ +package types + +import ( + "fmt" +) + +type APIVersion uint32 + +func NewVer(major, minor, patch uint8) APIVersion { + return APIVersion(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch)) +} + +// Ints returns (major, minor, patch) versions +func (ve APIVersion) Ints() (uint32, uint32, uint32) { + v := uint32(ve) + return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask +} + +func (ve APIVersion) String() string { + vmj, vmi, vp := ve.Ints() + return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp) +} + +func (ve APIVersion) EqMajorMinor(v2 APIVersion) bool { + return ve&minorMask == v2&minorMask +} + +//nolint:varcheck,deadcode +const ( + majorMask = 0xff0000 + minorMask = 0xffff00 + patchMask = 0xffffff + + majorOnlyMask = 0xff0000 + minorOnlyMask = 0x00ff00 + patchOnlyMask = 0x0000ff +) diff --git a/venus-shared/types/wallet/draw_random_params.go b/venus-shared/types/wallet/draw_random_params.go new file mode 100644 index 0000000000..1689eb5566 --- /dev/null +++ b/venus-shared/types/wallet/draw_random_params.go @@ -0,0 +1,64 @@ +package wallet + +import 
( + "encoding/binary" + "fmt" + "io" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/crypto" + fcbor "github.com/fxamacker/cbor/v2" + "github.com/minio/blake2b-simd" +) + +type DrawRandomParams struct { + Rbase []byte + Pers crypto.DomainSeparationTag + Round abi.ChainEpoch + Entropy []byte +} + +// return store.DrawRandomness(dr.Rbase, dr.Pers, dr.Round, dr.Entropy) +func (dr *DrawRandomParams) SignBytes() ([]byte, error) { + h := blake2b.New256() + if err := binary.Write(h, binary.BigEndian, int64(dr.Pers)); err != nil { + return nil, fmt.Errorf("deriving randomness: %w", err) + } + VRFDigest := blake2b.Sum256(dr.Rbase) + _, err := h.Write(VRFDigest[:]) + if err != nil { + return nil, fmt.Errorf("hashing VRFDigest: %w", err) + } + if err := binary.Write(h, binary.BigEndian, dr.Round); err != nil { + return nil, fmt.Errorf("deriving randomness: %w", err) + } + _, err = h.Write(dr.Entropy) + if err != nil { + return nil, fmt.Errorf("hashing entropy: %w", err) + } + + return h.Sum(nil), nil +} + +func (dr *DrawRandomParams) MarshalCBOR(w io.Writer) error { + data, err := fcbor.Marshal(dr) + if err != nil { + return err + } + _, err = w.Write(data) + return err +} + +func (dr *DrawRandomParams) UnmarshalCBOR(r io.Reader) error { + data, err := io.ReadAll(r) + if err != nil { + return err + } + return fcbor.Unmarshal(data, dr) +} + +var ( + _ = cbor.Unmarshaler((*DrawRandomParams)(nil)) + _ = cbor.Marshaler((*DrawRandomParams)(nil)) +) diff --git a/venus-shared/types/wallet/msg_enum.go b/venus-shared/types/wallet/msg_enum.go new file mode 100644 index 0000000000..edac0e66ee --- /dev/null +++ b/venus-shared/types/wallet/msg_enum.go @@ -0,0 +1,130 @@ +package wallet + +import ( + "errors" + "math" + + "github.com/ahmetb/go-linq/v3" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var ErrCodeOverflow = errors.New("code over flow") + +type MsgEnum = uint32 
+ +const ( + MEUnknown MsgEnum = 1 << iota + MEChainMsg + MEBlock + MEDealProposal + MEDrawRandomParam + MESignedVoucher + MEStorageAsk + MEAskResponse + MENetWorkResponse + MEProviderDealState + MEClientDeal + MEVerifyAddress +) + +var MsgEnumPool = []struct { + Code int + Name string +}{ + {Code: MsgEnumCode(MEUnknown), Name: "unknown"}, + {Code: MsgEnumCode(MEChainMsg), Name: "chainMsg"}, + {Code: MsgEnumCode(MEBlock), Name: "block"}, + {Code: MsgEnumCode(MEDealProposal), Name: "dealProposal"}, + {Code: MsgEnumCode(MEDrawRandomParam), Name: "drawRandomParam"}, + {Code: MsgEnumCode(MESignedVoucher), Name: "signedVoucher"}, + {Code: MsgEnumCode(MEStorageAsk), Name: "storageAsk"}, + {Code: MsgEnumCode(MEAskResponse), Name: "askResponse"}, + {Code: MsgEnumCode(MENetWorkResponse), Name: "netWorkResponse"}, + {Code: MsgEnumCode(MEProviderDealState), Name: "providerDealState"}, + {Code: MsgEnumCode(MEClientDeal), Name: "clientDeal"}, +} +var MaxMsgEnumCode = len(MsgEnumPool) - 1 + +func CheckMsgEnum(me MsgEnum) error { + max := 1 << MaxMsgEnumCode + if me > uint32(max) { + return ErrCodeOverflow + } + return nil +} + +func FindCode(enum MsgEnum) []int { + var codes []int + for power := 0; enum > 0; power++ { + digit := enum % 2 + if digit == 1 { + codes = append(codes, power) + } + enum /= 2 + } + return codes +} + +func AggregateMsgEnumCode(codes []int) (MsgEnum, error) { + if len(codes) == 0 { + return 0, errors.New("nil reference") + } + linq.From(codes).Distinct().ToSlice(&codes) + em := MsgEnum(0) + for _, v := range codes { + me, err := MsgEnumFromInt(v) + if err != nil { + return 0, err + } + em += me + } + return em, nil +} + +func MsgEnumFromInt(code int) (MsgEnum, error) { + if code < 0 || code > MaxMsgEnumCode { + return 0, ErrCodeOverflow + } + return 1 << code, nil +} + +func MsgEnumCode(me MsgEnum) int { + code := math.Log2(float64(me)) + return int(code) +} + +func ContainMsgType(multiME MsgEnum, mt types.MsgType) bool { + me := convertToMsgEnum(mt) + 
return multiME&me == me +} + +func convertToMsgEnum(mt types.MsgType) MsgEnum { + switch mt { + case types.MTUnknown: + return MEUnknown + case types.MTChainMsg: + return MEChainMsg + case types.MTBlock: + return MEBlock + case types.MTDealProposal: + return MEDealProposal + case types.MTDrawRandomParam: + return MEDrawRandomParam + case types.MTSignedVoucher: + return MESignedVoucher + case types.MTStorageAsk: + return MEStorageAsk + case types.MTAskResponse: + return MEAskResponse + case types.MTNetWorkResponse: + return MENetWorkResponse + case types.MTProviderDealState: + return MEProviderDealState + case types.MTClientDeal: + return MEClientDeal + case types.MTVerifyAddress: + return MEVerifyAddress + default: + return MEUnknown + } +} diff --git a/venus-shared/types/wallet/msg_enum_test.go b/venus-shared/types/wallet/msg_enum_test.go new file mode 100644 index 0000000000..72b970fac3 --- /dev/null +++ b/venus-shared/types/wallet/msg_enum_test.go @@ -0,0 +1,44 @@ +package wallet + +import ( + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/types" + "gotest.tools/assert" +) + +func TestContainMsgType(t *testing.T) { + tf.UnitTest(t) + multiME := MEUnknown + MEChainMsg + MEStorageAsk + MEProviderDealState + MEVerifyAddress + assert.Equal(t, ContainMsgType(multiME, types.MTChainMsg), true) + assert.Equal(t, ContainMsgType(multiME, types.MTStorageAsk), true) + assert.Equal(t, ContainMsgType(multiME, types.MTProviderDealState), true) + assert.Equal(t, ContainMsgType(multiME, types.MTUnknown), true) + assert.Equal(t, ContainMsgType(multiME, types.MTBlock), false) + assert.Equal(t, ContainMsgType(multiME, types.MTDealProposal), false) + assert.Equal(t, ContainMsgType(multiME, types.MTDrawRandomParam), false) + assert.Equal(t, ContainMsgType(multiME, types.MTSignedVoucher), false) + assert.Equal(t, ContainMsgType(multiME, types.MTAskResponse), false) + assert.Equal(t, 
ContainMsgType(multiME, types.MTNetWorkResponse), false) + assert.Equal(t, ContainMsgType(multiME, types.MTClientDeal), false) + assert.Equal(t, ContainMsgType(multiME, types.MTVerifyAddress), true) +} + +func TestFindCode(t *testing.T) { + tf.UnitTest(t) + ids := FindCode(38) + assert.DeepEqual(t, []int{1, 2, 5}, ids) + + ids2 := FindCode(8392) + assert.DeepEqual(t, []int{3, 6, 7, 13}, ids2) +} + +func TestAggregateMsgEnumCode(t *testing.T) { + tf.UnitTest(t) + me, err := AggregateMsgEnumCode([]int{1, 2, 3, 4, 5, 6, 7}) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, me, uint32(254)) +} diff --git a/venus-shared/types/wallet/wallet.go b/venus-shared/types/wallet/wallet.go new file mode 100644 index 0000000000..912c47ef79 --- /dev/null +++ b/venus-shared/types/wallet/wallet.go @@ -0,0 +1,74 @@ +package wallet + +import ( + linq "github.com/ahmetb/go-linq/v3" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/venus/venus-shared/types" +) + +type AddressScope struct { + Root bool // is root auth, true : can get all addresses in the wallet + Addresses []address.Address // when root==false, should fill a scope of wallet addresses +} + +type MethodName = string + +// KeyStrategy a uint of wallet strategy +type KeyStrategy struct { + Address address.Address // wallet address + MetaTypes MsgEnum // sum MsgEnum + Methods []MethodName // msg method array +} + +// GroupAuth relation with Group and generate a token for external invocation +type GroupAuth struct { + Token string + GroupID uint + Name string + KeyBinds []*KeyBind +} + +// KeyBind bind wallet usage strategy +// allow designated rule to pass +type KeyBind struct { + BindID uint + Name string + Address string + // source from MsgTypeTemplate or temporary create + MetaTypes MsgEnum + // source from MethodTemplate + Methods []MethodName +} + +func (kb *KeyBind) ContainMsgType(m types.MsgType) bool { + return ContainMsgType(kb.MetaTypes, m) +} + +func (kb *KeyBind) ContainMethod(m 
string) bool { + return linq.From(kb.Methods).Contains(m) +} + +// Group multi KeyBind +type Group struct { + GroupID uint + Name string + // NOTE: not fill data when query groups + KeyBinds []*KeyBind +} + +// MethodTemplate to quickly create a private key usage strategy +// msg actor and methodNum agg to method name +// NOTE: routeType 4 +type MethodTemplate struct { + MTId uint + Name string + // method name join with ',' + Methods []MethodName +} + +// MsgTypeTemplate to quickly create a private key usage strategy +type MsgTypeTemplate struct { + MTTId uint + Name string + MetaTypes MsgEnum +} diff --git a/venus-shared/typeutil/codecflag_string.go b/venus-shared/typeutil/codecflag_string.go new file mode 100644 index 0000000000..d62c80d4cf --- /dev/null +++ b/venus-shared/typeutil/codecflag_string.go @@ -0,0 +1,43 @@ +// Code generated by "stringer -type=CodecFlag -trimprefix=Codec"; DO NOT EDIT. + +package typeutil + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[CodecBinary-1] + _ = x[CodecText-2] + _ = x[CodecJSON-4] + _ = x[CodecCbor-8] + _ = x[_codecLimit-16] +} + +const ( + _CodecFlag_name_0 = "BinaryText" + _CodecFlag_name_1 = "JSON" + _CodecFlag_name_2 = "Cbor" + _CodecFlag_name_3 = "_codecLimit" +) + +var ( + _CodecFlag_index_0 = [...]uint8{0, 6, 10} +) + +func (i CodecFlag) String() string { + switch { + case 1 <= i && i <= 2: + i -= 1 + return _CodecFlag_name_0[_CodecFlag_index_0[i]:_CodecFlag_index_0[i+1]] + case i == 4: + return _CodecFlag_name_1 + case i == 8: + return _CodecFlag_name_2 + case i == 16: + return _CodecFlag_name_3 + default: + return "CodecFlag(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/venus-shared/typeutil/fields.go b/venus-shared/typeutil/fields.go new file mode 100644 index 0000000000..b08346bbdc --- /dev/null +++ b/venus-shared/typeutil/fields.go @@ -0,0 +1,50 @@ +package typeutil + +import ( + "go/ast" + "reflect" + "sync" +) + +var exportedFieldsCache = struct { + sync.RWMutex + fields map[reflect.Type][]reflect.StructField +}{ + fields: make(map[reflect.Type][]reflect.StructField), +} + +func ExportedFields(obj interface{}) []reflect.StructField { + typ, ok := obj.(reflect.Type) + if !ok { + typ = reflect.TypeOf(obj) + } + + if kind := typ.Kind(); kind != reflect.Struct { + return nil + } + + exportedFieldsCache.RLock() + fields, ok := exportedFieldsCache.fields[typ] + exportedFieldsCache.RUnlock() + + if ok { + return fields + } + + num := typ.NumField() + fields = make([]reflect.StructField, 0, num) + for i := 0; i < num; i++ { + field := typ.Field(i) + if !ast.IsExported(field.Name) { + continue + } + + fields = append(fields, field) + } + + exportedFieldsCache.Lock() + exportedFieldsCache.fields[typ] = fields + exportedFieldsCache.Unlock() + + return fields +} diff --git a/venus-shared/typeutil/fields_test.go b/venus-shared/typeutil/fields_test.go new file mode 100644 index 0000000000..51d4cd0fa3 --- /dev/null +++ 
b/venus-shared/typeutil/fields_test.go @@ -0,0 +1,26 @@ +package typeutil + +import ( + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/stretchr/testify/require" +) + +func TestExportedFields(t *testing.T) { + tf.UnitTest(t) + + f := ExportedFields(new(int)) + require.Nil(t, f, "nil fields for non-struct type") + + type S struct { + A int + B uint + C bool + d float32 // nolint + _C float64 // nolint + } + + f = ExportedFields(S{}) + require.Len(t, f, 3, "exported fields") +} diff --git a/venus-shared/typeutil/methods.go b/venus-shared/typeutil/methods.go new file mode 100644 index 0000000000..1b432b046a --- /dev/null +++ b/venus-shared/typeutil/methods.go @@ -0,0 +1,81 @@ +package typeutil + +import ( + "go/ast" + "reflect" + "sync" +) + +var exportedMethodsCache = struct { + sync.RWMutex + methods map[reflect.Type][]reflect.Method +}{ + methods: make(map[reflect.Type][]reflect.Method), +} + +func ExportedMethods(obj interface{}) []reflect.Method { + typ, ok := obj.(reflect.Type) + if !ok { + typ = reflect.TypeOf(obj) + } + + exportedMethodsCache.RLock() + methods, ok := exportedMethodsCache.methods[typ] + exportedMethodsCache.RUnlock() + + if ok { + return methods + } + + all := AllMethods(typ) + methods = make([]reflect.Method, 0, len(all)) + for i := range all { + method := all[i] + if !ast.IsExported(method.Name) { + continue + } + + methods = append(methods, method) + } + + exportedMethodsCache.Lock() + exportedMethodsCache.methods[typ] = methods + exportedMethodsCache.Unlock() + + return methods +} + +var allMethodsCache = struct { + sync.RWMutex + methods map[reflect.Type][]reflect.Method +}{ + methods: make(map[reflect.Type][]reflect.Method), +} + +func AllMethods(obj interface{}) []reflect.Method { + typ, ok := obj.(reflect.Type) + if !ok { + typ = reflect.TypeOf(obj) + } + + allMethodsCache.RLock() + methods, ok := allMethodsCache.methods[typ] + allMethodsCache.RUnlock() + + if ok { + return methods + } + + 
num := typ.NumMethod() + methods = make([]reflect.Method, 0, num) + for i := 0; i < num; i++ { + method := typ.Method(i) + methods = append(methods, method) + } + + allMethodsCache.Lock() + allMethodsCache.methods[typ] = methods + allMethodsCache.Unlock() + + return methods +} diff --git a/venus-shared/typeutil/methods_test.go b/venus-shared/typeutil/methods_test.go new file mode 100644 index 0000000000..0e2e704c28 --- /dev/null +++ b/venus-shared/typeutil/methods_test.go @@ -0,0 +1,40 @@ +package typeutil + +import ( + "io" + "reflect" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/stretchr/testify/require" +) + +func TestExportedMethods(t *testing.T) { + tf.UnitTest(t) + meths := ExportedMethods(reflect.TypeOf((*io.ReadCloser)(nil)).Elem()) + require.Len(t, meths, 2, "exported methods for io.ReadCloser") + + type I interface { + Public() + private() + } + + meths = AllMethods(reflect.TypeOf((*I)(nil)).Elem()) + require.Len(t, meths, 2, "all methods for I") + + meths = ExportedMethods(reflect.TypeOf((*I)(nil)).Elem()) + require.Len(t, meths, 1, "exported methods for I") + + var ci codecInt + meths = ExportedMethods(&ci) + require.Len(t, meths, 8, "exported methods for *codecInt") + + meths = ExportedMethods(ci) + require.Len(t, meths, 4, "exported methods for codecInt") + + meths = AllMethods(&ci) + require.Len(t, meths, 8, "all methods for *codecInt") + + meths = AllMethods(ci) + require.Len(t, meths, 4, "all methods for codecInt") +} diff --git a/venus-shared/typeutil/similar.go b/venus-shared/typeutil/similar.go new file mode 100644 index 0000000000..41edbb9124 --- /dev/null +++ b/venus-shared/typeutil/similar.go @@ -0,0 +1,416 @@ +package typeutil + +import ( + "encoding" + "encoding/json" + "fmt" + "reflect" + "sync" + + "github.com/filecoin-project/go-state-types/cbor" +) + +type CodecFlag uint + +//go:generate go run golang.org/x/tools/cmd/stringer -type=CodecFlag -trimprefix=Codec +const ( + CodecBinary 
CodecFlag = 1 << iota + CodecText + CodecJSON + CodecCbor + _codecLimit +) + +type SimilarMode uint + +const ( + StructFieldsOrdered SimilarMode = 1 << iota + StructFieldTagsMatch + InterfaceAllMethods + AvoidRecursive +) + +var codecs = []struct { + flag CodecFlag + marshaler reflect.Type + unmarshaler reflect.Type +}{ + { + flag: CodecBinary, + marshaler: reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem(), + unmarshaler: reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem(), + }, + { + flag: CodecText, + marshaler: reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem(), + unmarshaler: reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem(), + }, + { + flag: CodecJSON, + marshaler: reflect.TypeOf((*json.Marshaler)(nil)).Elem(), + unmarshaler: reflect.TypeOf((*json.Unmarshaler)(nil)).Elem(), + }, + { + flag: CodecCbor, + marshaler: reflect.TypeOf((*cbor.Marshaler)(nil)).Elem(), + unmarshaler: reflect.TypeOf((*cbor.Unmarshaler)(nil)).Elem(), + }, +} + +type similarResult struct { + similar bool + reason *Reason +} + +type similarInput struct { + a reflect.Type + b reflect.Type + codecFlag CodecFlag + smode SimilarMode +} + +var similarCache = struct { + sync.RWMutex + results map[similarInput]similarResult +}{ + results: make(map[similarInput]similarResult), +} + +type SimilarStack = [2]reflect.Type + +func Similar(a, b interface{}, codecFlag CodecFlag, smode SimilarMode, stack ...SimilarStack) (bool, *Reason) { + atyp, ok := a.(reflect.Type) + if !ok { + atyp = reflect.TypeOf(a) + } + + btyp, ok := b.(reflect.Type) + if !ok { + btyp = reflect.TypeOf(b) + } + + if atyp == btyp { + return true, nil + } + + sinput := similarInput{ + a: atyp, + b: btyp, + codecFlag: codecFlag, + smode: smode, + } + + similarCache.RLock() + res, has := similarCache.results[sinput] + if !has { + sinput.a, sinput.b = btyp, atyp + res, has = similarCache.results[sinput] + } + similarCache.RUnlock() + + if has { + return res.similar, res.reason + } + + reasonf := makeReasonf(atyp, 
btyp) + reasonWrap := makeReasonWrap(atyp, btyp) + + // recursive + for si := range stack { + // we assumpt that they are similar + // but we won't cache the result + if (stack[si][0] == atyp && stack[si][1] == btyp) || (stack[si][1] == atyp && stack[si][0] == btyp) { + return smode&AvoidRecursive == 0, reasonf("%w in the stack #%d, now in #%d", ReasonRecursiveCompare, si, len(stack)) + } + } + + stack = append(stack, [2]reflect.Type{atyp, btyp}) + + var yes bool + var reason *Reason + + defer func() { + similarCache.Lock() + similarCache.results[sinput] = similarResult{ + similar: yes, + reason: reason, + } + similarCache.Unlock() + }() + + akind := atyp.Kind() + bkind := btyp.Kind() + + if akind != bkind { + reason = reasonf("%w: %s != %s", ReasonTypeKinds, akind, bkind) + return yes, reason + } + + if codecFlag != 0 { + for i := range codecs { + if codecFlag&codecs[i].flag == 0 { + continue + } + + aMarImpl := atyp.Implements(codecs[i].marshaler) + bMarImpl := btyp.Implements(codecs[i].marshaler) + if aMarImpl != bMarImpl { + reason = reasonf("%w for codec %s: %v != %v", ReasonCodecMarshalerImplementations, codecs[i].flag, aMarImpl, bMarImpl) + return yes, reason + } + + aUMarImpl := atyp.Implements(codecs[i].unmarshaler) + bUMarImpl := btyp.Implements(codecs[i].unmarshaler) + if aUMarImpl != bUMarImpl { + reason = reasonf("%w for codec %s: %v; %v", ReasonCodecUnmarshalerImplementations, codecs[i].flag, aUMarImpl, bUMarImpl) + return yes, reason + } + } + } + + switch akind { + case reflect.Bool: + fallthrough + + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64: + fallthrough + + case reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + fallthrough + + case reflect.Float32, reflect.Float64: + fallthrough + + case reflect.Complex64, reflect.Complex128: + fallthrough + + case reflect.Uintptr, reflect.UnsafePointer: + fallthrough + + case reflect.String: + yes = true + + case reflect.Interface: + 
+ default: + yes = atyp.ConvertibleTo(btyp) + } + + if yes { + return yes, reason + } + + switch akind { + case reflect.Array: + if atyp.Len() != btyp.Len() { + reason = reasonf("%w: %d != %d", ReasonArrayLength, atyp.Len(), btyp.Len()) + break + } + + elemMatch, elemReason := Similar(atyp.Elem(), btyp.Elem(), codecFlag, smode, stack...) + if !elemMatch { + reason = reasonWrap(elemReason, ReasonArrayElement) + break + } + + yes = true + + case reflect.Map: + keyMatch, keyReason := Similar(atyp.Key(), btyp.Key(), codecFlag, smode, stack...) + if !keyMatch { + reason = reasonWrap(keyReason, ReasonMapKey) + break + } + + valueMatch, valueReason := Similar(atyp.Elem(), btyp.Elem(), codecFlag, smode, stack...) + if !valueMatch { + reason = reasonWrap(valueReason, ReasonMapValue) + break + } + + yes = true + + case reflect.Ptr: + elemMatch, elemReason := Similar(atyp.Elem(), btyp.Elem(), codecFlag, smode, stack...) + if !elemMatch { + reason = reasonWrap(elemReason, ReasonPtrElememnt) + break + } + + yes = true + + case reflect.Slice: + elemMatch, elemReason := Similar(atyp.Elem(), btyp.Elem(), codecFlag, smode, stack...) + if !elemMatch { + reason = reasonWrap(elemReason, ReasonSliceElement) + break + } + + yes = true + + case reflect.Struct: + fieldsMatch, fieldsReason := fieldsSimilar(atyp, btyp, codecFlag, smode, stack...) + if !fieldsMatch { + reason = reasonWrap(fieldsReason, ReasonStructField) + break + } + + yes = true + + case reflect.Interface: + methsMatch, methsReason := methodsSimilar(atyp, btyp, codecFlag, smode, stack...) + if !methsMatch { + reason = reasonWrap(methsReason, ReasonInterfaceMethod) + break + } + + yes = true + + case reflect.Chan: + adir := atyp.ChanDir() + bdir := btyp.ChanDir() + if adir != bdir { + reason = reasonf("%w: %s != %s", ReasonChanDir, adir, bdir) + break + } + + elemMatch, elemReason := Similar(atyp.Elem(), btyp.Elem(), codecFlag, smode, stack...) 
+ if !elemMatch { + reason = reasonWrap(elemReason, ReasonChanElement) + break + } + + yes = true + + case reflect.Func: + yes, reason = funcSimilar(atyp, btyp, codecFlag, smode, stack...) + + } + + return yes, reason +} + +func funcSimilar(atyp, btyp reflect.Type, codecFlag CodecFlag, smode SimilarMode, stack ...SimilarStack) (bool, *Reason) { + reasonf := makeReasonf(atyp, btyp) + reasonWrap := makeReasonWrap(atyp, btyp) + + aNumIn := atyp.NumIn() + bNumIn := btyp.NumIn() + if aNumIn != bNumIn { + return false, reasonf("%w: %d != %d", ReasonFuncInNum, aNumIn, bNumIn) + } + + aNumOut := atyp.NumOut() + bNumOut := btyp.NumOut() + if aNumOut != bNumOut { + return false, reasonf("%w: %d != %d", ReasonFuncOutNum, aNumOut, bNumOut) + } + + for i := 0; i < aNumIn; i++ { + inMatch, inReason := Similar(atyp.In(i), btyp.In(i), codecFlag, smode, stack...) + if !inMatch { + return false, reasonWrap(inReason, fmt.Errorf("%w: #%d input", ReasonFuncInType, i)) + } + } + + for i := 0; i < aNumOut; i++ { + outMatch, outReason := Similar(atyp.Out(i), btyp.Out(i), codecFlag, smode, stack...) 
+ if !outMatch { + return false, reasonWrap(outReason, fmt.Errorf("%w: #%d input", ReasonFuncOutType, i)) + } + } + + return true, nil +} + +func fieldsSimilar(a, b reflect.Type, codecFlag CodecFlag, smode SimilarMode, stack ...SimilarStack) (bool, *Reason) { + reasonf := makeReasonf(a, b) + reasonWrap := makeReasonWrap(a, b) + + afields := ExportedFields(a) + bfields := ExportedFields(b) + + if len(afields) != len(bfields) { + return false, reasonf("%w: %d != %d", ReasonExportedFieldsCount, len(afields), len(bfields)) + } + + if smode&StructFieldsOrdered != 0 { + for i := range afields { + if afields[i].Name != bfields[i].Name { + return false, reasonf("%w: #%d field, %s != %s", ReasonExportedFieldName, i, afields[i].Name, bfields[i].Name) + } + + if smode&StructFieldTagsMatch != 0 && afields[i].Tag != bfields[i].Tag { + return false, reasonf("%w: #%d field, %s != %s", ReasonExportedFieldTag, i, afields[i].Tag, bfields[i].Tag) + } + + yes, reason := Similar(afields[i].Type, bfields[i].Type, codecFlag, smode, stack...) + if !yes { + return false, reasonWrap(reason, fmt.Errorf("%w: #%d field named %s", ReasonExportedFieldType, i, afields[i].Name)) + } + } + + return true, nil + } + + mfields := map[string]reflect.StructField{} + for i := range afields { + mfields[afields[i].Name] = afields[i] + } + + for i := range bfields { + f := bfields[i] + af, has := mfields[f.Name] + if !has { + return false, reasonf("%w: named %s", ReasonExportedFieldNotFound, f.Name) + } + + if smode&StructFieldTagsMatch != 0 && af.Tag != f.Tag { + return false, reasonf("%w: named field %s, %s != %s", ReasonExportedFieldTag, f.Name, af.Tag, f.Tag) + } + + yes, reason := Similar(af.Type, f.Type, codecFlag, smode, stack...) 
+ if !yes { + return false, reasonWrap(reason, fmt.Errorf("%w: named %s", ReasonExportedFieldType, f.Name)) + } + } + + return true, nil +} + +func methodsSimilar(a, b reflect.Type, codecFlag CodecFlag, smode SimilarMode, stack ...SimilarStack) (bool, *Reason) { + reasonf := makeReasonf(a, b) + reasonWrap := makeReasonWrap(a, b) + + ameths := ExportedMethods(a) + bmeths := ExportedMethods(b) + if smode&InterfaceAllMethods != 0 { + ameths = AllMethods(a) + bmeths = AllMethods(b) + } + + if len(ameths) != len(bmeths) { + return false, reasonf("%w: %d != %d", ReasonExportedMethodsCount, len(ameths), len(bmeths)) + } + + for i := range ameths { + if ameths[i].Name != bmeths[i].Name { + return false, reasonf("%w: #%d method, %s != %s ", ReasonExportedMethodName, i, ameths[i].Name, bmeths[i].Name) + } + + yes, reason := Similar(ameths[i].Type, bmeths[i].Type, codecFlag, smode, stack...) + if !yes { + return false, reasonWrap(reason, fmt.Errorf("%w: #%d method named %s", ReasonExportedMethodType, i, ameths[i].Name)) + } + } + + return true, nil +} diff --git a/venus-shared/typeutil/similar_reason.go b/venus-shared/typeutil/similar_reason.go new file mode 100644 index 0000000000..45e63820a6 --- /dev/null +++ b/venus-shared/typeutil/similar_reason.go @@ -0,0 +1,85 @@ +package typeutil + +import ( + "errors" + "fmt" + "reflect" +) + +func makeReasonf(a, b reflect.Type) func(f string, args ...interface{}) *Reason { + return func(f string, args ...interface{}) *Reason { + wrapfn := makeReasonWrap(a, b) + return wrapfn(nil, fmt.Errorf(f, args...)) + } +} + +func makeReasonWrap(a, b reflect.Type) func(nested *Reason, base error) *Reason { + return func(nested *Reason, base error) *Reason { + return &Reason{ + TypeA: a, + TypeB: b, + Base: base, + Nested: nested, + } + } +} + +type Reason struct { + TypeA reflect.Type + TypeB reflect.Type + Base error + Nested *Reason +} + +func (r *Reason) Error() string { + if r == nil || r.Base == nil { + return "nil" + } + + return 
fmt.Sprintf("{[%s <> %s] base=%s; nested=%s}", r.TypeA, r.TypeB, r.Base, r.Nested) +} + +func (r *Reason) Is(target error) bool { + if r == nil || r.Base == nil { + return false + } + + return errors.Is(r.Base, target) +} + +func (r *Reason) Unwrap() error { + if r == nil || r.Nested == nil { + return nil + } + + return r.Nested +} + +var ( + ReasonTypeKinds = fmt.Errorf("type kinds") // nolint + ReasonCodecMarshalerImplementations = fmt.Errorf("codec marshaler implementations") // nolint + ReasonCodecUnmarshalerImplementations = fmt.Errorf("codec unmarshaler implementations") // nolint + ReasonArrayLength = fmt.Errorf("array length") // nolint + ReasonArrayElement = fmt.Errorf("array element") // nolint + ReasonMapKey = fmt.Errorf("map key") // nolint + ReasonMapValue = fmt.Errorf("map value") // nolint + ReasonPtrElememnt = fmt.Errorf("pointed type") // nolint + ReasonSliceElement = fmt.Errorf("slice element") // nolint + ReasonStructField = fmt.Errorf("struct field") // nolint + ReasonInterfaceMethod = fmt.Errorf("interface method") // nolint + ReasonChanDir = fmt.Errorf("channel direction") // nolint + ReasonChanElement = fmt.Errorf("channel element") // nolint + ReasonFuncInNum = fmt.Errorf("func in num") // nolint + ReasonFuncOutNum = fmt.Errorf("func out num") // nolint + ReasonFuncInType = fmt.Errorf("func in type") // nolint + ReasonFuncOutType = fmt.Errorf("func out type") // nolint + ReasonExportedFieldsCount = fmt.Errorf("exported fields count") // nolint + ReasonExportedFieldName = fmt.Errorf("exported field name") // nolint + ReasonExportedFieldTag = fmt.Errorf("exported field tag") // nolint + ReasonExportedFieldNotFound = fmt.Errorf("exported field not found") // nolint + ReasonExportedFieldType = fmt.Errorf("exported field type") // nolint + ReasonExportedMethodsCount = fmt.Errorf("exported methods count") // nolint + ReasonExportedMethodName = fmt.Errorf("exported method name") // nolint + ReasonExportedMethodType = fmt.Errorf("exported method 
type") // nolint + ReasonRecursiveCompare = fmt.Errorf("recursive compare") // nolint +) diff --git a/venus-shared/typeutil/similar_test.go b/venus-shared/typeutil/similar_test.go new file mode 100644 index 0000000000..b135552105 --- /dev/null +++ b/venus-shared/typeutil/similar_test.go @@ -0,0 +1,695 @@ +package typeutil + +import ( + "context" + "errors" + "io" + "math/bits" + "reflect" + "testing" + "unsafe" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/stretchr/testify/require" +) + +func TestCodecList(t *testing.T) { + tf.UnitTest(t) + zeroes := bits.TrailingZeros(uint(_codecLimit)) + require.Equalf(t, zeroes, len(codecs), "codec count not match, %d != %d", zeroes, len(codecs)) + + for ci := range codecs { + czeroes := bits.TrailingZeros(uint(codecs[ci].flag)) + require.Equalf(t, czeroes, ci, "#%d codec's flag is not matched", ci) + } +} + +type ( + ABool bool + AInt int + AInt8 int8 + AInt16 int16 + AInt32 int32 + AInt64 int64 + AUInt uint + AUInt8 uint8 + AUInt16 uint16 + AUInt32 uint32 + AUInt64 uint64 + AFloat32 float32 + AFloat64 float64 + AComplet64 complex64 + AComplet128 complex128 + AString string + AUintptr uintptr + AUnsafePointer unsafe.Pointer +) + +type ( + BBool bool + BInt int + BInt8 int8 + BInt16 int16 + BInt32 int32 + BInt64 int64 + BUInt uint + BUInt8 uint8 + BUInt16 uint16 + BUInt32 uint32 + BUInt64 uint64 + BFloat32 float32 + BFloat64 float64 + BComplet64 complex64 + BComplet128 complex128 + BString string + BUintptr uintptr + BUnsafePointer unsafe.Pointer +) + +func TestSimilarSimple(t *testing.T) { + tf.UnitTest(t) + alist := []interface{}{ + new(ABool), + new(AInt), + new(AInt8), + new(AInt16), + new(AInt32), + new(AInt64), + new(AUInt), + new(AUInt8), + new(AUInt16), + new(AUInt32), + new(AUInt64), + new(AFloat32), + new(AFloat64), + new(AComplet64), + new(AComplet128), + new(AString), + new(AUintptr), + new(AUnsafePointer), + } + + blist := []interface{}{ + new(BBool), + new(BInt), + new(BInt8), 
+ new(BInt16), + new(BInt32), + new(BInt64), + new(BUInt), + new(BUInt8), + new(BUInt16), + new(BUInt32), + new(BUInt64), + new(BFloat32), + new(BFloat64), + new(BComplet64), + new(BComplet128), + new(BString), + new(BUintptr), + new(BUnsafePointer), + } + + require.Equal(t, len(alist), len(blist), "values of newed types") + + for i := range alist { + aval, bval := alist[i], blist[i] + + yes, reason := Similar(aval, bval, 0, 0) + require.Truef(t, yes, "similar result for %T <-> %T: %s", aval, bval, reason) + + ratyp, rbtyp := reflect.TypeOf(aval), reflect.TypeOf(bval) + require.Truef(t, ratyp != rbtyp, "not same type: %s vs %s", ratyp, rbtyp) + + yes, reason = Similar(ratyp, rbtyp, 0, 0) + require.Truef(t, yes, "similar result for reflect.Type of %s <-> %s: %s", ratyp, rbtyp, reason) + + yes, reason = Similar(ratyp.Elem(), rbtyp.Elem(), 0, 0) + require.Truef(t, yes, "similar result for reflect.Type of %s <-> %s: %s", ratyp.Elem(), rbtyp.Elem(), reason) + } +} + +type similarCase struct { + val interface{} + codecFlag CodecFlag + smode SimilarMode + reasons []error +} + +func similarTest(t *testing.T, origin interface{}, cases []similarCase, checkIndirect bool) { + valOrigin := reflect.ValueOf(origin) + typOrigin := valOrigin.Type() + indirect := checkIndirect && reflect.Indirect(valOrigin).Type() != typOrigin + + for i := range cases { + expectedYes := len(cases[i].reasons) == 0 + + typCase := reflect.TypeOf(cases[i].val) + require.NotEqual(t, typOrigin, typCase, "types should be different") + require.Equal(t, typOrigin.Kind(), typCase.Kind(), "kind should not be different") + + yes, reason := Similar(typOrigin, typCase, cases[i].codecFlag, cases[i].smode) + + require.Equalf(t, expectedYes, yes, "#%d similar result for %s <> %s", i, typOrigin, typCase) + if expectedYes { + require.Nil(t, reason, "reason should be nil") + } else { + require.NotNil(t, reason, "reason should not be nil") + for ei := range cases[i].reasons { + ce := cases[i].reasons[ei] + 
require.Truef(t, errors.Is(reason, ce), "for case #%d %s <> %s, reason should contains base %s, actual: %s", i, typOrigin, typCase, ce, reason) + } + } + + if indirect { + require.Equal(t, typOrigin.Elem().Kind(), typCase.Elem().Kind(), "kind of indirect type should not be different") + + yes, reason = Similar(typOrigin.Elem(), typCase.Elem(), cases[i].codecFlag, cases[i].smode) + + require.Equalf(t, expectedYes, yes, "#%d similar result for %s <> %s", i, typOrigin.Elem(), typCase.Elem()) + if expectedYes { + require.Nil(t, reason, "reason should be nil") + } else { + require.NotNil(t, reason, "reason should not be nil") + for ei := range cases[i].reasons { + ce := cases[i].reasons[ei] + require.Truef(t, errors.Is(reason, ce), "for case #%d %s <> %s, reason should contains base %s, actual: %s", i, typOrigin.Elem(), typCase.Elem(), ce, reason) + } + } + } + } +} + +func TestArray(t *testing.T) { + tf.UnitTest(t) + type origin [2]int + type case1 [2]uint + type case2 [3]int + + type case3 [2]AInt + type case4 [3]AInt + + cases := []similarCase{ + { + val: new(case1), + reasons: []error{ReasonArrayElement}, + }, + { + val: new(case2), + reasons: []error{ReasonArrayLength}, + }, + { + val: new(case3), + }, + { + val: new(case4), + reasons: []error{ReasonArrayLength}, + }, + } + + similarTest(t, new(origin), cases, true) +} + +func TestMap(t *testing.T) { + tf.UnitTest(t) + type origin map[string]int + + type case1 map[int]int + type case2 map[string]string + + type case3 map[string]AInt + type case4 map[AString]AInt + + cases := []similarCase{ + { + val: new(case1), + reasons: []error{ReasonMapKey}, + }, + { + val: new(case2), + reasons: []error{ReasonMapValue}, + }, + { + val: new(case3), + }, + { + val: new(case4), + }, + } + + similarTest(t, new(origin), cases, true) +} + +func TestSlice(t *testing.T) { + tf.UnitTest(t) + type origin []int + + type case1 []uint + type case2 []string + + type case3 []AInt + + cases := []similarCase{ + { + val: new(case1), + reasons: 
[]error{ReasonSliceElement}, + }, + { + val: new(case2), + reasons: []error{ReasonSliceElement}, + }, + { + val: new(case3), + }, + } + + similarTest(t, new(origin), cases, true) +} + +func TestChan(t *testing.T) { + tf.UnitTest(t) + type origin chan int + + type case1 chan uint + type case2 <-chan int + type case3 chan<- int + + type case4 chan AInt + + cases := []similarCase{ + { + val: new(case1), + reasons: []error{ReasonChanElement}, + }, + { + val: new(case2), + reasons: []error{ReasonChanDir}, + }, + { + val: new(case3), + reasons: []error{ReasonChanDir}, + }, + { + val: new(case4), + }, + } + + similarTest(t, new(origin), cases, true) +} + +func TestStruct(t *testing.T) { + tf.UnitTest(t) + type origin struct { + A uint + B int + } + + type case1 struct { + C uint + B int + } + + type case2 struct { + B int + A uint + } + + type case3 struct { + B AInt + A AUInt + } + + type case4 struct { + A AUInt + B AInt + a bool // nolint + } + + type case5 struct { + A AUInt + b AInt // nolint + } + + type case6 struct { + A uint + B uint + } + + type case7 struct { + A AUInt `json:"a"` + B AInt + } + + cases := []similarCase{ + { + val: new(case1), + smode: StructFieldsOrdered, + reasons: []error{ReasonStructField, ReasonExportedFieldName}, + }, + { + val: new(case1), + smode: 0, + reasons: []error{ReasonStructField, ReasonExportedFieldNotFound}, + }, + { + val: new(case2), + smode: StructFieldsOrdered, + reasons: []error{ReasonStructField, ReasonExportedFieldName}, + }, + { + val: new(case2), + smode: 0, + }, + { + val: new(case3), + smode: StructFieldsOrdered, + reasons: []error{ReasonStructField, ReasonExportedFieldName}, + }, + { + val: new(case3), + smode: 0, + }, + { + val: new(case4), + smode: 0, + }, + { + val: new(case4), + smode: StructFieldsOrdered, + }, + { + val: new(case5), + smode: 0, + reasons: []error{ReasonStructField, ReasonExportedFieldsCount}, + }, + { + val: new(case5), + smode: StructFieldsOrdered, + reasons: []error{ReasonStructField, 
ReasonExportedFieldsCount}, + }, + { + val: new(case6), + smode: 0, + reasons: []error{ReasonStructField, ReasonExportedFieldType}, + }, + { + val: new(case6), + smode: StructFieldsOrdered, + reasons: []error{ReasonStructField, ReasonExportedFieldType}, + }, + { + val: new(case7), + smode: StructFieldTagsMatch, + reasons: []error{ReasonStructField, ReasonExportedFieldTag}, + }, + { + val: new(case7), + smode: StructFieldsOrdered | StructFieldTagsMatch, + reasons: []error{ReasonStructField, ReasonExportedFieldTag}, + }, + } + + similarTest(t, new(origin), cases, true) +} + +func TestInterface(t *testing.T) { + tf.UnitTest(t) + type origin interface { + Read(context.Context) (int, error) + Write(context.Context, []byte) (int, error) + Close(context.Context) error + } + + type case1 interface { + Write(context.Context, []byte) (int, error) + Read(context.Context) (int, error) + Close(context.Context) error + } + + type case2 interface { + Read1(context.Context) (int, error) + Write(context.Context, []byte) (int, error) + Close(context.Context) error + } + + type case3 interface { + Read(context.Context, []byte) (int, error) + Write(context.Context, []byte) (int, error) + Close(context.Context) error + } + + type case4 interface { + Read(context.Context) error + Write(context.Context, []byte) (int, error) + Close(context.Context) error + } + + type Bytes []byte + type case5 interface { + Read(context.Context) (AInt, error) + Write(context.Context, Bytes) (AInt, error) + Close(context.Context) error + } + + type case6 interface { + Read(context.Context) (AInt, error) + Write(context.Context, Bytes) (AInt, error) + } + + type case7 interface { + Read(context.Context) (AUInt, error) + Write(context.Context, Bytes) (AInt, error) + Close(context.Context) error + } + + type case8 interface { + Read(context.Context) (AInt, error) + Write(context.Context, string) (AInt, error) + Close(context.Context) error + } + + type case9 interface { + Read(context.Context) (AInt, error) + 
Write(context.Context, Bytes) (AInt, error) + Close(context.Context) error + read(context.Context) + } + + cases := []similarCase{ + { + val: new(case1), + }, + { + val: new(case2), + reasons: []error{ReasonExportedMethodName}, + }, + { + val: new(case3), + reasons: []error{ReasonExportedMethodType, ReasonFuncInNum}, + }, + { + val: new(case4), + reasons: []error{ReasonExportedMethodType, ReasonFuncOutNum}, + }, + { + val: new(case5), + }, + { + val: new(case6), + reasons: []error{ReasonExportedMethodsCount}, + }, + { + val: new(case7), + reasons: []error{ReasonExportedMethodType, ReasonFuncOutType}, + }, + { + val: new(case8), + reasons: []error{ReasonExportedMethodType, ReasonFuncInType}, + }, + { + val: new(case9), + }, + { + val: new(case9), + smode: InterfaceAllMethods, + reasons: []error{ReasonExportedMethodsCount}, + }, + } + + similarTest(t, new(origin), cases, true) +} + +type codecInt int + +func (ci codecInt) MarshalBinary() ([]byte, error) { // nolint + panic("not impl") +} + +func (ci *codecInt) UnmarshalBinary([]byte) error { // nolint + panic("not impl") +} + +func (ci codecInt) MarshalText() ([]byte, error) { // nolint + panic("not impl") +} + +func (ci *codecInt) UnmarshalText([]byte) error { // nolint + panic("not impl") +} + +func (ci codecInt) MarshalJSON() ([]byte, error) { // nolint + panic("not impl") +} + +func (ci *codecInt) UnmarshalJSON([]byte) error { // nolint + panic("not impl") +} + +func (ci codecInt) MarshalCBOR(w io.Writer) error { // nolint + panic("not impl") +} + +func (ci *codecInt) UnmarshalCBOR(r io.Reader) error { // nolint + panic("not impl") +} + +type halfCodecInt int + +func (ci halfCodecInt) MarshalBinary() ([]byte, error) { // nolint + panic("not impl") +} + +func (ci halfCodecInt) MarshalText() ([]byte, error) { // nolint + panic("not impl") +} + +func (ci halfCodecInt) MarshalJSON() ([]byte, error) { // nolint + panic("not impl") +} + +func (ci halfCodecInt) MarshalCBOR(w io.Writer) error { // nolint + panic("not 
impl") +} + +func TestCodec(t *testing.T) { + tf.UnitTest(t) + cases := []similarCase{ + { + val: new(AInt), + codecFlag: 0, + }, + { + val: new(AInt), + codecFlag: CodecBinary, + reasons: []error{ReasonCodecMarshalerImplementations}, + }, + { + val: new(AInt), + codecFlag: CodecText, + reasons: []error{ReasonCodecMarshalerImplementations}, + }, + { + val: new(AInt), + codecFlag: CodecJSON, + reasons: []error{ReasonCodecMarshalerImplementations}, + }, + { + val: new(AInt), + codecFlag: CodecCbor, + reasons: []error{ReasonCodecMarshalerImplementations}, + }, + { + val: new(codecInt), + codecFlag: 0, + }, + { + val: new(codecInt), + codecFlag: CodecBinary, + reasons: []error{ReasonCodecUnmarshalerImplementations}, + }, + { + val: new(codecInt), + codecFlag: CodecText, + reasons: []error{ReasonCodecUnmarshalerImplementations}, + }, + { + val: new(codecInt), + codecFlag: CodecJSON, + reasons: []error{ReasonCodecUnmarshalerImplementations}, + }, + { + val: new(codecInt), + codecFlag: CodecCbor, + reasons: []error{ReasonCodecUnmarshalerImplementations}, + }, + } + + similarTest(t, new(halfCodecInt), cases, false) +} + +func TestConvertible(t *testing.T) { + tf.UnitTest(t) + type origin struct { + A uint + B int + } + + type another origin + + yes, reason := Similar(new(origin), new(another), 0, 0) + require.Truef(t, yes, "convertible types, got reason: %s", reason) + + type ra = io.ReadCloser + type rb = io.Reader + rta := reflect.TypeOf(new(ra)).Elem() + rtb := reflect.TypeOf(new(rb)).Elem() + require.True(t, rta.ConvertibleTo(rtb)) + + yes, reason = Similar(rta, rtb, 0, 0) + require.False(t, yes, "convertible interface may not be similar") + require.True(t, errors.Is(reason, ReasonExportedMethodsCount)) +} + +func TestRecursive(t *testing.T) { + tf.UnitTest(t) + type origin struct { + A uint + B int + Sub []origin + } + + type case1 struct { + A uint + B int + Sub []case1 + } + + type case2 struct { + A uint + B int + Sub []origin + } + + cases := []similarCase{ + { + 
val: new(case1), + smode: 0, + }, + { + val: new(case1), + smode: AvoidRecursive, + reasons: []error{ReasonRecursiveCompare}, + }, + { + val: new(case2), + smode: 0, + }, + { + val: new(case2), + smode: AvoidRecursive, + }, + } + + similarTest(t, new(origin), cases, false) +} diff --git a/venus-shared/utils/method_map.go b/venus-shared/utils/method_map.go new file mode 100644 index 0000000000..51993454a5 --- /dev/null +++ b/venus-shared/utils/method_map.go @@ -0,0 +1,113 @@ +package utils + +import ( + "reflect" + + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" + exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" + exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" + exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" + exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" + exported6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/exported" + exported7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/exported" + _actors "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/ipfs/go-cid" +) + +type MethodMeta struct { + Name string + + Params reflect.Type + Ret reflect.Type +} + +// In the v8 version, different networks will have different actors(venus-shared/builtin-actors/builtin_actors_gen.go). +// Pay attention to the network type when using. +// By default, the actors of the mainnet are loaded. 
+var MethodsMap = map[cid.Cid]map[abi.MethodNum]MethodMeta{} + +type actorsWithVersion struct { + av actorstypes.Version + actors []builtin.RegistryEntry +} + +func init() { + loadMethodsMap() +} + +func ReloadMethodsMap() { + MethodsMap = make(map[cid.Cid]map[abi.MethodNum]MethodMeta) + loadMethodsMap() +} + +func loadMethodsMap() { + // TODO: combine with the runtime actor registry. + var actors []actorsWithVersion + + actors = append(actors, actorsWithVersion{av: actorstypes.Version0, actors: builtin.MakeRegistryLegacy(exported0.BuiltinActors())}) + actors = append(actors, actorsWithVersion{av: actorstypes.Version2, actors: builtin.MakeRegistryLegacy(exported2.BuiltinActors())}) + actors = append(actors, actorsWithVersion{av: actorstypes.Version3, actors: builtin.MakeRegistryLegacy(exported3.BuiltinActors())}) + actors = append(actors, actorsWithVersion{av: actorstypes.Version4, actors: builtin.MakeRegistryLegacy(exported4.BuiltinActors())}) + actors = append(actors, actorsWithVersion{av: actorstypes.Version5, actors: builtin.MakeRegistryLegacy(exported5.BuiltinActors())}) + actors = append(actors, actorsWithVersion{av: actorstypes.Version6, actors: builtin.MakeRegistryLegacy(exported6.BuiltinActors())}) + actors = append(actors, actorsWithVersion{av: actorstypes.Version7, actors: builtin.MakeRegistryLegacy(exported7.BuiltinActors())}) + actors = append(actors, actorsWithVersion{av: actorstypes.Version8, actors: builtin.MakeRegistry(actorstypes.Version8)}) + actors = append(actors, actorsWithVersion{av: actorstypes.Version9, actors: builtin.MakeRegistry(actorstypes.Version9)}) + + for _, awv := range actors { + for _, actor := range awv.actors { + // necessary to make stuff work + ac := actor.Code() + var realCode cid.Cid + if awv.av >= actorstypes.Version8 { + name := _actors.CanonicalName(builtin.ActorNameByCode(ac)) + + realCode, _ = _actors.GetActorCodeID(awv.av, name) + } + + exports := actor.Exports() + methods := make(map[abi.MethodNum]MethodMeta, 
len(exports)) + + // Explicitly add send, it's special. + methods[builtin.MethodSend] = MethodMeta{ + Name: "Send", + Params: reflect.TypeOf(new(abi.EmptyValue)), + Ret: reflect.TypeOf(new(abi.EmptyValue)), + } + + // Iterate over exported methods. Some of these _may_ be nil and + // must be skipped. + for number, export := range exports { + if export.Method == nil { + continue + } + + ev := reflect.ValueOf(export.Method) + et := ev.Type() + + methodMeta := MethodMeta{ + Name: export.Name, + Ret: et.Out(0), + } + + if awv.av <= actorstypes.Version7 { + // methods exported from specs-actors have the runtime as the first param, so we want et.In(1) + methodMeta.Params = et.In(1) + } else { + // methods exported from go-state-types do not, so we want et.In(0) + methodMeta.Params = et.In(0) + } + + methods[abi.MethodNum(number)] = methodMeta + } + + MethodsMap[actor.Code()] = methods + if realCode.Defined() { + MethodsMap[realCode] = methods + } + } + } +} diff --git a/venus-shared/utils/method_map_test.go b/venus-shared/utils/method_map_test.go new file mode 100644 index 0000000000..68f2c602de --- /dev/null +++ b/venus-shared/utils/method_map_test.go @@ -0,0 +1,47 @@ +package utils + +import ( + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/stretchr/testify/assert" +) + +func TestMethodMap(t *testing.T) { + tf.UnitTest(t) + + t.Run("Default to load mainnet v8 actors", func(t *testing.T) { + for _, actorsMetadata := range actors.EmbeddedBuiltinActorsMetadata { + if actorsMetadata.Network == string(types.NetworkNameMain) { + for _, actor := range actorsMetadata.Actors { + _, ok := MethodsMap[actor] + assert.True(t, ok) + } + } + } + }) + + t.Run("ReLoad butterflynet v8 actors", func(t *testing.T) { + for _, actorsMetadata := range actors.EmbeddedBuiltinActorsMetadata { + if actorsMetadata.Network == 
string(types.NetworkNameButterfly) { + for _, actor := range actorsMetadata.Actors { + _, ok := MethodsMap[actor] + assert.False(t, ok) + } + } + } + + assert.Nil(t, actors.SetNetworkBundle(int(types.NetworkButterfly))) + ReloadMethodsMap() + for _, actorsMetadata := range actors.EmbeddedBuiltinActorsMetadata { + if actorsMetadata.Network == string(types.NetworkNameButterfly) { + for _, actor := range actorsMetadata.Actors { + _, ok := MethodsMap[actor] + assert.True(t, ok) + } + } + } + }) +} diff --git a/venus-shared/utils/msg_parser/parser.go b/venus-shared/utils/msg_parser/parser.go new file mode 100644 index 0000000000..07857d387c --- /dev/null +++ b/venus-shared/utils/msg_parser/parser.go @@ -0,0 +1,70 @@ +package msgparser + +import ( + "bytes" + "context" + "fmt" + "reflect" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/venus/venus-shared/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/filecoin-project/venus/venus-shared/utils" + "github.com/ipfs/go-cid" +) + +type ActorGetter interface { + StateGetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) +} + +type MessagePaser struct { + getter ActorGetter + actors map[cid.Cid]map[abi.MethodNum]utils.MethodMeta +} + +func NewMessageParser(getter ActorGetter) (*MessagePaser, error) { + return &MessagePaser{getter: getter, actors: utils.MethodsMap}, nil +} + +func (parser *MessagePaser) GetMethodMeta(code cid.Cid, m abi.MethodNum) (utils.MethodMeta, bool) { + meta, ok := parser.actors[code][m] + return meta, ok +} + +func (parser *MessagePaser) ParseMessage(ctx context.Context, msg *types.Message, receipt *types.MessageReceipt) (args interface{}, ret interface{}, err error) { + if int(msg.Method) == int(builtin.MethodSend) { + return nil, nil, nil + } + + actor, 
err := parser.getter.StateGetActor(ctx, msg.To, types.EmptyTSK) + if err != nil { + return nil, nil, fmt.Errorf("get actor(%s) failed:%w", msg.To.String(), err) + } + + methodMeta, found := parser.GetMethodMeta(actor.Code, msg.Method) + if !found { + return nil, nil, fmt.Errorf("actor:%v method(%d) not exist", actor, msg.Method) + } + + in := reflect.New(methodMeta.Params.Elem()).Interface() + if unmarshaler, isok := in.(cbor.Unmarshaler); isok { + if err = unmarshaler.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, nil, fmt.Errorf("unmarshalerCBOR msg params failed:%w", err) + } + } + + var out interface{} + if receipt != nil && receipt.ExitCode == exitcode.Ok { + out = reflect.New(methodMeta.Ret.Elem()).Interface() + if unmarshaler, isok := out.(cbor.Unmarshaler); isok { + if err = unmarshaler.UnmarshalCBOR(bytes.NewReader(receipt.Return)); err != nil { + return nil, nil, fmt.Errorf("unmarshalerCBOR msg returns failed:%w", err) + } + } + } + + return in, out, nil +} diff --git a/venus-shared/utils/msg_parser/parser_test.go b/venus-shared/utils/msg_parser/parser_test.go new file mode 100644 index 0000000000..54fa6491f7 --- /dev/null +++ b/venus-shared/utils/msg_parser/parser_test.go @@ -0,0 +1,236 @@ +package msgparser + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "reflect" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + + "github.com/filecoin-project/go-address" + cbor "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/stretchr/testify/require" +) + +func init() { + address.CurrentNetwork = address.Mainnet +} + +type mockActorGetter struct { + actors map[string]*types.Actor +} + +func (x *mockActorGetter) StateGetActor(ctx context.Context, addr address.Address, _ types.TipSetKey) 
(*types.Actor, error) { + actor, isok := x.actors[addr.String()] + if !isok { + return nil, fmt.Errorf("address:%s not found", addr.String()) + } + return actor, nil +} + +func newMockActorGetter(t *testing.T) *mockActorGetter { + return &mockActorGetter{ + actors: map[string]*types.Actor{ + "f01": {Code: builtin.InitActorCodeID}, + "f02": {Code: builtin.RewardActorCodeID}, + "f03": {Code: builtin.CronActorCodeID}, + "f04": {Code: builtin.StoragePowerActorCodeID}, + "f05": {Code: builtin.StorageMarketActorCodeID}, + "f06": {Code: builtin.VerifiedRegistryActorCodeID}, + "f2nbgz5oxgdqjkuvkbtdzwiepisiycnpjyibcvmpy": {Code: builtin.MultisigActorCodeID}, + "f14mj3nt7tlgyditvk6h5p6i36vqebwgfqndjeq6a": {Code: builtin.AccountActorCodeID}, + "f2ttj54ce3xziwij7qnyzbadu3bhxjewriol66tai": {Code: builtin.MultisigActorCodeID}, + "f028027": {Code: builtin.MultisigActorCodeID}, + "f01481491": {Code: builtin.StorageMinerActorCodeID}, + "f066563": {Code: builtin.StorageMinerActorCodeID}, + "f0748179": {Code: builtin.StorageMinerActorCodeID}, + "f0116287": {Code: builtin.StorageMinerActorCodeID}, + "f033036": {Code: builtin.StorageMinerActorCodeID}, + "f0807472": {Code: builtin.StorageMinerActorCodeID}, + "f0392813": {Code: builtin.StorageMinerActorCodeID}, + "f0441116": {Code: builtin.StorageMinerActorCodeID}, + "f01111881": {Code: builtin.StorageMinerActorCodeID}, + "f01387570": {Code: builtin.StorageMinerActorCodeID}, + "f0717289": {Code: builtin.StorageMinerActorCodeID}, + "f01476109": {Code: builtin.StorageMinerActorCodeID}, + "f01451690": {Code: builtin.StorageMinerActorCodeID}, + "f01203636": {Code: builtin.StorageMinerActorCodeID}, + "f01171513": {Code: builtin.StorageMinerActorCodeID}, + "f054464": {Code: builtin.StorageMinerActorCodeID}, + "f0106363": {Code: builtin.StorageMinerActorCodeID}, + "f01190965": {Code: builtin.StorageMinerActorCodeID}, + "f02388": {Code: builtin.StorageMinerActorCodeID}, + "f01203143": {Code: builtin.StorageMinerActorCodeID}, + "f01211558": 
{Code: builtin.StorageMinerActorCodeID}, + "f01483143": {Code: builtin.StorageMinerActorCodeID}, + "f027083": {Code: builtin.MultisigActorCodeID}, + }, + } +} + +type testCase map[string]interface{} + +func (c testCase) name(t *testing.T) string { + return c.stringField(t, "name") +} + +func (c testCase) stringField(t *testing.T, fieldName string) string { + fs, isok := c[fieldName] + if !isok || fs == nil { + return "" + } + str, isok := fs.(string) + if !isok { + t.Fatalf("'%s' field must be base64 string", fieldName) + } + return str +} + +func (c testCase) base64Field(t *testing.T, fieldName string) []byte { + bytes, err := base64.StdEncoding.DecodeString(c.stringField(t, fieldName)) + if err != nil { + t.Fatalf("decode field(%s) base64 to bytes failed:%v", fieldName, err) + } + return bytes +} + +func (c testCase) addressFiled(t *testing.T, fieldName string) address.Address { + addStr := c.stringField(t, fieldName) + addr, err := address.NewFromString(addStr) + if err != nil { + t.Fatalf("decode string(%s) to address failed:%v", addStr, err) + } + return addr +} + +func (c testCase) numField(t *testing.T, fieldName string) int64 { + num, isok := c[fieldName] + if !isok { + t.Fatalf("can't find field:%s", fieldName) + } + + n, isok := num.(float64) + if !isok { + t.Fatalf("field(%s) is not number", fieldName) + } + return int64(n) +} + +func (c testCase) msgid() string { + if msgCid, isok := c["cid"]; isok { + if s, isok := msgCid.(string); isok { + return s + } + } + return "not exist" +} + +func (c testCase) receipt(t *testing.T) *types.MessageReceipt { + // doesn't care about other filed + return &types.MessageReceipt{ + Return: c.base64Field(t, "return"), + } +} + +func (c testCase) message(t *testing.T) *types.Message { + return &types.Message{ + Version: 0, + To: c.addressFiled(t, "to"), + Method: abi.MethodNum(c.numField(t, "method")), + Params: c.base64Field(t, "params"), + } +} + +func (c testCase) wantArgs(t *testing.T, p reflect.Type) interface{} { + 
if p == nil { + return nil + } + if p.Kind() == reflect.Ptr { + p = p.Elem() + } + i := reflect.New(p).Interface() + data := c.base64Field(t, "params") + if len(data) == 0 { + data = []byte("{}") + } + if err := cbor.ReadCborRPC(bytes.NewReader(data), i); err != nil { + require.NoError(t, err) + } + return i +} + +func (c testCase) wantRet(t *testing.T, p reflect.Type) interface{} { + if p == nil { + return nil + } + if p.Kind() == reflect.Ptr { + p = p.Elem() + } + i := reflect.New(p).Interface() + data := c.base64Field(t, "return") + if len(data) == 0 { + data = []byte("{}") + } + if err := cbor.ReadCborRPC(bytes.NewReader(data), i); err != nil { + require.NoError(t, err) + } + return i +} + +func (c testCase) wantErr() bool { + isErr, isok := c["is_err"] + if isok { + if b, isok := isErr.(bool); isok { + return b + } + } + return false +} + +func TestMessagePaser_ParseMessage(t *testing.T) { + tf.UnitTest(t) + var tests []testCase + file := "./test_cases_parsing_message.json" + data, err := os.ReadFile(file) + require.NoErrorf(t, err, "read file:%s failed:%v", file, err) + require.NoErrorf(t, json.Unmarshal(data, &tests), "unmarshal data to test cases failed") + + ms, err := NewMessageParser(newMockActorGetter(t)) + require.NoError(t, err) + + ctx := context.TODO() + + for _, tt := range tests { + t.Run(tt.name(t), func(t *testing.T) { + msgID := tt.msgid() + msg := tt.message(t) + gotArgs, gotRets, err := ms.ParseMessage(ctx, msg, tt.receipt(t)) + if (err != nil) != tt.wantErr() { + t.Errorf("ParseMessage(%s) error = %v, wantErr %v\n%#v", + tt.msgid(), err, tt.wantErr(), msg) + return + } + + if tt.wantErr() { + return + } + wantArgs := tt.wantArgs(t, reflect.TypeOf(gotArgs)) + if !reflect.DeepEqual(gotArgs, wantArgs) { + t.Errorf("ParseMessage(%v) gotArgs = %v, want %v", msgID, gotArgs, wantArgs) + } + + wantRets := tt.wantRet(t, reflect.TypeOf(gotRets)) + if !reflect.DeepEqual(gotRets, wantRets) { + t.Errorf("ParseMessage(%s) gotRet = %v, want %v", msgID, 
gotRets, wantRets) + } + }) + } +} diff --git a/venus-shared/utils/msg_parser/test_cases_parsing_message.json b/venus-shared/utils/msg_parser/test_cases_parsing_message.json new file mode 100644 index 0000000000..c50fa497ae --- /dev/null +++ b/venus-shared/utils/msg_parser/test_cases_parsing_message.json @@ -0,0 +1,315 @@ +[ + { + "cid": "bafy2bzacebqzmce7ryytifmionufwip6ycxekmq7riummnkjckpicxy2tl542", + "signed_cid": "bafy2bzacedhsiclwpdklszjml7lu77hastm65is26pvvvvtvzgytm3evpf64u", + "from": "f1glayo7aosipwxxtj45tfirznjyc6nyl43un67ra", + "to": "f14mj3nt7tlgyditvk6h5p6i36vqebwgfqndjeq6a", + "name": "not-exist-method", + "method": 4, + "params": "", + "args": null, + "return": "", + "returns": null, + "is_err": true, + "epoch": 1313602 + }, + { + "cid": "bafy2bzaceda46lersgjmxyt5ohh747b63zrktvjxkqz2gae3bxkpeuirncjug", + "signed_cid": "bafy2bzaceda46lersgjmxyt5ohh747b63zrktvjxkqz2gae3bxkpeuirncjug", + "from": "f3ut3mcouq52ykfsy3fu234q53phhp3lqiualrgwrx7fjlwyq4jvgykeu6w2zswz6zr2ttkdaglevqnxot6wiq", + "to": "f05", + "name": "Addbalance", + "method": 2, + "params": "RADGmkk=", + "args": "\"f01199430\"", + "return": "", + "returns": "{}", + "epoch": 1313606 + }, + { + "cid": "bafy2bzacebc3tn24otl7kwrtclkewmz7n52wug3gtrp5y2c4pkzw2jbsr7m34", + "signed_cid": "bafy2bzacechrzptbboadfr7nzp5hhfouqk23jt3lx6e6h64gkjxbo2ytclru4", + "from": "f1lwpw2bcv66pla3lpkcuzquw37pbx7ur4m6zvq2a", + "to": "f06", + "name": "AddVerifiedClient", + "method": 4, + "params": "glUBoRE4eZ/mwa2G6Wlkl/9g13eABO5HAA8AAAAAAA==", + "args": "{\"Address\": \"f1ueitq6m743a23bxjnfsjp73a253yabho2sxhjgi\", \"Allowance\": \"16492674416640\"}", + "return": "", + "returns": "{}", + "epoch": 1314705 + }, + { + "cid": "bafy2bzacedvmkbmagfxqa6zsqq7vrbdejbuhvlkmfsfwq7imd7mmwqwm5vwda", + "signed_cid": "bafy2bzaceakrl6vimfhib7el2famgmdgmtkhiswzt4lmxne5bc43nv65ku6p4", + "from": "f12y455jshs7dbgjlufsw4fomwmjgvsbonwau247a", + "to": "f2nbgz5oxgdqjkuvkbtdzwiepisiycnpjyibcvmpy", + "name": "Approve", + "method": 3, + "params": 
"ggBA", + "args": "{\"ID\": 0, \"ProposalHash\": null}", + "return": "g/UAQA==", + "returns": "{\"Ret\": null, \"Code\": 0, \"Applied\": true}", + "epoch": 1316704 + }, + { + "cid": "bafy2bzacedqpayyhuh3257ddy5pjy6dyl2wzyxjeqo4mhhgcqp3pzrfm6ptum", + "signed_cid": "bafy2bzacedqpayyhuh3257ddy5pjy6dyl2wzyxjeqo4mhhgcqp3pzrfm6ptum", + "from": "f3qhqiugkhpue5xm2wtqug6mhe2fovxnhwejbgj3ztk3as6qncgzfmx5u3bzbpezg564z7eswbr5kd6uzwxzkq", + "to": "f01483143", + "name": "ChangeMultiaddrs", + "method": 18, + "params": "gYFIBGcBQX0GQVU=", + "args": "{\"NewMultiaddrs\": [\"BGcBQX0GQVU=\"]}", + "return": "", + "returns": "{}", + "epoch": 1313964 + }, + { + "cid": "bafy2bzacea7d3pidjkrsfqyptjurhfzd3pzn5xtg6jwiediklkheeih5s5ai2", + "signed_cid": "bafy2bzacea7d3pidjkrsfqyptjurhfzd3pzn5xtg6jwiediklkheeih5s5ai2", + "from": "f3w3zmr3zsau6qdiwbnczh5igi22h2mozbhxeagb56xrnn6u5uswlf73nlzrmvoevfyxkr3kir7dsse6s7jgiq", + "to": "f0717289", + "name": "ChangeOwnerAddress", + "method": 23, + "params": "RACC2Fo=", + "args": "\"f01485826\"", + "return": "", + "returns": "{}", + "epoch": 1314750 + }, + { + "cid": "bafy2bzacecadvykxtaoijrej6vfnpyudy4cqem3fu74b5maifgomzuj2s4beu", + "signed_cid": "bafy2bzacecadvykxtaoijrej6vfnpyudy4cqem3fu74b5maifgomzuj2s4beu", + "from": "f3xgoyz4oabry3gliwcrf6hgk4cvolh4fg7xk3qrkdvhjrv44pfg7cn2psvimxjw5ctak74glqcpfiac4rqxca", + "to": "f01476109", + "name": "ChangePeerID", + "method": 4, + "params": "gVgmACQIARIgGNgUU+yP3FsJZtZ4zaYj+jG9TMUUTIajzwRkbnvZRTI=", + "args": "{\"NewID\": \"ACQIARIgGNgUU+yP3FsJZtZ4zaYj+jG9TMUUTIajzwRkbnvZRTI=\"}", + "return": "", + "returns": "{}", + "epoch": 1314989 + }, + { + "cid": "bafy2bzaceddzm4rziqwgtjdjskgjnpocx6smte2tkej7yxwqowhbm2cx7a2ii", + "signed_cid": "bafy2bzaceddzm4rziqwgtjdjskgjnpocx6smte2tkej7yxwqowhbm2cx7a2ii", + "from": "f3qnwtsxmnsqo4gmm5yuytp6ql6663jdapo5f5myi5w43wosz4uioxwu63gwuv22iotopuoa4gi67lse3wekkq", + "to": "f01451690", + "name": "ChangeWorkerAddress", + "method": 3, + "params": 
"gkQAj81YgVgxA7Uk3PUa0+KjyP3+jltHRSZCLM7zulT2aV/I7xFP1C5+1C51Ldwe0r4n5DTEOdN/CA==", + "args": "{\"NewWorker\": \"f01451663\", \"NewControlAddrs\": [\"f3wusnz5i22prkhsh572hfwr2fezbcztxtxjkpm2k7zdxrct6ufz7niltvfxob5uv6e7sdjrbz2n7qrx2v2v6q\"]}", + "return": "", + "returns": "{}", + "epoch": 1313746 + }, + { + "cid": "bafy2bzaced6xealtmbg4jmtahtd3nkpbqyyz4iygnyxgczhsjkoryynb2u5uy", + "signed_cid": "bafy2bzaced6xealtmbg4jmtahtd3nkpbqyyz4iygnyxgczhsjkoryynb2u5uy", + "from": "f3uxh43lxmamekixu25n5zpmvdwi3j4ylui7apgwfi6xnqhyas5xxt7dgnx3iiktoz2it6ba7dbe4qavm65n6a", + "to": "f0807472", + "name": "CompactPartitions", + "method": 19, + "params": "ggBBDA==", + "args": "{\"Deadline\": 0, \"Partitions\": [0, 1]}", + "return": "", + "returns": "{}", + "epoch": 1314437 + }, + { + "cid": "bafy2bzacecm6y4dpjrxauoyvptmqkvdrignjmlkb5wulu3uaamjna7gqhdmb2", + "signed_cid": "bafy2bzacecm6y4dpjrxauoyvptmqkvdrignjmlkb5wulu3uaamjna7gqhdmb2", + "from": "f3ul23z6ur3qkucslyue4h3u7jbbl3q2yx7umxxvc72ljoekqej4p7vcwq6nqlwmj2xkwcwbuxc2ab4vsrzsqa", + "to": "f01111881", + "name": "CompactSectorNumbers", + "method": 20, + "params": "gUQkufJe", + "args": "{\"MaskSectorNumbers\": [0, 6146761]}", + "return": "", + "returns": "{}", + "epoch": 1316464 + }, + { + "cid": "bafy2bzacedfi335si2jgli53piswegld54fp5vqfbxe3jzxzwfejlj4jedttm", + "signed_cid": "bafy2bzacedfi335si2jgli53piswegld54fp5vqfbxe3jzxzwfejlj4jedttm", + "from": "f3sk6jeifp7mbt2qfz7zrni4kg6i4ovsochfst4vheo27ul523tdhqd7ue7qdal5ynh5zkbq4hbj4vsnnp2quq", + "to": "f01203636", + "name": "ConfirmUpdateWorkerKey", + "method": 21, + "params": "", + "args": "{}", + "return": "", + "returns": "{}", + "epoch": 1316924 + }, + { + "cid": "bafy2bzacealvmlg3nxhfz7ok4e3mlm6zexqvv2qrddzuzudvthovsyfo5busq", + "signed_cid": "bafy2bzacealvmlg3nxhfz7ok4e3mlm6zexqvv2qrddzuzudvthovsyfo5busq", + "from": "f3uncvkf4vv4avcfaihoeuzbuj6vjdfgkssuncdyfetdtery3u4c6qpgpjdgybyljumacelajooposbd73stla", + "to": "f04", + "name": "CreateMiner", + "method": 2, + "params": 
"hVgxA6NFVReVrwFRFAg7iUyGifVSMplSlRoh4KSY5kjjdOC9B5npGbAcLTRgBEWBLnPdIFgxA6aJRV5d2e/NM+ykmgCe1qHSeik+msI+rKvdajxWAenJhChKsNqNj5Y+iLffq6KxxQhYJgAkCAESIDZkasYqXe0CO+tFoxVz0P3Dq5YuUiuX83FtVzkx4GG6gA==", + "args": "{\"Peer\": \"ACQIARIgNmRqxipd7QI760WjFXPQ/cOrli5SK5fzcW1XOTHgYbo=\", \"Owner\": \"f3uncvkf4vv4avcfaihoeuzbuj6vjdfgkssuncdyfetdtery3u4c6qpgpjdgybyljumacelajooposbd73stla\", \"Worker\": \"f3u2eukxs53hx42m7musnabhwwuhjhukj6tlbd5lfl3vvdyvqb5heyikckwdni3d4wh2elpx5luky4kvcr6s3a\", \"Multiaddrs\": null, \"WindowPoStProofType\": 8}", + "return": "gkQAg9paVQLelLCKfSrqwt2ahbwfKix9KU1F3A==", + "returns": "{\"IDAddress\": \"f01486083\", \"RobustAddress\": \"f232klbct5flvmfxm2qw6b6krmpuuu2ro4ailbyoq\"}", + "epoch": 1314911 + }, + { + "cid": "bafy2bzacecyso7rjqep5uo3zcqv6u2m5p54p25c3kobedxrcugvo6hzh54ohc", + "signed_cid": "bafy2bzacecyso7rjqep5uo3zcqv6u2m5p54p25c3kobedxrcugvo6hzh54ohc", + "from": "f3xglcd3qois2jui5nri2fd3bgx2befjt4yxnxg6zy27pxebt5cckft4znejth36ehppz47gq5oaetb3qtk2yq", + "to": "f0106363", + "name": "DeclareFaultsRecovered", + "method": 11, + "params": 
"gYODGBwAWQJX4LZ/IPQTeUphQCJKpXo9E81Wx/EqiqSILUWpVDorHeXKlU5npVUpSU6n6VSkHbOiSIrnKJKirDidkqLsKLWbilhqxUIBSwE/IKECCh1xHFAwURBTzHAkJuMCgchGpZaQdrkSiEqzeO2qolW7tUpNkiq3nKVOq6JYmqRZirflaNJKp1VxJEVuKlanpNidfmllyalIFcVqpsOxJF2RVhTFkSqKpsiKorcVz1EkTelIiqLsOZoivaIokqM4ilNZURRRkXq+5DmaZEmiIkmiYjqaU5I0XfEkraJYFWWnqEgVR5LWFUWRlI5iVRTP0pQVTREVUbG7FUVRVhRF0SRN0iRpybKkLclRHEfSpJUVRVFERYJFURQgKhWrckdTlJ2i5EVCURRFVJSKs6J07rSUiqlYlZqkrSitqlhxlEptRVGkZNxRREVrKUqlIjnKjiIpirKiLEmKqCi9FVERW4qsKK1mR5E6ilnSFNH7TkkRm4qoKooiKUolEY5W6TmKoqwoiiIppYrkaJWWo/QuVb60rDglSdIq70hryo6jrPQ0yVEcRZE6yqKiKYoiOZ5jSWIzHYqoaJKmIHFPMSuKqkiKokiKpkiqIiqKIilKR5EUp9VpVZSV4tcqpYuKUnIqd+zOjuZUHEnp7CxpCiZas9WpOCtKpaJULjmKWFLEzLRbyktKxWwqkqIoyoqkOEpJWVI0pWPdayl6r6oAp2A+oKi1eJSanUoiQlHpK45CRE1ZqSlSrVdRlKriKEpb2WnVFE1RKhVFUUqW8islz5Ecxal0pI6iKE7JEVu1UjuDGBwBWNFAP34hKNV2JDJTc5x2qajIilNb8V6RFGnF+Vbpr1VKzpcc5RWlU1GcivI7LadiKhVFsRylKCmdV5ROS1E6irKiKJ2WorUUuRmJejusDHENyISCREWRmorUUazmlU5HUVqK4yiKs1VRKkql07nSVJTqHQXBgGwHxEmhKKIBiqhApKCkANNRUFHQUBQFEgUKRUxK5hTIBfKnKJkMCEUk0tPMRDzC0VTE+orSU8xqUREVv1dTHMVSnNKK03KUW4qyokiViuMoysqi4yiS4lUUVZF6AYMYHAJYhqCzXyIizU6rVnFKnUxUOoryUi/LAykUULRAXOoqZkeR7iqK4vRKldJ7PcWqlYOUjkqlnI74Cih8dGISnRgGJCIa3fgEJ7wBqahnKBPN613F6VQURam8NBGVSqVTUaRKRVE6nY6idJQrFUVSJEmptVpBL8hFSMKhMFKqdbvVWueO01K0TksC", + "args": "{\"Recoveries\": [{\"Sectors\": [65207, 1, 2548, 1, 79, 1, 133, 1, 17, 1, 4, 1, 4, 1, 14, 1, 14, 1, 19, 1, 9, 1, 5, 1, 3, 3, 7, 1, 2, 2, 4, 2, 8, 1, 5, 2, 2, 1, 2, 1, 2, 1, 3, 1, 1, 2, 1, 3, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 3, 1, 3, 1, 1, 2, 1, 5, 1, 2, 1, 4, 4, 3, 1, 3, 1, 9, 3, 1, 2, 4, 1, 1, 3, 9, 1, 2, 2, 4, 2, 7, 3, 2, 4, 2, 2, 1, 1, 2, 3, 1, 3, 1, 4, 2, 2, 1, 1, 3, 2, 1, 6, 1, 1, 1, 9, 2, 8, 1, 4, 1, 5, 1, 22, 2, 88, 2, 248, 1, 289, 2, 29, 1, 398, 2, 38, 2, 98, 2, 9, 1, 28, 1, 49, 1, 355, 1, 16, 1, 27, 1, 2, 1, 6, 1, 33, 1, 13, 1, 12, 1, 2, 1, 16, 1, 2, 1, 9, 1, 8, 1, 1, 1, 6, 1, 1, 1, 10, 2, 6, 1, 10, 1, 11, 1, 6, 1, 2, 1, 6, 4, 4, 1, 2, 1, 1, 1, 5, 3, 1, 1, 4, 1, 3, 1, 5, 1, 2, 2, 5, 6, 
4, 6, 5, 2, 7, 1, 1, 5, 3, 6, 4, 1, 1, 2, 1, 3, 1, 5, 1, 2, 3, 4, 2, 12, 1, 9, 2, 5, 1, 3, 1, 4, 2, 13, 1, 3, 1, 15, 1, 4, 1, 1, 2, 1, 1, 4, 3, 1, 2, 4, 1, 2, 2, 5, 1, 9, 1, 29, 3, 5, 4, 14, 2, 4, 1, 1, 2, 2, 2, 3, 4, 1, 2, 2, 6, 2, 12, 2, 2, 14, 1, 13, 2, 7, 3, 2, 4, 6, 2, 1, 3, 4, 2, 2, 2, 1, 1, 7, 3, 6, 2, 4, 1, 1, 1, 1, 2, 2, 2, 4, 3, 2, 3, 2, 3, 1, 2, 1, 1, 2, 2, 2, 8, 2, 4, 1, 7, 15, 4, 7, 3, 6, 4, 5, 4, 8, 2, 4, 4, 8, 2, 9, 3, 6, 3, 1, 4, 4, 6, 14, 2, 7, 4, 6, 1, 2, 2, 5, 1, 2, 2, 1, 1, 3, 1, 8, 2, 4, 1, 2, 3, 4, 4, 1, 1, 14, 2, 2, 2, 4, 2, 1, 3, 2, 5, 1, 2, 2, 7, 5, 6, 2, 1, 1, 2, 6, 2, 8, 2, 8, 2, 13, 1, 11, 1, 2, 2, 2, 2, 1, 1, 2, 2, 2, 2, 6, 4, 6, 4, 6, 4, 4, 1, 1, 4, 5, 5, 4, 1, 1, 5, 4, 3, 2, 3, 3, 4, 6, 4, 1, 1, 2, 1, 1, 2, 2, 2, 2, 8, 2, 4, 44, 2, 2, 2, 16, 1, 2, 1, 2, 5, 1, 2, 1, 1, 1, 3, 6, 2, 2, 1, 1, 3, 1, 8, 4, 7, 1, 18, 2, 2, 2, 2, 8, 2, 2, 1, 2, 3, 1, 1, 2, 2, 1, 3, 1, 1, 1, 3, 1, 5, 2, 1, 2, 9, 2, 5, 1, 2, 1, 6, 4, 6, 1, 1, 2, 2, 1, 5, 1, 10, 8, 1, 2, 3, 2, 1, 2, 1, 6, 1, 1, 2, 2, 2, 4, 1, 25, 1, 1, 1, 3, 2, 8, 2, 6, 1, 5, 2, 2, 1, 2, 1, 2, 4, 3, 2, 1, 1, 3, 2, 4, 2, 2, 2, 1, 1, 2, 2, 1, 1, 4, 4, 2, 8, 2, 2, 1, 7, 1, 1, 2, 8, 2, 8, 1, 5, 2, 12, 2, 2, 1, 5, 1, 9, 1, 3, 2, 4, 1, 3, 2, 9, 1, 4, 6, 2, 8, 7, 1, 1, 1, 1, 1, 3, 1, 4, 2, 8, 1, 9, 2, 8, 10, 2, 2, 2, 4, 2, 2, 1, 2, 1, 17, 3, 6, 1, 2, 1, 7, 3, 2, 2, 2, 1, 1, 2, 2, 2, 2, 4, 2, 1, 4, 1, 2, 4, 3, 6, 1, 2, 1, 5, 3, 2, 1, 7, 1, 1, 1, 4, 1, 2, 1, 1, 1, 1, 1, 4, 1, 1, 12, 2, 3, 1, 4, 4, 4, 6, 1, 2, 1, 1, 1, 1, 3, 4, 1, 1, 6, 2, 1, 1, 3, 3, 2, 1, 1, 2, 1, 7, 6, 4, 3, 2, 3, 2, 2, 4, 1, 3, 2, 1, 1, 8, 2, 6, 2, 2, 2, 4, 3, 7, 3, 5, 4, 8, 1, 9, 1, 29, 2, 8, 2, 6, 4, 6, 2, 18, 1, 1, 1, 7, 2, 9, 1, 2, 2, 10, 2, 4, 2, 2, 2, 4, 2, 6, 2, 4, 10, 2, 8, 2, 2, 2, 4, 2, 2, 1, 3, 2, 4, 2, 3, 1, 5, 1, 3, 1, 5, 1, 2, 2, 1, 1, 2, 1, 8, 1, 1, 1, 1, 1, 6, 1, 2, 1, 4, 1, 1, 1, 8, 2, 2, 1, 4, 3, 1, 2, 1, 1, 1, 3, 13, 1, 3, 1, 1, 3, 6, 3, 1, 2, 3, 4, 2, 1, 3, 1, 1, 3, 1, 1, 4, 6, 2, 38, 6, 1, 9, 1, 5, 1, 3, 1, 2, 3, 
1, 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1, 1, 1, 4, 3, 2, 8, 1, 4, 2, 8, 1, 51, 1, 13, 1, 5, 2, 1, 1, 1, 1, 4, 2, 1, 2, 9, 1, 9, 2, 4, 2, 2, 2, 2, 1, 1, 2, 4, 2, 3, 2, 1, 4, 2, 1, 1, 4, 2, 6, 2, 1, 3, 5, 1, 1, 1, 7, 1, 5, 2, 14, 1, 7, 1, 10, 2, 112, 2, 486, 2, 10, 1, 6, 1, 30, 1, 4, 1, 9, 1, 3, 1, 2, 1, 17, 1, 20, 1, 2, 1, 15, 2, 3, 2, 17, 1, 6, 2, 1, 1, 2, 1, 6, 2, 4, 1, 6, 1, 7, 1, 2, 2, 2, 1, 10, 2, 3, 2, 2, 1, 13, 2, 1, 1, 3, 1, 5, 1, 6, 2, 6, 2, 2, 1, 2, 1, 2, 2, 2, 2, 1, 4, 5, 2, 1, 1, 1, 1, 1, 1, 2, 1, 4, 7, 3, 4, 3, 2, 3, 1, 2, 1, 3, 4, 1, 3, 2, 2, 2, 3, 1, 4, 3, 8, 1, 5, 1, 6, 1, 4, 1, 13, 1], \"Deadline\": 28, \"Partition\": 0}, {\"Sectors\": [194810, 1, 40, 1, 10, 1, 13, 1, 18, 1, 51, 1, 6, 3, 3, 1, 13, 1, 4, 1, 8, 2, 12, 2, 3, 1, 6, 1, 1, 2, 7, 1, 1, 1, 1, 2, 4, 2, 4, 1, 1, 2, 3, 1, 1, 1, 1, 1, 5, 1, 4, 1, 1, 1, 1, 1, 1, 1, 6, 1, 2, 1, 4, 3, 1, 1, 1, 1, 1, 4, 3, 2, 1, 1, 1, 1, 2, 2, 1, 3, 1, 2, 2, 3, 1, 2, 2, 1, 1, 1, 1, 1, 1, 3, 1, 5, 3, 1, 2, 9, 2, 1, 2, 2, 2, 5, 3, 2, 1, 8, 4, 2, 1, 3, 1, 1, 1, 1, 2, 2, 1, 3, 1, 5, 2, 2, 1, 3, 2, 2, 1, 1, 2, 2, 2, 1, 3, 1, 5, 2, 6, 1, 5, 2, 12, 1, 9, 1, 18, 1, 14, 1, 13, 1, 1580, 1, 174, 1, 19, 2, 18, 1, 2, 2, 4, 1, 9, 2, 4, 1, 3, 2, 5, 1, 9, 1, 1, 1, 2, 1, 3, 1, 3, 2, 2, 1, 5, 2, 3, 3, 2, 2, 3, 1, 1, 5, 1, 2, 2, 1, 2, 2, 1, 2, 1, 3, 1, 3, 1, 1, 1, 2, 1, 9, 2, 2, 1, 10, 1, 1, 1, 3, 2, 130, 1, 219, 1, 78, 2, 69, 1, 162, 2, 8, 2, 68, 2, 74, 2, 48, 1, 3, 2, 42, 2, 26, 2, 2, 2, 36, 2, 20, 2, 8, 1, 41, 1, 115, 2, 356, 1, 127, 2, 2, 1, 147, 1, 20, 1, 18, 1, 61, 1, 9, 1, 19, 1, 30, 1, 28, 1, 9, 2, 8, 1, 14, 1, 1, 2, 2, 1, 7, 2, 9, 1, 10, 1, 8, 2, 8, 2, 15, 1, 7, 1, 6, 2, 3, 2, 5, 2, 3, 1, 4, 1, 1, 2, 3, 1, 5, 3, 2, 1, 1, 1, 5, 2, 2, 1, 1, 2, 2, 4, 1, 2, 1, 2, 3, 3, 2, 2, 1, 1, 2, 1, 1, 8, 3, 3, 2, 4, 2, 7, 1, 2, 2, 10, 2, 4, 1, 7, 1], \"Deadline\": 28, \"Partition\": 1}, {\"Sectors\": [310941, 1, 34, 1, 9, 1, 3, 1, 5, 1, 6, 1, 2, 3, 1, 4, 1, 3, 1, 19, 1, 2, 1, 3, 2, 2, 1, 1, 1, 1, 4, 1, 7, 1, 459, 1, 261, 2, 6, 1, 16, 1, 1, 1, 4, 
1, 11, 2, 9, 1, 3, 2, 4, 1, 1, 1, 11, 2, 2, 2, 3, 1, 7, 1, 4, 1, 2, 1, 4, 1, 1, 1, 1, 7, 1, 7, 2, 5, 1, 6, 1, 12, 1, 72, 1, 29, 1, 2, 1, 2, 1, 12, 1, 29, 1, 318, 2, 31, 1, 3, 1, 38, 1, 58, 1, 134, 1, 17, 1, 26, 1, 11, 1, 62, 1, 56, 1, 188, 1, 21, 1, 14, 1, 67, 1, 19, 1, 9, 1, 1, 1, 14, 1, 11, 2, 3, 1, 3, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1, 1, 4, 1, 1, 17, 1, 2, 1, 2, 1, 2, 1, 3, 1, 2, 2, 4, 1, 2, 1, 2, 2, 2, 1, 3, 1, 3, 1, 3, 2, 2, 1, 3, 2, 1, 1, 1, 2, 1, 2, 2, 4, 2, 4, 4, 2, 1, 6, 1, 5, 1, 5, 1, 744, 1, 23, 1, 36, 1, 28, 2, 35, 1, 4, 1, 6, 1, 11, 1, 11, 1, 10, 1, 6, 1, 3, 1, 1, 1, 3, 3, 1, 5, 2, 6, 1, 3, 1, 5, 4], \"Deadline\": 28, \"Partition\": 2}]}", + "return": "", + "returns": "{}", + "epoch": 1315479 + }, + { + "cid": "bafy2bzaceauwc5kfjmchspkiozemco4j636sqoj33grevn4b7vo527erdmagu", + "signed_cid": "bafy2bzacebh2mmwv7knqa7wyaupxwnc4qler5rhj7bvrnbeirguoxpz2sov7i", + "from": "f1va3xvbq7smsmt3zk2oyiamlyjsa7mq7daas3s2y", + "to": "f01", + "name": "Exec", + "method": 2, + "params": "gtgqUwABVQAOZmlsLzYvbXVsdGlzaWdYMYSCVQGoN3qGH5Mkye8q07CAMXhMgfZD41UB1jnepkeXxhMldCytwrmWYk1ZBc0CAAA=", + "args": "{\"CodeCID\": {\"/\": \"bafkqadtgnfwc6nrpnv2wy5djonuwo\"}, \"ConstructorParams\": \"hIJVAag3eoYfkyTJ7yrTsIAxeEyB9kPjVQHWOd6mR5fGEyV0LK3CuZZiTVkFzQIAAA==\"}", + "return": "gkQA9+daVQI+58QajhupcUYWL2ybsWkVjwKiRQ==", + "returns": "{\"IDAddress\": \"f01487863\", \"RobustAddress\": \"f2h3t4iguodouxcrqwf5wjxmljcwhqfisfky36xny\"}", + "epoch": 1316739 + }, + { + "cid": "bafy2bzacea7nglbjn3lv354s2ydt44kzhecarezgpvxcnmqvpr6jvu5zek7ne", + "signed_cid": "bafy2bzacea7nglbjn3lv354s2ydt44kzhecarezgpvxcnmqvpr6jvu5zek7ne", + "from": "f3sl6qa2fzrj4xavz4ckczo3d2ydivjc4kfhahjsrmw3hgw7yetnuijh2ymuqih4qn6sxwt7jlprelqu2ooqlq", + "to": "f01171513", + "name": "PreCommitSector", + "method": 6, + "params": "iggaAAnlu9gqWCkAAYLiA4HoAiDtbWcyoFnl5/ScS+CIFYOXTQUSiluH3RqbThd9V0LiTRoAFAXQgBoAK69z9AAAAA==", + "args": "{\"DealIDs\": null, \"SealProof\": 8, \"SealedCID\": {\"/\": 
\"bagboea4b5abcb3lnm4zkawpf472jys7arakyhf2naujiuw4h3unjwtqxpvlufysn\"}, \"Expiration\": 2862963, \"SectorNumber\": 648635, \"SealRandEpoch\": 1312208, \"ReplaceCapacity\": false, \"ReplaceSectorNumber\": 0, \"ReplaceSectorDeadline\": 0, \"ReplaceSectorPartition\": 0}", + "return": "", + "returns": "{}", + "epoch": 1313525 + }, + { + "cid": "bafy2bzaced76cuzmjlvfn4rho47o2dx3psuzgh2kulqotkchqyqqi5dm7vyok", + "signed_cid": "bafy2bzaced76cuzmjlvfn4rho47o2dx3psuzgh2kulqotkchqyqqi5dm7vyok", + "from": "f3us2rmpfgzhkjdx5qgaxvkst5t4nx5ekrvlbhbkrda4f6g5brcobrfzz4yyuy6du3zvtai5ivmoqbab4xp5dq", + "to": "f01211558", + "name": "PreCommitSectorBatch", + "method": 25, + "params": "gYKKCBkvsNgqWCkAAYLiA4HoAiAYgYssr/n5SyWKnaGmvz5RvMFcE9h2II5XutKnMO/DZxoAFAK1gBoAK65W9AAAAIoIGS+x2CpYKQABguIDgegCIH0z/hkkOcn3MEHEv56gBrI1H3V65labjRnaeULgQzxXGgAUAuWAGgArrmT0AAAA", + "args": "{\"Sectors\": [{\"DealIDs\": null, \"SealProof\": 8, \"SealedCID\": {\"/\": \"bagboea4b5abcagebrmwk76pzjmsyvhnbu27t4un4yfobhwdwechfpowsu4yo7q3h\"}, \"Expiration\": 2862678, \"SectorNumber\": 12208, \"SealRandEpoch\": 1311413, \"ReplaceCapacity\": false, \"ReplaceSectorNumber\": 0, \"ReplaceSectorDeadline\": 0, \"ReplaceSectorPartition\": 0}, {\"DealIDs\": null, \"SealProof\": 8, \"SealedCID\": {\"/\": \"bagboea4b5abca7jt7ymsiooj64yedrf7t2qanmrvd52xvzswtogrtwtzilqegpcx\"}, \"Expiration\": 2862692, \"SectorNumber\": 12209, \"SealRandEpoch\": 1311461, \"ReplaceCapacity\": false, \"ReplaceSectorNumber\": 0, \"ReplaceSectorDeadline\": 0, \"ReplaceSectorPartition\": 0}]}", + "return": "", + "returns": "{}", + "epoch": 1313263 + }, + { + "cid": "bafy2bzacedflsijew7h35vrtkkcq36oerhwlycs6mxwarz2wm4anku6ul2fbq", + "signed_cid": "bafy2bzacedt7jc3dhh6fxfkrpwrd52fipydt3jnkkitamrr3jcu5ukxwpe7u6", + "from": "f13sb4pa34qzf35txnan4fqjfkwwqgldz6ekh5trq", + "to": "f028027", + "name": "Propose", + "method": 2, + "params": "hFUBZoMehHLQHg3LWC5xxLtI8LsavYtJAHxF6AB/C2U5AEA=", + "args": "{\"To\": 
\"f1m2br5bds2apa3s2yfzy4jo2i6c5rvpmlb2uk7ky\", \"Value\": \"8954818522925196601\", \"Method\": 0, \"Params\": null}", + "return": "hBkCMPUAQA==", + "returns": "{\"Ret\": null, \"Code\": 0, \"TxnID\": 560, \"Applied\": true}", + "epoch": 1313250 + }, + { + "cid": "bafy2bzacedlc3exicm4jz6hdgilpoa2szzk3iovl3fifyt5cvuxo5zumxx4zs", + "signed_cid": "bafy2bzacedlc3exicm4jz6hdgilpoa2szzk3iovl3fifyt5cvuxo5zumxx4zs", + "from": "f3wzcpybiiquczmqqsuibcju7wpboo5nl3yyo7cnsrwn6fdcy7r6wc7foq44noooczlcb7epvpz54rqd3sd2cq", + "to": "f0441116", + "name": "ProveCommitAggregate", + "method": 26, + "params": "glgjYNO5IPUBYYpFPxC1jmNloheRUCi+4rRqF3NRUwYjF5V8BCRZcXTYWMNLXiCp+eKvLfICNW9LnBs6XtqBaMYgYaxAoxjhVvOnx9Like3uZPmq3ElcahR4BwFtrkIs1x9FwZfUF+84HGCIlmqsC2MuuM1tZZpNbXH/qipGY7OMegQ2li2MTQtR2bh4STre0QDJsipmrYElqtiK4AiQ+i55ntOgtM3WTUiktPpBT93E/sRXAH1lgBC36hja6LBm1I8d8xAL4p8hIeWfIbrI56VAuFZ0kTYVP0OsZP5VJYGXHRSjW+ZhyAWD6EoIjuqb1GXRnnFsZgrcWyoAyc4xdS+hStayjICZ0bXPWEcMZ9nOCn8kExdo/Q4T01uAXfjD9MipIky46evi5FEwEloFeAQ+5XF4DNOCSo/LOjAP0lPUx9xXTl1RjRPr9SyGucSiigjRATYkdfm0M1frliaWlldxykrYL9C7jeMD651s0bnblAqyKS7c5xPgIeAG3iBfPBUI2sUVdQ0O1v03XgEO8ae8d322rc0X2j6L2Z5cSuYY/ShIHIWinAvfc6LpvwmLghbR0t1ZzQOkjz212O/0tfkp0zRuXwcA625ZDUisAemX8l/5owoG7g8rqgtNnuKQ50oTUNLPBxjr/VLTMH+n+0lGYVv/elOyrWvdfscMBMTiivaln7tkrxXF+dSOedYsxdwfg1zZSWOZrdrDyCJjbLb0YkdBhuRrnhWJLIM/BeSHOHmyV8mXFRZhv8Zi/mYYJGiiX2zmgkN4DkuUDiVHSIIFYoOLb8Pe2o7eSZUc6SOdGjhGVnrTFhHowNJZs5gx2nnrVvWYOnfIy7j98uwbGpY8y0V9WVYHcJVX3Ne6e9T6vrCf5sXSIhWaY93f5osJ9n7wXqsweNFK0b2CgATcflf+lktEFM0SR0/cKb03E9oYMTNU0JGdeAstaXrqBTSKGfyCyiWlAONhL7XHcJgFQgzULXGg3Ze0U72kizPQPS9MdjB8waQV1QY/uskcCww2UOj63aZSg2FAsot2LgeQi3Tje0yhFMy/9s0AQld1hapCBsoI3FxuZhH0QxIzxWKhJZ+00oJZ4B4hjApZQAhCUuhem3BNQrQk+TqhbT1hwfZd9YaxNmA2rhK6CQza5D8tq1/XNUANV0Jk0rnspCkfonVJ8v/lolpXFfLjElJmObSNR2Qx7+8DGhBXpJyp4jsVyUcg6X1wKa5X1UZ4PCqiqbR1LmD9xx6f3sNl84D+zhh6N7hmZGHvzxDl75xHDHAVdtgxg3u557pQUeVnuBJUlPWVwzN8CAyuzs5yEazxhOfTYGValXAFMRDOrrCU3fYOqF08EHUKk+asF6CcuGGaEnrOM/Bh9RaxnPGMDmEPxbOxSdyuxJBW1Qw0eD7u4j
581H0Ybj1Ukr6s/ogXrJbgqc0S85ooqD8yQSJjT5SmoOw0nKtZdzAhDhfMhHqab6hdYwR1AbK0XrNNoIDhkTEw4xYYKRggXeok4SZr4mTiSs+hvDk2Ph8JxgpUdCEoTyB+4HGh1pYVraWHdjyaMHQr63ODHj8nrJvKguOS+SPhDN3bwj0BrdG9dAIVvR9DqhilofPg1IyJS7oD76AcmX+1MzEjog4hhtLHXT2tD4CJEmo8wuzSbhv1cwZMxy4GtbUJutxMqkl0ohKUHfIMxHbiJO2uD8NjHxn/HGsk9vDncWquvWleKqVTTRSJkWsdRJfkaVnqJGpLrumpCrMlR5vABJB6t3cvXMLOtahkPIlPf5QMcsRko6e9SwkzzEFAmno4TEn+Cwwfn+FGmptQGddMKllEELjiJIpWFWdReePGZ/rXPZMX+YUTFgL6oJNJ6nTQKEnFhMSwawugrzag8kZxNL8Y5tN1nx09Qk3BvhAawGNKwlflCP/RdRED/F3InezDzXNXZNLpbgdjU9R/825aztMI+xYDIAoeL+3SjzSMriird2mrxl6UFhmJk9g7Xl4Lnghdn+LwmY5axHBNWHrH/dI3sIkQRbf2zu/fnP19WfBsri1sUeT4E1sAAgAASVQyi387zFpt+tYzzU+Jp4LyQoeBZwBgYkic+jBEpJQq4/XPOtl2jE+nz2z4ajYUgTDvIUzu3Xrsggq3eltHwFQsWekCi5un7/wymfCY4EgIaV0XVmWUV6kgjYgHCoYKdUytuftecKtCaJNpjS5Y1Z6euhdJFEA1UUB8R/VUuJpK8WWvTCLt5xLRkV6LsGcF4TAufOmIqfO9PRgnnHEMpxCoQB879ao5NRnVuTR4Of0MeGePG8k3WJ1vlWi5XIYMfaU6Mwk8vMg5PnvViKgIGHH1RvVrFUX4NZTl71UVC5n6GTFvaY5QW5ogSCkRzEkX9ON5Pb6A8ceu3g1FAM4iSdCvHL6uJocprg67dGMjSjdtUSOgHRM55eVndKvdd2MXT4StG4nIYI7YjzumzFyTt532AQQYAMGWSAJI1yej5ZIJ9xKZbER2LYFrjQYohOQDiclAWrFBM6l6jatj+ZNhyjvmj9fVCFl6NIdF7WQqOuT1+KmIfMJZe6bWytRFFPsFpK6mwHDIvtbWc7gvvcpYlaEFuU2/7SiuGYJjzIEn711LB7mB95ADHrMjks7ncwcHuqM8IOzKAAljw+iSVjbXhuavUUCY7K/J672vafO/Rm5Ta2GwqRVbRwrHCK5L9j0YswkiQOGD91IGKV1cii1pwB9beVrh48f6VUY/j2bSQPEytF3lDqJ0hS1FvrM3pyMMP+mH35GPUWE4fXdLIVQzKFtorZb5T0xYjifAxq3nbqcJ4kTh32ENayUcIqsGyhsTGMmn2+S1TfgBptCGqccK3AuWt5++qNzhvrLq7IiIacTh8rryr9xXHZcVEJqqraUTC04z37lulftYiVdzic/WJtJqSR0yI6ipfRhWqeoZCzKtYBIAILmgt1icDS6zvccP6c7+wWcEgZA7uyoEJBVWzDWdPAdNMzIkxHBU8B0VoEXv4QkMWgShC8Z/pXExmuwZlSL8OvMR39SrQMnnhzmG2Q+eEJcXFA6Kfz1ur2R6W/i+UzELPO62qtUQ/X7SC8EGsmXmIvBVM7oy0xCY23HCmXXqzDG9NJuqFLztwdH3NwIyiUGrGYcDXWAjqcMOjjcPmS4esX+Ps03LLbC9sBC8ieOwwsLZBRXa51pK1/CRjjf8e6jREzNfDmn2IbynzdMU7B4hqX0XG8p4gr7QK5XjWt8lFyOQIWMS5q8DorIQdMkrB17v/stW56WdPdqypAUUUoV655GEyNr3UUCnq0GlHIEbAmyEkabzx2PD7R6vfXo51cTEyxIn3+OdLzh70KgZPWWvs0+7mTzA3wm1ierXXk63rgTSkaEqkVAoqmFfIUGS2BYSHBt06/jIrrpuJC0RDnH9UNAed3EyBAtjW4
Mq5DB/9ukWTXjT6bEBfgCJ3PV5Eap/ducw0p3YbwTYNvEB4OTbj55uTyfVz8G7AKyqXc6enIb5vdL8xO9pdEANXJUCA/bt8biEGFwhU4y9I7cXjZivviHtO3DTxTIkXlF7ASIyqINjpARIf2LS09RxXC+SMVFqAbxSWQorv6FpgwQHspqsVmWhrgfO0zN4zgj10HNccNJFmTBgh6XYrSZ6X1uDhtwwFVCCTcVz+KhRqQIKNEi8yot/DBkfOqsuJfuGouTrSyxojMxJ4uoXXcmaTcjTwuROYFw2iABFFoILGR4K+7UGvCK26OqnWcTjZGSNPh4ylYFyc3LuPpZ05vPN2uml7XOSdOL5KBfLtYVIgVQK4dXT4bFF1Uzxk3/HCwaGX2mffet4X5KNL4hzvpTnIm7jWsJVkHfL9XhI4MBPhvcQ7047E2vlpu+WXNi5Xq386oNvA9l8OStuDgQ87ycl9P+32+6jRxBt3K2wguaRe7YQXqV9a6RpGZsytnLM54HGz7b9FrXE9eIhzE58OBDrXPde27UFcKLj24KYvoqZJbYTELud0qACgD820MfN3Xd6ICTLcbgfeXNK0201CBMEIl3/HS/XDRh2l2oYu94TYAMJ9fcxoNMdnjZ9eWXsGYDl8HahRyrNxiOljfmENhxZnXjZqWi/mT19DVnxhg17zYEP2+pY1aR2tiI3XFWFXf48ONwz/0rLV7MlabTSgtvQ+CjjjbCgYoyDBeDILxwZwKgJdyzK7l3u+NoRsDLaj+kv3HrbqC27hBJdCMM9PWtwHu3YGquJLGECL4SZ9UTpHEkTD5MdwA6H62J7v0JbraolHvS/+N6cQd7km52YMCbs5eLSd3gFkzb1PyiWwwBWITEVOgaKb9c9f7aiX0oGJewrdTSlF0OhAHcfAqQ+Rz6C5KKC36XCnRmnJjGVQb8ODoQEjH43rjHAMOzlfKzZDg9vq3hNfn5UBAj96FbK3SxKCczqV+im71elC6BbEhBgYk0BZLpM3s53zhkjnT2wLy+cqv3HkLp7ApXZnNmVVxd0NoELnqXQX4FbTWnw5JPZIF4JARFYbbPPk3Zqx7AuqHv1c6cLiDYk6dPenKVKzcF44iMDzP9ijDLIJMcPAUtGAogQtGRfar5esVcrZCXIc6SFUwyz0BZBNqSv3Ou3IY0lDwWtrw3tXkUob3WAb7NwKlgIzT4cFIMntUAApHq7Mh41TYYtw6WITAt79OqjtjbMdc49t3ardRU2UCLYBjMv2oIYogWF5xUCXHN9Mx/wAIG271KA/vYMHbP2hqiQxkjpLSAB/bvtxz1ameb4gt4/VG8LE5AYKkHhlxr3bhzH4RjE1ZHczG0qeJNSM46naBa+Am5Tprp3+8nXpyU48cV1CrMTjLjBHqWi9sQgRkWf0Z+Wmv9BQe2mE9Rr6NI6VELkyf7n/7r//hLygf94WDcGTiwOgkJw1Z1QV67zsVRRVmCGgQzxfncRjr+trRH5yHnBvUw3U4bkekT5A/yTBScnimMIn6BknPCwbD6bxxtxfVzDjlJhlknXml7IqBTivu/RCHdAo/CVhDVDhQRxZnTTQhcSfD3rzgzTLCU9lPN4g+3X2azAi4SuPC8SGO5owtI4luryi8EVPM3sWPJqUY7AylYLRMBMH/+xJuQckcILouEv7z+Ax61QUlNax4b94njoT2QpzmbYLo0aRM0jMRze0dUDYLRB4AeQKoP3ZUO1/dUmjwTVmZ61NUiD25U/pUFOpZUE1Cx4qPIb7QoM5J8u1ZoBV3f8HSWz4qXtHZ3Tr6hSTgJq7c9+ZqbtMIFnl42WW5AqT7k+NhiukcVNuQJl9zUJoQf9qN1vkBEH3JaQlVMujMYVv45paHL80hY6t9cQwTYn6koLl9Q4Wj7tEcifj9sGzLzWVJgq99AGKN22r9FgOE/43vdDBcSoY/88yK3X/ZnOyjLIlVBN6zlBYK/ivIoISSXl/WYYDuDEjpB2mU7wSFwMWreHvgwNA3
ZqowIoISWl06Lkksww8+5Tmjk6iZgC41J5V1y/UW/b41QGPnr6cEqeozpB3v1FoJDpuMTaykNAavLzcAnOiGjFDsr7z6kT6kKd/S1U15u0ATlfQGyu7/nkWJr2cJpZo+HFBylKcl2jk1L2vDTKtzAHbFQq5DsM2qMwviSYOm1sJvHlnpbW5itpok+ryVqknUVQjFNQGXVswUUHXLhtRGTD+DAEahcL4e3f47RtIQBFht0ZQCZ6GXlBVslqoEobAoyz5In9acEDgKd/jGebIwJCQszGq7kT/GqTe48pcmW2hQkUtbleTann3PEiy9O4MH0H9CupI40Kz5PCeT2CkWyetsVBChsL1Ck5WWm5bSPqHKRde0gQCnZMuHieppkWX4lhcaRHy2NmFikzIVSTipFBC9ctksYR0VlIXTTsViX+2Py2O2DyurLX0W6hmIk+XD33HBZOKROlEjkoPmg60873nqbVSV8Lzxx26swF5HvZ9Z9dnoeNVWhIEyb4iehiPVVEFVwamF7BtotZ8LE1WeutTQPAlxQUeoSHkbbZAAZ2W2/trv3wCCyfkZOqqSZwnJMYalBLT79ZgBr5ZX7lzrAyBFByBHAFSdBfAOpZA8Y7DAg+QtCxUpee3SLPOiDXvUhE3ZBNWqXJz4NzTC8U6UAdw6SRiycEWKso3mu50zmFML6Qvd4aX3uCQDNd/MvTpLnj/jrZIl67DJcCg0LirZ3xGnZhlSsT3SFbckOqawQ4AK+12D2KE5DzaJFhxrQZ07j8EehgpXewWuiRaLVYbWv3Kp0fP8EEF6QasMda5Hoyx3nNmQ+TIV2JPTKOsAq/QLP+yFrjUB/ertA11OTUuVPW1jQADloSMu9Zo8Ha3HX5y6kTJZc5hKjfT3G6AC6ThGTSgomzkZt4No9r74sMVp37Y6agQeAZdqPwLA5RyPMqIFOSNtgJh/YZRzMD6tuxSM/eC6v5hKcfSRc4Q20VupKFUW153IsRvQuVJFKnIPlf8mgIcinijLQ7/1FZxckf3kL9DMIvIGFPsYy7g6gruofKrfS0pB0LjtFy9dxgsfz+6iIX8Tq9u5DaJgXBzUWkXSDKP23lSflPIKWfHLscA24eoukEiDoIcFv/i5X0Ftn7DEE9EIEuS9Jmppp2KXn5oiKksgHKY/BQxZSn2mqofEOFjRLDMIwAtZxdtAGQhPZXwaw7cOFGMCAU+xNnyJUFy1fv3+r0YO01KmH7HwJlpoJYYHsAOBIRCHI4sxst2HVi96fhXLYtJDU4nLmWTLGss/LTkbx+wjjX4rKMLNoaUXrTnr6iJp4D3gtX2EXjmRV/ViIvGTf7ZI1Hm9ngQP1FliNp3Yh9VYw/PsgKlwm6lrqRZWzf5KcRbIa080IePK/rq4v1QUONnIbtGzC9lz0eBAyt/7o1BEXjPTkN4SbDbsP+cCeuTTcQukoHxOAlIG/GBCQx+BaEl0liE/rl+oWoHXIUA6knXNmaq2WMYCzKoAkj5qHsaA4PvRn+R62uCCNmxHLIzXDQa7mXKwr2CMXdyKID7PHg4wjCbRF5hGAxU4SgDoQ5B2YH01anu1Mjley/6+LEaRQ//8qsKLHA4cd2LovliW2mvqB4xGg6y4aQWq24vHmNuO0BQgRTK4twyCAEpJAU2m892EWD0JSBdlmsGB6Jazc+xRIZwVFysyw9JMgfKLoryMQRJWX0+4o05pjdXCOsWmd9YNiDfjnqX94tbvv6lctbOJh7tXsMbktMyRsnEwsNF3sPxBVnTkx9/ouU8VaL0H7mQPd6P+7QCOOeh0qdAVZCSImGTf0CZWbZ4XRKvPtzq1IEllx8y8Pr/ch08HqNKTcGnhTC8iza0C5IlZMxj8l/aewHlKlF+ec/MLH3vNUALQAAbLfYsegz1LtpIVQMlB2s7B5WfrX9wbWeG1qsXaxNEflnvVT7UcsyOKMGL3ndM4MM2H43G3rUYjMHKrv46h2bE7FVN6bSgPUbAtr8J0pTHCsaSfytz+
5bIbXb3liD47IE7aztpPXoFS8A5zFpqZ+7iGFJEMTT3X51z9kH+wx4uglvhPZ4OxUEunLDUkybwv0IjJke9cTpYQ2BjdxvoyJLkhyMV+BRYT58+YmS8gFUZBpKp11X+n13jjhSbGdXKqoETbmFbsNq49hN5pZxZFICkfeV8GQRD7X3SpWiyLxjyd7nBHdo9IuCmXJINW8OL8kKoACj7eTe11OOo7lVFMBjADNGv4XP4El7l0QCxPAmrw7OpQ2VbmjfN/o4WbWYfegBRSEZ6ANEc7noTIAAw/2zO5rGnVgTpbkMZefvQrnSxPF8f6Q52M5aiIqpfK0YqVMF89P2iiJDuYGkTfKtUMA4fi4wrMGF/1Wd8oMwSUGEdJF4ftVOD8FX2vwKhCprT6YI8kiEVlbgwqcDhQikOwfTVXKC2tyq4dt+5DRf1jtRnFFRkZhV669YUjNfWQNlIU8K7hTNtopb6PHQzqklhLCZx1wtxkjbLKsJ57zuvhvIy4qKwRv4t3051u9igy+p6pwBMtkPqSXyog0T0GiUuvY7hZ9qCWU8pDqmGT5KMVBKjKfrBkQzoXCNo6ewHFxX9NYIUfrIFB/krGy7YwBNqndNYt8Z20aMYRva2TWkeXxXD+nfJC8dta/vdSqNbNjhu88Y7ynQTeRf3SX4ZDyqYIVpGI+zx8sU+CzVKUenJd9qSTM52F/TUHxUEvJlvawqW0AIxc8UJTFA2nQ36xg6HD7zuSUO73Swx9ywW1Eum51NAj8O0kKV6CDHMXEaty4fhCsOZaXn8YWeIY6BTTam8F16zGjgHkuoafdlqokC2kQ7N9r9XrKzr80I7lfmnB0xjjIWkmd312sx7bYlop+5R5UcshEZC/v//R9yGU7qJq0AOeeY3wFEC6b8BEXeGxrDQBEKiPRXtbqrYxeUwRSvE5aOZMbYIo0CmtnYNKok2sfwhpPbRu1p+1Y2j4PjHw1O2fUKjK5exfryoj59weYP3NubP3z3XLfViEwlL4VcNCJa6Nh8IJWrr61rgxwaVujeDFsQKrMRoSZ1dueizddRyEn4BFDtSdWpVOvB1nJG1MlkC3j8iDI0z6Vg30WHHhppHYUGKragE5pq2SPWZzuyOHB5Te1yIiaN2FJzLbSGU8RVFwMECNKUXCmXUI5ot/kAejsIKX5Rw5B6ENDAWgSPvl+V4p3MJiTtBntCSFBVmfBTm0MhwMywbUi21kG6Bcv7Da0JSxJaRs3D9iEDKJbz1RNSno/TnGnhkHlHDw9pCcNxvXxe3XpgyDhuv7J0mlFD+b8Z+ybrsVh2TXfT4lgqHqoOXuyajbMF/v9Q41ySkuZ2R3+xv89dRY4MRiJW2KV5mPQCSrpHu4h5Gb/iGUFZqtOLve1ikaVTIlSWu1zemrIfWTqMuShYKviqg6tkH7++DSUF4UQHPH3Q/hPREdwrOUGWpW6qJdIWzlImWkSigwAjl4gQYEGW1X8qXb1sM+EWozEBYSDagSlodYogYlvD1hJQWPECGjsNpXKxItvg2/zTrH/7GWrzxHbOI3c82DQC3UEQKzsfnP7FxSiEJcaZ44uC5/G5l3CdXaRniBFUlr80N3L5ObJtK14ZUkS3LxE4bWIVEY+zETYMVaCiN3Ps268Y69RdtwpQX5lIx5qvLoUBO40RBPJ+/zQUCLOcD9bkABMLKDNx9qCmFHdN/jE1EmoO1WVUgTT9jPGjANBcT5VgaIXv8IT9nT5wLKXc9m9PUYYAdMlaM8qu8q3P36RIm2PjxZcLV3L6nqiMSUtBeDTCldkDYqWXnamLEWEJjLvrP3oNE7HNlLgXqYU/i9kO78nwDN3vdDSCkbu29dOe41kLsjBBM9RVePsgZ/iyr6lFT24T765JqN8By2eWz1wPcoeOqshm6tZZbiFb1YRdtb0cZj6vrK4Of1gKzo3w3Fc3ea8H9zcEd0nuAPA6AEk1rSbAU5JwdVQPrGitgeYAjNDMCRUEfN9Acmx2BKU+rxx32U8CLP
hgFu2FEr9NoD59w+hHJLo7UqkWx9tqrz5tYTOMfh9Bv6/lAgK2dxgKIAuMpOoYtZPV/L3vuLpos5EIg/3NfLvmosvB8/WIxtPejVS8Ne2aSQD3oPGuPXfKkOPA0BYLycpemS2DnrLwzxlMRJVh7i7kG6kqSFk2m8uTtDj2ABDzqroQejgLKwg8eb6S5wkBFcveV4BDg2lGS3AdIjzsFY9YlWyV4qNR9Spb+BbPOQ8HitrlMKx6QYZm0cxOPRkHDbC0rpwwPI8NvzcyGNfag9qPLShVAOI01ne9n1phUh5xVoN73j5x9yYQIvyxXaUY+iU2Cqp7FmnTXKKxDCH+IVJfIEyEYw0VEwm2tOv9UIDDEfIlaoMT5sFuFW0egsYT5wibsfmiIHi2TQqq8u+xPHWxM+dTGYRVQh0SecpKQW0dtwIBDnfD61nLsXUkbygS5Hm22ejf/H8CVTewNo2nmNCLuKuLLIcs9eM82K7QCIk3obyZyyDvBAdfGsE6ehYNUGQWOwxCmz75uerxiqR8oxZXdRGYLOP0NgGwfyM+heR8wtawA0EzysGrXu5FyIEEf7F6/+8k3hWAnhEc1lbaaxhX92mbLcWOEw+iEwRimdWFaV7rICvTYa4m5BSJNN0HycU9C2OS6+cyX6VERLh8GH9XQYRm+koaNCjkmQcSm4MqKZ20ZaRyw9nmIsBR0KMEjmbvQEC2+tR8zLTYzuwG+n8yVt7wK0JehljHzhvJMdaRfL5hV5gXREUOkvmEvVwTK6pvRuLMxryrBdwYFFHvmegQLHxWlzOf9j+wp+65t9n8y4lk06IP5zVWl7aMWosVAXxZVY4wTlVHfwz1gYlJz2lE3sSY8swhsPQE4hXjBs76UHsJ5yGs0rICNL4D7e8GixMAv8wrLiwrv0/Yun3ppFH5KK37AgUmtT2yXZrTo5Rgn6DEjdJbbYPjq9cYijgSmVJX/3jwlZgVQ40DGa1WKGgcVbzoB9tZH/d36m3SUmew4eIX3SV+OyM3Xfqs/ekSlQzQXw0yaVjyvLqtUXOU//vDsy/if2JRKOCM4T9VlGdtAW6TeVxJiXRVdzlo+sYF5ZR+7gIE3VlZqELSHM1aT4PjfPzHKV2RVy8x8Zg6PA4ZTcgPqAUmWdizWfTsyzIO7v26d4PGS7sTODxOKwHGQzclEf9bf0gICXT+17Vy3C14OdoYvJWcYh5kzECucQYJ39T5UwoTseqwsXyCNPHHyvnBqmnYrGDcPHHOS46iD6S18otZomv/s1QJU8gj3CAE0NXEUy7z1fgSAODPjhBnaijDljVBRzqa4SoD2zOaeCttIIZIGni9wwhFVFbmdREOUEN9KRWa30Hk7wxtzrfFFpHwnQClsjXUzOOdWtpvv+zh+qzUovA/dsBF2QCwiAsL1+35JkVi63xtN8E0DXiZyXViX6GwnvdSkTr6rkrI/IZFVWoKplcsY0l+yU1KXCcJMP0efcIH76bKp1ZkQSmKzYKHfJYiirs0jvnIes5uQpHRosrZaWfqpmy4/g/SDXAQ+KH6a9f34q9FevtNNpNTkA1EwhDxQX3CwC/E3ciAmSzB6CsW0JQH/JCsiB48qmwG+Qw0ObuaPxN6I69s0Vk5HLuxqtVTxDUzY9Fu1NatGegpb8LFyO4+HDdjQyNLnLYTlTFO6bHm61DM/ABubKBr8ll3TqE1a2YqwuapQ3mnEOblb6CsEyYYgQeZIDvH37oWKciV44pC+webGbMjMG9Kb5W++T5A+1Y3c0W+aMXEwUcbFQb0W+krMmGdEI8QOlMHh14BwBPTJEd05MsT+gUB5IYhNqgXI3BbRDh40z4wGy57d+Lvvq6uX6N3Y3Jv+IEC+GOZY2IskoYAbfF7Hw+653W01eSzeD/67kP5L1HxziS9K0BZRBgJMHrZnGwomh0BUCkM/D/EkE2d0eE7QW9edZZqP22hBvSu8hjvCfUMs0OyA8ERFUX17Hx7Q9E+DDIH6DINln8GHqoL9TqaAC
Mvd8SHNwynBNB5k51aeXikqS4Snmdw3iq6kzmbBgtGwIcJtIaGhFlSuIWexj1IG0mz5iHdCE8C3pC+Q78aATlBBMuC0Ti2aZBP10KJw8ptJhgLrwt9nBcJtV03RR1/K5qAeXwWL2eGKy8CMll3KM31Mlkh3TMBHeRRJ8Q0eQWAXWUH1WyAS9A0c8WwBVQW0VCPZqQJSJ7rhI/ioMV9ysOmqxZpqJ0nmMtPCRRPANQg8z4IRJUNdFFFV7vaZrtM/TRyL2hZGVXfrCJ2X55Pv9ghVwnHx11as5fdjc6zMtHo/NQB6NTruQSLeI3U1ZDN1MGqUVREcZhbEpandMqggNyRpuKh7GTrOZ+clYa7EbrDEC8VxoZjApD900iUmhHSxLT9TFnLv8xOUfTIVX9jtZQZTDwLKl/brmCW4cNrV99ZX2UETkY5B6zj+H4X31srFyYj9ppMuX2nrrCGy8iYSWwrJszYAa4IR52xocgs5zBQS6oXMavg8ELdqyyziHPad5dv8ukvuK/ZxJd00WfoiYXjvHEiXyxGTt7cBfInJdtEWJQMxiI6I5H+gQUYFliAvJMwmKZGbZwxi7QPcGj+xxjGJOM82Sps080yr1WeZb82XD8YJp2nuTFkN5vZ7k/wSdeehq0xP6kCvLt+iux5+TlRlU0yVU8TtipyxVIz8DIC9O8Wr0Nexf7o0Ebjs1HvMEUEJMOUn3fuNLWg1473IPZApLPENbGI2l7oE110eEOT/4YUvYxlN2+83mXqfPkmpKRd1IIcxNPwCQkJDC1yx7NNuWCRKYpBsOJVGeCiTnefohsQC/iZG7DAMr09Yz797SCaWrAddqjwFeMa2WtS68Bv4TNYWP0IZ0tvG/qZc8q/qYUZHknZNk1xPN8w8UFZWbB5HGGgK+xORPjjOv6awGVbrGexiMR7DL/gDtuz938nlJgMRZE80uEcbNh2n1JPXC0vf4hgen2DBD6O9c1I62FnBi9LQhJ7M7+4eCdgEQP8m2kW9/w3kZXyqinDfbPoXoZUoWoVSFuA0iv0dtfO99rJ7OHJHnSuxQWU29MfXK9kiG4YfGX9CsUwaS3vEX9rw/q2zmud4ikGQbIFbacNyg8hpFemgd0pUAgHXZbqs7s4s7QKAM5YU/y9it6tgIzPZqi4CU8XiCvxMGlYtQ3hipVWd6JrXFRgW3BI9fr95w+Xm4ACUXA2EC/xG3ysmVZjB7TPJ86iQsOMWl9hnvgvbXOU9jhQrCBlLBz92QtkCXPbxqUTq9xz2aKNj1Wv3TeCIeO5tB6Vkl8Fl7R2ydXBYX3QZAMXuZRpvsoNVbafANszEC0RRA+XYfTYlzV/HN7SscU6B55eATf6uO0X54e8C4qiajqPhRvKSLlr0EX7cezbfOMKArtekLqCOhvFtPkGtw3+ABRKuTR/+jzrlP6qqiewjhLjcPwLsoZ6yjVmFA7cyboOJ/3/uXcAcy9bIbNXC8Zk3hHPZjB0aB/LYbBGQ6Q8LAvZTUBMnJ0TwFTMJEnGIPUFSPQwmMvFkGuhxZUFo0G9xSqRBlq5odkiRY8hJS4m2/VV/Ix/XFIFSR81kYnqXiEQcKMYEKt8CrZ+ekxsw16AiKRm6P+YNFWgD/3Pj0yUjtddLE8ehY81xgguuKrHNQMGAyMIvNb/zZyXwb2sJmBAnhjw1q2dc8ZEXrb9wA8i6UD5ntl9T1VIV0w0V7bWh9AOESTOwIvQOa3XWaXFleH3FvwqR86WMHWrXciSZwLtAw7dHn01DyGVtdzv6IwUhHcW+9wne8FwmBUwqFhI3CG+7eft8C+RwlGwOTSlSUad7IsohYfRbm3pFx38sDALnkkPYNPWHtNtqFBaeNClq0+gwmcfuQFrPfzUoGrB/lvKRYyb3agGaojm4eUQDl3HHbgBck1tsirsx8LbpFUMIpzFsu84zolMhr02/XA179VG65U3jXK9gXEqGjVG5dQxoZIVFEGAf1AnJ1/kSVqLH7Tmrz514ob5jcxOgX
dqO9Jga65KGVucjG6s3DtAFcnT+1IZGXbC1YbQfxcQ8e/qC9x0PNJfpuoE/OHQtQfbFjpyj77fox6gfrrQ8qBppfBwyK4DBkn/9PM66pxhNEH085e+sbxVz0iLZx64qKLEpSORt997L/ybId4ANN69km3lX7sV9EZsSapq8acJSKbct81a/ICro1OY7yIaKgMpK07MZLFPrPuMbiFZDRP4K9K3TuQGNg5rsHl1qNRx8ukPEmDX8HZbbuqMXLUXMky/AxSwef/xoK5n5lpyvUNKIfDBOxALDyJyL4wNUEVBmYpaRefWf37O4fuF9fxIj4Oj1UVnYkhxbeQjOd22WgYibrwgdywYsAZFLwPIsWMVTX+w2aeZcIEdUuZad+3dB3HdqiRb29XrKTVA2o5UV/QNliLXn2AJVb+AEupO4Q2yA6mhwgTAovBkScLcLEy6yALUGVhQ7q3zwPbtpXspUv6SpEwn5cILnPlHLvus2Se+pP/YEgSK3UTbwOS1T21Ud9gBtTzMjrlE7hULJPmHzxTVJn07weMN8Pd2fl8+Hp+VXK6BpuKSBqwFIoEQawOu1mm41+xU73RWgsMU7xHvNXYJcrdjTdUPJP7QLuR1tkAJW08ir6e4jcXGvotw/euyBUWjkY43bZZxn7i904QyskO4Y2aDrUsQVBV6arJ2NetYfHVnsjTF100Xei1nKAsDwUdvBh1w7XOh8y/Pkh75NfErKL/zqDwSw5kBpenxhLFuH+yecSqe6MpBBf+if7MwMtRf5F0pSQe8qyjK4X0bpcFoeUKcfIcDYhlq3EaqdWAqBAqpaA/2Ol1EANohtZmnBmh0cQYylNJPTK2JK64G44quF3PdZ10WLHR92SBzfv7iVwW0rm2G9/29RiDbWto2XGgEdARZsJtUWGf1rLzCC7Ggaa2wNjgUPJE/E5KRKSwh9wiJ3dej67y0dng8FXwM3ZrH+rv45l2r9uZyKRQT5CMXaq9toGoQZR0rYWfzhGNiXdEdqUUIG3OEM1v18WjwqIRYcQrV64wCvWWV2MpUyebcgiyJUXcHJ45NGBWyco1KE9BIU78XtsfFs7kQjvmCQ/RDjUZsbb5aUFyA4XeMpnn71alSXHEW3K94x0g3656HFBBU1rWyONYg4omo0XZirLyxdgifuVV7Lpl8A9IWT3vtGEniYWMJnE3rlGoP3+jomG0asju7WqXNLRxcSMQUObmoQWdfr7fmzZoQpml4RKBtDPyqf+kWpD5X/AzGcIXVqxixoJKeN9BM13eqh3O6yql5yEsUTG1E6DypMV+7nTYPGssDKtMKzGLV030BPUEFMHZk5IpE0CaFY/Ul38k3jNAVTl92Mey5vevKrxsIUQgHOHL7UzsU8NA/3/dJgUWpFFDbyESt1BcYqgnN1VAj+2ZCto+c9kkyX7ngLITLE355TIB2WPgG2lCxOPxmkgjDf8DiZbRIXVwCuzHDFDEXppalsP8Mbg5ko893aup7hZCa50sZKVAOKJWhk89vWe7ltrYY6bMVCRJ22EuKY/89FPrWYybdT4OAbLctldiRMtFtGWLtCOYLJKLv0xG2iavnt0Wkg8LUwdwd9B4kGp/zN7VDV2RFxkCsNdn5Iw1NSgFuHf/qXUUOOVqf9/CKqRjvVM9OUxxm0X9I0FjP5pJW5eNH2GWu7GbsJ8BcIauWRgJRO29sa+sAb5TRfd6VII2saUiJT8QuplVWoXe8WIE/tbcd+xW6L07EjhQsLqMLRtW6EpyqvPYXLMvZo0j4/pTFJFnP6roioyNEk9EWF4mZbcCr/yecNoHHl29FaIXcC9tRUKxq+pwF5Vg+1j8z5POu3lkf3I+ASRKrTOzjrTSuACKT1ZaFrP7Q5WBPJ/aVqDyWUl2aIbgU+I5spINuStRTNqGqjb9ADFCzae+vH2ceqlZrmGV18d6nexhNdMlued7uOm9HvkoPZ06tX8yywisHClkhTrvGrWECdh+lG4kOuO3qyl33TeAQSVfPG2
rLlLfgi868ngAPVZM6KsGxP2pizz7sR80e1rx+B5YIEAggd6OL9tsn78vwiUeN1MmJ3JGOxRZ4omoSqPnvQqFoc8PAD486C/GNvlstnZzj9V9SSmce+KMvRw0UvggN5LeL97zOp6IBKrERfGgDZ7uIk0yQgJDUcglEZXkB9sfXxe6qroU4sCaslJOStykzl3ELMEBNUEcsMyQEF6yqOUEQnJo/8uEdpf5jCmG34FctskRFvFqJLgMTnvZTJg15kkxu1aa5evvYnS0FKTjkthvGGD1TSoXt+Nurb3auhRcJLhrCTvY0mUUpWmOi/guovm6bl5c8m/DtRKMQt0UpB1jB5MfksTkYSNgYsN54hCMlP8iguUUrzhM8icASyfubFskb63fqRaXtKIsDopUO4+1NI6mRKNnVrlhTCPE62VanLcObaQCtIXoGcBu/CFDH4A+ahJwfgA4WdeIYc69Yze3mHREung0d6rPp5+40HmGcknF8SsNZ2jz642yldZsI9CDmhYKrFmEGSHQZfddT/dJi77F+iNkf6jJJaIte066Fg8DOOLttP7H97HNz186TYvfU4GHmCZPqR5oUuV4d8ZxIr3RsXRkF+f4KTxHiqhsbzVSUiM5Gus55CmCjoOMcD9yEUTU6MrRwfAMTWJslTHjC5EiSoiHayVpk4hRXe7M2mlQy+XCjDM85RA4joJnQ/ksYx8syxCzK0X4ReM1R+9YGq6CoanMrfcN24jReAHi5blU/a9oBlGfpL3MH8uzqLEfTQfZg5zk1RoIXXtD3r9gAubnVPI4NAlqTiJDmDqUBK2u1gD5uTf0nbjHT3GU85FKx8o5pmdTsnysC5jFhZfcgrK8VvXtsk7cdxsaSrBGoqeh+SBXT9Z3fYEpOPM/5fkAyf+KghN0Ko3wD7pzalSF5IhsRFmBRhB50ukSwIXaIKRYAbzTWJuVXYUnjoRARfY7jHmqVya5v0MAZBKC1lT0cMzsqNxHig+x+We7rCvqsplQfg+s9aqLQeKPpkUMUV31hFKGvOjIHl0IUyLW17MtTDz8uYyVGxAYCn7TDX68AIUMrI3k8vdv15jwrsdI0BihbD/3I/JuOvfoEpptoficFlq3mAwr2M9QJidvtV9/vvzDG1ZJSgLKpGYipQ/SwYkEpDInvkk4SGXEYmkIPC2U6OTD2h8BI0Q94DbpZ7jI/SCxGUPvwnCL50GvNXaGD5puXq3iMJLPaQ2MQ5mFOPCT4dCMrElFXpCF8ohnmrZe7uEk53aHB8Fno8kIlFRBrKT0/WSadZcFidEcN9LJtL4wZj9sKgcd7vUlS1OJIl5J32dM6gHtc2M+u2E9M2eXT6kaeEirxbDCMt+IAnT2qVwB98W2jfz98CIquND1DJgnWV+mWwLXGkSycyERXBtoubwAbLb8PvQW/dqsZ6M4dbUeRlX/0A1tefoLVtiiFtqIDwDaoQcNLYnhcO9AbBRvIoX7V28PzU+Mmhd4CgjvVb19/ZrwceErjDw6VuF5V/ldAYH4bRV/DAN5R9Z3L+g0RRHTj1duNMy3cYSAZyvHmpwwgVWHgj//LezlvwQEgyLYKtCEsHOrJ6HqG3PI2KzVmrbqI8zz3JLRoJ7QS1v7oxFgvDrQ/AlsnrmfbYpnl4xqWzAp61hk5c74KFbj0QmXnemfTuHauHR1DHycZnAJi4gUR+WtxMnSqN8LndOtXJZo3WmoAXM0+Aw1dS4kJnZDQnvhumU25sMsRiuANE9WBM4S02wz2qEWO0DICYUxDV+omSnqXdqieSq1PMTeuZjyGvf/kf+RiqKn+ujURdHBeXSfgcBWvzyW9kvhJf8INwg8ZHML0iN1lrq5OGqXQ2wy+KxTEFrGEQoWnOvYMG++jQ9AFDRpU01OtMMGNHDLg2FRZocti1/+KL8P+Nx9QM4svQVK2pZ4oCr5ygigDxf83VzdNgCidiCOcEUKuGfgZtDznLl1a3w1blrS5NCw3A9oy9Gl0A0vcMAZCs3gJHF
gCamyNbpZc+i49kaodanCjkRv6+LAKBHKlErTOkzQAT4OPA8BTyreVqD/FSUENaTQQlCEJeU+Fve/K5d38Bqr6YTJC4g8KCQOs6vOOL4+iNWWx53j74iCqfimldj4IgdLLaCF6M8CTrnuYONPgBamGAksbQ0smvP0WZkovf5/S8IVDzTC70xZY7tMIjB4TxAWdEBxaPXsAdMXEEpFxLVM7WmU3CmyoulVN+/gk++x8QTw91HMNzJgj5vR2y+sKbORz6t+iNka4GTk38S8jScxJ9QAv7LJNUH2xKJ1PwobTPhRrxUuwAVeYlorqFQUH098NUL02/MmxwFCaAhIpFX+4GMGK6mJdVr9Im+yh+6VmrJYyLyoAURgLbf0efZYU6yZxp3jtxfKcMMLA9iFPX+Pqp1pUMC8FP4iEKaOoZ59ovR+g1/l+WlYrVK18q28WzEfj/kXvDmtKSWjd6WF2F6ouYVy72+37gddn0wf5PSDWTIKWNTuj5W9hgindZLYCBAH31NVdGzXsFE46jmt0D0vnKqALZj01P2yzL5jCJjQp/uV/oCxtPuc69Tii5P0N73+mCgXuYeDJ0ODCQtmBje/eX4GmQ02l8I27kIpRF9HBAfZYNwyRQmdOK4QsyY4L8fEHMQkVRo/5DXogLFNA1/eqCMl3VU0KAUMKqmjBgeGMwTH0cv386hRMKyRhGusG7PDLhgCF1sKz8insfC+rowHjHh88gj4FFbLoSynslX2DCSWH5Q5z0gtrm97a4gMInwbSJAe34Z3PrPkRh5VM5hk/kuhBt9eE+WsWtZ44sjzsnKQX2HL6kBNoP3qGtX4IvNbzVSWffOCs36wiLJBn59wbI/g7quzE5591L/u3PUrsNTK7XLwqPqicyfV/BZ8Ru9b3TEd7dKa+sPPpD60MEZs5k8X7E7ICfBvgTFxN4WFxnihDCMhz1QLsVh8YYrwZBFZ1mSaBfpCLAJnOXsWqtrb6HRiUzaAOevcuN2uzpD4AEfgsuanfz4zCmcPeSGMZ4yjYMuS1jHydr/bi1TXopNRz8+9/F+7Hl3pU0R3qN0dPBCR3wp0HLs4knhZ0giERDcXldO/e0Qxt/mH7wZkw4aCWEf/EFjEo8fhQz4RX+7FKIxnNQxF78kTesx8qiQMDHpnJ1F4RKGfPQF8bev8eFkQt3TjyhlJm/InyMSoK9EGNpd7zzOfnKZZgijOCaXwODSMrxU/NJu8Z0m/YmNwUndpMw7QHTtAH6S12aFwAJFRrsC5HR1eXQdngJsPcCSIYVfOOQXSRTKtjrJrgcMIhHpQ8i1dIslb0VPIZhtnDVtfJTbmxsArsAjCKc74lK90KkHyG18JdYvZy3IhiwoJxsP8kEnSLAZFq9jTUniXNmjqh4AH88hrxtp8hSNTOORMCsa/6fD+VRJ4EtvS2hSPLzE+/A1ICBF+9sUIhOhKzBW/0A8yXXnU5nFvTCdpNygcWzL08kEosDF0tEiVnQTG0njpQYkYmGc+nkEjzJO8e/PksBSHPH5xXIvmrtp1ysMMQkQXyzDZcBHsOI1UX8bat2N3igC5COp26WkBxe6k2ZvqjgBinM+5q0dP7r40L7GsAX590e+dF7Y4XBpOiWybehkGbsbZBNJaUobLW/Ga19EJpMMBbT9yty6K09HMjpgYEbnxRod6PAg+wriZtXO4RpGXkfjVjB15tFxoPLA7EcMCy536csWyN/6YCAZkglRgIeNb0mL/OkeiUQs1TY8W+dtvl+WF8FqF41w2CH+Jcv48l03YyJZIgJJuvwh8OujsBOe+VqBryoIUYYA8VloTJeKlY3Gqop79T/plruGVzUx+Zl7kyt9UNZGUE9N0F6zAMACJoZGPY8jBNSgHJ1BxJxUT7a8Az2mvox9QJaEa4cWaWxy53y4TKilTMtrNVEzcV/9m3LRHxvClqr3l+RClIM22O/7Lu7KaGaVfIZSOeUG8JKORueuwszOXnqyPHX+4CIGtHvhIQFRq4AB9aBB
1ZwPeuHEurxoD3P9c7KI/vcpgNBeKx2KN/IkZur3NY2g8Yugci9rLh9Hqxj+l2XNpwGKLOKVKAAOnEFjsGC5WJrUc/332S8eyMVwyfHG4pGKsY7OMYUrst48kTZLazGnsNEWSW6Uw52e4GtiDM7593NWf4OlL/ea5kjXpY7LFq/MoLIMCfwx+ee1Fh9I/vg0Kk2v4sc54CF0IWoRmPItTalatkPOr1WncbGjPDFO7g1wIGddpu0cKTKEkJvQZAG40vcAsKl1oW+QbEGM8SPIFKIyRdjWk33pXYkgG1PoCdTIkZ2faTcZv5Y9ZWYJj5f/dSjqwRVmd4l/m1GS1hp9JadUm1ZZBa0UxWVkIFS+4gKSoF4WTsKI4Y3IP8KFHaf57QOVloXvSJtjXPYB8xEZDYQDZ3iowroOqMAgN4Mi62ooEW6FBDaI2esdFS9WAZ/NBXzKzvqf/Ou5jyLq+GJGbxuEtok+4z4uRhr6++WV47PlcV3Y2PzXRGd0ENgdKgvF7pHWyEUI2uWP/tNm9DSN6ujj9YPInLN+3AnnCiJcO1iPUCU/v99E4w8G93AokAxp4IAmtGTsemG7NIXi/QHpTCQGvjvh5N/9o8tOsFUVG2qJcEjLMAALsjBur13LiOU9Ecloy67k7Ry9zs5eAVpQLV6m3E6tsf93UiNv9oKbJFKPMMVHsdWvkEcx2HHOr75iAQeqKJBusz3ovEdvxNV0LRrXZoWLjw8xB/4W+gSJkmKeEWzsUVOVGv5T/bnr2hPP4EnAVYq+g+YiST2guiLAgsGMkpjF8Vq6DcxFKUe2nsy7IVpy1x9wVTjMEo9wUeypSfrDL88lWTfeX5zIjQghaGqFav+jHc7risWMVb2YAnuYMTGOa7Fr1WZW4SD9tzLXrOfbmOrGdKqYChOS3fun6hW/czR3lZlS0DzIYbIsJqFQECJNcvCDxKnK0AIoyqgQw8YH+JDsdyuBkuYiNYPZrvUv3vl5rUvuX2JXW6nuq2rAYTtFmCGslzYRpf/Iiu1nDS/SqI9zCq/xgaEBtNwU+bJf6fO2Wi8iYaIz88volMhrMIy7tGyRMLnSKM05wDOTLUTOw9G3IKI7qx5YD991GXaoKmtHg4r+R/PUihkMPt5UsVOT7wWzRDmfy+qe+r7bD76HaeaeGL0hJ1Dkl1MeQbBJ+tBQHiGjm+42OUaVR8XaUKNmo9sm91UVDZDswgDY3hVv+KTYNHP72wInTakJTgqWQ3aPITGIOUWIZlVXW7nzQZSzX/yx9vLGpwZAD/PoQI1jYhHbGbr7yc2+9ru0eQyWz8SA9UNOjQXFcjDeQcWAsOxAaVZ+Lh8CwCnPR6TKuLKU4fnF2hwm3taa3Q+teYsJR+GumWG7kKS9kTit7gr38BwmlwNUDpxLBZ/1fmCLIWm6J9T1w7bZXmFDLX2Gti/UytTe2l1Hv1z6n5AcsTLroGmZ1VdaMc6WOmjuK8LkgIQXnLLP5PNOJEVNsqdIeo2B/c8WiNey7d9AyHojmAIKsLCohy3ZfcyiqWsgJqDrY/vgExL8cOKI4nnnWFjUSXGQo6shukLxAznBV1fnHrnk0K0l1VADHYCHmif9CK93UYjHSTC+RsnMuj0krGONBXlOpFKB3EUH1kUJuD2u//CPQPd2FTKtzg8dmOBkAsxbbEh7JXxldCZtRJ+w7HXZeOfMqPOUY6RNlJr/7wv0doACYNrSz9fU9UkiqfJXS8ttxdAHCFHKnSzfU5CxajZkTdY0K2fpohRjsTo3ZdRMadQmkWPl9W6BDinS0KCDoEev6G61b2SnfSXf0NE+SW6wE7rHthYF/RDWbkvesW7wdHki0K6NWdin+vNV0f00bDDXVqbKZ7g+/LFNvQbuzw/Mr3316IGCB0xkwSz5ONd2CM8rsZw2xUS9RwdPQjWEkg6quLjRgEEczU0wFLuswpsQjKyfd4g2Vz3WddBoFlVOC4qWwBfmY9vceJPu5HnoXfABrlA4jXwNBbVNZDZs
Sx+inxg1vz374g49STXtyCp2e34UISGVXKeHLu/N9ekVTKWrDJq6i87L7BxGJ97PA05a8KlPXskGXVnU0FPUAcbUyG32wWIwV7yLfBEYLzsOLGhuuW6ezfU+lpAOr85fXD2UpIhQUtmMgYXayJNxKyR3D2MjEBq2kNAONGkLXgmXO8TyoxSOj0cM1chb5QtCjPIYebyZdEpsBcSvA/txfuxVYbcFIZJa88YHeAmEducflI0La32+i0nJxg3O0bxSqZGXaeovdHesEr11fdXAfwnhX86LUSX7yt6PXocJgARgLVp70q9nRnHFrRjch04uHOqR6Y2OuM/oSGJQ3TPBtoLiVrS5ERoY54Gd4ad4EHNssI7WOHvLBHbhnnzQQAAJJRAInd3/imPjOS9xGP520LQJ+5VXIUIOw+vtI8VzeGF9ljORQhzkceyIfa/GMIDYmFocdl2Zfgy/x00lYtEl290k18KacNcTPbZlbl2AzHLRNRKlG2diiDFW7gH6OLzpGojyjeXnLe5Is06tATZk92UxUjiakGIki7vzJzHGBld7zfPA8DWfrkWQ5kZh+4L51LMqITIgA057lwzqGkrML5dkliHZUO/lb2+jtFfyjVSF44kw3EzkDADrSSfQvKtE8ehzfFZ89n5eaF/HgXMxVJKCdPbMIEvAx7wJkUG3iz/zGreEBkuCaF2uOzWsqRfTGdw9Bo1af2UMuJuh6ewOrJjxeRQP0DndUebkFoMU1ffxtTtVbzLUoZEWivQIA65GvuEmICvgn9lJHaK6BkkE8qo7wm+u8DBKuZYoFP/vQVrb6qcBXL4A3OZ+3YlO1X944p7HCfxqcmOLFkFbELqkgc281YVlgNVORbycEdWJDS6ZBtSjtJkrnDfxbgpG/+Bu9jEyGrqAILpOWN1UTKZXn/lW6yP6EIT9tMShsNhaVGFKtIzE8UgSjrwzXHGQxlFsXw8FNGDdkm4mFO1QylDU3qyUp19SEWATfxSqCnhLl6tZJCiWTf8fgVYrGsaa1v/JEkoYIDpcWmv3+2YOv9+sioGTUfPqUZB3RQbPW/NymeGOVgup9hXAKj1b6qdbKoquUpetrAY5MVWmRgqk2SQw6KjvAbYR8LEe8sLRVgAtSzxv+DE3zhO4Jg5GaB3XBrBO03iH8FkGXe97heXFzprllYu6yZkVYP5B37q9MCUAbTGtxKJuLi01qcJZf37kZ6SauEaBRxcmUDU+RVhR6NlYTgd8obSNoNDfOa6IANmV/HJ9f6wjjqJ0PrkEtvWuSZc/G8yWmw/lR1pb4Sa25otHv8CT89oZICMDOx+DPQAav85RVZW4zmlA/hYyriFdxFmjHV33sTnVUXBjdt7yV3PA+mVvwClv4GRw/vZnRLCxdgP46yK38YDjujRjnf8RUdXiWm/gR7bFb7b6/JzV6UznnHPL397T8Y6PWkbU7RS7HAXQCvwBRu9muVEXdPZs2SvCCizQrYfT40jXKhX7wOWR01AxTs83IJDsPGAti7x+AUnXRUd2j+ZsREvYsJPEWp37gw6D3iBt7FC80H7zcGSt/DQAyCAnYEuxPitCFJpteZmzFVz82GCVBu9H7DUoqIiPt3cfnL5zHEVC3yRnwJ9NhjaDHvLeUIQPDMe82xUbAJeHTBIx7gKGgiXbR6HM5HLKG+zzqPeQJDo34yXykYNFVPqOz6yUgZZTX/aAq3QjmQDFsI4pAYsFD3obPwUEBm0anD1n4d27ovDzLOviuc4DrOmrP253ADoZCqcL4O0xkWdg2C/A1bsOgQ/zVQNX/1Yrlpavid/d1uGmly5DHuLFkIKYhbvRwOjtssGnGJ6X0+zMhf8C9Wb10suNjKZnPM47KA5gZzi8wDdoVADSD5j51bqpXaDE0LFuuJgVjDCzw7S4QTl+EElVs8wzzMz/KVXKBajWFzpj6ANlBExrzLuuPIc/s5QjwT72w3mXT7MkwxerWhoMdvndXU59bAt2J1rBlgXQBv4k0iZkfKyK
2re+b04gigAE0Ckjab68th8G2ZCGz8NSdhfdrxbDwCVa6T5ficx34B300JxHW1d83gdChIPM9+lIAFcg7mq1YVizDJkIadGPT0q0d3+DPFN1PaHVtSACkaC0TQkFQRNjedaQaXkFY12OoFpwvVqdfiRq0b9CWuZJ/7tFLGdR7xtkgM5rogg3DiBMoEtTAC+OaA0cHia93TolMUhoy7jVQDcB9h8WsnSLBXcKZkIoSH9EGBZHrbuqGqJpWmYsRiL8jJT3BdxIVgF+8GB1d39H1F4x4X1aLpfhS7qOJ0PqsfKyjPzpmenau0jr6smffK36V8waqMBq9TvXgHTy/hjCpyiODMPLCZM7Ussjfri6PFphJv5+cJFkXyYJUCfKAANq7+dET841xkn2sPorVKk8ECIa195RKa3kk/ywqnqL+fF/trKqkp3V/wVs/yhdX4PbRr1Ynwj43vr3oZQZakXzgXvurNgIWYB0TIWDIaGW31aNUqNcC8tPLS8MIj587DImXDGriQtfa1JDMQB4bpDyu9RMh6olSqthxhvjkVLCDFfFdbkSy1W/6ynImFtYOyert3zzgIx8LvNX4VvaZhwUqWHFAKmrF7DlWwI5hpllYwa4xIbp3piDiHDvpaLGZ14oNr0TbxpJPZ6F8Pcf5YiMI5KImVX5JdTqYLgiMQjdpgnN31PPfu4kcJtnpocawocaPd9g7et4wZYtwP4x9NwPgXNqRT+b8yrI08zTmJnRdlVq4KJ1Fm0lZ0t3k1wltWpzdMUoYy3m4e3q4VSdCBgJvqF00Ce/XaR2Qg+apb2WtgWQAS2SSplse0sDWnzpsyLaB3H4esfXSVkAgC0S4iEtucXv0gFC2s2zf0SUQbCyGb6rkqh1z7fK3jdOc3kjjZg2IonBUsoj/OXVASnFA8qTRnF/f68Ej4TeGEtDpedkHWUO/xIH8fN2qXnjt1zviiMB8yGsOmzDWWU0kDc9btvQK50iDAR7EOfGa9gDC4K0SPczvj34mOdg5/Zktb+Atdheo/MtbHIClgmX4ZIdVOQHcd1hgFWOf7UpvcQU6Y1uUDCikhklTkXIoGS5u2AvNW8d+fJXuEEfpdim4WcPcgYaxBneHawOTQjAM0aeKSWI3HHM9nbD+I99mqdms/6ptoepxhkW0s5PVlk+8S3sgnTLxcdGMfHORNL9CpCOYzAjuLxEqnYKtABoa+/TH+qNl3VNpgFFu7TczWyAoKKMIJiUJX9/kIUyt+GW2Xnsy/H0QNi6EqFv0mVMJkPunqF3ZpIoXqP9r/Ph/zJ2wUO/QW5CGBQuNhGqkuba+ZBJsW49V/H6+UMKwfB1IB+nsAvjXnPnNAGIWtLQ4x/RcRchyp7m5mt7Gzxehh+06WBMoVDYPDCusBVKP31bGfFqEBs+juLr+frbYbPt6OY/ER4ogrlzCJMTcQPo/nZqtjzyzp8Zm2/TeHqQ9/b3fI2hRlvkdLZ54rShCWNBfWFj8FTBNAe5PSq10QOOg2FUgJ/kOoJDbF5SX5GLzWkBuz/tgP+6H+7uSXG0cotkwP8M4VzZoJHriCugCHGwjZ6aol6nP7ZIYU0HCdbe7/GyTxgU/EGdlDn5oorKzFRtAlpKoA6XQwtwg/sBiVdFHojVORJjg+s4h8GLgn2FFP/CNaH/BdiF0uREtzL3MKoq6+Au0H6tRM3/l6t5dR1ClNtbD+oaIseMxuum2qX3MwZ3oFI8x1V8LviphUYQ2rKzU08IoVgmEXE0rKdsJKjEOcv+3qfkeDSxOaW1oPsLH8uuSiFVwIgu93gLWsOn8rYWpRZ5ML1I5AoaJVwXo7LfkL5jcu9zsMo2++or8ufb/7A3S9hWPkjxtcZuIJLVADvLAt8EkXDbaG9HuEiIdaWo0z/oWfDT1NerxRHejulpDzS5XzIhgWH+Bm5G4YYTPf976n5IMI4+Hl+uzMuRet6mPj2P/XEaLFjWAh8SV8GWvWkyorLL+3YcEhZ68W+tjllfBo+/YB8p
3k0uxQeRVyGBe+PAfvmeElrMn3wEHA6Zds5gZ5o+SJKlUdIrHvYU14x6HVvS4Ej3O+Llo6DgagMDyJy0Bd7DWm4DfsgKuUe5aH76/5sjJW+EEN5vBVuQmK13VXVQ4KCqZVsJBUzHDpC140Vi0qOjyv2lXuHaE5joqnQ3XeEQX4SV0B28pI5PWXUxKWmBYZrX3221PCTbBquCSA4O6u0dgke+PamB7kl5U2GnB0VW9RDUwmHAjZ4WQa+Yoj90gRHEi/T6WdAJvOHzZPOvn0FF1BXQfm7v1X3Cm2pJuU1PU/nx0YtQFooLLidrXKsBgVnzQOy4tPrIEjBS5bhNd7m7c5/yikv2sv98lVcGoXHwgLdU3Pi4OAZEkUnxTBQ8wV8oZrY/yChI8GGQkbqPpStrse7a+jct+liVOYNbXJA1EiYA7g/soAtlFx/m86XlkI3srEZ2scL87X61/Y1hww/8iS2bjK9LbPUUBV0GA/hRWp3V+6tsstt3Xu2Gp1ztcHbmVX5Y30PTT0s50sIlCESp88R9MNFTqJhgTIGofMWtNXKrKr9T9QfGyqYMV9K74Ow6y1yIcHVcl4ES3w/1r0wthqpMnhPrpnLuThSX+qOvrI4oIl3TjZoWy+tzpWaq0ArO30i93DJL2kjnr0NUpAdqHKlTSMVd64sCZLJSV8AvQvgawqAf6mw09T06cqBzEZX86+H1bZYr513dkLnkcnB4KAE5KavdRqcxcEgcNY6WGDDcXK9ISJWrSsmK7kvqsBRh62F1cra6z7/1pslmqxG5WLo3a9KzoFrAAg1rh56UG4Ag93fuF/rKZHpZ03JNcB3cEXmgMLtwNMbE7h9YZY0T8bITCH+NoDMR3gafFitO5FWNZBin5FR+5kLc3jSYQNWEx3FoU7OjR23jg3lv72WOsDTNzb7ObEy5x9PHbKboNf5WSmxAGuGzYh9/iJjHETX5GGWSA2B/zSyYgvacWYs02M8yMukvIFUv5HwkSm49H24T42MG1a9efZFVjcCYoGpvz38EphybqMjXNy9n421rbdJon+38eOp+p8sML0BqobejnVzdUhp+KlYbkW4RYSbor/lqboSJnlnt6cIeUcvlTxqzcp3Ey2rUgxX1+oXkF66tZL/2v1HsWmyzf4WVwQ+UHuQOTMFfjUldmICvzPrHCbQGuPkRT9eEvc3Adz91UjH+mRq8ieXNBrQkubGucRBILMuLlLfS9Q/TK3FOQD76h//OH6UvXxBFlaRVhsM9HhzooM2aJuhKGFSX5V5DYRkmxzd4HC6nCUW//dR1EkS2wrpxWmqFuIsOyqGSmF4mwTEiH/86IQfWfotDwcF2IS5xYazjgtnA8P1nuvh89L39dF+P33Rg2XsX4cwA8Fq4qvbzrG7l5nB1p2/c7wuUwWKHnO1H8BNqc+nNwxwYr0snQ+kxsAEM0wlP9GS6hHJQwNJ28ku6ihiCGEPG8ew2oXci9DJ13PsbVIQR6G7VxT7BGRGrADlKwjszpd/CNYRAZQD/S0mXGAfdF7iV6IVMsM0EOqcOB3AHv5YXMHYee4rlGPnK5TdOMaJtyuhTLFUGmty3tYJwUqcSQRR7toIw8O353jEQ+FzlbNlgQrD24fOsJK2f8lrNZlF3k16GcM6oAfi/HAVX0gRz6Yfab45woYU772+UEwICBZDj7rKt72bDarTiE2m+wodnTjyrPM83Odd+u/C97kpegvrO54YSgIytU4FExAdnah4HGlTXPHF0PNL377ZKNbttM6SJn8bytuBq4i2p2b/2NZsmCElMoGfUBNJ4P9GgDjoDqAuw1Y9xUdhtJobGpSjz3BHbyH+ndQt+liZij+BzX1U48MQZwWWaclIW5oTKVO7TcY+epyITLRA22IEiOTTU0O6uQR9tXOPgkqs/k/iQwvGERjWCkIYTt7p+y4h5rgZAHbBpw7MV4nsHAadHyloVoShpbcQgoXjULKDbHblsj8GuEe+GsWfbpuHV//me6AXii+Pi
g0YoYXDFDzDje6iMembTC+CfziY5TCCNUYBa/fXcSChscFs9j/z43L4PF6u/sbyrbgEp8FqxQiUIU5GflWiPHKDf6wSuX6TLzxxfnq7ImsWwQTITejyBkWHfuO9mzs/RMJM3BKnB/mv6PzZIpwcAxURI9Z2T7BCM+4DmaaWo8gk98XER8anXPo/NCeoipcS0YEV2fOMainSVoxbMwpyxupYHXgEevcciLCz5uEBiHMwh0SfygJgLduhsSUBkYzzm/NEZTWlIgSwR3yziQQI+IvONkv31vv2DZW3lmWzKm4XiMN1G+Nf5k7JA7On+i94wmxAQ45S7MUps2GCRcsKqM+C8NdPBjoUNduqQjC1OT5Er0Eb4pcv2c/uxmGJulLqSId9OOVIdcZXsvVEMxRyWCevZozohJCGI7ZFaQ9WR7m+CQC1ElA+xjZv+mqYHkK+zgwwPEyufrp+9Qwoor13AYjUcB7VCqwD4SWwkdqZrMWBj0Vu6dyf+/mlehNcgu0EaXzy0TtJgSdDGmTAzWhB9i2ppdkAHVfBdKZTmEomI3jYGwPAcEORBkTtgxu8UFppJ5v5g5CVFeSj878FE8f/LCkEv9PrZ0Hv2HKNI9LhfHbwXsVLnelzLbbl+D+G593Zf0q3KBr90B1XH0Kn5zgmx12ow/sc1Ha/xgu0Zs4SFg07rkPVRV0lpxKWD0zn+hYWHS4aZ3QoP/wF1QUisMjjQcL+NorNy44rb+0qAXgMe9agpMFUZRc+5dEwk9K7wq9NHUH74uhrQHgpRvmdbMQ/bmfdMAQ8bFjb0G/EoQi0rQOV0AMd4UF9U9SXMyCJBBlBCXpDHJgqtD1irJk20F9abMvM0+rGz+StcxbWRX+p+SJfpgGxoDP360R1gZiztT7q9TNBDgujz/LLyXHfumUPmKtdQxxzhZtmLOuAqrWHKCJYcwYu772MNdfhbqxwYfJ3LkPZQpT4OxR9tSf47lxrkohGBiMh4LxTW8jCIxG+5NtqqoTLMWBlAQ9qg1Oh6OJnWIxK7OTJ6EUKeMK0OLLF4FhT2mmC2WgeXE6AFrsetzgdp0E7KjnKn7q0vbp4Lyk1itJBn26/k1BxKVeZRV/J/vLr8YMJKF2J7vV48ndJ6Qlfr4TeH/Z9dGP4STuZlDP1os5U6wo7iSo0YDwZssPI/jUVr2uWhybLF/xMQOIZCYzrqUFont0Fv/6ujBBA2nmco8oXgcO5YQUAB1a/7eLXGgrCXDVnlH0VZsCy246oDqN/wwQuQmO3AYiPt+X24yUuFYIwvJLa8h0HWIjvvBKUEw2uV/xU3PzidKTgNKUIkETS+UW8Srn2GwiR155+aXuEzyH4tN1CYYJsaJsS5+EScv+yU5+ghCkS4biao3GSHSObGEVjpWHo1jS3Ulz0GZQ8qW8aqjZ1dm2nzyzxlJsn+YEmgkfhP7tz1H2PjB9JV/TeeoQTErB4acwUoPjpbl7p3cNWSmmcvsYEy4aneBf+nXxXjXGl0jV3GK0L4vPsZUHe7gAQt0KCoxU3KfFzGgAb5UQGU8aZvjJZ54NOHQVRPq/4NjlIuIs/TM8M8phKKve6z8XrDmzhOmtXTteww8liIsQQWtsWRwhQjYfljxa92sY9z2Kmn0XIACzyXdGI35t++cLdtBR2tv/EU4L/FmUJJYQX+NAeTt4i+FHmBdxe48tCU6CVGAucv/Cs3l3DXM2FUMNV6E3+sFIAHwQAxS1vVkHCusLQ2xKVDjmyt7QdBV9NJ6rKBLfN7/ABgH9j7py6Z4V1lp28jThWZ1evtjDgDyuVqRg9Itrnxfafb7vcP8GEEr0TYE7jmoWc1BpsAC2G20Hik8L4iaAhB8+Zs4USVPBVFLVkIVDg7wK201oP8FsWb+Rct0amh/8o2Tdk+85/jcSkOspaktfgWyII/XXPLnDmNfxQL2DKM3buNz0cY1vmk4YgrDaeJL7U4tbnHn+smMZtNGfCpA5zY4XWhMbJAyXbiNzCQByl4wFyP
+z3nPNaCD7peAx4zNWM3RS4s3ub9sYFDRr2WK4FjA/BD1i826lKMIxIFV/1wnBKKsnZun7ukvmjDqGN6p9O1C5c9OIoS0CdZgERWrZUnAxbfOnDIudrx3pz7aJ4994hiss14hTFf/2WZF7Li5crNApl+8dUTYPk48anQ33hVz11rhqtcTyVKUBP+tdmnOAGAG3gJOZgZbxG8NKYP4qiDNH4UOuKVgLNMpVWHbmyEBQU5w1fOctv1T0LO3hHiqGRwn/WOgL7eB+mAnwsR5BHdDXdJ7jc2wMOY1v5HM9JRDWqeIIl2bKjrvRX6aKO6AhdoZkcp1R7bG6/YCb2ipc4SjN54p9Cl4G1VvLmR1M0u82rdWdoIzHxu2Iu6ssOheerRDuJ+lOLK3Bg20pu/AmiMtdKyvnUisVecYat9w4F3j+yCwXs/HvPysz2roTU2D6u6++Lm3ACax55P3VRPDhL4lZV4ihmV4CR1uXM0R/YnDrZ1QqDRmfUtoStJQ6KSxzl34Z3ezn5gdw37Mf1C9nRmUT4OKg56ERIL7YGmhXZ9mCWyxHQAwIDoBMplwOmu7CljOPtZc1Q26SHfuvwwNTBuSh/YtZV+kAfHUgsY/D8U25HrqliTODrGH/s8LjagktiKjLvHeipvJZtbx8NTERjPd2EwkDrIUCt/YUV2ZPhQkKZj17rcHwg/lS0I29J2dRsVcay2/e+cdFaAapl5LU89Aa9Rz+OooSyLXTE+mdrQaYqFAk4AqMydGh2poKNadJ15qddQO3tl2yXJyoGAbX5ZGYP3WzRQIZ7PHNwlkCm1zLtpRr2/N1KtStTT3b5Pq5AlPZjAYK0n+oWyIvfZC4QvgtRjN/xVwIuP3SC5qRn/sZVBHOdsKvt9f1uLmdtUxFJ9PQQI6xykZ2WwhSLQGibXXNm5+nYUUWGFVljzogtoOS2UZ1xJ6raiZFVkKyG1uyMZvyqJY2ANvDaHAgB+bxk+lQzsnrkq4OTfoYNIw9JuEfU4c8FBrxliG01cY4mCGEOmjeDiYxg6eWOgOaYhVJNS19vsgUAXADcW2Gsaf2ZgfC1w4B/IfZt4ev28AaxwhajhlQ/JroHMMMcRJgoxddL3/SyyWoNowUfpF/zoqFh2czDCdOYWXgK1JGnRB+ulk9TeCg5WYfCPBz1osSA/A7Kyzp8dARgecQcqAfBFRQeeKwc5hcM9E6LazArLmPwKE3JOIq0nqitX04r1kxMTsV+BuYtXcw3vsXsAqdy9/kYWYMbJbvxkomUc6+k13ATkwoS/j2+9fXsMmCSxFh9I/9nCZzNy9Mla8BxJPtN4/MbKHFc7ErpY8Psn/5fferViToA7XD6t7MDcdTfFOPpckaVAiBdoloM7oPjxhBqLx2kxRCRhT1fv3mMxxDdOYakds97zD6SWS3I5id4yFgquaUxZAIMaDqr68YCwdfgztkhC6hAAEf85OsYXSrhQGwxQhLGNL4LQvpP5rFWhvwk4L4oy0aeaLPhPIIDt9XfqWVB+3PVGV3exyPf/eIN7K9kd8NLtcRa+uqOF3GJQOf49LlTZCuqDqiHPYWwcg+WR+C8QcMy6YBGZ76Z5OEASULzLqlYXiXi6S5lZ+Lx2/TQEs39xQf9dqa3N0TFUTdy5/UHspgTGxTw5jNTCwWXbtoqnrN1DStGzzsHlXFdvU+tCgyoBjLMFlGZzYDgZXDNVeJw+zpGXQ4gOPrc9LkzT6p4z/nb6CZhJNS1htec9eSjuL/ujbzBYI2N/IE3lEwcIqm2dKxuyOvpb0b+dALdyxl+lySAi/0fFl+NFqRoeAkD0bOKPm066eXPJcJxELhygWpVIQJKAcQCkzPwmX8PLJjkBzQBS/Z+UrY+8+AW25Y1e/2HtobU+5WU+AZQtjMmgpVdis83HDdMBYZ71KEgjtTmEjQqEkAuPmYvqLDd4F4iyiKwbonUFYkztAR5iYG4Qbxm3RN6E6pyiijw9BIfOAKGr6oq9+x5cXUkVcJc+B4A8
R1FRzE5xdMoHYUbRX4nRiWP3rlD03ZSNCG30iJ7X4ZaBiMhC7yw5dn0m42OHBjzb+/Wq6NxiLzoeUNwl150lGGKY0nuvtpgJeT6L741KtlsQUHWRtIBBoKVBsZmXdZnaZ96c9Nus00RSwQ6v3UPVotcmRn8GbZqskybPPI2WgJY7WhZBomnl2M5Ks6YITubiVQvDrFjIzhyWEYv9238rsgf0bYUutz4Z1oVgahtYI1KnV8kL0pmGh4Tn+TYSALA/qSGYI1fIO8CB4TjlvEREjow6jmmi7ThBhatvMB3ac9d5LWVQJdNEwR5Hx2ZH0VaOi/WMzFITjgGacUKSwmU1AwFdhZmvvqCiKqy4BBHVuvc1WVbo1Nfs0SBsRyqK2bDan1nPoG7g55S/YZTWeHE2y9ZP/X5WuyqtUPuuT3XxZlETQckHoq400Aca7NUn28ZKL2JyBrBtcY9dAXA3gpLcUgx/dlTr/HqSc4Dl3x3YnTnxQlPmMHazOQP/F5dXUy8FmAg4AIOrch5Q0Lrvah0N3llng6CF4H7r3YZR4yW7pkKf9Qiq5I52+VHzf+bN0EpvlsRyn8yAiTHvkJFkgZ0eCTYQll58mNDth8KaYeQ+Ve8P9BunVjKjWdRn9g0+Q2AF9cue9KmaSZ0CIJYWQuD+aa1irkC4dO35rjm49w9MuXfL/E1y12/1R6uUoDnN7WkZfFANfhd5EA+xgXNt3xsdhxv7I+HgXd33WKQx+CpJzAM7mbUKuI+z48Hg51GPQ6QD/eOC+J9g+SqUQKFgMK3oZ+yLQQ80BKdv3RakDvGsLRnlrBnidC7/ShUHoEAPmBiWn3o+sPvubJwQMDtgNeTajExMgGXd3Whh8G+5xgOCW5B/oBK9bPPoO2dYYFPAdgy/D28/QR22xmwQkMM4IzLbHVIp/p9aCL7OEDAa8uwC817jVHnjNqMgSD64F0t4gsCtFQNcro6Df8rvoFUx/9xcD6DVq4Cy0nig4FcYHGEInbvEfb9t6hwSUPh4KMgYvsSDX0ol39iQ1mHdMZ1bgEzMONCxYNyk6upgD731xgoBbKOuOuWM1VlCispSTiEcMwylbvMHuSQ7pS380Myxs9ec/Y+7WE6Y5WkZHDvT3wR09XfqOvWHWwdcxjHXK4nXowDwF4FsNgbdPkUE8QaQ8IGLgSjEjBucb30yYCa1kASwJG0DIal4HpDo89gT0h3QgO1BkDOylDEOYSWZwGTjjI1S49A+84rvOFNw3oYbF+FwI86NbAOVazw25PdEV5IPZtlPgbLmQC050FboELeD4DoVlG9GpBfJMUKDTDflT0xAxs8PGiKc92m21M2N1+yYLX3GlyLjdjO2npfisOxQXNmlmNWKeeul1i5c+tGZpzgm/bDpyx6nFy8/z6zny6dOZ3piUfxIVv1lfw7K8L7DFI/KA954TRWJJRoamE+igr29MFr8G2ftDi5kfI49uYKFEEisLDUnJGv70HvHYIW98cGZB3nS3Ao7UjJFTq+C4bgoqeptHcYxFjqS3b1HKqjwk6ufP0p0M6LyB0ZCcKyxgSoY42aTOAYku2VwFhR1DizW4bQRz51tyKFhW+u5qi34bWG30GWAs4ELXyHjwFC9qe78rBBHVAMcSmpSkCMxbBdr59hFVI7Aub+msrRLNhz3qkmXZrW+GF73uBuqkMAh74Zi+Qo7AbHYsiJMiiRomrIEEB3BDOYnNKvsFNG2Sga3E47hNhXeOybNMMAj8HSCed9YSr3/cKq6GE96VqrJSIjCKo8dFh/XouHN3klKPKALh+8D5yNDezjBYZpo4MwuqHWNBC6pnDJOLNDrZNiCUegsU9yUyspCGwH57vCRXSykoySS+XIhUBOI6gT+sVe6+wO3ghW6nx6PJ/bFLcVn+ENiK8m/9pLEVISjxB3E8uvPkTUNn2+XwdR54F0sATEKCXIUs2OxnSXgU4WrT7/DZaawyGIio6ihVQRsm2638I8Crar9r2DaukVqzzsk8Kjq
icOrpmz2Wr0jpV7yfyk5wj3mB+lpxxCBPvGtfAnyZpIF2lDfJx6JFevmxPJf4XbY26XUwJpXWzwOIA3cvYP9rbKQGK3p5oRieQxpoSJ7yvunTcg9aaIKu1ClyKtx0I1cqhvVaEaja06Yz8ZnOHEFGxuWW3BbBUK60eBFa/FzrL15suEQxrqubRM4QRwvwMZAaoW8ZV+Fs5pEogiGFJFkV2qjfmaQFvHV58c+M9KDdo2D/wmPo1fRYllLySq8MBDrh/84wmr/zDnCxG9cH0fJ3EuyhfqDZFSeCDELDjI5KwhYyYmFOCakaiDcOJJsEYu+NLs5iCYBmyL2H0XqnOTRVG4tPvPVQE+xUuCnD8RSjw24cybfONnzyyThjKX/kWhD4/sKlPQdnhmrh02o3ZYv2bwEP5X0LOkwWkPwoiigl6AP+GM74Sz0/nm7QMYw4F5a6YdvP1+qygu0hI4NjFSdEvYjbO/p6pflyIPXws1guZLmK75fTx6a8uZ5mjtBMIRWR9sAec6MCKSrdDWThq0OY5ig8AnRl0cqSelfTRsODc+cQ9+GmWWPRKj1MQ52gOf60YIi6mLKETHnz0hPcgqThX09Nx28T5/iIMlEOUlUCPS20FM3usOsye7uSx304WYguETyzYBTIMOlPkmzamH3ocTj80lMnUiwuQ7L3cTUP1tO1PUaJ5SjFZ5GXUg4YJrbNsRRnFRtdxtj4rh74asgFS2BBy422D/KRiTBKyXwxc7/Jg2RmIQ/JO++/wMaUO0kpGxWj+Rf5Mv85YTrlTccykmh98TENI33BTK8BLmYSy9xEwSXwQXWNPjPYF6PcUi9Cj1kQC3WPUb1dpO3iacbrHzFMyiZjHejcb6SQvx/nELJxpbCU33T0imEb/GwcJhanrtluxnfUPi+eKq4eVn97O5BTfI1x+FL3UIIA9jJUNc24VTpqLWeWkNSGB8/wBkxnnn/1DxLOxPpBX7ldrS5gPowJTfCzrjQdagF5pWcxLtqnZXIIUWwfvyn6+IzEC0rWfdGk7+LkO5DLi0DWu90ITRakWAmP8QLKkTb0VlWRGE+Mo7jaG2KqM7rD777IZqjPoRER1NhrJruOuff14cGOWilN+IBYdN4o1DLttRXelVVcwKEj2m+K9AfpjDtMMcrM9XRIAALKJG/Gth+URormGqCGo/Ud8xAC/B/67TWI3m8TKPGyZVWqFEtbWdi0HVNGb8io73onVuWkzZ5YRcAGDLYZmBw9zqJoC7k0ImZAQzo/J4AMgKXz7YuLY9DcIGcXwDntb38EJvq7u0RBUrqC7XrFqEDKOKDGtGAEjF+gRaNSNl2aHcX2Jj0+qxH4Wbf4LXlzlemMAEBnC3vya517NX/RoHZKuBgO688258ozYlBEeYdXJmV8PH4KVsngA9I11NS2lsr9z9q8Bu+d2PGkKMN4WV++CXhwesmSHgw/+eQENxStqViCWbjzzwxgZ64R+EiFa0aODZQzIrLkBhkdxz6a2RZmqdcGQSU7xVRTlYKFdXx1Aa0KhqOnte2EP9Qs4YPq/WvQ+N4qIhqjV290wN7LW3RhNQEBBujcFSBKiHymahqgwzN7dNAVnmnAASKwjp7ip7JZM73M7ApgdhYChKQPVR3aAnkZJhjWSS8Vi+KfeM9FgK+qZ/JeprmIROdPf4WEUgJ0sbKMJ6RgPiJyD6nSuYCtrwz16NPAbgCf51Uywr/vVggBm7E+6834EpNXWEhM+8/q+wknOWg2Ex1U7kTxoBdvkfBoNGErmFcH/mJI63+PZrDvzSIWSHS8VQd3j75LpWsqt++MrvUT4QcV8+BwbmZLZdv80towLQtBPmkG62r/c7im0uXIe0sQJt9cz2tEJv/wLAP3UBYLZudMfTBj/c7i8JwjqjpBv0EYjKyKAa9JNW4fjbwH8eA8Sx7f7dpkOijUj8i4x89OR2sHfcp8p2hC+yHd3PubXr8b3HcMGlQvZ7Jc887xzjv0RTt9U9ELjzodXXDbtEi
pUH3FodN7+wJwjlAHlR52C6dVpLKm4BRITTIDLld2XTNsDzKLryB7h4TFyZNsNAs0G9UMqeUi1GyrwKrEL6z7PImVobBuSKry+Xdmz+ekHUN4Jm2eotDAytVPOv3GG5CcW0mhCgc5KPfcyDZ+z+1G9pSkeo9k8J7o70j4lV2D1Cr0AvzBYM0BaSMNu5nKPvzajbnDkgWRtTP1VvDvCeze68YEd4u/fAr0Q82UTj+H18S0I73b5kV03wYVv6ZqBtoI9+0u5NvAg7dti2mEJPphKr9yoM1PG0s6/5ZyDRfuWFREXxXwlxom3QddVj44myXRnvDxGwnaD+lq8PH6TzBOKnS6HjGmtWBDhVHJYbRUhbZUY/idsZagYVqyzkAkO5mFkEBsaMjxWpWHnTz43v40OzkyBa55s6WudPZJGfEueQhEBMn9hYvtAWys8+LDYfyF0JIpB3TzKR9YCIvkE4vqevHXtgGmWt6Gp+WDIbNIjfFAThsoYPD3qWCc8LVUogJti3L+mwkB7owDhpEfQc+nnOGLo+6Ko9OkaZ4w/xerffPEU8EjEp4Tt7lSh6DVkl8u5M/kXSilzFwFV1OXOru0B6L0BMd31isosnfZo3iRYsfgMLHTwNE1P10Eve5zqajkxh2foKWawYaTrZUH4C1U1GJvJ/VRck814mKG6ul+cJO0LOM4fRMOkOLGlyCkhmYLA3Pm5hOtCLajBGNexoEe7odj0iyHy5q0CMzfzHyDF8moXJHU5D9swE6EgZwGBMzsSN4MdDnvbPYyulWWS4znMnuGu6g0bijKb+A/M6N8I9DANoSqPDNcJDKnf+9Nlx4eY9XofjgSqHJJzRX6chdvv226Ntja85JgpfG2ek4yfuN0T2vZHbJWCr/I9sEiomZ5NfC4rP1WNRDxmYYryj3w+LErXojF88mx4TqGZ5PouXHcK0uDEZGXxQWWBZnWmfFp5QpUSI0qsMK6tb0P9UI8UPjvqsdTvZOvl99eQxoQdKcoQH/m+HsQ7mCyssIkRFikfL9Er0qfJYNb8HGCL7BRDuV4NVBbVkzTISn7da9mlLoUIKB8EQVxZTNhx+lOPXZi3V3cUJlL3cv0FoXtoXpAxU7RTRNMphzGP+s4ljM/+2j4XQOyRTxidGk1FbQDwfTnSm9QUR6TYVr02vEo8pksHhQyJpJzL/7vnxOvbEuDnRLYJrkMhU/VZpNp0uxmp7RkPxO2w5b05czQTH/YIPGJIX5vVGOx0CjMStYKu/EfSV9s1hokfKn8nu3nHdGBzsGB7WC0m+bDfxavp9YPAPYCng4xwJ5DuDWlHqGD8tIOPeeDBiJ+5lmdFFC/lRN33//l9tLN/FHBSsSxzWTUIJlOo1iX2m7Zi0o+5Of9IANuTcD4ppQC+yTx2NVDEuKFYYIfUAX7c0V9kue8n0Uu0w7GAJcAK1yMZyyNZj8wOxPNPcf/OpKlyhkA2f8Mp5/0WkzBUYQSrt+Vyc8CF9TcV3Hgy/WixHkLN5yIasT1y4cGgog4qtkBybK5sGEbZlt3Qks0taHm7oFBETMQge1eKCfgE/PPtaYWhSSSjYaxcV0EN0wpdoxzEdQ7KRNnRHwfHDr06NoIKhjBgPII7PlDCEsLu360r1jt7asnogfc601F+WH+viMUABtaRPzMyX5oaKECCSgoYrEdts72932AN7GxmmmIUYCU5C//34RHH/uhcvgYuldP9RKuOMA4VJYbVOsgKVdhukO4x1iienwN5ld6bt/qTBuHu2TnD975btgweHRbuo7GPpBerMfDwzJL1W09jn6cQvNXqhbO7qn2cpK/DQ6xEVcnKEP+8LfPtoW7uNEXEtJLUvzmmDw05SrgXv6amXsNhXtlwZzqdP6EdVa1sKGtgE5dGvg0PEyG3GEGQ3S+HsdD9q6bbFMte/Ta4MIOf/ArbqG+G4lFYHM3SM0XBBPhSNNiU9r3bytc4p2SWp0DGgRxaGDozUQOc5KoreF+GLMwUBgzb3UrTj9
pCSQX8okLoVk0ohlj48ZO8HdWZDDPOosOyg+kM/4XRKdzNDLSqY3qITD9AvFyEogRNsba+kLXWT0fVLpFEyOO60FTa72nDrRh7hVljDdEAMgkVakZKIY3VcjGs0BW9Ar9a6kZJP53/WwQAijYCPqwgOTHj7riwmZ4ExAwniiD/3WtiaqmCGGx+9/yrrkXR3wpir2HBSz97lz6bNzxa+rfhztSU10XAj9ylp58OLDMz6klnSP27hXhHIvcPHkY7GQNA7xUrdMPalSCaeBYYYTsiJCIRzNpXpmbiJ8Grc1uMITR4Rl/jYWP4sm1/SppdDQohMEzeInCn8OORz0Y3ej/U2eg78T/8jrA1rto2uEH+ebsCF3EDAoL3joaoQWDFcAhWQ+1T15b8iOcnrsWEMT9UBljYkNu6qZFG8WT07kOv8xiYWJ0HAzustcZJ6J9FrrvIzk0yUF4YIcGCBoU35XhvzdSnPR2/E5cNXOjBo7o6NuebuaFK/E79DwqpZcPgsE7HhhckRdOxBrlBgcrpG8v+5iC2HAYrt+CYSBiWOy+YpOeYqQG9fYa/l8g4FwWGkIwI9vujAFeaCl0rtIdyud++INDim4UNkAu85E/5yY+svunhvg8b1p7Nb0JrHXyXGTDGixWJLXxUmerRQoPiDngTk8NgTRgmCB8sxYY2VGcgkW3YEMwNzc3nt4SIYaPuJb/IveXSaSBsuAe5tTvBDZB+4BEG2zGFrvbbx9a78cES2heeNp7dc2CUUVITFnkh4AvTcLEehZNZEob77PrzKycNxMdHZq0DE/Dg9ySlOdfw8tugSaIsL5R00l0NIn1HlX6YMzteX97A787J95mY1WvmsHVlQ8pRgaoZdlGUJvTMtxHHEOLCBl8xWVe2kHyhLqDAOKbuYorxQnoO3hee9EXKy6lpnMlclaaK0NtkDGBCh07BKGTWEhJpSCIetSL2c/+B0wIQcJ/I6hcuswMAY2ioYTh15nt8k612kNFvRsYUdyJMU8mwFBH+8364xwhsWsRFRTAMTcnMdj2SwY59uBUXtPxuJfgTE07LnSCFsFzDIP8bgHvo3gHHThREdiurfKaakpz8npaqguvsD23G+tSirlUoi9n+qJeNf8IIUihEnmUl1Uc8LwLvA2UivnBbM8LGZm+wXX0coIpCfRrpXuYqghZbnWjzGWIPPUCa37qDyISkX0+cR7y+58WIz0DLxl6JVqgwQxqQTkerXyApCsg7vMEAT0OJoKInBlPCCvY+vNeeNWjjiwlkFOyoXtLz08Fv73E/Rb2N0lrR+dEAVNBnU49gbaiBkWDLofjRoimsOEYVAypVdJ9qQTQKKC+ZO/4YuU1WFoAB5zV7CKSlZIr0HKJAxd2L", + "args": "{\"SectorNumbers\": [91931, 1, 245, 1, 76, 1, 22, 1, 15, 1, 16, 1, 6, 1, 3, 3, 5, 1, 19, 1, 7, 1, 34, 1, 20, 2, 15, 2, 3, 1, 5, 1, 6, 1, 1, 1, 8, 1, 23, 1, 6, 2, 1, 1, 24, 1, 23, 1, 2, 1, 31, 1, 32, 1], \"AggregateProof\": 
\"2FjDS14gqfniry3yAjVvS5wbOl7agWjGIGGsQKMY4Vbzp8fS4pHt7mT5qtxJXGoUeAcBba5CLNcfRcGX1BfvOBxgiJZqrAtjLrjNbWWaTW1x/6oqRmOzjHoENpYtjE0LUdm4eEk63tEAybIqZq2BJarYiuAIkPoueZ7ToLTN1k1IpLT6QU/dxP7EVwB9ZYAQt+oY2uiwZtSPHfMQC+KfISHlnyG6yOelQLhWdJE2FT9DrGT+VSWBlx0Uo1vmYcgFg+hKCI7qm9Rl0Z5xbGYK3FsqAMnOMXUvoUrWsoyAmdG1z1hHDGfZzgp/JBMXaP0OE9NbgF34w/TIqSJMuOnr4uRRMBJaBXgEPuVxeAzTgkqPyzowD9JT1MfcV05dUY0T6/UshrnEoooI0QE2JHX5tDNX65YmlpZXccpK2C/Qu43jA+udbNG525QKsiku3OcT4CHgBt4gXzwVCNrFFXUNDtb9N14BDvGnvHd9tq3NF9o+i9meXErmGP0oSByFopwL33Oi6b8Ji4IW0dLdWc0DpI89tdjv9LX5KdM0bl8HAOtuWQ1IrAHpl/Jf+aMKBu4PK6oLTZ7ikOdKE1DSzwcY6/1S0zB/p/tJRmFb/3pTsq1r3X7HDATE4or2pZ+7ZK8VxfnUjnnWLMXcH4Nc2Uljma3aw8giY2y29GJHQYbka54ViSyDPwXkhzh5slfJlxUWYb/GYv5mGCRool9s5oJDeA5LlA4lR0iCBWKDi2/D3tqO3kmVHOkjnRo4RlZ60xYR6MDSWbOYMdp561b1mDp3yMu4/fLsGxqWPMtFfVlWB3CVV9zXunvU+r6wn+bF0iIVmmPd3+aLCfZ+8F6rMHjRStG9goAE3H5X/pZLRBTNEkdP3Cm9NxPaGDEzVNCRnXgLLWl66gU0ihn8gsolpQDjYS+1x3CYBUIM1C1xoN2XtFO9pIsz0D0vTHYwfMGkFdUGP7rJHAsMNlDo+t2mUoNhQLKLdi4HkIt043tMoRTMv/bNAEJXdYWqQgbKCNxcbmYR9EMSM8VioSWftNKCWeAeIYwKWUAIQlLoXptwTUK0JPk6oW09YcH2XfWGsTZgNq4SugkM2uQ/Latf1zVADVdCZNK57KQpH6J1SfL/5aJaVxXy4xJSZjm0jUdkMe/vAxoQV6ScqeI7FclHIOl9cCmuV9VGeDwqoqm0dS5g/ccen97DZfOA/s4Yeje4ZmRh788Q5e+cRwxwFXbYMYN7uee6UFHlZ7gSVJT1lcMzfAgMrs7OchGs8YTn02BlWpVwBTEQzq6wlN32DqhdPBB1CpPmrBegnLhhmhJ6zjPwYfUWsZzxjA5hD8WzsUncrsSQVtUMNHg+7uI+fNR9GG49VJK+rP6IF6yW4KnNEvOaKKg/MkEiY0+UpqDsNJyrWXcwIQ4XzIR6mm+oXWMEdQGytF6zTaCA4ZExMOMWGCkYIF3qJOEma+Jk4krPobw5Nj4fCcYKVHQhKE8gfuBxodaWFa2lh3Y8mjB0K+tzgx4/J6ybyoLjkvkj4Qzd28I9Aa3RvXQCFb0fQ6oYpaHz4NSMiUu6A++gHJl/tTMxI6IOIYbSx109rQ+AiRJqPMLs0m4b9XMGTMcuBrW1CbrcTKpJdKISlB3yDMR24iTtrg/DYx8Z/xxrJPbw53Fqrr1pXiqlU00UiZFrHUSX5GlZ6iRqS67pqQqzJUebwASQerd3L1zCzrWoZDyJT3+UDHLEZKOnvUsJM8xBQJp6OExJ/gsMH5/hRpqbUBnXTCpZRBC44iSKVhVnUXnjxmf61z2TF/mFExYC+qCTSep00ChJxYTEsGsLoK82oPJGcTS/GObTdZ8dPUJNwb4QGsBjSsJX5Qj/0XURA/xdyJ3sw81zV2TS6W4HY1PUf/NuWs7TCPsWAyAKHi/t0o80jK4oq3dpq8ZelBYZiZPYO15eC54IXZ/i8JmOWsRwTVh6x/3SN7CJEEW39s7v35z9fVnwbK4tbFHk+BNbAAIAAElUMot/O8
xabfrWM81PiaeC8kKHgWcAYGJInPowRKSUKuP1zzrZdoxPp89s+Go2FIEw7yFM7t167IIKt3pbR8BULFnpAoubp+/8MpnwmOBICGldF1ZllFepII2IBwqGCnVMrbn7XnCrQmiTaY0uWNWenroXSRRANVFAfEf1VLiaSvFlr0wi7ecS0ZFei7BnBeEwLnzpiKnzvT0YJ5xxDKcQqEAfO/WqOTUZ1bk0eDn9DHhnjxvJN1idb5VouVyGDH2lOjMJPLzIOT571YioCBhx9Ub1axVF+DWU5e9VFQuZ+hkxb2mOUFuaIEgpEcxJF/TjeT2+gPHHrt4NRQDOIknQrxy+riaHKa4Ou3RjI0o3bVEjoB0TOeXlZ3Sr3XdjF0+ErRuJyGCO2I87psxck7ed9gEEGADBlkgCSNcno+WSCfcSmWxEdi2Ba40GKITkA4nJQFqxQTOpeo2rY/mTYco75o/X1QhZejSHRe1kKjrk9fipiHzCWXum1srURRT7BaSupsBwyL7W1nO4L73KWJWhBblNv+0orhmCY8yBJ+9dSwe5gfeQAx6zI5LO53MHB7qjPCDsygAJY8PoklY214bmr1FAmOyvyeu9r2nzv0ZuU2thsKkVW0cKxwiuS/Y9GLMJIkDhg/dSBildXIotacAfW3la4ePH+lVGP49m0kDxMrRd5Q6idIUtRb6zN6cjDD/ph9+Rj1FhOH13SyFUMyhbaK2W+U9MWI4nwMat526nCeJE4d9hDWslHCKrBsobExjJp9vktU34AabQhqnHCtwLlrefvqjc4b6y6uyIiGnE4fK68q/cVx2XFRCaqq2lEwtOM9+5bpX7WIlXc4nP1ibSakkdMiOoqX0YVqnqGQsyrWASACC5oLdYnA0us73HD+nO/sFnBIGQO7sqBCQVVsw1nTwHTTMyJMRwVPAdFaBF7+EJDFoEoQvGf6VxMZrsGZUi/DrzEd/Uq0DJ54c5htkPnhCXFxQOin89bq9kelv4vlMxCzzutqrVEP1+0gvBBrJl5iLwVTO6MtMQmNtxwpl16swxvTSbqhS87cHR9zcCMolBqxmHA11gI6nDDo43D5kuHrF/j7NNyy2wvbAQvInjsMLC2QUV2udaStfwkY43/Huo0RMzXw5p9iG8p83TFOweIal9FxvKeIK+0CuV41rfJRcjkCFjEuavA6KyEHTJKwde7/7LVuelnT3asqQFFFKFeueRhMja91FAp6tBpRyBGwJshJGm88djw+0er316OdXExMsSJ9/jnS84e9CoGT1lr7NPu5k8wN8JtYnq115Ot64E0pGhKpFQKKphXyFBktgWEhwbdOv4yK66biQtEQ5x/VDQHndxMgQLY1uDKuQwf/bpFk140+mxAX4Aidz1eRGqf3bnMNKd2G8E2DbxAeDk24+ebk8n1c/BuwCsql3OnpyG+b3S/MTvaXRADVyVAgP27fG4hBhcIVOMvSO3F42Yr74h7Ttw08UyJF5RewEiMqiDY6QESH9i0tPUcVwvkjFRagG8UlkKK7+haYMEB7KarFZloa4HztMzeM4I9dBzXHDSRZkwYIel2K0mel9bg4bcMBVQgk3Fc/ioUakCCjRIvMqLfwwZHzqrLiX7hqLk60ssaIzMSeLqF13Jmk3I08LkTmBcNogARRaCCxkeCvu1Brwitujqp1nE42RkjT4eMpWBcnNy7j6WdObzzdrppe1zknTi+SgXy7WFSIFUCuHV0+GxRdVM8ZN/xwsGhl9pn33reF+SjS+Ic76U5yJu41rCVZB3y/V4SODAT4b3EO9OOxNr5abvllzYuV6t/OqDbwPZfDkrbg4EPO8nJfT/t9vuo0cQbdytsILmkXu2EF6lfWukaRmbMrZyzOeBxs+2/Ra1xPXiIcxOfDgQ61z3Xtu1BXCi49uCmL6KmSW2ExC7ndKgAoA/NtDHzd13eiAky3G4H3lzStNtNQgTBCJd/x0v1w0YdpdqGLveE2ADCfX3MaDTHZ42fXll7BmA5fB2oU
cqzcYjpY35hDYcWZ142alov5k9fQ1Z8YYNe82BD9vqWNWkdrYiN1xVhV3+PDjcM/9Ky1ezJWm00oLb0Pgo442woGKMgwXgyC8cGcCoCXcsyu5d7vjaEbAy2o/pL9x626gtu4QSXQjDPT1rcB7t2BqriSxhAi+EmfVE6RxJEw+THcAOh+tie79CW62qJR70v/jenEHe5JudmDAm7OXi0nd4BZM29T8olsMAViExFToGim/XPX+2ol9KBiXsK3U0pRdDoQB3HwKkPkc+guSigt+lwp0ZpyYxlUG/Dg6EBIx+N64xwDDs5Xys2Q4Pb6t4TX5+VAQI/ehWyt0sSgnM6lfopu9XpQugWxIQYGJNAWS6TN7Od84ZI509sC8vnKr9x5C6ewKV2ZzZlVcXdDaBC56l0F+BW01p8OST2SBeCQERWG2zz5N2asewLqh79XOnC4g2JOnT3pylSs3BeOIjA8z/YowyyCTHDwFLRgKIELRkX2q+XrFXK2QlyHOkhVMMs9AWQTakr9zrtyGNJQ8Fra8N7V5FKG91gG+zcCpYCM0+HBSDJ7VAAKR6uzIeNU2GLcOliEwLe/Tqo7Y2zHXOPbd2q3UVNlAi2AYzL9qCGKIFhecVAlxzfTMf8ACBtu9SgP72DB2z9oaokMZI6S0gAf277cc9Wpnm+ILeP1RvCxOQGCpB4Zca924cx+EYxNWR3MxtKniTUjOOp2gWvgJuU6a6d/vJ16clOPHFdQqzE4y4wR6lovbEIEZFn9Gflpr/QUHtphPUa+jSOlRC5Mn+5/+6//4S8oH/eFg3Bk4sDoJCcNWdUFeu87FUUVZghoEM8X53EY6/ra0R+ch5wb1MN1OG5HpE+QP8kwUnJ4pjCJ+gZJzwsGw+m8cbcX1cw45SYZZJ15peyKgU4r7v0Qh3QKPwlYQ1Q4UEcWZ000IXEnw9684M0ywlPZTzeIPt19mswIuErjwvEhjuaMLSOJbq8ovBFTzN7FjyalGOwMpWC0TATB//sSbkHJHCC6LhL+8/gMetUFJTWseG/eJ46E9kKc5m2C6NGkTNIzEc3tHVA2C0QeAHkCqD92VDtf3VJo8E1ZmetTVIg9uVP6VBTqWVBNQseKjyG+0KDOSfLtWaAVd3/B0ls+Kl7R2d06+oUk4Cau3Pfmam7TCBZ5eNlluQKk+5PjYYrpHFTbkCZfc1CaEH/ajdb5ARB9yWkJVTLozGFb+OaWhy/NIWOrfXEME2J+pKC5fUOFo+7RHIn4/bBsy81lSYKvfQBijdtq/RYDhP+N73QwXEqGP/PMit1/2ZzsoyyJVQTes5QWCv4ryKCEkl5f1mGA7gxI6QdplO8EhcDFq3h74MDQN2aqMCKCElpdOi5JLMMPPuU5o5OomYAuNSeVdcv1Fv2+NUBj56+nBKnqM6Qd79RaCQ6bjE2spDQGry83AJzohoxQ7K+8+pE+pCnf0tVNebtAE5X0Bsru/55Fia9nCaWaPhxQcpSnJdo5NS9rw0yrcwB2xUKuQ7DNqjML4kmDptbCbx5Z6W1uYraaJPq8lapJ1FUIxTUBl1bMFFB1y4bURkw/gwBGoXC+Ht3+O0bSEARYbdGUAmehl5QVbJaqBKGwKMs+SJ/WnBA4Cnf4xnmyMCQkLMxqu5E/xqk3uPKXJltoUJFLW5Xk2p59zxIsvTuDB9B/QrqSONCs+Twnk9gpFsnrbFQQobC9QpOVlpuW0j6hykXXtIEAp2TLh4nqaZFl+JYXGkR8tjZhYpMyFUk4qRQQvXLZLGEdFZSF007FYl/tj8tjtg8rqy19FuoZiJPlw99xwWTikTpRI5KD5oOtPO956m1UlfC88cdurMBeR72fWfXZ6HjVVoSBMm+InoYj1VRBVcGphewbaLWfCxNVnrrU0DwJcUFHqEh5G22QAGdltv7a798Agsn5GTqqkmcJyTGGpQS0+/WYAa+WV+5c6wMgRQcgRwBUnQXwDqWQPGOwwIPkLQsVKXnt0izzog171IRN2QTV
qlyc+Dc0wvFOlAHcOkkYsnBFirKN5rudM5hTC+kL3eGl97gkAzXfzL06S54/462SJeuwyXAoNC4q2d8Rp2YZUrE90hW3JDqmsEOACvtdg9ihOQ82iRYca0GdO4/BHoYKV3sFrokWi1WG1r9yqdHz/BBBekGrDHWuR6Msd5zZkPkyFdiT0yjrAKv0Cz/sha41Af3q7QNdTk1LlT1tY0AA5aEjLvWaPB2tx1+cupEyWXOYSo309xugAuk4Rk0oKJs5GbeDaPa++LDFad+2OmoEHgGXaj8CwOUcjzKiBTkjbYCYf2GUczA+rbsUjP3gur+YSnH0kXOENtFbqShVFtedyLEb0LlSRSpyD5X/JoCHIp4oy0O/9RWcXJH95C/QzCLyBhT7GMu4OoK7qHyq30tKQdC47RcvXcYLH8/uoiF/E6vbuQ2iYFwc1FpF0gyj9t5Un5TyClnxy7HANuHqLpBIg6CHBb/4uV9BbZ+wxBPRCBLkvSZqaadil5+aIipLIBymPwUMWUp9pqqHxDhY0SwzCMALWcXbQBkIT2V8GsO3DhRjAgFPsTZ8iVBctX79/q9GDtNSph+x8CZaaCWGB7ADgSEQhyOLMbLdh1Yven4Vy2LSQ1OJy5lkyxrLPy05G8fsI41+KyjCzaGlF6056+oiaeA94LV9hF45kVf1YiLxk3+2SNR5vZ4ED9RZYjad2IfVWMPz7ICpcJupa6kWVs3+SnEWyGtPNCHjyv66uL9UFDjZyG7RswvZc9HgQMrf+6NQRF4z05DeEmw27D/nAnrk03ELpKB8TgJSBvxgQkMfgWhJdJYhP65fqFqB1yFAOpJ1zZmqtljGAsyqAJI+ah7GgOD70Z/ketrggjZsRyyM1w0Gu5lysK9gjF3ciiA+zx4OMIwm0ReYRgMVOEoA6EOQdmB9NWp7tTI5Xsv+vixGkUP//KrCixwOHHdi6L5Yltpr6geMRoOsuGkFqtuLx5jbjtAUIEUyuLcMggBKSQFNpvPdhFg9CUgXZZrBgeiWs3PsUSGcFRcrMsPSTIHyi6K8jEESVl9PuKNOaY3VwjrFpnfWDYg3456l/eLW77+pXLWziYe7V7DG5LTMkbJxMLDRd7D8QVZ05Mff6LlPFWi9B+5kD3ej/u0AjjnodKnQFWQkiJhk39AmVm2eF0Srz7c6tSBJZcfMvD6/3IdPB6jSk3Bp4UwvIs2tAuSJWTMY/Jf2nsB5SpRfnnPzCx97zVAC0AAGy32LHoM9S7aSFUDJQdrOweVn61/cG1nhtarF2sTRH5Z71U+1HLMjijBi953TODDNh+Nxt61GIzByq7+OodmxOxVTem0oD1GwLa/CdKUxwrGkn8rc/uWyG1295Yg+OyBO2s7aT16BUvAOcxaamfu4hhSRDE091+dc/ZB/sMeLoJb4T2eDsVBLpyw1JMm8L9CIyZHvXE6WENgY3cb6MiS5IcjFfgUWE+fPmJkvIBVGQaSqddV/p9d444UmxnVyqqBE25hW7DauPYTeaWcWRSApH3lfBkEQ+190qVosi8Y8ne5wR3aPSLgplySDVvDi/JCqAAo+3k3tdTjqO5VRTAYwAzRr+Fz+BJe5dEAsTwJq8OzqUNlW5o3zf6OFm1mH3oAUUhGegDRHO56EyAAMP9szuaxp1YE6W5DGXn70K50sTxfH+kOdjOWoiKqXytGKlTBfPT9ooiQ7mBpE3yrVDAOH4uMKzBhf9VnfKDMElBhHSReH7VTg/BV9r8CoQqa0+mCPJIhFZW4MKnA4UIpDsH01VygtrcquHbfuQ0X9Y7UZxRUZGYVeuvWFIzX1kDZSFPCu4UzbaKW+jx0M6pJYSwmcdcLcZI2yyrCee87r4byMuKisEb+Ld9OdbvYoMvqeqcATLZD6kl8qINE9BolLr2O4WfagllPKQ6phk+SjFQSoyn6wZEM6FwjaOnsBxcV/TWCFH6yBQf5Kxsu2MATap3TWLfGdtGjGEb2tk1pHl8Vw/p3yQvHbWv73UqjW
zY4bvPGO8p0E3kX90l+GQ8qmCFaRiPs8fLFPgs1SlHpyXfakkzOdhf01B8VBLyZb2sKltACMXPFCUxQNp0N+sYOhw+87klDu90sMfcsFtRLpudTQI/DtJCleggxzFxGrcuH4QrDmWl5/GFniGOgU02pvBdesxo4B5LqGn3ZaqJAtpEOzfa/V6ys6/NCO5X5pwdMY4yFpJnd9drMe22JaKfuUeVHLIRGQv7//0fchlO6iatADnnmN8BRAum/ARF3hsaw0ARCoj0V7W6q2MXlMEUrxOWjmTG2CKNAprZ2DSqJNrH8IaT20btaftWNo+D4x8NTtn1CoyuXsX68qI+fcHmD9zbmz9891y31YhMJS+FXDQiWujYfCCVq6+ta4McGlbo3gxbECqzEaEmdXbnos3XUchJ+ARQ7UnVqVTrwdZyRtTJZAt4/IgyNM+lYN9Fhx4aaR2FBiq2oBOaatkj1mc7sjhweU3tciImjdhScy20hlPEVRcDBAjSlFwpl1COaLf5AHo7CCl+UcOQehDQwFoEj75fleKdzCYk7QZ7QkhQVZnwU5tDIcDMsG1IttZBugXL+w2tCUsSWkbNw/YhAyiW89UTUp6P05xp4ZB5Rw8PaQnDcb18Xt16YMg4br+ydJpRQ/m/Gfsm67FYdk130+JYKh6qDl7smo2zBf7/UONckpLmdkd/sb/PXUWODEYiVtileZj0Akq6R7uIeRm/4hlBWarTi73tYpGlUyJUlrtc3pqyH1k6jLkoWCr4qoOrZB+/vg0lBeFEBzx90P4T0RHcKzlBlqVuqiXSFs5SJlpEooMAI5eIEGBBltV/Kl29bDPhFqMxAWEg2oEpaHWKIGJbw9YSUFjxAho7DaVysSLb4Nv806x/+xlq88R2ziN3PNg0At1BECs7H5z+xcUohCXGmeOLgufxuZdwnV2kZ4gRVJa/NDdy+TmybSteGVJEty8ROG1iFRGPsxE2DFWgojdz7NuvGOvUXbcKUF+ZSMeary6FATuNEQTyfv80FAiznA/W5AATCygzcfagphR3Tf4xNRJqDtVlVIE0/YzxowDQXE+VYGiF7/CE/Z0+cCyl3PZvT1GGAHTJWjPKrvKtz9+kSJtj48WXC1dy+p6ojElLQXg0wpXZA2Kll52pixFhCYy76z96DROxzZS4F6mFP4vZDu/J8Azd73Q0gpG7tvXTnuNZC7IwQTPUVXj7IGf4sq+pRU9uE++uSajfActnls9cD3KHjqrIZurWWW4hW9WEXbW9HGY+r6yuDn9YCs6N8NxXN3mvB/c3BHdJ7gDwOgBJNa0mwFOScHVUD6xorYHmAIzQzAkVBHzfQHJsdgSlPq8cd9lPAiz4YBbthRK/TaA+fcPoRyS6O1KpFsfbaq8+bWEzjH4fQb+v5QICtncYCiALjKTqGLWT1fy977i6aLORCIP9zXy75qLLwfP1iMbT3o1UvDXtmkkA96Dxrj13ypDjwNAWC8nKXpktg56y8M8ZTESVYe4u5BupKkhZNpvLk7Q49gAQ86q6EHo4CysIPHm+kucJARXL3leAQ4NpRktwHSI87BWPWJVsleKjUfUqW/gWzzkPB4ra5TCsekGGZtHMTj0ZBw2wtK6cMDyPDb83MhjX2oPajy0oVQDiNNZ3vZ9aYVIecVaDe94+cfcmECL8sV2lGPolNgqqexZp01yisQwh/iFSXyBMhGMNFRMJtrTr/VCAwxHyJWqDE+bBbhVtHoLGE+cIm7H5oiB4tk0KqvLvsTx1sTPnUxmEVUIdEnnKSkFtHbcCAQ53w+tZy7F1JG8oEuR5ttno3/x/AlU3sDaNp5jQi7iriyyHLPXjPNiu0AiJN6G8mcsg7wQHXxrBOnoWDVBkFjsMQps++bnq8YqkfKMWV3URmCzj9DYBsH8jPoXkfMLWsANBM8rBq17uRciBBH+xev/vJN4VgJ4RHNZW2msYV/dpmy3FjhMPohMEYpnVhWle6yAr02GuJuQUiTTdB8nFPQtjku
vnMl+lRES4fBh/V0GEZvpKGjQo5JkHEpuDKimdtGWkcsPZ5iLAUdCjBI5m70BAtvrUfMy02M7sBvp/Mlbe8CtCXoZYx84byTHWkXy+YVeYF0RFDpL5hL1cEyuqb0bizMa8qwXcGBRR75noECx8Vpczn/Y/sKfuubfZ/MuJZNOiD+c1Vpe2jFqLFQF8WVWOME5VR38M9YGJSc9pRN7EmPLMIbD0BOIV4wbO+lB7CechrNKyAjS+A+3vBosTAL/MKy4sK79P2Lp96aRR+Sit+wIFJrU9sl2a06OUYJ+gxI3SW22D46vXGIo4EplSV/948JWYFUONAxmtVihoHFW86AfbWR/3d+pt0lJnsOHiF90lfjsjN136rP3pEpUM0F8NMmlY8ry6rVFzlP/7w7Mv4n9iUSjgjOE/VZRnbQFuk3lcSYl0VXc5aPrGBeWUfu4CBN1ZWahC0hzNWk+D43z8xyldkVcvMfGYOjwOGU3ID6gFJlnYs1n07MsyDu79uneDxku7Ezg8TisBxkM3JRH/W39ICAl0/te1ctwteDnaGLyVnGIeZMxArnEGCd/U+VMKE7HqsLF8gjTxx8r5wapp2Kxg3DxxzkuOog+ktfKLWaJr/7NUCVPII9wgBNDVxFMu89X4EgDgz44QZ2oow5Y1QUc6muEqA9szmngrbSCGSBp4vcMIRVRW5nURDlBDfSkVmt9B5O8Mbc63xRaR8J0ApbI11MzjnVrab7/s4fqs1KLwP3bARdkAsIgLC9ft+SZFYut8bTfBNA14mcl1Yl+hsJ73UpE6+q5KyPyGRVVqCqZXLGNJfslNSlwnCTD9Hn3CB++myqdWZEEpis2Ch3yWIoq7NI75yHrObkKR0aLK2Wln6qZsuP4P0g1wEPih+mvX9+KvRXr7TTaTU5ANRMIQ8UF9wsAvxN3IgJkswegrFtCUB/yQrIgePKpsBvkMNDm7mj8TeiOvbNFZORy7sarVU8Q1M2PRbtTWrRnoKW/CxcjuPhw3Y0MjS5y2E5UxTumx5utQzPwAbmyga/JZd06hNWtmKsLmqUN5pxDm5W+grBMmGIEHmSA7x9+6FinIleOKQvsHmxmzIzBvSm+Vvvk+QPtWN3NFvmjFxMFHGxUG9FvpKzJhnRCPEDpTB4deAcAT0yRHdOTLE/oFAeSGITaoFyNwW0Q4eNM+MBsue3fi776url+jd2Nyb/iBAvhjmWNiLJKGAG3xex8Puud1tNXks3g/+u5D+S9R8c4kvStAWUQYCTB62ZxsKJodAVApDPw/xJBNndHhO0FvXnWWaj9toQb0rvIY7wn1DLNDsgPBERVF9ex8e0PRPgwyB+gyDZZ/Bh6qC/U6mgAjL3fEhzcMpwTQeZOdWnl4pKkuEp5ncN4qupM5mwYLRsCHCbSGhoRZUriFnsY9SBtJs+Yh3QhPAt6QvkO/GgE5QQTLgtE4tmmQT9dCicPKbSYYC68LfZwXCbVdN0UdfyuagHl8Fi9nhisvAjJZdyjN9TJZId0zAR3kUSfENHkFgF1lB9VsgEvQNHPFsAVUFtFQj2akCUie64SP4qDFfcrDpqsWaaidJ5jLTwkUTwDUIPM+CESVDXRRRVe72ma7TP00ci9oWRlV36widl+eT7/YIVcJx8ddWrOX3Y3OszLR6PzUAejU67kEi3iN1NWQzdTBqlFURHGYWxKWp3TKoIDckabioexk6zmfnJWGuxG6wxAvFcaGYwKQ/dNIlJoR0sS0/UxZy7/MTlH0yFV/Y7WUGUw8Cypf265gluHDa1ffWV9lBE5GOQes4/h+F99bKxcmI/aaTLl9p66whsvImElsKybM2AGuCEedsaHILOcwUEuqFzGr4PBC3asss4hz2neXb/LpL7iv2cSXdNFn6ImF47xxIl8sRk7e3AXyJyXbRFiUDMYiOiOR/oEFGBZYgLyTMJimRm2cMYu0D3Bo/scYxiTjPNkqbNPNMq9VnmW/Nlw/GCadp7kxZDeb2e5P8EnXnoatMT
+pAry7forsefk5UZVNMlVPE7YqcsVSM/AyAvTvFq9DXsX+6NBG47NR7zBFBCTDlJ937jS1oNeO9yD2QKSzxDWxiNpe6BNddHhDk/+GFL2MZTdvvN5l6nz5JqSkXdSCHMTT8AkJCQwtcsezTblgkSmKQbDiVRngok53n6IbEAv4mRuwwDK9PWM+/e0gmlqwHXao8BXjGtlrUuvAb+EzWFj9CGdLbxv6mXPKv6mFGR5J2TZNcTzfMPFBWVmweRxhoCvsTkT44zr+msBlW6xnsYjEewy/4A7bs/d/J5SYDEWRPNLhHGzYdp9ST1wtL3+IYHp9gwQ+jvXNSOthZwYvS0ISezO/uHgnYBED/JtpFvf8N5GV8qopw32z6F6GVKFqFUhbgNIr9HbXzvfayezhyR50rsUFlNvTH1yvZIhuGHxl/QrFMGkt7xF/a8P6ts5rneIpBkGyBW2nDcoPIaRXpoHdKVAIB12W6rO7OLO0CgDOWFP8vYrerYCMz2aouAlPF4gr8TBpWLUN4YqVVneia1xUYFtwSPX6/ecPl5uAAlFwNhAv8Rt8rJlWYwe0zyfOokLDjFpfYZ74L21zlPY4UKwgZSwc/dkLZAlz28alE6vcc9mijY9Vr903giHjubQelZJfBZe0dsnVwWF90GQDF7mUab7KDVW2nwDbMxAtEUQPl2H02Jc1fxze0rHFOgeeXgE3+rjtF+eHvAuKomo6j4Ubyki5a9BF+3Hs23zjCgK7XpC6gjobxbT5BrcN/gAUSrk0f/o865T+qqonsI4S43D8C7KGeso1ZhQO3Mm6Dif9/7l3AHMvWyGzVwvGZN4Rz2YwdGgfy2GwRkOkPCwL2U1ATJydE8BUzCRJxiD1BUj0MJjLxZBrocWVBaNBvcUqkQZauaHZIkWPISUuJtv1VfyMf1xSBUkfNZGJ6l4hEHCjGBCrfAq2fnpMbMNegIikZuj/mDRVoA/9z49MlI7XXSxPHoWPNcYILriqxzUDBgMjCLzW/82cl8G9rCZgQJ4Y8NatnXPGRF62/cAPIulA+Z7ZfU9VSFdMNFe21ofQDhEkzsCL0Dmt11mlxZXh9xb8KkfOljB1q13IkmcC7QMO3R59NQ8hlbXc7+iMFIR3FvvcJ3vBcJgVMKhYSNwhvu3n7fAvkcJRsDk0pUlGneyLKIWH0W5t6Rcd/LAwC55JD2DT1h7TbahQWnjQpatPoMJnH7kBaz381KBqwf5bykWMm92oBmqI5uHlEA5dxx24AXJNbbIq7MfC26RVDCKcxbLvOM6JTIa9Nv1wNe/VRuuVN41yvYFxKho1RuXUMaGSFRRBgH9QJydf5Elaix+05q8+deKG+Y3MToF3ajvSYGuuShlbnIxurNw7QBXJ0/tSGRl2wtWG0H8XEPHv6gvcdDzSX6bqBPzh0LUH2xY6co++36MeoH660PKgaaXwcMiuAwZJ//TzOuqcYTRB9POXvrG8Vc9Ii2ceuKiixKUjkbffey/8myHeADTevZJt5V+7FfRGbEmqavGnCUim3LfNWvyAq6NTmO8iGioDKStOzGSxT6z7jG4hWQ0T+CvSt07kBjYOa7B5dajUcfLpDxJg1/B2W27qjFy1FzJMvwMUsHn/8aCuZ+Zacr1DSiHwwTsQCw8ici+MDVBFQZmKWkXn1n9+zuH7hfX8SI+Do9VFZ2JIcW3kIzndtloGIm68IHcsGLAGRS8DyLFjFU1/sNmnmXCBHVLmWnft3Qdx3aokW9vV6yk1QNqOVFf0DZYi159gCVW/gBLqTuENsgOpocIEwKLwZEnC3CxMusgC1BlYUO6t88D27aV7KVL+kqRMJ+XCC5z5Ry77rNknvqT/2BIEit1E28DktU9tVHfYAbU8zI65RO4VCyT5h88U1SZ9O8HjDfD3dn5fPh6flVyugabikgasBSKBEGsDrtZpuNfsVO90VoLDFO8R7zV2CXK3Y03VDyT+0C7kdbZACVtPIq+nuI3Fxr6LcP3rsgVFo5GON2
2WcZ+4vdOEMrJDuGNmg61LEFQVemqydjXrWHx1Z7I0xddNF3otZygLA8FHbwYdcO1zofMvz5Ie+TXxKyi/86g8EsOZAaXp8YSxbh/snnEqnujKQQX/on+zMDLUX+RdKUkHvKsoyuF9G6XBaHlCnHyHA2IZatxGqnVgKgQKqWgP9jpdRADaIbWZpwZodHEGMpTST0ytiSuuBuOKrhdz3WddFix0fdkgc37+4lcFtK5thvf9vUYg21raNlxoBHQEWbCbVFhn9ay8wguxoGmtsDY4FDyRPxOSkSksIfcIid3Xo+u8tHZ4PBV8DN2ax/q7+OZdq/bmcikUE+QjF2qvbaBqEGUdK2Fn84RjYl3RHalFCBtzhDNb9fFo8KiEWHEK1euMAr1lldjKVMnm3IIsiVF3ByeOTRgVsnKNShPQSFO/F7bHxbO5EI75gkP0Q41GbG2+WlBcgOF3jKZ5+9WpUlxxFtyveMdIN+uehxQQVNa1sjjWIOKJqNF2Yqy8sXYIn7lVey6ZfAPSFk977RhJ4mFjCZxN65RqD9/o6JhtGrI7u1qlzS0cXEjEFDm5qEFnX6+35s2aEKZpeESgbQz8qn/pFqQ+V/wMxnCF1asYsaCSnjfQTNd3qodzusqpechLFExtROg8qTFfu502DxrLAyrTCsxi1dN9AT1BBTB2ZOSKRNAmhWP1Jd/JN4zQFU5fdjHsub3ryq8bCFEIBzhy+1M7FPDQP9/3SYFFqRRQ28hErdQXGKoJzdVQI/tmQraPnPZJMl+54CyEyxN+eUyAdlj4BtpQsTj8ZpIIw3/A4mW0SF1cArsxwxQxF6aWpbD/DG4OZKPPd2rqe4WQmudLGSlQDiiVoZPPb1nu5ba2GOmzFQkSdthLimP/PRT61mMm3U+DgGy3LZXYkTLRbRli7QjmCySi79MRtomr57dFpIPC1MHcHfQeJBqf8ze1Q1dkRcZArDXZ+SMNTUoBbh3/6l1FDjlan/fwiqkY71TPTlMcZtF/SNBYz+aSVuXjR9hlruxm7CfAXCGrlkYCUTtvbGvrAG+U0X3elSCNrGlIiU/ELqZVVqF3vFiBP7W3HfsVui9OxI4ULC6jC0bVuhKcqrz2FyzL2aNI+P6UxSRZz+q6IqMjRJPRFheJmW3Aq/8nnDaBx5dvRWiF3AvbUVCsavqcBeVYPtY/M+Tzrt5ZH9yPgEkSq0zs4600rgAik9WWhaz+0OVgTyf2lag8llJdmiG4FPiObKSDbkrUUzahqo2/QAxQs2nvrx9nHqpWa5hldfHep3sYTXTJbnne7jpvR75KD2dOrV/MssIrBwpZIU67xq1hAnYfpRuJDrjt6spd903gEElXzxtqy5S34IvOvJ4AD1WTOirBsT9qYs8+7EfNHta8fgeWCBAIIHeji/bbJ+/L8IlHjdTJidyRjsUWeKJqEqj570KhaHPDwA+POgvxjb5bLZ2c4/VfUkpnHvijL0cNFL4IDeS3i/e8zqeiASqxEXxoA2e7iJNMkICQ1HIJRGV5AfbH18Xuqq6FOLAmrJSTkrcpM5dxCzBATVBHLDMkBBesqjlBEJyaP/LhHaX+Ywpht+BXLbJERbxaiS4DE572UyYNeZJMbtWmuXr72J0tBSk45LYbxhg9U0qF7fjbq292roUXCS4awk72NJlFKVpjov4LqL5um5eXPJvw7USjELdFKQdYweTH5LE5GEjYGLDeeIQjJT/IoLlFK84TPInAEsn7mxbJG+t36kWl7SiLA6KVDuPtTSOpkSjZ1a5YUwjxOtlWpy3Dm2kArSF6BnAbvwhQx+APmoScH4AOFnXiGHOvWM3t5h0RLp4NHeqz6efuNB5hnJJxfErDWdo8+uNspXWbCPQg5oWCqxZhBkh0GX3XU/3SYu+xfojZH+oySWiLXtOuhYPAzji7bT+x/exzc9fOk2L31OBh5gmT6keaFLleHfGcSK90bF0ZBfn+Ck8R4qobG81UlIjORrrOeQpgo6DjHA/chFE1Oj
K0cHwDE1ibJUx4wuRIkqIh2slaZOIUV3uzNppUMvlwowzPOUQOI6CZ0P5LGMfLMsQsytF+EXjNUfvWBqugqGpzK33DduI0XgB4uW5VP2vaAZRn6S9zB/Ls6ixH00H2YOc5NUaCF17Q96/YALm51TyODQJak4iQ5g6lAStrtYA+bk39J24x09xlPORSsfKOaZnU7J8rAuYxYWX3IKyvFb17bJO3HcbGkqwRqKnofkgV0/Wd32BKTjzP+X5AMn/ioITdCqN8A+6c2pUheSIbERZgUYQedLpEsCF2iCkWAG801iblV2FJ46EQEX2O4x5qlcmub9DAGQSgtZU9HDM7KjcR4oPsflnu6wr6rKZUH4PrPWqi0Hij6ZFDFFd9YRShrzoyB5dCFMi1tezLUw8/LmMlRsQGAp+0w1+vACFDKyN5PL3b9eY8K7HSNAYoWw/9yPybjr36BKabaH4nBZat5gMK9jPUCYnb7Vff778wxtWSUoCyqRmIqUP0sGJBKQyJ75JOEhlxGJpCDwtlOjkw9ofASNEPeA26We4yP0gsRlD78Jwi+dBrzV2hg+abl6t4jCSz2kNjEOZhTjwk+HQjKxJRV6QhfKIZ5q2Xu7hJOd2hwfBZ6PJCJRUQayk9P1kmnWXBYnRHDfSybS+MGY/bCoHHe71JUtTiSJeSd9nTOoB7XNjPrthPTNnl0+pGnhIq8WwwjLfiAJ09qlcAffFto38/fAiKrjQ9QyYJ1lfplsC1xpEsnMhEVwbaLm8AGy2/D70Fv3arGejOHW1HkZV/9ANbXn6C1bYohbaiA8A2qEHDS2J4XDvQGwUbyKF+1dvD81PjJoXeAoI71W9ff2a8HHhK4w8OlbheVf5XQGB+G0VfwwDeUfWdy/oNEUR049XbjTMt3GEgGcrx5qcMIFVh4I//y3s5b8EBIMi2CrQhLBzqyeh6htzyNis1Zq26iPM89yS0aCe0Etb+6MRYLw60PwJbJ65n22KZ5eMalswKetYZOXO+ChW49EJl53pn07h2rh0dQx8nGZwCYuIFEflrcTJ0qjfC53TrVyWaN1pqAFzNPgMNXUuJCZ2Q0J74bplNubDLEYrgDRPVgTOEtNsM9qhFjtAyAmFMQ1fqJkp6l3aonkqtTzE3rmY8hr3/5H/kYqip/ro1EXRwXl0n4HAVr88lvZL4SX/CDcIPGRzC9IjdZa6uThql0NsMvisUxBaxhEKFpzr2DBvvo0PQBQ0aVNNTrTDBjRwy4NhUWaHLYtf/ii/D/jcfUDOLL0FStqWeKAq+coIoA8X/N1c3TYAonYgjnBFCrhn4GbQ85y5dWt8NW5a0uTQsNwPaMvRpdANL3DAGQrN4CRxYAmpsjW6WXPouPZGqHWpwo5Eb+viwCgRypRK0zpM0AE+DjwPAU8q3lag/xUlBDWk0EJQhCXlPhb3vyuXd/Aaq+mEyQuIPCgkDrOrzji+PojVlsed4++Igqn4ppXY+CIHSy2ghejPAk657mDjT4AWphgJLG0NLJrz9FmZKL3+f0vCFQ80wu9MWWO7TCIweE8QFnRAcWj17AHTFxBKRcS1TO1plNwpsqLpVTfv4JPvsfEE8PdRzDcyYI+b0dsvrCmzkc+rfojZGuBk5N/EvI0nMSfUAL+yyTVB9sSidT8KG0z4Ua8VLsAFXmJaK6hUFB9PfDVC9NvzJscBQmgISKRV/uBjBiupiXVa/SJvsofulZqyWMi8qAFEYC239Hn2WFOsmcad47cXynDDCwPYhT1/j6qdaVDAvBT+IhCmjqGefaL0foNf5flpWK1StfKtvFsxH4/5F7w5rSklo3elhdheqLmFcu9vt+4HXZ9MH+T0g1kyCljU7o+VvYYIp3WS2AgQB99TVXRs17BROOo5rdA9L5yqgC2Y9NT9ssy+YwiY0Kf7lf6AsbT7nOvU4ouT9De9/pgoF7mHgydDgwkLZgY3v3l+BpkNNpfCNu5CKURfRwQH2WDcMkUJnTiuELMmOC/HxBzEJFU
aP+Q16ICxTQNf3qgjJd1VNCgFDCqpowYHhjMEx9HL9/OoUTCskYRrrBuzwy4YAhdbCs/Ip7Hwvq6MB4x4fPII+BRWy6Esp7JV9gwklh+UOc9ILa5ve2uIDCJ8G0iQHt+Gdz6z5EYeVTOYZP5LoQbfXhPlrFrWeOLI87JykF9hy+pATaD96hrV+CLzW81Uln3zgrN+sIiyQZ+fcGyP4O6rsxOefdS/7tz1K7DUyu1y8Kj6onMn1fwWfEbvW90xHe3SmvrDz6Q+tDBGbOZPF+xOyAnwb4ExcTeFhcZ4oQwjIc9UC7FYfGGK8GQRWdZkmgX6QiwCZzl7Fqra2+h0YlM2gDnr3Ljdrs6Q+ABH4LLmp38+MwpnD3khjGeMo2DLktYx8na/24tU16KTUc/Pvfxfux5d6VNEd6jdHTwQkd8KdBy7OJJ4WdIIhEQ3F5XTv3tEMbf5h+8GZMOGglhH/xBYxKPH4UM+EV/uxSiMZzUMRe/JE3rMfKokDAx6ZydReEShnz0BfG3r/HhZELd048oZSZvyJ8jEqCvRBjaXe88zn5ymWYIozgml8Dg0jK8VPzSbvGdJv2JjcFJ3aTMO0B07QB+ktdmhcACRUa7AuR0dXl0HZ4CbD3AkiGFXzjkF0kUyrY6ya4HDCIR6UPItXSLJW9FTyGYbZw1bXyU25sbAK7AIwinO+JSvdCpB8htfCXWL2ctyIYsKCcbD/JBJ0iwGRavY01J4lzZo6oeAB/PIa8bafIUjUzjkTArGv+nw/lUSeBLb0toUjy8xPvwNSAgRfvbFCIToSswVv9APMl151OZxb0wnaTcoHFsy9PJBKLAxdLRIlZ0ExtJ46UGJGJhnPp5BI8yTvHvz5LAUhzx+cVyL5q7adcrDDEJEF8sw2XAR7DiNVF/G2rdjd4oAuQjqdulpAcXupNmb6o4AYpzPuatHT+6+NC+xrAF+fdHvnRe2OFwaTolsm3oZBm7G2QTSWlKGy1vxmtfRCaTDAW0/crcuitPRzI6YGBG58UaHejwIPsK4mbVzuEaRl5H41YwdebRcaDywOxHDAsud+nLFsjf+mAgGZIJUYCHjW9Ji/zpHolELNU2PFvnbb5flhfBaheNcNgh/iXL+PJdN2MiWSICSbr8IfDro7ATnvlaga8qCFGGAPFZaEyXipWNxqqKe/U/6Za7hlc1MfmZe5MrfVDWRlBPTdBeswDAAiaGRj2PIwTUoBydQcScVE+2vAM9pr6MfUCWhGuHFmlscud8uEyopUzLazVRM3Ff/Zty0R8bwpaq95fkQpSDNtjv+y7uymhmlXyGUjnlBvCSjkbnrsLMzl56sjx1/uAiBrR74SEBUauAAfWgQdWcD3rhxLq8aA9z/XOyiP73KYDQXisdijfyJGbq9zWNoPGLoHIvay4fR6sY/pdlzacBiizilSgADpxBY7BguVia1HP999kvHsjFcMnxxuKRirGOzjGFK7LePJE2S2sxp7DRFklulMOdnuBrYgzO+fdzVn+DpS/3muZI16WOyxavzKCyDAn8MfnntRYfSP74NCpNr+LHOeAhdCFqEZjyLU2pWrZDzq9Vp3GxozwxTu4NcCBnXabtHCkyhJCb0GQBuNL3ALCpdaFvkGxBjPEjyBSiMkXY1pN96V2JIBtT6AnUyJGdn2k3Gb+WPWVmCY+X/3Uo6sEVZneJf5tRktYafSWnVJtWWQWtFMVlZCBUvuICkqBeFk7CiOGNyD/ChR2n+e0DlZaF70ibY1z2AfMRGQ2EA2d4qMK6DqjAIDeDIutqKBFuhQQ2iNnrHRUvVgGfzQV8ys76n/zruY8i6vhiRm8bhLaJPuM+LkYa+vvlleOz5XFd2Nj810RndBDYHSoLxe6R1shFCNrlj/7TZvQ0jero4/WDyJyzftwJ5woiXDtYj1AlP7/fROMPBvdwKJAMaeCAJrRk7HphuzSF4v0B6UwkBr474eTf/aPLTrBVFRtqiXBIyzAAC7Iwbq9dy4jlPRHJaMuu
5O0cvc7OXgFaUC1eptxOrbH/d1Ijb/aCmyRSjzDFR7HVr5BHMdhxzq++YgEHqiiQbrM96LxHb8TVdC0a12aFi48PMQf+FvoEiZJinhFs7FFTlRr+U/2569oTz+BJwFWKvoPmIkk9oLoiwILBjJKYxfFaug3MRSlHtp7MuyFactcfcFU4zBKPcFHsqUn6wy/PJVk33l+cyI0IIWhqhWr/ox3O64rFjFW9mAJ7mDExjmuxa9VmVuEg/bcy16zn25jqxnSqmAoTkt37p+oVv3M0d5WZUtA8yGGyLCahUBAiTXLwg8SpytACKMqoEMPGB/iQ7HcrgZLmIjWD2a71L975ea1L7l9iV1up7qtqwGE7RZghrJc2EaX/yIrtZw0v0qiPcwqv8YGhAbTcFPmyX+nztlovImGiM/PL6JTIazCMu7RskTC50ijNOcAzky1EzsPRtyCiO6seWA/fdRl2qCprR4OK/kfz1IoZDD7eVLFTk+8Fs0Q5n8vqnvq+2w++h2nmnhi9ISdQ5JdTHkGwSfrQUB4ho5vuNjlGlUfF2lCjZqPbJvdVFQ2Q7MIA2N4Vb/ik2DRz+9sCJ02pCU4KlkN2jyExiDlFiGZVV1u580GUs1/8sfbyxqcGQA/z6ECNY2IR2xm6+8nNvva7tHkMls/EgPVDTo0FxXIw3kHFgLDsQGlWfi4fAsApz0ekyriylOH5xdocJt7Wmt0PrXmLCUfhrplhu5CkvZE4re4K9/AcJpcDVA6cSwWf9X5giyFpuifU9cO22V5hQy19hrYv1MrU3tpdR79c+p+QHLEy66BpmdVXWjHOljpo7ivC5ICEF5yyz+TzTiRFTbKnSHqNgf3PFojXsu3fQMh6I5gCCrCwqIct2X3MoqlrICag62P74BMS/HDiiOJ551hY1ElxkKOrIbpC8QM5wVdX5x655NCtJdVQAx2Ah5on/Qivd1GIx0kwvkbJzLo9JKxjjQV5TqRSgdxFB9ZFCbg9rv/wj0D3dhUyrc4PHZjgZALMW2xIeyV8ZXQmbUSfsOx12XjnzKjzlGOkTZSa/+8L9HaAAmDa0s/X1PVJIqnyV0vLbcXQBwhRyp0s31OQsWo2ZE3WNCtn6aIUY7E6N2XUTGnUJpFj5fVugQ4p0tCgg6BHr+hutW9kp30l39DRPklusBO6x7YWBf0Q1m5L3rFu8HR5ItCujVnYp/rzVdH9NGww11amyme4PvyxTb0G7s8PzK999eiBggdMZMEs+TjXdgjPK7GcNsVEvUcHT0I1hJIOqri40YBBHM1NMBS7rMKbEIysn3eINlc91nXQaBZVTguKlsAX5mPb3HiT7uR56F3wAa5QOI18DQW1TWQ2bEsfop8YNb89++IOPUk17cgqdnt+FCEhlVynhy7vzfXpFUylqwyauovOy+wcRifezwNOWvCpT17JBl1Z1NBT1AHG1Mht9sFiMFe8i3wRGC87Dixobrluns31PpaQDq/OX1w9lKSIUFLZjIGF2siTcSskdw9jIxAatpDQDjRpC14JlzvE8qMUjo9HDNXIW+ULQozyGHm8mXRKbAXErwP7cX7sVWG3BSGSWvPGB3gJhHbnH5SNC2t9votJycYNztG8UqmRl2nqL3R3rBK9dX3VwH8J4V/Oi1El+8rej16HCYAEYC1ae9KvZ0Zxxa0Y3IdOLhzqkemNjrjP6EhiUN0zwbaC4la0uREaGOeBneGneBBzbLCO1jh7ywR24Z580EAACSUQCJ3d/4pj4zkvcRj+dtC0CfuVVyFCDsPr7SPFc3hhfZYzkUIc5HHsiH2vxjCA2JhaHHZdmX4Mv8dNJWLRJdvdJNfCmnDXEz22ZW5dgMxy0TUSpRtnYogxVu4B+ji86RqI8o3l5y3uSLNOrQE2ZPdlMVI4mpBiJIu78ycxxgZXe83zwPA1n65FkOZGYfuC+dSzKiEyIANOe5cM6hpKzC+XZJYh2VDv5W9vo7RX8o1UheOJMNxM5AwA60kn0LyrRPHoc3xW
fPZ+Xmhfx4FzMVSSgnT2zCBLwMe8CZFBt4s/8xq3hAZLgmhdrjs1rKkX0xncPQaNWn9lDLiboensDqyY8XkUD9A53VHm5BaDFNX38bU7VW8y1KGRFor0CAOuRr7hJiAr4J/ZSR2iugZJBPKqO8JvrvAwSrmWKBT/70Fa2+qnAVy+ANzmft2JTtV/eOKexwn8anJjixZBWxC6pIHNvNWFZYDVTkW8nBHViQ0umQbUo7SZK5w38W4KRv/gbvYxMhq6gCC6TljdVEymV5/5Vusj+hCE/bTEobDYWlRhSrSMxPFIEo68M1xxkMZRbF8PBTRg3ZJuJhTtUMpQ1N6slKdfUhFgE38Uqgp4S5erWSQolk3/H4FWKxrGmtb/yRJKGCA6XFpr9/tmDr/frIqBk1Hz6lGQd0UGz1vzcpnhjlYLqfYVwCo9W+qnWyqKrlKXrawGOTFVpkYKpNkkMOio7wG2EfCxHvLC0VYALUs8b/gxN84TuCYORmgd1wawTtN4h/BZBl3ve4Xlxc6a5ZWLusmZFWD+Qd+6vTAlAG0xrcSibi4tNanCWX9+5GekmrhGgUcXJlA1PkVYUejZWE4HfKG0jaDQ3zmuiADZlfxyfX+sI46idD65BLb1rkmXPxvMlpsP5UdaW+EmtuaLR7/Ak/PaGSAjAzsfgz0AGr/OUVWVuM5pQP4WMq4hXcRZox1d97E51VFwY3be8ldzwPplb8Apb+BkcP72Z0SwsXYD+Osit/GA47o0Y53/EVHV4lpv4Ee2xW+2+vyc1elM55xzy9/e0/GOj1pG1O0UuxwF0Ar8AUbvZrlRF3T2bNkrwgos0K2H0+NI1yoV+8DlkdNQMU7PNyCQ7DxgLYu8fgFJ10VHdo/mbERL2LCTxFqd+4MOg94gbexQvNB+83Bkrfw0AMggJ2BLsT4rQhSabXmZsxVc/NhglQbvR+w1KKiIj7d3H5y+cxxFQt8kZ8CfTYY2gx7y3lCEDwzHvNsVGwCXh0wSMe4ChoIl20ehzORyyhvs86j3kCQ6N+Ml8pGDRVT6js+slIGWU1/2gKt0I5kAxbCOKQGLBQ96Gz8FBAZtGpw9Z+Hdu6Lw8yzr4rnOA6zpqz9udwA6GQqnC+DtMZFnYNgvwNW7DoEP81UDV/9WK5aWr4nf3dbhppcuQx7ixZCCmIW70cDo7bLBpxiel9PszIX/AvVm9dLLjYymZzzOOygOYGc4vMA3aFQA0g+Y+dW6qV2gxNCxbriYFYwws8O0uEE5fhBJVbPMM8zM/ylVygWo1hc6Y+gDZQRMa8y7rjyHP7OUI8E+9sN5l0+zJMMXq1oaDHb53V1OfWwLdidawZYF0Ab+JNImZHysitq3vm9OIIoABNApI2m+vLYfBtmQhs/DUnYX3a8Ww8AlWuk+X4nMd+Ad9NCcR1tXfN4HQoSDzPfpSABXIO5qtWFYswyZCGnRj09KtHd/gzxTdT2h1bUgApGgtE0JBUETY3nWkGl5BWNdjqBacL1anX4katG/QlrmSf+7RSxnUe8bZIDOa6IINw4gTKBLUwAvjmgNHB4mvd06JTFIaMu41UA3AfYfFrJ0iwV3CmZCKEh/RBgWR627qhqiaVpmLEYi/IyU9wXcSFYBfvBgdXd/R9ReMeF9Wi6X4Uu6jidD6rHysoz86Znp2rtI6+rJn3yt+lfMGqjAavU714B08v4YwqcojgzDywmTO1LLI364ujxaYSb+fnCRZF8mCVAnygADau/nRE/ONcZJ9rD6K1SpPBAiGtfeUSmt5JP8sKp6i/nxf7ayqpKd1f8FbP8oXV+D20a9WJ8I+N7696GUGWpF84F77qzYCFmAdEyFgyGhlt9WjVKjXAvLTy0vDCI+fOwyJlwxq4kLX2tSQzEAeG6Q8rvUTIeqJUqrYcYb45FSwgxXxXW5EstVv+spyJhbWDsnq7d884CMfC7zV+Fb2mYcFKlhxQCpqxew5VsCOYaZZWMGuMSG6d6Yg4hw76WixmdeKDa9E28a
ST2ehfD3H+WIjCOSiJlV+SXU6mC4IjEI3aYJzd9Tz37uJHCbZ6aHGsKHGj3fYO3reMGWLcD+MfTcD4FzakU/m/MqyNPM05iZ0XZVauCidRZtJWdLd5NcJbVqc3TFKGMt5uHt6uFUnQgYCb6hdNAnv12kdkIPmqW9lrYFkAEtkkqZbHtLA1p86bMi2gdx+HrH10lZAIAtEuIhLbnF79IBQtrNs39ElEGwshm+q5Kodc+3yt43TnN5I42YNiKJwVLKI/zl1QEpxQPKk0Zxf3+vBI+E3hhLQ6XnZB1lDv8SB/Hzdql547dc74ojAfMhrDpsw1llNJA3PW7b0CudIgwEexDnxmvYAwuCtEj3M749+JjnYOf2ZLW/gLXYXqPzLWxyApYJl+GSHVTkB3HdYYBVjn+1Kb3EFOmNblAwopIZJU5FyKBkubtgLzVvHfnyV7hBH6XYpuFnD3IGGsQZ3h2sDk0IwDNGnikliNxxzPZ2w/iPfZqnZrP+qbaHqcYZFtLOT1ZZPvEt7IJ0y8XHRjHxzkTS/QqQjmMwI7i8RKp2CrQAaGvv0x/qjZd1TaYBRbu03M1sgKCijCCYlCV/f5CFMrfhltl57Mvx9EDYuhKhb9JlTCZD7p6hd2aSKF6j/a/z4f8ydsFDv0FuQhgULjYRqpLm2vmQSbFuPVfx+vlDCsHwdSAfp7AL415z5zQBiFrS0OMf0XEXIcqe5uZrexs8XoYftOlgTKFQ2DwwrrAVSj99WxnxahAbPo7i6/n622Gz7ejmPxEeKIK5cwiTE3ED6P52arY88s6fGZtv03h6kPf293yNoUZb5HS2eeK0oQljQX1hY/BUwTQHuT0qtdEDjoNhVICf5DqCQ2xeUl+Ri81pAbs/7YD/uh/u7klxtHKLZMD/DOFc2aCR64groAhxsI2emqJepz+2SGFNBwnW3u/xsk8YFPxBnZQ5+aKKysxUbQJaSqAOl0MLcIP7AYlXRR6I1TkSY4PrOIfBi4J9hRT/wjWh/wXYhdLkRLcy9zCqKuvgLtB+rUTN/5ereXUdQpTbWw/qGiLHjMbrptql9zMGd6BSPMdVfC74qYVGENqys1NPCKFYJhFxNKynbCSoxDnL/t6n5Hg0sTmltaD7Cx/LrkohVcCILvd4C1rDp/K2FqUWeTC9SOQKGiVcF6Oy35C+Y3Lvc7DKNvvqK/Ln2/+wN0vYVj5I8bXGbiCS1QA7ywLfBJFw22hvR7hIiHWlqNM/6Fnw09TXq8UR3o7paQ80uV8yIYFh/gZuRuGGEz3/e+p+SDCOPh5frszLkXrepj49j/1xGixY1gIfElfBlr1pMqKyy/t2HBIWevFvrY5ZXwaPv2AfKd5NLsUHkVchgXvjwH75nhJazJ98BBwOmXbOYGeaPkiSpVHSKx72FNeMeh1b0uBI9zvi5aOg4GoDA8ictAXew1puA37ICrlHuWh++v+bIyVvhBDebwVbkJitd1V1UOCgqmVbCQVMxw6QteNFYtKjo8r9pV7h2hOY6Kp0N13hEF+EldAdvKSOT1l1MSlpgWGa199ttTwk2wargkgODurtHYJHvj2pge5JeVNhpwdFVvUQ1MJhwI2eFkGvmKI/dIERxIv0+lnQCbzh82Tzr59BRdQV0H5u79V9wptqSblNT1P58dGLUBaKCy4na1yrAYFZ80DsuLT6yBIwUuW4TXe5u3Of8opL9rL/fJVXBqFx8IC3VNz4uDgGRJFJ8UwUPMFfKGa2P8goSPBhkJG6j6Ura7Hu2vo3LfpYlTmDW1yQNRImAO4P7KALZRcf5vOl5ZCN7KxGdrHC/O1+tf2NYcMP/Iktm4yvS2z1FAVdBgP4UVqd1furbLLbd17thqdc7XB25lV+WN9D009LOdLCJQhEqfPEfTDRU6iYYEyBqHzFrTVyqyq/U/UHxsqmDFfSu+DsOstciHB1XJeBEt8P9a9MLYaqTJ4T66Zy7k4Ul/qjr6yOKCJd042aFsvrc6VmqtAKzt9Ivdwy
S9pI569DVKQHahypU0jFXeuLAmSyUlfAL0L4GsKgH+psNPU9OnKgcxGV/Ovh9W2WK+dd3ZC55HJweCgBOSmr3UanMXBIHDWOlhgw3FyvSEiVq0rJiu5L6rAUYethdXK2us+/9abJZqsRuVi6N2vSs6BawAINa4eelBuAIPd37hf6ymR6WdNyTXAd3BF5oDC7cDTGxO4fWGWNE/GyEwh/jaAzEd4GnxYrTuRVjWQYp+RUfuZC3N40mEDVhMdxaFOzo0dt44N5b+9ljrA0zc2+zmxMucfTx2ym6DX+VkpsQBrhs2Iff4iYxxE1+RhlkgNgf80smIL2nFmLNNjPMjLpLyBVL+R8JEpuPR9uE+NjBtWvXn2RVY3AmKBqb89/BKYcm6jI1zcvZ+Nta23SaJ/t/HjqfqfLDC9AaqG3o51c3VIafipWG5FuEWEm6K/5am6EiZ5Z7enCHlHL5U8as3KdxMtq1IMV9fqF5BeurWS/9r9R7Fpss3+FlcEPlB7kDkzBX41JXZiAr8z6xwm0Brj5EU/XhL3NwHc/dVIx/pkavInlzQa0JLmxrnEQSCzLi5S30vUP0ytxTkA++of/zh+lL18QRZWkVYbDPR4c6KDNmiboShhUl+VeQ2EZJsc3eBwupwlFv/3UdRJEtsK6cVpqhbiLDsqhkpheJsExIh//OiEH1n6LQ8HBdiEucWGs44LZwPD9Z7r4fPS9/XRfj990YNl7F+HMAPBauKr286xu5eZwdadv3O8LlMFih5ztR/ATanPpzcMcGK9LJ0PpMbABDNMJT/RkuoRyUMDSdvJLuooYghhDxvHsNqF3IvQyddz7G1SEEehu1cU+wRkRqwA5SsI7M6XfwjWEQGUA/0tJlxgH3Re4leiFTLDNBDqnDgdwB7+WFzB2HnuK5Rj5yuU3TjGibcroUyxVBprct7WCcFKnEkEUe7aCMPDt+d4xEPhc5WzZYEKw9uHzrCStn/JazWZRd5NehnDOqAH4vxwFV9IEc+mH2m+OcKGFO+9vlBMCAgWQ4+6yre9mw2q04hNpvsKHZ048qzzPNznXfrvwve5KXoL6zueGEoCMrVOBRMQHZ2oeBxpU1zxxdDzS9++2SjW7bTOkiZ/G8rbgauItqdm/9jWbJghJTKBn1ATSeD/RoA46A6gLsNWPcVHYbSaGxqUo89wR28h/p3ULfpYmYo/gc19VOPDEGcFlmnJSFuaEylTu03GPnqciEy0QNtiBIjk01NDurkEfbVzj4JKrP5P4kMLxhEY1gpCGE7e6fsuIea4GQB2wacOzFeJ7BwGnR8paFaEoaW3EIKF41Cyg2x25bI/BrhHvhrFn26bh1f/5nugF4ovj4oNGKGFwxQ8w43uojHpm0wvgn84mOUwgjVGAWv313EgobHBbPY/8+Ny+Dxerv7G8q24BKfBasUIlCFORn5Vojxyg3+sErl+ky88cX56uyJrFsEEyE3o8gZFh37jvZs7P0TCTNwSpwf5r+j82SKcHAMVESPWdk+wQjPuA5mmlqPIJPfFxEfGp1z6PzQnqIqXEtGBFdnzjGop0laMWzMKcsbqWB14BHr3HIiws+bhAYhzMIdEn8oCYC3bobElAZGM85vzRGU1pSIEsEd8s4kECPiLzjZL99b79g2Vt5ZlsypuF4jDdRvjX+ZOyQOzp/oveMJsQEOOUuzFKbNhgkXLCqjPgvDXTwY6FDXbqkIwtTk+RK9BG+KXL9nP7sZhibpS6kiHfTjlSHXGV7L1RDMUclgnr2aM6ISQhiO2RWkPVke5vgkAtRJQPsY2b/pqmB5Cvs4MMDxMrn66fvUMKKK9dwGI1HAe1QqsA+ElsJHamazFgY9Fbuncn/v5pXoTXILtBGl88tE7SYEnQxpkwM1oQfYtqaXZAB1XwXSmU5hKJiN42BsDwHBDkQZE7YMbvFBaaSeb+YOQlRXko/O/BRPH/ywpBL/T62dB79hyjSPS4Xx28F7FS53pcy225fg/hufd2X9Ktyga/
dAdVx9Cp+c4JsddqMP7HNR2v8YLtGbOEhYNO65D1UVdJacSlg9M5/oWFh0uGmd0KD/8BdUFIrDI40HC/jaKzcuOK2/tKgF4DHvWoKTBVGUXPuXRMJPSu8KvTR1B++Loa0B4KUb5nWzEP25n3TAEPGxY29BvxKEItK0DldADHeFBfVPUlzMgiQQZQQl6QxyYKrQ9YqyZNtBfWmzLzNPqxs/krXMW1kV/qfkiX6YBsaAz9+tEdYGYs7U+6vUzQQ4Lo8/yy8lx37plD5irXUMcc4WbZizrgKq1hygiWHMGLu+9jDXX4W6scGHydy5D2UKU+DsUfbUn+O5ca5KIRgYjIeC8U1vIwiMRvuTbaqqEyzFgZQEPaoNToejiZ1iMSuzkyehFCnjCtDiyxeBYU9ppgtloHlxOgBa7Hrc4HadBOyo5yp+6tL26eC8pNYrSQZ9uv5NQcSlXmUVfyf7y6/GDCShdie71ePJ3SekJX6+E3h/2fXRj+Ek7mZQz9aLOVOsKO4kqNGA8GbLDyP41Fa9rlocmyxf8TEDiGQmM66lBaJ7dBb/+rowQQNp5nKPKF4HDuWEFAAdWv+3i1xoKwlw1Z5R9FWbAstuOqA6jf8MELkJjtwGIj7fl9uMlLhWCMLyS2vIdB1iI77wSlBMNrlf8VNz84nSk4DSlCJBE0vlFvEq59hsIkdeefml7hM8h+LTdQmGCbGibEufhEnL/slOfoIQpEuG4mqNxkh0jmxhFY6Vh6NY0t1Jc9BmUPKlvGqo2dXZtp88s8ZSbJ/mBJoJH4T+7c9R9j4wfSVf03nqEExKweGnMFKD46W5e6d3DVkppnL7GBMuGp3gX/p18V41xpdI1dxitC+Lz7GVB3u4AELdCgqMVNynxcxoAG+VEBlPGmb4yWeeDTh0FUT6v+DY5SLiLP0zPDPKYSir3us/F6w5s4TprV07XsMPJYiLEEFrbFkcIUI2H5Y8WvdrGPc9ipp9FyAAs8l3RiN+bfvnC3bQUdrb/xFOC/xZlCSWEF/jQHk7eIvhR5gXcXuPLQlOglRgLnL/wrN5dw1zNhVDDVehN/rBSAB8EAMUtb1ZBwrrC0NsSlQ45sre0HQVfTSeqygS3ze/wAYB/Y+6cumeFdZadvI04VmdXr7Yw4A8rlakYPSLa58X2n2+73D/BhBK9E2BO45qFnNQabAAthttB4pPC+ImgIQfPmbOFElTwVRS1ZCFQ4O8CttNaD/BbFm/kXLdGpof/KNk3ZPvOf43EpDrKWpLX4FsiCP11zy5w5jX8UC9gyjN27jc9HGNb5pOGIKw2niS+1OLW5x5/rJjGbTRnwqQOc2OF1oTGyQMl24jcwkAcpeMBcj/s95zzWgg+6XgMeMzVjN0UuLN7m/bGBQ0a9liuBYwPwQ9YvNupSjCMSBVf9cJwSirJ2bp+7pL5ow6hjeqfTtQuXPTiKEtAnWYBEVq2VJwMW3zpwyLna8d6c+2iePfeIYrLNeIUxX/9lmRey4uXKzQKZfvHVE2D5OPGp0N94Vc9da4arXE8lSlAT/rXZpzgBgBt4CTmYGW8RvDSmD+KogzR+FDrilYCzTKVVh25shAUFOcNXznLb9U9Czt4R4qhkcJ/1joC+3gfpgJ8LEeQR3Q13Se43NsDDmNb+RzPSUQ1qniCJdmyo670V+mijugIXaGZHKdUe2xuv2Am9oqXOEozeeKfQpeBtVby5kdTNLvNq3VnaCMx8btiLurLDoXnq0Q7ifpTiytwYNtKbvwJojLXSsr51IrFXnGGrfcOBd4/sgsF7Px7z8rM9q6E1Ng+ruvvi5twAmseeT91UTw4S+JWVeIoZleAkdblzNEf2Jw62dUKg0Zn1LaErSUOiksc5d+Gd3s5+YHcN+zH9QvZ0ZlE+DioOehESC+2BpoV2fZglssR0AMCA6ATKZcDpruwpYzj7WXNUNukh37r8MDUwbkof2LWVfpAHx1ILGPw/FNuR66pYkzg6xh/7PC42oJLYioy7x3oq
byWbW8fDUxEYz3dhMJA6yFArf2FFdmT4UJCmY9e63B8IP5UtCNvSdnUbFXGstv3vnHRWgGqZeS1PPQGvUc/jqKEsi10xPpna0GmKhQJOAKjMnRodqaCjWnSdeanXUDt7ZdslycqBgG1+WRmD91s0UCGezxzcJZAptcy7aUa9vzdSrUrU092+T6uQJT2YwGCtJ/qFsiL32QuEL4LUYzf8VcCLj90guakZ/7GVQRznbCr7fX9bi5nbVMRSfT0ECOscpGdlsIUi0Bom11zZufp2FFFhhVZY86ILaDktlGdcSeq2omRVZCshtbsjGb8qiWNgDbw2hwIAfm8ZPpUM7J65KuDk36GDSMPSbhH1OHPBQa8ZYhtNXGOJghhDpo3g4mMYOnljoDmmIVSTUtfb7IFAFwA3FthrGn9mYHwtcOAfyH2beHr9vAGscIWo4ZUPya6BzDDHESYKMXXS9/0sslqDaMFH6Rf86KhYdnMwwnTmFl4CtSRp0QfrpZPU3goOVmHwjwc9aLEgPwOyss6fHQEYHnEHKgHwRUUHnisHOYXDPROi2swKy5j8ChNyTiKtJ6orV9OK9ZMTE7FfgbmLV3MN77F7AKncvf5GFmDGyW78ZKJlHOvpNdwE5MKEv49vvX17DJgksRYfSP/ZwmczcvTJWvAcST7TePzGyhxXOxK6WPD7J/+X33q1Yk6AO1w+rezA3HU3xTj6XJGlQIgXaJaDO6D48YQai8dpMUQkYU9X795jMcQ3TmGpHbPe8w+klktyOYneMhYKrmlMWQCDGg6q+vGAsHX4M7ZIQuoQABH/OTrGF0q4UBsMUISxjS+C0L6T+axVob8JOC+KMtGnmiz4TyCA7fV36llQftz1Rld3scj3/3iDeyvZHfDS7XEWvrqjhdxiUDn+PS5U2Qrqg6ohz2FsHIPlkfgvEHDMumARme+meThAElC8y6pWF4l4ukuZWfi8dv00BLN/cUH/XamtzdExVE3cuf1B7KYExsU8OYzUwsFl27aKp6zdQ0rRs87B5VxXb1PrQoMqAYyzBZRmc2A4GVwzVXicPs6Rl0OIDj63PS5M0+qeM/52+gmYSTUtYbXnPXko7i/7o28wWCNjfyBN5RMHCKptnSsbsjr6W9G/nQC3csZfpckgIv9HxZfjRakaHgJA9Gzij5tOunlzyXCcRC4coFqVSECSgHEApMz8Jl/DyyY5Ac0AUv2flK2PvPgFtuWNXv9h7aG1PuVlPgGULYzJoKVXYrPNxw3TAWGe9ShII7U5hI0KhJALj5mL6iw3eBeIsoisG6J1BWJM7QEeYmBuEG8Zt0TehOqcooo8PQSHzgChq+qKvfseXF1JFXCXPgeAPEdRUcxOcXTKB2FG0V+J0Ylj965Q9N2UjQht9Iie1+GWgYjIQu8sOXZ9JuNjhwY82/v1qujcYi86HlDcJdedJRhimNJ7r7aYCXk+i++NSrZbEFB1kbSAQaClQbGZl3WZ2mfenPTbrNNEUsEOr91D1aLXJkZ/Bm2arJMmzzyNloCWO1oWQaJp5djOSrOmCE7m4lULw6xYyM4clhGL/dt/K7IH9G2FLrc+GdaFYGobWCNSp1fJC9KZhoeE5/k2EgCwP6khmCNXyDvAgeE45bxERI6MOo5pou04QYWrbzAd2nPXeS1lUCXTRMEeR8dmR9FWjov1jMxSE44BmnFCksJlNQMBXYWZr76goiqsuAQR1br3NVlW6NTX7NEgbEcqitmw2p9Zz6Bu4OeUv2GU1nhxNsvWT/1+VrsqrVD7rk918WZRE0HJB6KuNNAHGuzVJ9vGSi9icgawbXGPXQFwN4KS3FIMf3ZU6/x6knOA5d8d2J058UJT5jB2szkD/xeXV1MvBZgIOACDq3IeUNC672odDd5ZZ4OgheB+692GUeMlu6ZCn/UIquSOdvlR83/mzdBKb5bEcp/MgIkx75CRZIGdHgk2EJZefJjQ7YfCmmHkPlXvD/Qbp1Yyo1nUZ/YNPkNgBfXLnvSp
mkmdAiCWFkLg/mmtYq5AuHTt+a45uPcPTLl3y/xNctdv9UerlKA5ze1pGXxQDX4XeRAPsYFzbd8bHYcb+yPh4F3d91ikMfgqScwDO5m1CriPs+PB4OdRj0OkA/3jgvifYPkqlEChYDCt6Gfsi0EPNASnb90WpA7xrC0Z5awZ4nQu/0oVB6BAD5gYlp96PrD77mycEDA7YDXk2oxMTIBl3d1oYfBvucYDgluQf6ASvWzz6DtnWGBTwHYMvw9vP0EdtsZsEJDDOCMy2x1SKf6fWgi+zhAwGvLsAvNe41R54zajIEg+uBdLeILArRUDXK6Og3/K76BVMf/cXA+g1auAstJ4oOBXGBxhCJ27xH2/beocElD4eCjIGL7Eg19KJd/YkNZh3TGdW4BMzDjQsWDcpOrqYA+99cYKAWyjrjrljNVZQorKUk4hHDMMpW7zB7kkO6Ut/NDMsbPXnP2Pu1hOmOVpGRw7098EdPV36jr1h1sHXMYx1yuJ16MA8BeBbDYG3T5FBPEGkPCBi4EoxIwbnG99MmAmtZAEsCRtAyGpeB6Q6PPYE9Id0IDtQZAzspQxDmElmcBk44yNUuPQPvOK7zhTcN6GGxfhcCPOjWwDlWs8NuT3RFeSD2bZT4Gy5kAtOdBW6BC3g+A6FZRvRqQXyTFCg0w35U9MQMbPDxoinPdpttTNjdfsmC19xpci43Yztp6X4rDsUFzZpZjVinnrpdYuXPrRmac4Jv2w6csepxcvP8+s58unTmd6YlH8SFb9ZX8OyvC+wxSPygPeeE0ViSUaGphPooK9vTBa/Btn7Q4uZHyOPbmChRBIrCw1JyRr+9B7x2CFvfHBmQd50twKO1IyRU6vguG4KKnqbR3GMRY6kt29Ryqo8JOrnz9KdDOi8gdGQnCssYEqGONmkzgGJLtlcBYUdQ4s1uG0Ec+dbcihYVvruaot+G1ht9BlgLOBC18h48BQvanu/KwQR1QDHEpqUpAjMWwXa+fYRVSOwLm/prK0SzYc96pJl2a1vhhe97gbqpDAIe+GYvkKOwGx2LIiTIokaJqyBBAdwQzmJzSr7BTRtkoGtxOO4TYV3jsmzTDAI/B0gnnfWEq9/3CquhhPelaqyUiIwiqPHRYf16Lhzd5JSjygC4fvA+cjQ3s4wWGaaODMLqh1jQQuqZwyTizQ62TYglHoLFPclMrKQhsB+e7wkV0spKMkkvlyIVATiOoE/rFXuvsDt4IVup8ejyf2xS3FZ/hDYivJv/aSxFSEo8QdxPLrz5E1DZ9vl8HUeeBdLAExCglyFLNjsZ0l4FOFq0+/w2WmsMhiIqOooVUEbJtut/CPAq2q/a9g2rpFas87JPCo6onDq6Zs9lq9I6Ve8n8pOcI95gfpaccQgT7xrXwJ8maSBdpQ3yceiRXr5sTyX+F22Nul1MCaV1s8DiAN3L2D/a2ykBit6eaEYnkMaaEie8r7p03IPWmiCrtQpcircdCNXKob1WhGo2tOmM/GZzhxBRsblltwWwVCutHgRWvxc6y9ebLhEMa6rm0TOEEcL8DGQGqFvGVfhbOaRKIIhhSRZFdqo35mkBbx1efHPjPSg3aNg/8Jj6NX0WJZS8kqvDAQ64f/OMJq/8w5wsRvXB9HydxLsoX6g2RUnggxCw4yOSsIWMmJhTgmpGog3DiSbBGLvjS7OYgmAZsi9h9F6pzk0VRuLT7z1UBPsVLgpw/EUo8NuHMm3zjZ88sk4Yyl/5FoQ+P7CpT0HZ4Zq4dNqN2WL9m8BD+V9CzpMFpD8KIooJegD/hjO+Es9P55u0DGMOBeWumHbz9fqsoLtISODYxUnRL2I2zv6eqX5ciD18LNYLmS5iu+X08emvLmeZo7QTCEVkfbAHnOjAikq3Q1k4atDmOYoPAJ0ZdHKknpX00bDg3PnEPfhpllj0So9TEOdoDn+tGCIupiyhEx589IT3IKk4V9PTcdvE+f4iDJRDlJVAj0ttBTN7rDrMnu7ksd9OFmILhE8s2A
UyDDpT5Js2ph96HE4/NJTJ1IsLkOy93E1D9bTtT1GieUoxWeRl1IOGCa2zbEUZxUbXcbY+K4e+GrIBUtgQcuNtg/ykYkwSsl8MXO/yYNkZiEPyTvvv8DGlDtJKRsVo/kX+TL/OWE65U3HMpJoffExDSN9wUyvAS5mEsvcRMEl8EF1jT4z2Bej3FIvQo9ZEAt1j1G9XaTt4mnG6x8xTMomYx3o3G+kkL8f5xCycaWwlN909IphG/xsHCYWp67ZbsZ31D4vniquHlZ/ezuQU3yNcfhS91CCAPYyVDXNuFU6ai1nlpDUhgfP8AZMZ55/9Q8SzsT6QV+5Xa0uYD6MCU3ws640HWoBeaVnMS7ap2VyCFFsH78p+viMxAtK1n3RpO/i5DuQy4tA1rvdCE0WpFgJj/ECypE29FZVkRhPjKO42htiqjO6w+++yGaoz6EREdTYaya7jrn39eHBjlopTfiAWHTeKNQy7bUV3pVVXMChI9pvivQH6Yw7TDHKzPV0SAACyiRvxrYflEaK5hqghqP1HfMQAvwf+u01iN5vEyjxsmVVqhRLW1nYtB1TRm/IqO96J1blpM2eWEXABgy2GZgcPc6iaAu5NCJmQEM6PyeADICl8+2Li2PQ3CBnF8A57W9/BCb6u7tEQVK6gu16xahAyjigxrRgBIxfoEWjUjZdmh3F9iY9PqsR+Fm3+C15c5XpjABAZwt78mudezV/0aB2SrgYDuvPNufKM2JQRHmHVyZlfDx+ClbJ4APSNdTUtpbK/c/avAbvndjxpCjDeFlfvgl4cHrJkh4MP/nkBDcUralYglm4888MYGeuEfhIhWtGjg2UMyKy5AYZHcc+mtkWZqnXBkElO8VUU5WChXV8dQGtCoajp7XthD/ULOGD6v1r0PjeKiIao1dvdMDey1t0YTUBAQbo3BUgSoh8pmoaoMMze3TQFZ5pwAEisI6e4qeyWTO9zOwKYHYWAoSkD1Ud2gJ5GSYY1kkvFYvin3jPRYCvqmfyXqa5iETnT3+FhFICdLGyjCekYD4icg+p0rmAra8M9ejTwG4An+dVMsK/71YIAZuxPuvN+BKTV1hITPvP6vsJJzloNhMdVO5E8aAXb5HwaDRhK5hXB/5iSOt/j2aw780iFkh0vFUHd4++S6VrKrfvjK71E+EHFfPgcG5mS2Xb/NLaMC0LQT5pButq/3O4ptLlyHtLECbfXM9rRCb/8CwD91AWC2bnTH0wY/3O4vCcI6o6Qb9BGIysigGvSTVuH428B/HgPEse3+3aZDoo1I/IuMfPTkdrB33KfKdoQvsh3dz7m16/G9x3DBpUL2eyXPPO8c479EU7fVPRC486HV1w27RIqVB9xaHTe/sCcI5QB5UedgunVaSypuAUSE0yAy5Xdl0zbA8yi68ge4eExcmTbDQLNBvVDKnlItRsq8CqxC+s+zyJlaGwbkiq8vl3Zs/npB1DeCZtnqLQwMrVTzr9xhuQnFtJoQoHOSj33Mg2fs/tRvaUpHqPZPCe6O9I+JVdg9Qq9AL8wWDNAWkjDbuZyj782o25w5IFkbUz9Vbw7wns3uvGBHeLv3wK9EPNlE4/h9fEtCO92+ZFdN8GFb+magbaCPftLuTbwIO3bYtphCT6YSq/cqDNTxtLOv+Wcg0X7lhURF8V8JcaJt0HXVY+OJsl0Z7w8RsJ2g/pavDx+k8wTip0uh4xprVgQ4VRyWG0VIW2VGP4nbGWoGFass5AJDuZhZBAbGjI8VqVh508+N7+NDs5MgWuebOlrnT2SRnxLnkIRATJ/YWL7QFsrPPiw2H8hdCSKQd08ykfWAiL5BOL6nrx17YBplrehqflgyGzSI3xQE4bKGDw96lgnPC1VKICbYty/psJAe6MA4aRH0HPp5zhi6PuiqPTpGmeMP8Xq33zxFPBIxKeE7e5Uoeg1ZJfLuTP5F0opcxcBVdTlzq7tAei9ATHd9YrKLJ32aN4kWLH4DCx08DRNT9dBL3uc6mo5MYdn6C
lmsGGk62VB+AtVNRibyf1UXJPNeJihurpfnCTtCzjOH0TDpDixpcgpIZmCwNz5uYTrQi2owRjXsaBHu6HY9Ish8uatAjM38x8gxfJqFyR1OQ/bMBOhIGcBgTM7EjeDHQ572z2MrpVlkuM5zJ7hruoNG4oym/gPzOjfCPQwDaEqjwzXCQyp3/vTZceHmPV6H44EqhySc0V+nIXb79tujbY2vOSYKXxtnpOMn7jdE9r2R2yVgq/yPbBIqJmeTXwuKz9VjUQ8ZmGK8o98PixK16IxfPJseE6hmeT6Llx3CtLgxGRl8UFlgWZ1pnxaeUKVEiNKrDCurW9D/VCPFD476rHU72Tr5ffXkMaEHSnKEB/5vh7EO5gsrLCJERYpHy/RK9KnyWDW/Bxgi+wUQ7leDVQW1ZM0yEp+3WvZpS6FCCgfBEFcWUzYcfpTj12Yt1d3FCZS93L9BaF7aF6QMVO0U0TTKYcxj/rOJYzP/to+F0DskU8YnRpNRW0A8H050pvUFEek2Fa9NrxKPKZLB4UMiaScy/+758Tr2xLg50S2Ca5DIVP1WaTadLsZqe0ZD8TtsOW9OXM0Ex/2CDxiSF+b1RjsdAozErWCrvxH0lfbNYaJHyp/J7t5x3Rgc7Bge1gtJvmw38Wr6fWDwD2Ap4OMcCeQ7g1pR6hg/LSDj3ngwYifuZZnRRQv5UTd9//5fbSzfxRwUrEsc1k1CCZTqNYl9pu2YtKPuTn/SADbk3A+KaUAvsk8djVQxLihWGCH1AF+3NFfZLnvJ9FLtMOxgCXACtcjGcsjWY/MDsTzT3H/zqSpcoZANn/DKef9FpMwVGEEq7flcnPAhfU3Fdx4Mv1osR5CzeciGrE9cuHBoKIOKrZAcmyubBhG2Zbd0JLNLWh5u6BQREzEIHtXign4BPzz7WmFoUkko2GsXFdBDdMKXaMcxHUOykTZ0R8Hxw69OjaCCoYwYDyCOz5QwhLC7t+tK9Y7e2rJ6IH3OtNRflh/r4jFAAbWkT8zMl+aGihAgkoKGKxHbbO9vd9gDexsZppiFGAlOQv/9+ERx/7oXL4GLpXT/USrjjAOFSWG1TrIClXYbpDuMdYonp8DeZXem7f6kwbh7tk5w/e+W7YMHh0W7qOxj6QXqzHw8MyS9VtPY5+nELzV6oWzu6p9nKSvw0OsRFXJyhD/vC3z7aFu7jRFxLSS1L85pg8NOUq4F7+mpl7DYV7ZcGc6nT+hHVWtbChrYBOXRr4NDxMhtxhBkN0vh7HQ/aum2xTLXv02uDCDn/wK26hvhuJRWBzN0jNFwQT4UjTYlPa928rXOKdklqdAxoEcWhg6M1EDnOSqK3hfhizMFAYM291K04/aQkkF/KJC6FZNKIZY+PGTvB3VmQwzzqLDsoPpDP+F0SnczQy0qmN6iEw/QLxchKIETbG2vpC11k9H1S6RRMjjutBU2u9pw60Ye4VZYw3RADIJFWpGSiGN1XIxrNAVvQK/WupGST+d/1sEAIo2Aj6sIDkx4+64sJmeBMQMJ4og/91rYmqpghhsfvf8q65F0d8KYq9hwUs/e5c+mzc8Wvq34c7UlNdFwI/cpaefDiwzM+pJZ0j9u4V4RyL3Dx5GOxkDQO8VK3TD2pUgmngWGGE7IiQiEczaV6Zm4ifBq3NbjCE0eEZf42Fj+LJtf0qaXQ0KITBM3iJwp/Djkc9GN3o/1NnoO/E//I6wNa7aNrhB/nm7AhdxAwKC946GqEFgxXAIVkPtU9eW/IjnJ67FhDE/VAZY2JDbuqmRRvFk9O5Dr/MYmFidBwM7rLXGSeifRa67yM5NMlBeGCHBggaFN+V4b83Upz0dvxOXDVzowaO6Ojbnm7mhSvxO/Q8KqWXD4LBOx4YXJEXTsQa5QYHK6RvL/uYgthwGK7fgmEgYljsvmKTnmKkBvX2Gv5fIOBcFhpCMCPb7owBXmgpdK7SHcrnfviDQ4puFDZALvORP+cmPrL7p4b4PG9aezW9Cax18lxkwxosViS
18VJnq0UKD4g54E5PDYE0YJggfLMWGNlRnIJFt2BDMDc3N57eEiGGj7iW/yL3l0mkgbLgHubU7wQ2QfuARBtsxha7228fWu/HBEtoXnjae3XNglFFSExZ5IeAL03CxHoWTWRKG++z68ysnDcTHR2atAxPw4PckpTnX8PLboEmiLC+UdNJdDSJ9R5V+mDM7Xl/ewO/OyfeZmNVr5rB1ZUPKUYGqGXZRlCb0zLcRxxDiwgZfMVlXtpB8oS6gwDim7mKK8UJ6Dt4XnvRFysupaZzJXJWmitDbZAxgQodOwShk1hISaUgiHrUi9nP/gdMCEHCfyOoXLrMDAGNoqGE4deZ7fJOtdpDRb0bGFHciTFPJsBQR/vN+uMcIbFrERUUwDE3JzHY9ksGOfbgVF7T8biX4ExNOy50ghbBcwyD/G4B76N4Bx04URHYrq3ymmpKc/J6WqoLr7A9txvrUoq5VKIvZ/qiXjX/CCFIoRJ5lJdVHPC8C7wNlIr5wWzPCxmZvsF19HKCKQn0a6V7mKoIWW51o8xliDz1Amt+6g8iEpF9PnEe8vufFiM9Ay8ZeiVaoMEMakE5Hq18gKQrIO7zBAE9DiaCiJwZTwgr2PrzXnjVo44sJZBTsqF7S89PBb+9xP0W9jdJa0fnRAFTQZ1OPYG2ogZFgy6H40aIprDhGFQMqVXSfakE0CigvmTv+GLlNVhaAAec1ewikpWSK9ByiQMXdiw==\"}", + "return": "", + "returns": "{}", + "epoch": 1313154 + }, + { + "cid": "bafy2bzacebyvmrxtynl72qqgrwu3q6mnwd7b2igvwwikh7cussll4bwlsqpy4", + "signed_cid": "bafy2bzacebyvmrxtynl72qqgrwu3q6mnwd7b2igvwwikh7cussll4bwlsqpy4", + "from": "f3tehpvupmrgejr3yg6r55y7hqrlzrko67ignegzbfhtcoi3m5xgslhacazgx2rgycekdhumjjktl7lapsojaq", + "to": "f01387570", + "name": "ProveCommitSector", + "method": 7, + "params": 
"ghoAB/f3WQeAquNfbgkVA7UZTUeaapiU3p8OQ1JRq7WnF54AegVlPeur8423TJsEFkKDAa2s9ZyssAHYQTcdtTcAemSsuZImK4lHmqBeJRyO4+83TLr6eCfcWnf8bAxsCcdKx/WXIeiGC0OuxhwZb0bFaapnQKdGp9qomyCByUNe2W7ur2TCPT5KmTyBq1ucCBzoxdwWh8lLtrdWs847tPUbvMKMmZd6MR5vFy72NpPcC46zslX+UQ1OLTmNu0HLQnu67CKWcECKjxHMVORigbZoyEBbjLYem/zX2bkko6OjFcxp0ty7JUGuoepb0X+JiIB/jJ0IVoUzpbcI4u1cPL/aehqX59KceWxfL5rMVVLptECwDnz+lDmA60Do4OfeHAtwFzvAp0MlFAGVgHYjSq88A2fBHnHxj80TIBuZ6nMsM4BA/t1gX27qaMoninowYYAbu7IiEHR9tu1AsWglpLItCr5Wjto2saBakZdeZcfoS4VNXWAHS+aPsZGc0VxcXs8N6MWGUX3MrYpxtF8AEUqUXAINBeBsexGrHGsb7U9ruocM7wFqtHW938jfdf1QSL4+CJCDdZG6tgiUU8Yr5bXZwhjdY64gCDG4HXSmohFB3cZ9PVNnTrkIa/qy4vJAoWxK34CEWlieEEqNF71tMuxCo2Gx4PXkjfcfmvoLGVlamHSBmk8XI/ms2MyKM4yro2o+fP28IrmjjZzk6MspHHwtTX/1ug/usREt4sf6rZ6UWvL0m+GXmhQB9gmF5DyJ2Uz3gsY56dbLs/ybgBLpm9FLqSZ8tDyMJKfq6yWpvzMTYpeiEf/w4TByLcW+/fVGPrjv7l+b/M+Th0EsYvfn11UOkoadt8io+B8dpJTD5e3WDtM6NF2FwyiJjOFkTEqdQR80Df9tEUeRAfEjP1s4ks97gkN4Yfa9a7+KXV3zKF9UTJx0pPVp+tD39HARrYj7UeWrALnpaakAiVDUclKw9jQeprY8szizR/P/nnVEJD2lPBKnl/yHJK5zzP5hGAdMzXO+rNLmnygPr7+gPkn+f7RtZXich39ufEzhWR4xm2Ljm8rUhG9eeq7mFQ0Ho9grPuyM+VLeVQ9TibiOMBiPjkDpDHkeGU7Ic2liKfkiLhLQmQig9dlpT3zu7djUBzKRNarLKpDqk3lKAFClLiAae+vmpW0Bqsz0L6m72HUC4/pcle2yyiXaWbQbPSSpPHbYbAv+KDP+WoARk/kfQnNPLt3kY4KCQX5tmE0g+QQzfbI9deuK13DqZQzeMlN2zonaREewD5DnHB0+qQ8rIuWO2Hs8/aiTp+U6iC+HpcguwJtXiBxodnIsbKVlSw0gZaePzUNkxtfwlvnpr6LtOcv3DAXffsLe2OJafEBbRVFcClemk1Z+3uqSLvcoXt6VU54GV7k5AdNYCaSaA6WQLWOCw+RaWICJeY2d1z830otd0jzkF77CGrK9kyQC/zS+jMCa7Da8SvROuBAWtFovBoFv6hWnw7HQub32UCwx11dk1w/tuCOn8hvqn0B/awN+W7P0ffZYBKzAy4zXqCWeaLVbKzWoWTy/XtKjUrK/Z9cYYeJRqDOo1QZvXAqPam8T41dvp4wd1tsWPznhh1k1fL7QRhKIp5OVX3LodGTezAeMJITrhzBzktk0/YG4waa6HNamdbl+bGbmN65kAOmyWtD9dGDT/KgFF/ieCDHuBoYCJ05ZvM3GhtVeRZFw5inzAbIm+d351A8n0CscthewNt8aUGX1JoxSQr+eGpbNgriP0KoTNC5GWP++3qUNh0zuATQvypsJhPFvXwrQoXlPhWqkxM7nv6HRvIxcZtlILo0VqDV4TcEBDtgsBCcHpblzfusImJRH7UqvyI0Ogk3itpHJMP3dB2k65l7uiA94//YyRmBk41BRib9MX8CyQ5IGJfdVImSSv/+dgoP9F17wA6iLuoVoy9zWRXgTv7tK8TB1tTyRi5qBU04eCN6mnzft6JnfWr9QnJNnr2XVr0L
m0/Q9AAeVn9FKJNZ6GJ+69n7RoG1dxVOto47FNNYkBgpPPK2AoJcUTXuL3cGKq4N/rbcVG1UdLZlDkqEliuV3v8DucLSj07DY+rORCaRmfg9FfyblkGaVhSFJHEdXhwNgkbyas+NviQN776z1U+9+Cfshu91PYDgV9x2bmnOErg/1/5bx2pt7T8uy8kRXBR5HDdyMRUsS2dluaErWkVcqAH+Z/uX/nSRetDlXA8RegSsTwc7NoELwQv7S2USxtQzGw4k7i8UlxvBHdvNvESXYxGkau4JElu2F9Wwafanyneiv31ML0ettYTN4pwW2gl3rYP1MXahySaglVpMLnrBsuI4fBhSHKxQlTT4ORuQkmehnhoK4y+91K8wdNLqOt591LwuuRJrZf927Vca6JHFu8qFMRVQdCfMz4EUl2X1enB1yjB9c0wLJ4VV86eqvBQPla8TSn0bc+2p+5oIuCVPRhwyg26pgw07Cc0bkyOoJE/NC3liXz8WpCyDPyNnShpUoey2RNgxN0aFod5t6l3pYauaQmcTHRCVTDnD7nkH0hxPIlO9ZpzAbca9SOvR/", + "args": "{\"Proof\": \"quNfbgkVA7UZTUeaapiU3p8OQ1JRq7WnF54AegVlPeur8423TJsEFkKDAa2s9ZyssAHYQTcdtTcAemSsuZImK4lHmqBeJRyO4+83TLr6eCfcWnf8bAxsCcdKx/WXIeiGC0OuxhwZb0bFaapnQKdGp9qomyCByUNe2W7ur2TCPT5KmTyBq1ucCBzoxdwWh8lLtrdWs847tPUbvMKMmZd6MR5vFy72NpPcC46zslX+UQ1OLTmNu0HLQnu67CKWcECKjxHMVORigbZoyEBbjLYem/zX2bkko6OjFcxp0ty7JUGuoepb0X+JiIB/jJ0IVoUzpbcI4u1cPL/aehqX59KceWxfL5rMVVLptECwDnz+lDmA60Do4OfeHAtwFzvAp0MlFAGVgHYjSq88A2fBHnHxj80TIBuZ6nMsM4BA/t1gX27qaMoninowYYAbu7IiEHR9tu1AsWglpLItCr5Wjto2saBakZdeZcfoS4VNXWAHS+aPsZGc0VxcXs8N6MWGUX3MrYpxtF8AEUqUXAINBeBsexGrHGsb7U9ruocM7wFqtHW938jfdf1QSL4+CJCDdZG6tgiUU8Yr5bXZwhjdY64gCDG4HXSmohFB3cZ9PVNnTrkIa/qy4vJAoWxK34CEWlieEEqNF71tMuxCo2Gx4PXkjfcfmvoLGVlamHSBmk8XI/ms2MyKM4yro2o+fP28IrmjjZzk6MspHHwtTX/1ug/usREt4sf6rZ6UWvL0m+GXmhQB9gmF5DyJ2Uz3gsY56dbLs/ybgBLpm9FLqSZ8tDyMJKfq6yWpvzMTYpeiEf/w4TByLcW+/fVGPrjv7l+b/M+Th0EsYvfn11UOkoadt8io+B8dpJTD5e3WDtM6NF2FwyiJjOFkTEqdQR80Df9tEUeRAfEjP1s4ks97gkN4Yfa9a7+KXV3zKF9UTJx0pPVp+tD39HARrYj7UeWrALnpaakAiVDUclKw9jQeprY8szizR/P/nnVEJD2lPBKnl/yHJK5zzP5hGAdMzXO+rNLmnygPr7+gPkn+f7RtZXich39ufEzhWR4xm2Ljm8rUhG9eeq7mFQ0Ho9grPuyM+VLeVQ9TibiOMBiPjkDpDHkeGU7Ic2liKfkiLhLQmQig9dlpT3zu7djUBzKRNarLKpDqk3lKAFClLiAae+vmpW0Bqsz0L6m72HUC4/pcle2yyiXaWbQbPSSpPHbYbAv+KDP+WoARk/kfQnNPLt3kY4KCQX5tmE0g+QQzfbI9deuK13DqZQzeMlN2zonaREewD5DnHB0+qQ8rIuWO2Hs8/aiTp+U6iC+HpcguwJtXiBxodnIsbKVlSw0gZaePzUNkxtfwlvnpr6LtOcv3DAXffsLe2OJafEBbRVFcClemk1Z+3uqSLvcoXt6VU54GV7k
5AdNYCaSaA6WQLWOCw+RaWICJeY2d1z830otd0jzkF77CGrK9kyQC/zS+jMCa7Da8SvROuBAWtFovBoFv6hWnw7HQub32UCwx11dk1w/tuCOn8hvqn0B/awN+W7P0ffZYBKzAy4zXqCWeaLVbKzWoWTy/XtKjUrK/Z9cYYeJRqDOo1QZvXAqPam8T41dvp4wd1tsWPznhh1k1fL7QRhKIp5OVX3LodGTezAeMJITrhzBzktk0/YG4waa6HNamdbl+bGbmN65kAOmyWtD9dGDT/KgFF/ieCDHuBoYCJ05ZvM3GhtVeRZFw5inzAbIm+d351A8n0CscthewNt8aUGX1JoxSQr+eGpbNgriP0KoTNC5GWP++3qUNh0zuATQvypsJhPFvXwrQoXlPhWqkxM7nv6HRvIxcZtlILo0VqDV4TcEBDtgsBCcHpblzfusImJRH7UqvyI0Ogk3itpHJMP3dB2k65l7uiA94//YyRmBk41BRib9MX8CyQ5IGJfdVImSSv/+dgoP9F17wA6iLuoVoy9zWRXgTv7tK8TB1tTyRi5qBU04eCN6mnzft6JnfWr9QnJNnr2XVr0Lm0/Q9AAeVn9FKJNZ6GJ+69n7RoG1dxVOto47FNNYkBgpPPK2AoJcUTXuL3cGKq4N/rbcVG1UdLZlDkqEliuV3v8DucLSj07DY+rORCaRmfg9FfyblkGaVhSFJHEdXhwNgkbyas+NviQN776z1U+9+Cfshu91PYDgV9x2bmnOErg/1/5bx2pt7T8uy8kRXBR5HDdyMRUsS2dluaErWkVcqAH+Z/uX/nSRetDlXA8RegSsTwc7NoELwQv7S2USxtQzGw4k7i8UlxvBHdvNvESXYxGkau4JElu2F9Wwafanyneiv31ML0ettYTN4pwW2gl3rYP1MXahySaglVpMLnrBsuI4fBhSHKxQlTT4ORuQkmehnhoK4y+91K8wdNLqOt591LwuuRJrZf927Vca6JHFu8qFMRVQdCfMz4EUl2X1enB1yjB9c0wLJ4VV86eqvBQPla8TSn0bc+2p+5oIuCVPRhwyg26pgw07Cc0bkyOoJE/NC3liXz8WpCyDPyNnShpUoey2RNgxN0aFod5t6l3pYauaQmcTHRCVTDnD7nkH0hxPIlO9ZpzAbca9SOvR/\", \"SectorNumber\": 522231}", + "return": "", + "returns": "{}", + "epoch": 1313305 + }, + { + "cid": "bafy2bzacear32z3taevptweb4ulolo4rtnyguuo73lzyowmzls6ff42bhbziw", + "signed_cid": "bafy2bzacear32z3taevptweb4ulolo4rtnyguuo73lzyowmzls6ff42bhbziw", + "from": "f3wbujhwxxei35nbw35fjbbxvpfhpchrcvrdu3zp5zc5j24imwflwzoju3f7k44tpchjlgezocltxywlf23hpa", + "to": "f05", + "name": "PublishStorageDeals", + "method": 4, + "params": 
"gYOCi9gqWCgAAYHiA5IgIAF5oxBRTfpGwWtXlVh25eIxh5FNpNSHII4/5Wjzdj8/GwAAAAgAAAAA9VgxA7Ohd9PEUxPNtY8Vfp081PLIm88mu/K/F1WNCOgvKWzArBxqMfDMFKHJQyQ7w6lZj0QAufFPeC5RbWRUY1RZaWZaZnhxMzRKMUtrN01udnJacFNKRkprRnVLd01WbWdWdzd5eEZVGgAUQUMaACuR1EBIABSqc4LFsChAWGECkhe35Gdbr2c49cpnA3ZOfMZD3bH6wteWc5zOEXYGD50bvOu76AYLX92w2vLXg26oEG/2gmGtzQcq136r8AzDNlm/UGP+M6YIv2zK6qQD22Ye3s6AhbnF8sX3Q9PG4pXCgovYKlgoAAGB4gOSICBvlAbRYyvoq9ieJBjIas9kys73eYhyi+1cy1exfE0aDxsAAAAIAAAAAPVYMQOzoXfTxFMTzbWPFX6dPNTyyJvPJrvyvxdVjQjoLylswKwcajHwzBShyUMkO8OpWY9EALnxT3guUW1iTEZRSnFYRlhHYkZ5TExmTlg0UlY5aW42RjdUQkZ5V2I1MUd1WGRCd0tjOBoAFEGZGgArkdRASAAUqxRm8mkHQFhhApEiw5bZMQEfBZN/BVV2QPCcIajl5FiEKUiDEiWwrZ5f5BeY2xWwdDZhgEpUVFIsEgWbx4VolOCRD33wwcSkEAQOMw3n2pFX458zpziiJtLIr9Mx5lR1S4U5Ocy2i1rccIKL2CpYKAABgeIDkiAgXHglthrR1HLdjpOHzLNiqsoZWslyHtWo2uXMmwP1rxgbAAAABAAAAAD1WDEDjKJm8VicJvX+Xqsct/xDnSICkVuwDPNphcMU9mTwRpehNR0Nbuvi8s9xvZUzrWuNRAC58U94LlFtV3h0RkhTN1R3MXhTeTZDU1hFbVg4TXd6eThYNEFMQ2NxYVAxUGoybTNVMnMaABRBqBoAK5HUQEgAClWNlqeIYUBYYQKFQQ9D3ULBwLeinEep2S5b1pgNdDI+yvhNYyzqm/YWPlURpIJnAamYbGNlNr2KcOoPHKSfAnTC4Fr0fI5L/Vn1cMgHEH61DyQ6SAPFqC0riUbMrGc9RFpNEHDiuds1f5w=", + "args": "{\"Deals\": [{\"Proposal\": {\"Label\": \"QmdTcTYifZfxq34J1Kk7MnvrZpSJFJkFuKwMVmgVw7yxFU\", \"Client\": \"f3woqxpu6ekmj43nmpcv7j2pgu6lejxtzgxpzl6f2vrueoqlzjntakyhdkghymyffbzfbsio6dvfmy643x4y7q\", \"EndEpoch\": 2855380, \"PieceCID\": {\"/\": \"baga6ea4seaqac6ndcbiu36sgyfvvpfkyo3s6emmhsfg2jvehechd7zli6n3d6py\"}, \"Provider\": \"f01308857\", \"PieceSize\": 34359738368, \"StartEpoch\": 1327427, \"VerifiedDeal\": true, \"ClientCollateral\": \"0\", \"ProviderCollateral\": \"5816912626167848\", \"StoragePricePerEpoch\": \"0\"}, \"ClientSignature\": {\"Data\": \"khe35Gdbr2c49cpnA3ZOfMZD3bH6wteWc5zOEXYGD50bvOu76AYLX92w2vLXg26oEG/2gmGtzQcq136r8AzDNlm/UGP+M6YIv2zK6qQD22Ye3s6AhbnF8sX3Q9PG4pXC\", \"Type\": 2}}, {\"Proposal\": {\"Label\": \"QmbLFQJqXFXGbFyLLfNX4RV9in6F7TBFyWb51GuXdBwKc8\", \"Client\": 
\"f3woqxpu6ekmj43nmpcv7j2pgu6lejxtzgxpzl6f2vrueoqlzjntakyhdkghymyffbzfbsio6dvfmy643x4y7q\", \"EndEpoch\": 2855380, \"PieceCID\": {\"/\": \"baga6ea4seaqg7fag2frsx2fl3cpcigginlhwjswo654yq4ul5vomwv5rprgrudy\"}, \"Provider\": \"f01308857\", \"PieceSize\": 34359738368, \"StartEpoch\": 1327513, \"VerifiedDeal\": true, \"ClientCollateral\": \"0\", \"ProviderCollateral\": \"5817603649071367\", \"StoragePricePerEpoch\": \"0\"}, \"ClientSignature\": {\"Data\": \"kSLDltkxAR8Fk38FVXZA8JwhqOXkWIQpSIMSJbCtnl/kF5jbFbB0NmGASlRUUiwSBZvHhWiU4JEPffDBxKQQBA4zDefakVfjnzOnOKIm0siv0zHmVHVLhTk5zLaLWtxw\", \"Type\": 2}}, {\"Proposal\": {\"Label\": \"QmWxtFHS7Tw1xSy6CSXEmX8Mwzy8X4ALCcqaP1Pj2m3U2s\", \"Client\": \"f3rsrgn4kytqtpl7s6vmolp7cdturafek3wagpg2mfymkpmzhqi2l2cni5bvxoxyxsz5y33fjtvvvy3vhmouaq\", \"EndEpoch\": 2855380, \"PieceCID\": {\"/\": \"baga6ea4seaqfy6bfwynndvds3whjhb6mwnrkvsqzllexehwvvdnolte3ap226ga\"}, \"Provider\": \"f01308857\", \"PieceSize\": 17179869184, \"StartEpoch\": 1327528, \"VerifiedDeal\": true, \"ClientCollateral\": \"0\", \"ProviderCollateral\": \"2908816373418081\", \"StoragePricePerEpoch\": \"0\"}, \"ClientSignature\": {\"Data\": \"hUEPQ91CwcC3opxHqdkuW9aYDXQyPsr4TWMs6pv2Fj5VEaSCZwGpmGxjZTa9inDqDxyknwJ0wuBa9HyOS/1Z9XDIBxB+tQ8kOkgDxagtK4lGzKxnPURaTRBw4rnbNX+c\", \"Type\": 2}}]}", + "return": "goMaACpszRoAKmzOGgAqbM9BdA==", + "returns": "{\"IDs\": [2780365, 2780366, 2780367], \"ValidDeals\": [0, 3]}", + "epoch": 1313165 + }, + { + "cid": "bafy2bzaceca4prkb2oqks4cs725cdyjb47ex5wdat6yp3sogib5hkn5vcncsu", + "signed_cid": "bafy2bzaceca4prkb2oqks4cs725cdyjb47ex5wdat6yp3sogib5hkn5vcncsu", + "from": "f3tbhpikpglhqj7qmswiw6nwfgqpgjvmwlcc5ukfnzeymuylyjzp7t2vv66ryv4neebnqe2qxwxh2qiwyd7buq", + "to": "f054464", + "name": "SubmitWindowedPoSt", + "method": 5, + "params": 
"hQ+CggBAggFAgYIIWQGAtOGtRsUT7T8hxIFA4zh69EKTiyW8yqGz90ZYyCW5hpvFD0B2giSQkzP5wLhDP8COjWCXux2Y9gTgUGQP3+Xxmh+0tJ4VaBY3vwCL8Gpj196RC0uwA+pDFiQ196YvFdQGEvAg0GGEqTdE1mRGZxdo0p/tq0ktqkYGxJ8z6NXAnj4q4R9sqU0mkak7tR9A0Plskcgnsw9UvfaKOfSPxoSDz/k4DDomSE/ajISBuo/YcaNlwDfNhvJZrE9wmLv97UTikVDyS2CEqXvhAlPszzce0UTf5sCfuhmMISrt11iGcf7ENhFGnHJJV6DTjq9gXBOppDKLg34aQ49vW9KS12sQuV9wF2NrcViCrq/8qKo5neWW3LDDCnG4XitH2y+1iFnEBY0+MGIwnNyQMCjFY6um8cKyZWWm4EluuFXH0J4VI86Tz8qvHDJexPD7OoZ3rOttkJMksHLiSktnDo91LpW7OdAN+iSv6Oygx2FjtDvMsDrK9ec4TlONtWJYZZxZKiuuGgAUCbhYIE0zNyrN1RzzemrLDx2lG32SANfm1t1jizkTL9JkO1pX", + "args": "{\"Proofs\": [{\"PoStProof\": 8, \"ProofBytes\": \"tOGtRsUT7T8hxIFA4zh69EKTiyW8yqGz90ZYyCW5hpvFD0B2giSQkzP5wLhDP8COjWCXux2Y9gTgUGQP3+Xxmh+0tJ4VaBY3vwCL8Gpj196RC0uwA+pDFiQ196YvFdQGEvAg0GGEqTdE1mRGZxdo0p/tq0ktqkYGxJ8z6NXAnj4q4R9sqU0mkak7tR9A0Plskcgnsw9UvfaKOfSPxoSDz/k4DDomSE/ajISBuo/YcaNlwDfNhvJZrE9wmLv97UTikVDyS2CEqXvhAlPszzce0UTf5sCfuhmMISrt11iGcf7ENhFGnHJJV6DTjq9gXBOppDKLg34aQ49vW9KS12sQuV9wF2NrcViCrq/8qKo5neWW3LDDCnG4XitH2y+1iFnEBY0+MGIwnNyQMCjFY6um8cKyZWWm4EluuFXH0J4VI86Tz8qvHDJexPD7OoZ3rOttkJMksHLiSktnDo91LpW7OdAN+iSv6Oygx2FjtDvMsDrK9ec4TlONtWJYZZxZKiuu\"}], \"Deadline\": 15, \"Partitions\": [{\"Index\": 0, \"Skipped\": [0]}, {\"Index\": 1, \"Skipped\": [0]}], \"ChainCommitRand\": \"TTM3Ks3VHPN6assPHaUbfZIA1+bW3WOLORMv0mQ7Wlc=\", \"ChainCommitEpoch\": 1313208}", + "return": "", + "returns": "{}", + "epoch": 1313244 + }, + { + "cid": "bafy2bzacecfzupggl37u6xouegkkyil43iuk537evpfylb3tv2cmoizl56jiq", + "signed_cid": "bafy2bzaceaqwuevt2xtcg3mski57p7tqsddug4k66gfm7l2rjotu62nl5uqhk", + "from": "f1tjkexqva5dieqajbwzdv53oc2eusoxakndzw6xq", + "to": "f066563", + "name": "TerminateSectors", + "method": 9, + "params": "gYGDGB8ARMDc1iA=", + "args": "{\"Terminations\": [{\"Sectors\": [105318, 1], \"Deadline\": 31, \"Partition\": 0}]}", + "return": "gfU=", + "returns": "{\"Done\": true}", + "epoch": 1313775 + }, + { + "cid": 
"bafy2bzaceaztftt42liz3bzn7olfob6dhbh6frdy42zf6mx4ixsgrrn2jfvhq", + "signed_cid": "bafy2bzacebagx4bvh3tzsnr6jlv6fyhwcpq6l6mxyl6pyux2xaumxpltpute6", + "from": "f17dcx5hzrn66bvbncrsi33r23yjfmikjjw6dm5hq", + "to": "f1qkz22dohzpl7w3xgnmb2fzmbkyibilm5d7r4hhq", + "name": "transfer", + "method": 0, + "params": "", + "args": null, + "return": "", + "returns": null, + "epoch": 1314580 + }, + { + "cid": "bafy2bzacecmsrcizhqk3q3ay2elv46eu67nfa2okw7krzkdwya7yrjy2oklpc", + "signed_cid": "bafy2bzacecmsrcizhqk3q3ay2elv46eu67nfa2okw7krzkdwya7yrjy2oklpc", + "from": "f3qkep6wkjcmty4hf3qlvnxprzn3db3syhacotyys3rsvnulu6uubdwsm5zuc32ekcqkyvub5g22exznraaqxq", + "to": "f0392813", + "name": "WithdrawBalance", + "method": 16, + "params": "gUsAPKil82716rQAAA==", + "args": "{\"AmountRequested\": \"286453000000000000000000\"}", + "return": "SgAROlpFgAT/jDo=", + "returns": "\"317799398871308143674\"", + "epoch": 1313281 + } +] \ No newline at end of file diff --git a/venus-shared/utils/utils.go b/venus-shared/utils/utils.go new file mode 100644 index 0000000000..ac7b057887 --- /dev/null +++ b/venus-shared/utils/utils.go @@ -0,0 +1,70 @@ +package utils + +import ( + "context" + "fmt" + + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/types" +) + +var NetworkNameWithNetworkType = map[types.NetworkName]types.NetworkType{ + types.NetworkNameMain: types.NetworkMainnet, + types.NetworkNameCalibration: types.NetworkCalibnet, + types.NetworkNameButterfly: types.NetworkButterfly, + types.NetworkNameInterop: types.NetworkInterop, + types.NetworkNameIntegration: types.Integrationnet, + types.NetworkNameForce: types.NetworkForce, +} + +var NetworkTypeWithNetworkName = func() map[types.NetworkType]types.NetworkName { + typeName := make(map[types.NetworkType]types.NetworkName, len(NetworkNameWithNetworkType)) + for nt, nn := range NetworkNameWithNetworkType { + typeName[nn] = nt + } + + return typeName +}() + +func 
NetworkNameToNetworkType(networkName types.NetworkName) (types.NetworkType, error) { + if len(networkName) == 0 { + return types.NetworkDefault, fmt.Errorf("network name is empty") + } + nt, ok := NetworkNameWithNetworkType[networkName] + if ok { + return nt, nil + } + // 2k network do not have exact network names + return types.Network2k, nil +} + +func NetworkTypeToNetworkName(networkType types.NetworkType) types.NetworkName { + nn, ok := NetworkTypeWithNetworkName[networkType] + if ok { + return nn + } + + // 2k network do not have exact network names + return "" +} + +type networkNameGetter interface { + StateNetworkName(ctx context.Context) (types.NetworkName, error) +} + +func LoadBuiltinActors(ctx context.Context, getter networkNameGetter) error { + networkName, err := getter.StateNetworkName(ctx) + if err != nil { + return err + } + nt, err := NetworkNameToNetworkType(networkName) + if err != nil { + return err + } + if err := actors.SetNetworkBundle(int(nt)); err != nil { + return err + } + ReloadMethodsMap() + + return nil +} diff --git a/venus-shared/utils/utils_test.go b/venus-shared/utils/utils_test.go new file mode 100644 index 0000000000..8d18797036 --- /dev/null +++ b/venus-shared/utils/utils_test.go @@ -0,0 +1,63 @@ +package utils + +import ( + "context" + "testing" + + tf "github.com/filecoin-project/venus/pkg/testhelpers/testflags" + "github.com/filecoin-project/venus/venus-shared/actors" + "github.com/filecoin-project/venus/venus-shared/api/chain/v1/mock" + "github.com/filecoin-project/venus/venus-shared/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestNetworkNamtToNetworkType(t *testing.T) { + tf.UnitTest(t) + for nt, nn := range NetworkTypeWithNetworkName { + got, err := NetworkNameToNetworkType(nn) + assert.Nil(t, err) + assert.Equal(t, nt, got) + } + + nt, err2 := NetworkNameToNetworkType("2k") + assert.Nil(t, err2) + assert.Equal(t, types.Network2k, nt) +} + +func TestNetworkTypeToNetworkName(t 
*testing.T) { + tf.UnitTest(t) + for nt, nn := range NetworkTypeWithNetworkName { + got := NetworkTypeToNetworkName(nt) + assert.Equal(t, nn, got) + } + assert.Equal(t, types.NetworkName(""), NetworkTypeToNetworkName(types.Network2k)) +} + +func TestLoadBuiltinActors(t *testing.T) { + tf.UnitTest(t) + + ctx := context.Background() + + ctrl := gomock.NewController(t) + full := mock.NewMockFullNode(ctrl) + + for nn := range NetworkNameWithNetworkType { + full.EXPECT().StateNetworkName(ctx).Return(nn, nil) + + assert.Nil(t, LoadBuiltinActors(ctx, full)) + + for _, actorsMetadata := range actors.EmbeddedBuiltinActorsMetadata { + if actorsMetadata.Network == string(nn) { + for name, actor := range actorsMetadata.Actors { + res, ok := actors.GetActorCodeID(actorsMetadata.Version, name) + assert.True(t, ok) + assert.Equal(t, actor, res) + + _, ok2 := MethodsMap[actor] + assert.True(t, ok2) + } + } + } + } +}